aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--Documentation/ABI/testing/sysfs-bus-i2c-devices-lm353315
-rw-r--r--Documentation/ABI/testing/sysfs-class-backlight-driver-lm353348
-rw-r--r--Documentation/ABI/testing/sysfs-class-led-driver-lm353365
-rw-r--r--Documentation/SubmittingPatches3
-rw-r--r--Documentation/arm/SPEAr/overview.txt32
-rw-r--r--Documentation/cgroups/memory.txt37
-rw-r--r--Documentation/cgroups/resource_counter.txt8
-rw-r--r--Documentation/cris/README62
-rw-r--r--Documentation/devicetree/bindings/arm/fsl.txt12
-rw-r--r--Documentation/devicetree/bindings/arm/samsung/interrupt-combiner.txt52
-rw-r--r--Documentation/devicetree/bindings/arm/spear-timer.txt18
-rw-r--r--Documentation/devicetree/bindings/arm/spear.txt14
-rw-r--r--Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt11
-rw-r--r--Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt19
-rw-r--r--Documentation/devicetree/bindings/dma/snps-dma.txt17
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mm-lantiq.txt38
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-mxs.txt87
-rw-r--r--Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt42
-rw-r--r--Documentation/devicetree/bindings/i2c/i2c-mxs.txt16
-rw-r--r--Documentation/devicetree/bindings/i2c/mux.txt60
-rw-r--r--Documentation/devicetree/bindings/i2c/samsung-i2c.txt8
-rw-r--r--Documentation/devicetree/bindings/i2c/xiic.txt22
-rw-r--r--Documentation/devicetree/bindings/mfd/da9052-i2c.txt60
-rw-r--r--Documentation/devicetree/bindings/mfd/tps65910.txt133
-rw-r--r--Documentation/devicetree/bindings/mfd/twl6040.txt62
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-esdhc.txt6
-rw-r--r--Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt2
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt3
-rw-r--r--Documentation/devicetree/bindings/mmc/mmc.txt27
-rw-r--r--Documentation/devicetree/bindings/mmc/mmci.txt19
-rw-r--r--Documentation/devicetree/bindings/mmc/mxs-mmc.txt25
-rw-r--r--Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt4
-rw-r--r--Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt4
-rw-r--r--Documentation/devicetree/bindings/net/fsl-fec.txt2
-rw-r--r--Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt47
-rw-r--r--Documentation/devicetree/bindings/rtc/lpc32xx-rtc.txt15
-rw-r--r--Documentation/devicetree/bindings/rtc/spear-rtc.txt17
-rw-r--r--Documentation/devicetree/bindings/sound/omap-dmic.txt21
-rw-r--r--Documentation/devicetree/bindings/sound/omap-mcpdm.txt21
-rw-r--r--Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt2
-rw-r--r--Documentation/devicetree/bindings/usb/tegra-usb.txt3
-rw-r--r--Documentation/dma-buf-sharing.txt109
-rw-r--r--Documentation/feature-removal-schedule.txt18
-rw-r--r--Documentation/filesystems/Locking2
-rw-r--r--Documentation/filesystems/ext3.txt6
-rw-r--r--Documentation/filesystems/porting16
-rw-r--r--Documentation/filesystems/proc.txt2
-rw-r--r--Documentation/filesystems/vfs.txt13
-rw-r--r--Documentation/i2c/muxes/i2c-mux-gpio (renamed from Documentation/i2c/muxes/gpio-i2cmux)12
-rw-r--r--Documentation/initrd.txt4
-rw-r--r--Documentation/kbuild/kbuild.txt19
-rw-r--r--Documentation/kbuild/kconfig.txt18
-rw-r--r--Documentation/kernel-parameters.txt13
-rw-r--r--Documentation/leds/ledtrig-transient.txt152
-rw-r--r--Documentation/virtual/kvm/api.txt281
-rw-r--r--Documentation/virtual/kvm/cpuid.txt6
-rw-r--r--Documentation/virtual/kvm/msr.txt4
-rw-r--r--Documentation/vm/transhuge.txt62
-rw-r--r--MAINTAINERS51
-rw-r--r--Makefile231
-rw-r--r--arch/Kconfig3
-rw-r--r--arch/alpha/include/asm/kvm_para.h1
-rw-r--r--arch/arm/Kconfig16
-rw-r--r--arch/arm/Kconfig.debug37
-rw-r--r--arch/arm/Makefile8
-rw-r--r--arch/arm/boot/dts/exynos5250-smdk5250.dts48
-rw-r--r--arch/arm/boot/dts/exynos5250.dtsi60
-rw-r--r--arch/arm/boot/dts/imx23-evk.dts43
-rw-r--r--arch/arm/boot/dts/imx23.dtsi295
-rw-r--r--arch/arm/boot/dts/imx27-phytec-phycore.dts8
-rw-r--r--arch/arm/boot/dts/imx27.dtsi14
-rw-r--r--arch/arm/boot/dts/imx28-evk.dts114
-rw-r--r--arch/arm/boot/dts/imx28.dtsi497
-rw-r--r--arch/arm/boot/dts/imx51-babbage.dts40
-rw-r--r--arch/arm/boot/dts/imx51.dtsi41
-rw-r--r--arch/arm/boot/dts/imx53-ard.dts6
-rw-r--r--arch/arm/boot/dts/imx53-evk.dts8
-rw-r--r--arch/arm/boot/dts/imx53-qsb.dts121
-rw-r--r--arch/arm/boot/dts/imx53-smd.dts16
-rw-r--r--arch/arm/boot/dts/imx53.dtsi45
-rw-r--r--arch/arm/boot/dts/imx6q-arm2.dts15
-rw-r--r--arch/arm/boot/dts/imx6q-sabrelite.dts50
-rw-r--r--arch/arm/boot/dts/imx6q-sabresd.dts53
-rw-r--r--arch/arm/boot/dts/imx6q.dtsi171
-rw-r--r--arch/arm/boot/dts/omap3-beagle.dts2
-rw-r--r--arch/arm/boot/dts/omap4-panda.dts4
-rw-r--r--arch/arm/boot/dts/omap4-sdp.dts6
-rw-r--r--arch/arm/boot/dts/spear1310-evb.dts292
-rw-r--r--arch/arm/boot/dts/spear1310.dtsi184
-rw-r--r--arch/arm/boot/dts/spear1340-evb.dts308
-rw-r--r--arch/arm/boot/dts/spear1340.dtsi56
-rw-r--r--arch/arm/boot/dts/spear13xx.dtsi262
-rw-r--r--arch/arm/boot/dts/spear300-evb.dts25
-rw-r--r--arch/arm/boot/dts/spear310-evb.dts20
-rw-r--r--arch/arm/boot/dts/spear320-evb.dts25
-rw-r--r--arch/arm/boot/dts/spear3xx.dtsi6
-rw-r--r--arch/arm/boot/dts/spear600-evb.dts29
-rw-r--r--arch/arm/boot/dts/spear600.dtsi6
-rw-r--r--arch/arm/boot/dts/tegra-cardhu.dts110
-rw-r--r--arch/arm/boot/dts/tegra-harmony.dts118
-rw-r--r--arch/arm/boot/dts/tegra-paz00.dts128
-rw-r--r--arch/arm/boot/dts/tegra-seaboard.dts213
-rw-r--r--arch/arm/boot/dts/tegra-trimslice.dts99
-rw-r--r--arch/arm/boot/dts/tegra-ventana.dts96
-rw-r--r--arch/arm/boot/dts/tegra20.dtsi275
-rw-r--r--arch/arm/boot/dts/tegra30.dtsi305
-rw-r--r--arch/arm/common/dmabounce.c84
-rw-r--r--arch/arm/configs/imx_v4_v5_defconfig3
-rw-r--r--arch/arm/configs/imx_v6_v7_defconfig8
-rw-r--r--arch/arm/configs/mxs_defconfig1
-rw-r--r--arch/arm/configs/prima2_defconfig69
-rw-r--r--arch/arm/configs/spear13xx_defconfig95
-rw-r--r--arch/arm/configs/spear3xx_defconfig4
-rw-r--r--arch/arm/configs/spear6xx_defconfig5
-rw-r--r--arch/arm/configs/tegra_defconfig11
-rw-r--r--arch/arm/include/asm/device.h4
-rw-r--r--arch/arm/include/asm/dma-contiguous.h15
-rw-r--r--arch/arm/include/asm/dma-iommu.h34
-rw-r--r--arch/arm/include/asm/dma-mapping.h407
-rw-r--r--arch/arm/include/asm/hardware/pl080.h2
-rw-r--r--arch/arm/include/asm/io.h24
-rw-r--r--arch/arm/include/asm/kvm_para.h1
-rw-r--r--arch/arm/include/asm/mach/arch.h1
-rw-r--r--arch/arm/include/asm/mach/map.h1
-rw-r--r--arch/arm/include/asm/thread_info.h8
-rw-r--r--arch/arm/kernel/entry-common.S8
-rw-r--r--arch/arm/kernel/ptrace.c3
-rw-r--r--arch/arm/kernel/setup.c17
-rw-r--r--arch/arm/kernel/signal.c85
-rw-r--r--arch/arm/kernel/signal.h2
-rw-r--r--arch/arm/kernel/traps.c2
-rw-r--r--arch/arm/mach-at91/at91sam9g45_devices.c1
-rw-r--r--arch/arm/mach-at91/include/mach/at_hdmac.h26
-rw-r--r--arch/arm/mach-davinci/board-da830-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-da850-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm355-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm355-leopard.c1
-rw-r--r--arch/arm/mach-davinci/board-dm365-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm644x-evm.c1
-rw-r--r--arch/arm/mach-davinci/board-dm646x-evm.c2
-rw-r--r--arch/arm/mach-davinci/board-mityomapl138.c1
-rw-r--r--arch/arm/mach-davinci/board-neuros-osd2.c1
-rw-r--r--arch/arm/mach-davinci/board-omapl138-hawk.c1
-rw-r--r--arch/arm/mach-davinci/board-sffsdr.c1
-rw-r--r--arch/arm/mach-davinci/board-tnetv107x-evm.c1
-rw-r--r--arch/arm/mach-davinci/clock.c3
-rw-r--r--arch/arm/mach-davinci/common.c7
-rw-r--r--arch/arm/mach-davinci/cpufreq.c3
-rw-r--r--arch/arm/mach-davinci/dma.c69
-rw-r--r--arch/arm/mach-davinci/include/mach/common.h19
-rw-r--r--arch/arm/mach-davinci/include/mach/debug-macro.S58
-rw-r--r--arch/arm/mach-davinci/include/mach/hardware.h2
-rw-r--r--arch/arm/mach-davinci/include/mach/serial.h10
-rw-r--r--arch/arm/mach-davinci/include/mach/uncompress.h30
-rw-r--r--arch/arm/mach-davinci/pm.c3
-rw-r--r--arch/arm/mach-dove/common.c39
-rw-r--r--arch/arm/mach-dove/dove-db-setup.c1
-rw-r--r--arch/arm/mach-ep93xx/adssphere.c1
-rw-r--r--arch/arm/mach-ep93xx/core.c7
-rw-r--r--arch/arm/mach-ep93xx/crunch.c4
-rw-r--r--arch/arm/mach-ep93xx/edb93xx.c8
-rw-r--r--arch/arm/mach-ep93xx/gesbc9312.c1
-rw-r--r--arch/arm/mach-ep93xx/include/mach/platform.h7
-rw-r--r--arch/arm/mach-ep93xx/micro9.c4
-rw-r--r--arch/arm/mach-ep93xx/simone.c1
-rw-r--r--arch/arm/mach-ep93xx/snappercl15.c1
-rw-r--r--arch/arm/mach-ep93xx/ts72xx.c1
-rw-r--r--arch/arm/mach-ep93xx/vision_ep9307.c1
-rw-r--r--arch/arm/mach-exynos/Kconfig24
-rw-r--r--arch/arm/mach-exynos/Makefile7
-rw-r--r--arch/arm/mach-exynos/Makefile.boot3
-rw-r--r--arch/arm/mach-exynos/clock-exynos4.c79
-rw-r--r--arch/arm/mach-exynos/clock-exynos4.h2
-rw-r--r--arch/arm/mach-exynos/clock-exynos4210.c11
-rw-r--r--arch/arm/mach-exynos/clock-exynos4212.c38
-rw-r--r--arch/arm/mach-exynos/clock-exynos5.c141
-rw-r--r--arch/arm/mach-exynos/common.c187
-rw-r--r--arch/arm/mach-exynos/common.h7
-rw-r--r--arch/arm/mach-exynos/dev-drm.c29
-rw-r--r--arch/arm/mach-exynos/dev-sysmmu.c457
-rw-r--r--arch/arm/mach-exynos/dma.c141
-rw-r--r--arch/arm/mach-exynos/include/mach/gpio.h9
-rw-r--r--arch/arm/mach-exynos/include/mach/irqs.h65
-rw-r--r--arch/arm/mach-exynos/include/mach/map.h45
-rw-r--r--arch/arm/mach-exynos/include/mach/regs-clock.h7
-rw-r--r--arch/arm/mach-exynos/include/mach/regs-pmu.h10
-rw-r--r--arch/arm/mach-exynos/include/mach/regs-sysmmu.h28
-rw-r--r--arch/arm/mach-exynos/include/mach/spi-clocks.h2
-rw-r--r--arch/arm/mach-exynos/include/mach/sysmmu.h88
-rw-r--r--arch/arm/mach-exynos/mach-armlex4210.c2
-rw-r--r--arch/arm/mach-exynos/mach-exynos4-dt.c1
-rw-r--r--arch/arm/mach-exynos/mach-exynos5-dt.c5
-rw-r--r--arch/arm/mach-exynos/mach-nuri.c1
-rw-r--r--arch/arm/mach-exynos/mach-origen.c1
-rw-r--r--arch/arm/mach-exynos/mach-smdk4x12.c1
-rw-r--r--arch/arm/mach-exynos/mach-smdkv310.c2
-rw-r--r--arch/arm/mach-exynos/mach-universal_c210.c1
-rw-r--r--arch/arm/mach-exynos/mct.c17
-rw-r--r--arch/arm/mach-exynos/pm.c4
-rw-r--r--arch/arm/mach-exynos/pm_domains.c3
-rw-r--r--arch/arm/mach-exynos/pmu.c24
-rw-r--r--arch/arm/mach-imx/Kconfig8
-rw-r--r--arch/arm/mach-imx/Makefile19
-rw-r--r--arch/arm/mach-imx/Makefile.boot3
-rw-r--r--arch/arm/mach-imx/clk-busy.c189
-rw-r--r--arch/arm/mach-imx/clk-gate2.c118
-rw-r--r--arch/arm/mach-imx/clk-imx1.c115
-rw-r--r--arch/arm/mach-imx/clk-imx21.c186
-rw-r--r--arch/arm/mach-imx/clk-imx25.c248
-rw-r--r--arch/arm/mach-imx/clk-imx27.c290
-rw-r--r--arch/arm/mach-imx/clk-imx31.c182
-rw-r--r--arch/arm/mach-imx/clk-imx35.c278
-rw-r--r--arch/arm/mach-imx/clk-imx51-imx53.c506
-rw-r--r--arch/arm/mach-imx/clk-imx6q.c444
-rw-r--r--arch/arm/mach-imx/clk-pfd.c147
-rw-r--r--arch/arm/mach-imx/clk-pllv1.c66
-rw-r--r--arch/arm/mach-imx/clk-pllv2.c249
-rw-r--r--arch/arm/mach-imx/clk-pllv3.c419
-rw-r--r--arch/arm/mach-imx/clk.h83
-rw-r--r--arch/arm/mach-imx/clock-imx1.c636
-rw-r--r--arch/arm/mach-imx/clock-imx21.c1239
-rw-r--r--arch/arm/mach-imx/clock-imx25.c346
-rw-r--r--arch/arm/mach-imx/clock-imx27.c785
-rw-r--r--arch/arm/mach-imx/clock-imx31.c630
-rw-r--r--arch/arm/mach-imx/clock-imx35.c536
-rw-r--r--arch/arm/mach-imx/clock-imx6q.c2111
-rw-r--r--arch/arm/mach-imx/clock-mx51-mx53.c1675
-rw-r--r--arch/arm/mach-imx/cpu-imx5.c6
-rw-r--r--arch/arm/mach-imx/crmregs-imx3.h79
-rw-r--r--arch/arm/mach-imx/imx51-dt.c1
-rw-r--r--arch/arm/mach-imx/imx53-dt.c19
-rw-r--r--arch/arm/mach-imx/lluart.c6
-rw-r--r--arch/arm/mach-imx/mach-cpuimx51sd.c1
-rw-r--r--arch/arm/mach-imx/mach-imx6q.c55
-rw-r--r--arch/arm/mach-imx/mach-mx51_3ds.c1
-rw-r--r--arch/arm/mach-imx/mach-mx51_babbage.c7
-rw-r--r--arch/arm/mach-imx/mach-mx51_efikamx.c42
-rw-r--r--arch/arm/mach-imx/mach-mx51_efikasb.c28
-rw-r--r--arch/arm/mach-imx/mach-pcm037.c6
-rw-r--r--arch/arm/mach-imx/mach-pcm037_eet.c5
-rw-r--r--arch/arm/mach-imx/mm-imx3.c6
-rw-r--r--arch/arm/mach-imx/mm-imx5.c6
-rw-r--r--arch/arm/mach-imx/pcm037.h6
-rw-r--r--arch/arm/mach-imx/pm-imx3.c4
-rw-r--r--arch/arm/mach-kirkwood/board-dreamplug.c1
-rw-r--r--arch/arm/mach-kirkwood/board-dt.c3
-rw-r--r--arch/arm/mach-kirkwood/common.c286
-rw-r--r--arch/arm/mach-kirkwood/common.h1
-rw-r--r--arch/arm/mach-kirkwood/include/mach/bridge-regs.h16
-rw-r--r--arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/pcie.c25
-rw-r--r--arch/arm/mach-kirkwood/rd88f6192-nas-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/t5325-setup.c1
-rw-r--r--arch/arm/mach-kirkwood/tsx1x-common.c1
-rw-r--r--arch/arm/mach-msm/board-halibut.c6
-rw-r--r--arch/arm/mach-msm/board-mahimahi.c6
-rw-r--r--arch/arm/mach-msm/board-msm7x27.c9
-rw-r--r--arch/arm/mach-msm/board-msm7x30.c8
-rw-r--r--arch/arm/mach-msm/board-msm8960.c7
-rw-r--r--arch/arm/mach-msm/board-msm8x60.c10
-rw-r--r--arch/arm/mach-msm/board-qsd8x50.c7
-rw-r--r--arch/arm/mach-msm/board-sapphire.c6
-rw-r--r--arch/arm/mach-msm/board-trout.c6
-rw-r--r--arch/arm/mach-msm/include/mach/board.h6
-rw-r--r--arch/arm/mach-msm/smd_debug.c3
-rw-r--r--arch/arm/mach-mv78xx0/common.c45
-rw-r--r--arch/arm/mach-mxs/Kconfig10
-rw-r--r--arch/arm/mach-mxs/Makefile6
-rw-r--r--arch/arm/mach-mxs/clock-mx23.c536
-rw-r--r--arch/arm/mach-mxs/clock-mx28.c803
-rw-r--r--arch/arm/mach-mxs/clock.c211
-rw-r--r--arch/arm/mach-mxs/devices/Kconfig1
-rw-r--r--arch/arm/mach-mxs/devices/platform-dma.c21
-rw-r--r--arch/arm/mach-mxs/devices/platform-gpio-mxs.c24
-rw-r--r--arch/arm/mach-mxs/devices/platform-mxs-mmc.c21
-rw-r--r--arch/arm/mach-mxs/include/mach/clock.h62
-rw-r--r--arch/arm/mach-mxs/include/mach/common.h11
-rw-r--r--arch/arm/mach-mxs/include/mach/devices-common.h3
-rw-r--r--arch/arm/mach-mxs/mach-mx28evk.c2
-rw-r--r--arch/arm/mach-mxs/mach-mxs.c121
-rw-r--r--arch/arm/mach-mxs/mm.c16
-rw-r--r--arch/arm/mach-mxs/regs-clkctrl-mx23.h331
-rw-r--r--arch/arm/mach-mxs/regs-clkctrl-mx28.h486
-rw-r--r--arch/arm/mach-mxs/system.c16
-rw-r--r--arch/arm/mach-mxs/timer.c11
-rw-r--r--arch/arm/mach-omap1/board-ams-delta.c8
-rw-r--r--arch/arm/mach-omap1/board-fsample.c1
-rw-r--r--arch/arm/mach-omap1/board-generic.c1
-rw-r--r--arch/arm/mach-omap1/board-h2.c1
-rw-r--r--arch/arm/mach-omap1/board-h3.c1
-rw-r--r--arch/arm/mach-omap1/board-htcherald.c1
-rw-r--r--arch/arm/mach-omap1/board-innovator.c1
-rw-r--r--arch/arm/mach-omap1/board-nokia770.c1
-rw-r--r--arch/arm/mach-omap1/board-osk.c1
-rw-r--r--arch/arm/mach-omap1/board-palmte.c1
-rw-r--r--arch/arm/mach-omap1/board-palmtt.c1
-rw-r--r--arch/arm/mach-omap1/board-palmz71.c1
-rw-r--r--arch/arm/mach-omap1/board-perseus2.c1
-rw-r--r--arch/arm/mach-omap1/board-sx1.c1
-rw-r--r--arch/arm/mach-omap1/board-voiceblue.c1
-rw-r--r--arch/arm/mach-omap1/common.h19
-rw-r--r--arch/arm/mach-omap1/devices.c121
-rw-r--r--arch/arm/mach-omap1/io.c5
-rw-r--r--arch/arm/mach-omap1/serial.c3
-rw-r--r--arch/arm/mach-omap1/time.c16
-rw-r--r--arch/arm/mach-omap1/timer32k.c28
-rw-r--r--arch/arm/mach-omap2/Kconfig8
-rw-r--r--arch/arm/mach-omap2/Makefile167
-rw-r--r--arch/arm/mach-omap2/board-2430sdp.c1
-rw-r--r--arch/arm/mach-omap2/board-3430sdp.c1
-rw-r--r--arch/arm/mach-omap2/board-3630sdp.c1
-rw-r--r--arch/arm/mach-omap2/board-4430sdp.c1
-rw-r--r--arch/arm/mach-omap2/board-am3517crane.c1
-rw-r--r--arch/arm/mach-omap2/board-am3517evm.c1
-rw-r--r--arch/arm/mach-omap2/board-apollon.c1
-rw-r--r--arch/arm/mach-omap2/board-cm-t35.c2
-rw-r--r--arch/arm/mach-omap2/board-cm-t3517.c1
-rw-r--r--arch/arm/mach-omap2/board-devkit8000.c1
-rw-r--r--arch/arm/mach-omap2/board-generic.c1
-rw-r--r--arch/arm/mach-omap2/board-h4.c1
-rw-r--r--arch/arm/mach-omap2/board-igep0020.c2
-rw-r--r--arch/arm/mach-omap2/board-ldp.c1
-rw-r--r--arch/arm/mach-omap2/board-n8x0.c3
-rw-r--r--arch/arm/mach-omap2/board-omap3beagle.c1
-rw-r--r--arch/arm/mach-omap2/board-omap3evm.c1
-rw-r--r--arch/arm/mach-omap2/board-omap3logic.c2
-rw-r--r--arch/arm/mach-omap2/board-omap3pandora.c1
-rw-r--r--arch/arm/mach-omap2/board-omap3stalker.c1
-rw-r--r--arch/arm/mach-omap2/board-omap3touchbook.c1
-rw-r--r--arch/arm/mach-omap2/board-omap4panda.c1
-rw-r--r--arch/arm/mach-omap2/board-overo.c1
-rw-r--r--arch/arm/mach-omap2/board-rm680.c2
-rw-r--r--arch/arm/mach-omap2/board-rx51.c1
-rw-r--r--arch/arm/mach-omap2/board-ti8168evm.c2
-rw-r--r--arch/arm/mach-omap2/board-zoom.c2
-rw-r--r--arch/arm/mach-omap2/common.h51
-rw-r--r--arch/arm/mach-omap2/devices.c19
-rw-r--r--arch/arm/mach-omap2/dma.c11
-rw-r--r--arch/arm/mach-omap2/dsp.c27
-rw-r--r--arch/arm/mach-omap2/gpmc.c30
-rw-r--r--arch/arm/mach-omap2/hsmmc.c8
-rw-r--r--arch/arm/mach-omap2/id.c7
-rw-r--r--arch/arm/mach-omap2/include/mach/omap-wakeupgen.h8
-rw-r--r--arch/arm/mach-omap2/io.c101
-rw-r--r--arch/arm/mach-omap2/iomap.h28
-rw-r--r--arch/arm/mach-omap2/irq.c2
-rw-r--r--arch/arm/mach-omap2/mux.c3
-rw-r--r--arch/arm/mach-omap2/omap_hwmod_3xxx_data.c2
-rw-r--r--arch/arm/mach-omap2/pm.c3
-rw-r--r--arch/arm/mach-omap2/pm24xx.c17
-rw-r--r--arch/arm/mach-omap2/pm34xx.c7
-rw-r--r--arch/arm/mach-omap2/pm44xx.c6
-rw-r--r--arch/arm/mach-omap2/powerdomains3xxx_data.c2
-rw-r--r--arch/arm/mach-omap2/timer.c118
-rw-r--r--arch/arm/mach-omap2/usb-musb.c2
-rw-r--r--arch/arm/mach-omap2/voltagedomains3xxx_data.c2
-rw-r--r--arch/arm/mach-orion5x/common.c27
-rw-r--r--arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c1
-rw-r--r--arch/arm/mach-pnx4008/core.c12
-rw-r--r--arch/arm/mach-pnx4008/pm.c4
-rw-r--r--arch/arm/mach-prima2/common.h6
-rw-r--r--arch/arm/mach-prima2/pm.c3
-rw-r--r--arch/arm/mach-prima2/prima2.c6
-rw-r--r--arch/arm/mach-s3c24xx/Kconfig5
-rw-r--r--arch/arm/mach-s3c24xx/Makefile7
-rw-r--r--arch/arm/mach-s3c24xx/clock-s3c2416.c1
-rw-r--r--arch/arm/mach-s3c24xx/clock-s3c2443.c6
-rw-r--r--arch/arm/mach-s3c24xx/common-s3c2443.c15
-rw-r--r--arch/arm/mach-s3c24xx/common.c (renamed from arch/arm/plat-s3c24xx/cpu.c)69
-rw-r--r--arch/arm/mach-s3c24xx/dma-s3c2443.c16
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/dma.h4
-rw-r--r--arch/arm/mach-s3c24xx/include/mach/map.h5
-rw-r--r--arch/arm/mach-s3c24xx/irq-pm.c (renamed from arch/arm/plat-s3c24xx/irq-pm.c)0
-rw-r--r--arch/arm/mach-s3c24xx/pm.c (renamed from arch/arm/plat-s3c24xx/pm.c)0
-rw-r--r--arch/arm/mach-s3c24xx/setup-spi.c39
-rw-r--r--arch/arm/mach-s3c24xx/sleep.S (renamed from arch/arm/plat-s3c24xx/sleep.S)0
-rw-r--r--arch/arm/mach-s3c64xx/common.c5
-rw-r--r--arch/arm/mach-s3c64xx/common.h7
-rw-r--r--arch/arm/mach-s3c64xx/mach-anw6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-crag6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-hmt.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-mini6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-ncp.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-real6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-smartq5.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-smartq7.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6400.c1
-rw-r--r--arch/arm/mach-s3c64xx/mach-smdk6410.c1
-rw-r--r--arch/arm/mach-s3c64xx/pm.c3
-rw-r--r--arch/arm/mach-sa1100/assabet.c1
-rw-r--r--arch/arm/mach-sa1100/badge4.c1
-rw-r--r--arch/arm/mach-sa1100/cerf.c1
-rw-r--r--arch/arm/mach-sa1100/collie.c1
-rw-r--r--arch/arm/mach-sa1100/generic.c4
-rw-r--r--arch/arm/mach-sa1100/generic.h7
-rw-r--r--arch/arm/mach-sa1100/h3100.c1
-rw-r--r--arch/arm/mach-sa1100/h3600.c1
-rw-r--r--arch/arm/mach-sa1100/hackkit.c1
-rw-r--r--arch/arm/mach-sa1100/jornada720.c1
-rw-r--r--arch/arm/mach-sa1100/lart.c1
-rw-r--r--arch/arm/mach-sa1100/nanoengine.c1
-rw-r--r--arch/arm/mach-sa1100/neponset.c1
-rw-r--r--arch/arm/mach-sa1100/pleb.c1
-rw-r--r--arch/arm/mach-sa1100/pm.c4
-rw-r--r--arch/arm/mach-sa1100/shannon.c1
-rw-r--r--arch/arm/mach-sa1100/simpad.c1
-rw-r--r--arch/arm/mach-shmobile/Makefile2
-rw-r--r--arch/arm/mach-shmobile/board-ag5evm.c1
-rw-r--r--arch/arm/mach-shmobile/board-ap4evb.c1
-rw-r--r--arch/arm/mach-shmobile/board-bonito.c1
-rw-r--r--arch/arm/mach-shmobile/board-g3evm.c1
-rw-r--r--arch/arm/mach-shmobile/board-g4evm.c1
-rw-r--r--arch/arm/mach-shmobile/board-kota2.c1
-rw-r--r--arch/arm/mach-shmobile/board-mackerel.c1
-rw-r--r--arch/arm/mach-shmobile/board-marzen.c1
-rw-r--r--arch/arm/mach-shmobile/common.c24
-rw-r--r--arch/arm/mach-shmobile/cpuidle.c3
-rw-r--r--arch/arm/mach-shmobile/include/mach/common.h14
-rw-r--r--arch/arm/mach-shmobile/suspend.c3
-rw-r--r--arch/arm/mach-spear13xx/Kconfig20
-rw-r--r--arch/arm/mach-spear13xx/Makefile10
-rw-r--r--arch/arm/mach-spear13xx/Makefile.boot6
-rw-r--r--arch/arm/mach-spear13xx/headsmp.S47
-rw-r--r--arch/arm/mach-spear13xx/hotplug.c119
-rw-r--r--arch/arm/mach-spear13xx/include/mach/debug-macro.S14
-rw-r--r--arch/arm/mach-spear13xx/include/mach/dma.h128
-rw-r--r--arch/arm/mach-spear13xx/include/mach/generic.h49
-rw-r--r--arch/arm/mach-spear13xx/include/mach/gpio.h19
-rw-r--r--arch/arm/mach-spear13xx/include/mach/hardware.h1
-rw-r--r--arch/arm/mach-spear13xx/include/mach/irqs.h20
-rw-r--r--arch/arm/mach-spear13xx/include/mach/spear.h62
-rw-r--r--arch/arm/mach-spear13xx/include/mach/spear1310_misc_regs.h0
-rw-r--r--arch/arm/mach-spear13xx/include/mach/spear1340_misc_regs.h0
-rw-r--r--arch/arm/mach-spear13xx/include/mach/timex.h19
-rw-r--r--arch/arm/mach-spear13xx/include/mach/uncompress.h19
-rw-r--r--arch/arm/mach-spear13xx/platsmp.c127
-rw-r--r--arch/arm/mach-spear13xx/spear1310.c88
-rw-r--r--arch/arm/mach-spear13xx/spear1340.c192
-rw-r--r--arch/arm/mach-spear13xx/spear13xx.c197
-rw-r--r--arch/arm/mach-spear3xx/Makefile2
-rw-r--r--arch/arm/mach-spear3xx/clock.c892
-rw-r--r--arch/arm/mach-spear3xx/include/mach/generic.h21
-rw-r--r--arch/arm/mach-spear3xx/include/mach/irqs.h1
-rw-r--r--arch/arm/mach-spear3xx/include/mach/misc_regs.h2
-rw-r--r--arch/arm/mach-spear3xx/include/mach/spear.h14
-rw-r--r--arch/arm/mach-spear3xx/spear300.c1
-rw-r--r--arch/arm/mach-spear3xx/spear310.c1
-rw-r--r--arch/arm/mach-spear3xx/spear320.c12
-rw-r--r--arch/arm/mach-spear3xx/spear3xx.c4
-rw-r--r--arch/arm/mach-spear6xx/Makefile2
-rw-r--r--arch/arm/mach-spear6xx/clock.c789
-rw-r--r--arch/arm/mach-spear6xx/include/mach/generic.h2
-rw-r--r--arch/arm/mach-spear6xx/include/mach/irqs.h3
-rw-r--r--arch/arm/mach-spear6xx/include/mach/misc_regs.h2
-rw-r--r--arch/arm/mach-spear6xx/include/mach/spear.h1
-rw-r--r--arch/arm/mach-spear6xx/spear6xx.c7
-rw-r--r--arch/arm/mach-tegra/Kconfig37
-rw-r--r--arch/arm/mach-tegra/board-dt-tegra20.c1
-rw-r--r--arch/arm/mach-tegra/board-dt-tegra30.c11
-rw-r--r--arch/arm/mach-tegra/board-harmony.c1
-rw-r--r--arch/arm/mach-tegra/board-paz00.c4
-rw-r--r--arch/arm/mach-tegra/board-seaboard.c3
-rw-r--r--arch/arm/mach-tegra/board-trimslice.c3
-rw-r--r--arch/arm/mach-tegra/board.h14
-rw-r--r--arch/arm/mach-tegra/clock.c3
-rw-r--r--arch/arm/mach-tegra/common.c28
-rw-r--r--arch/arm/mach-tegra/devices.c5
-rw-r--r--arch/arm/mach-tegra/devices.h4
-rw-r--r--arch/arm/mach-tegra/include/mach/tegra-ahb.h19
-rw-r--r--arch/arm/mach-tegra/include/mach/uncompress.h176
-rw-r--r--arch/arm/mach-tegra/include/mach/usb_phy.h4
-rw-r--r--arch/arm/mach-tegra/powergate.c4
-rw-r--r--arch/arm/mach-tegra/tegra2_clocks.c4
-rw-r--r--arch/arm/mach-tegra/tegra30_clocks.c9
-rw-r--r--arch/arm/mach-tegra/usb_phy.c15
-rw-r--r--arch/arm/mach-ux500/board-mop500.c6
-rw-r--r--arch/arm/mach-ux500/clock.c6
-rw-r--r--arch/arm/mach-ux500/clock.h12
-rw-r--r--arch/arm/mach-ux500/cpu.c6
-rw-r--r--arch/arm/mach-ux500/include/mach/setup.h1
-rw-r--r--arch/arm/mm/dma-mapping.c1348
-rw-r--r--arch/arm/mm/init.c23
-rw-r--r--arch/arm/mm/mm.h3
-rw-r--r--arch/arm/mm/mmu.c31
-rw-r--r--arch/arm/mm/vmregion.h2
-rw-r--r--arch/arm/plat-mxc/clock.c11
-rw-r--r--arch/arm/plat-mxc/include/mach/clock.h4
-rw-r--r--arch/arm/plat-mxc/include/mach/common.h7
-rw-r--r--arch/arm/plat-mxc/include/mach/debug-macro.S2
-rw-r--r--arch/arm/plat-mxc/include/mach/mx6q.h2
-rw-r--r--arch/arm/plat-mxc/time.c14
-rw-r--r--arch/arm/plat-omap/counter_32k.c93
-rw-r--r--arch/arm/plat-omap/devices.c122
-rw-r--r--arch/arm/plat-omap/dma.c4
-rw-r--r--arch/arm/plat-omap/dmtimer.c2
-rw-r--r--arch/arm/plat-omap/include/plat/common.h2
-rw-r--r--arch/arm/plat-omap/include/plat/cpu.h8
-rw-r--r--arch/arm/plat-omap/include/plat/dma.h5
-rw-r--r--arch/arm/plat-omap/include/plat/dmtimer.h1
-rw-r--r--arch/arm/plat-omap/include/plat/mmc.h9
-rw-r--r--arch/arm/plat-orion/common.c104
-rw-r--r--arch/arm/plat-orion/include/plat/common.h34
-rw-r--r--arch/arm/plat-orion/include/plat/orion_wdt.h18
-rw-r--r--arch/arm/plat-orion/pcie.c4
-rw-r--r--arch/arm/plat-pxa/include/plat/pxa27x_keypad.h4
-rw-r--r--arch/arm/plat-s3c24xx/Makefile6
-rw-r--r--arch/arm/plat-s3c24xx/clock.c59
-rw-r--r--arch/arm/plat-s3c24xx/dev-uart.c100
-rw-r--r--arch/arm/plat-s5p/Kconfig140
-rw-r--r--arch/arm/plat-s5p/Makefile28
-rw-r--r--arch/arm/plat-s5p/sysmmu.c313
-rw-r--r--arch/arm/plat-samsung/Kconfig142
-rw-r--r--arch/arm/plat-samsung/Makefile13
-rw-r--r--arch/arm/plat-samsung/include/plat/cpu.h2
-rw-r--r--arch/arm/plat-samsung/include/plat/devs.h3
-rw-r--r--arch/arm/plat-samsung/include/plat/dma-pl330.h1
-rw-r--r--arch/arm/plat-samsung/include/plat/s5p-clock.h4
-rw-r--r--arch/arm/plat-samsung/include/plat/sysmmu.h95
-rw-r--r--arch/arm/plat-samsung/s5p-clock.c (renamed from arch/arm/plat-s5p/clock.c)33
-rw-r--r--arch/arm/plat-samsung/s5p-dev-mfc.c (renamed from arch/arm/plat-s5p/dev-mfc.c)4
-rw-r--r--arch/arm/plat-samsung/s5p-dev-uart.c (renamed from arch/arm/plat-s5p/dev-uart.c)78
-rw-r--r--arch/arm/plat-samsung/s5p-irq-eint.c (renamed from arch/arm/plat-s5p/irq-eint.c)3
-rw-r--r--arch/arm/plat-samsung/s5p-irq-gpioint.c (renamed from arch/arm/plat-s5p/irq-gpioint.c)3
-rw-r--r--arch/arm/plat-samsung/s5p-irq-pm.c (renamed from arch/arm/plat-s5p/irq-pm.c)3
-rw-r--r--arch/arm/plat-samsung/s5p-irq.c (renamed from arch/arm/plat-s5p/irq.c)3
-rw-r--r--arch/arm/plat-samsung/s5p-pm.c (renamed from arch/arm/plat-s5p/pm.c)3
-rw-r--r--arch/arm/plat-samsung/s5p-sleep.S (renamed from arch/arm/plat-s5p/sleep.S)3
-rw-r--r--arch/arm/plat-samsung/s5p-time.c (renamed from arch/arm/plat-s5p/s5p-time.c)3
-rw-r--r--arch/arm/plat-samsung/setup-mipiphy.c (renamed from arch/arm/plat-s5p/setup-mipiphy.c)0
-rw-r--r--arch/arm/plat-spear/Kconfig12
-rw-r--r--arch/arm/plat-spear/Makefile5
-rw-r--r--arch/arm/plat-spear/clock.c1005
-rw-r--r--arch/arm/plat-spear/include/plat/clock.h249
-rw-r--r--arch/arm/plat-spear/restart.c5
-rw-r--r--arch/arm/plat-spear/time.c39
-rw-r--r--arch/avr32/include/asm/kvm_para.h1
-rw-r--r--arch/blackfin/include/asm/kvm_para.h1
-rw-r--r--arch/c6x/include/asm/kvm_para.h1
-rw-r--r--arch/cris/Kconfig1
-rw-r--r--arch/cris/arch-v10/drivers/ds1302.c515
-rw-r--r--arch/cris/arch-v10/drivers/pcf8563.c380
-rw-r--r--arch/cris/arch-v10/kernel/fasttimer.c2
-rw-r--r--arch/cris/arch-v10/kernel/kgdb.c2
-rw-r--r--arch/cris/arch-v10/kernel/time.c9
-rw-r--r--arch/cris/arch-v10/lib/Makefile3
-rw-r--r--arch/cris/arch-v32/drivers/cryptocop.c6
-rw-r--r--arch/cris/arch-v32/kernel/ptrace.c2
-rw-r--r--arch/cris/arch-v32/kernel/time.c7
-rw-r--r--arch/cris/include/arch-v32/arch/cache.h2
-rw-r--r--arch/cris/include/asm/Kbuild1
-rw-r--r--arch/cris/include/asm/posix_types.h2
-rw-r--r--arch/cris/include/asm/rtc.h107
-rw-r--r--arch/cris/kernel/time.c76
-rw-r--r--arch/cris/kernel/vmlinux.lds.S1
-rw-r--r--arch/cris/mm/fault.c31
-rw-r--r--arch/frv/include/asm/kvm_para.h1
-rw-r--r--arch/h8300/include/asm/kvm_para.h1
-rw-r--r--arch/hexagon/include/asm/kvm_para.h1
-rw-r--r--arch/ia64/include/asm/kvm_host.h3
-rw-r--r--arch/ia64/include/asm/kvm_para.h5
-rw-r--r--arch/ia64/kvm/kvm-ia64.c30
-rw-r--r--arch/m68k/include/asm/kvm_para.h1
-rw-r--r--arch/microblaze/Kconfig2
-rw-r--r--arch/microblaze/include/asm/kvm_para.h1
-rw-r--r--arch/microblaze/kernel/entry.S7
-rw-r--r--arch/microblaze/kernel/mcount.S2
-rw-r--r--arch/microblaze/kernel/process.c6
-rw-r--r--arch/microblaze/mm/fault.c33
-rw-r--r--arch/mips/Kconfig15
-rw-r--r--arch/mips/Makefile4
-rw-r--r--arch/mips/alchemy/devboards/db1200.c1
-rw-r--r--arch/mips/ath79/Kconfig25
-rw-r--r--arch/mips/ath79/Makefile2
-rw-r--r--arch/mips/ath79/clock.c81
-rw-r--r--arch/mips/ath79/common.c9
-rw-r--r--arch/mips/ath79/dev-common.c3
-rw-r--r--arch/mips/ath79/dev-gpio-buttons.c4
-rw-r--r--arch/mips/ath79/dev-leds-gpio.c4
-rw-r--r--arch/mips/ath79/dev-wmac.c30
-rw-r--r--arch/mips/ath79/early_printk.c3
-rw-r--r--arch/mips/ath79/gpio.c47
-rw-r--r--arch/mips/ath79/irq.c147
-rw-r--r--arch/mips/ath79/mach-db120.c134
-rw-r--r--arch/mips/ath79/mach-pb44.c2
-rw-r--r--arch/mips/ath79/mach-ubnt-xm.c43
-rw-r--r--arch/mips/ath79/machtypes.h1
-rw-r--r--arch/mips/ath79/pci.c130
-rw-r--r--arch/mips/ath79/pci.h34
-rw-r--r--arch/mips/ath79/setup.c45
-rw-r--r--arch/mips/bcm63xx/boards/Makefile2
-rw-r--r--arch/mips/cavium-octeon/setup.c1
-rw-r--r--arch/mips/cavium-octeon/smp.c6
-rw-r--r--arch/mips/fw/arc/Makefile2
-rw-r--r--arch/mips/include/asm/clkdev.h25
-rw-r--r--arch/mips/include/asm/kvm_para.h1
-rw-r--r--arch/mips/include/asm/mach-ath79/ar71xx_regs.h91
-rw-r--r--arch/mips/include/asm/mach-ath79/ath79.h23
-rw-r--r--arch/mips/include/asm/mach-ath79/irq.h10
-rw-r--r--arch/mips/include/asm/mach-ath79/pci-ath724x.h21
-rw-r--r--arch/mips/include/asm/mach-ath79/pci.h28
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h1
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h23
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/irq.h18
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h67
-rw-r--r--arch/mips/include/asm/mach-lantiq/gpio.h16
-rw-r--r--arch/mips/include/asm/mach-lantiq/lantiq.h34
-rw-r--r--arch/mips/include/asm/mach-lantiq/lantiq_platform.h33
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h44
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h136
-rw-r--r--arch/mips/include/asm/mips-boards/generic.h4
-rw-r--r--arch/mips/include/asm/module.h1
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pcieep-defs.h1365
-rw-r--r--arch/mips/include/asm/pci.h6
-rw-r--r--arch/mips/include/asm/prom.h26
-rw-r--r--arch/mips/include/asm/setup.h3
-rw-r--r--arch/mips/include/asm/sparsemem.h6
-rw-r--r--arch/mips/include/asm/termios.h2
-rw-r--r--arch/mips/include/asm/traps.h1
-rw-r--r--arch/mips/include/asm/uasm.h2
-rw-r--r--arch/mips/jz4740/Makefile2
-rw-r--r--arch/mips/kernel/cpu-probe.c54
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c3
-rw-r--r--arch/mips/kernel/proc.c26
-rw-r--r--arch/mips/kernel/prom.c13
-rw-r--r--arch/mips/kernel/setup.c2
-rw-r--r--arch/mips/kernel/smp.c2
-rw-r--r--arch/mips/kernel/traps.c17
-rw-r--r--arch/mips/lantiq/Kconfig16
-rw-r--r--arch/mips/lantiq/Makefile5
-rw-r--r--arch/mips/lantiq/Platform1
-rw-r--r--arch/mips/lantiq/clk.c146
-rw-r--r--arch/mips/lantiq/clk.h68
-rw-r--r--arch/mips/lantiq/devices.c120
-rw-r--r--arch/mips/lantiq/devices.h23
-rw-r--r--arch/mips/lantiq/dts/Makefile4
-rw-r--r--arch/mips/lantiq/dts/danube.dtsi105
-rw-r--r--arch/mips/lantiq/dts/easy50712.dts113
-rw-r--r--arch/mips/lantiq/early_printk.c17
-rw-r--r--arch/mips/lantiq/falcon/Makefile1
-rw-r--r--arch/mips/lantiq/falcon/prom.c87
-rw-r--r--arch/mips/lantiq/falcon/reset.c90
-rw-r--r--arch/mips/lantiq/falcon/sysctrl.c260
-rw-r--r--arch/mips/lantiq/irq.c255
-rw-r--r--arch/mips/lantiq/machtypes.h20
-rw-r--r--arch/mips/lantiq/prom.c74
-rw-r--r--arch/mips/lantiq/prom.h8
-rw-r--r--arch/mips/lantiq/setup.c66
-rw-r--r--arch/mips/lantiq/xway/Kconfig23
-rw-r--r--arch/mips/lantiq/xway/Makefile8
-rw-r--r--arch/mips/lantiq/xway/clk-ase.c48
-rw-r--r--arch/mips/lantiq/xway/clk-xway.c223
-rw-r--r--arch/mips/lantiq/xway/clk.c151
-rw-r--r--arch/mips/lantiq/xway/devices.c119
-rw-r--r--arch/mips/lantiq/xway/devices.h20
-rw-r--r--arch/mips/lantiq/xway/dma.c61
-rw-r--r--arch/mips/lantiq/xway/ebu.c52
-rw-r--r--arch/mips/lantiq/xway/gpio.c12
-rw-r--r--arch/mips/lantiq/xway/gpio_ebu.c126
-rw-r--r--arch/mips/lantiq/xway/gpio_stp.c157
-rw-r--r--arch/mips/lantiq/xway/mach-easy50601.c57
-rw-r--r--arch/mips/lantiq/xway/mach-easy50712.c74
-rw-r--r--arch/mips/lantiq/xway/pmu.c69
-rw-r--r--arch/mips/lantiq/xway/prom-ase.c39
-rw-r--r--arch/mips/lantiq/xway/prom-xway.c54
-rw-r--r--arch/mips/lantiq/xway/prom.c115
-rw-r--r--arch/mips/lantiq/xway/reset.c77
-rw-r--r--arch/mips/lantiq/xway/setup-ase.c19
-rw-r--r--arch/mips/lantiq/xway/setup-xway.c20
-rw-r--r--arch/mips/lantiq/xway/sysctrl.c371
-rw-r--r--arch/mips/mm/c-octeon.c14
-rw-r--r--arch/mips/mm/c-r4k.c14
-rw-r--r--arch/mips/oprofile/Makefile2
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c12
-rw-r--r--arch/mips/pci/Makefile6
-rw-r--r--arch/mips/pci/fixup-lantiq.c40
-rw-r--r--arch/mips/pci/ops-loongson2.c1
-rw-r--r--arch/mips/pci/pci-ar71xx.c375
-rw-r--r--arch/mips/pci/pci-ar724x.c292
-rw-r--r--arch/mips/pci/pci-ath724x.c174
-rw-r--r--arch/mips/pci/pci-lantiq.c219
-rw-r--r--arch/mips/pci/pci.c55
-rw-r--r--arch/mips/pmc-sierra/yosemite/Makefile2
-rw-r--r--arch/mips/pmc-sierra/yosemite/setup.c1
-rw-r--r--arch/mips/powertv/Makefile2
-rw-r--r--arch/mips/powertv/asic/Makefile2
-rw-r--r--arch/mips/powertv/pci/Makefile2
-rw-r--r--arch/mips/rb532/devices.c1
-rw-r--r--arch/mips/sni/setup.c1
-rw-r--r--arch/mn10300/include/asm/kvm_para.h1
-rw-r--r--arch/openrisc/Kconfig2
-rw-r--r--arch/openrisc/include/asm/Kbuild1
-rw-r--r--arch/openrisc/include/asm/kvm_para.h1
-rw-r--r--arch/openrisc/include/asm/uaccess.h40
-rw-r--r--arch/openrisc/lib/string.S99
-rw-r--r--arch/parisc/include/asm/kvm_para.h1
-rw-r--r--arch/powerpc/Kconfig2
-rw-r--r--arch/powerpc/boot/dts/mpc8569mds.dts1
-rw-r--r--arch/powerpc/include/asm/cputable.h23
-rw-r--r--arch/powerpc/include/asm/dbell.h3
-rw-r--r--arch/powerpc/include/asm/hvcall.h10
-rw-r--r--arch/powerpc/include/asm/hw_irq.h1
-rw-r--r--arch/powerpc/include/asm/kvm.h1
-rw-r--r--arch/powerpc/include/asm/kvm_asm.h18
-rw-r--r--arch/powerpc/include/asm/kvm_book3s.h3
-rw-r--r--arch/powerpc/include/asm/kvm_book3s_asm.h8
-rw-r--r--arch/powerpc/include/asm/kvm_booke.h3
-rw-r--r--arch/powerpc/include/asm/kvm_booke_hv_asm.h49
-rw-r--r--arch/powerpc/include/asm/kvm_e500.h96
-rw-r--r--arch/powerpc/include/asm/kvm_host.h60
-rw-r--r--arch/powerpc/include/asm/kvm_para.h5
-rw-r--r--arch/powerpc/include/asm/kvm_ppc.h20
-rw-r--r--arch/powerpc/include/asm/mmu-book3e.h6
-rw-r--r--arch/powerpc/include/asm/processor.h3
-rw-r--r--arch/powerpc/include/asm/reg.h2
-rw-r--r--arch/powerpc/include/asm/reg_booke.h34
-rw-r--r--arch/powerpc/include/asm/switch_to.h1
-rw-r--r--arch/powerpc/include/asm/time.h1
-rw-r--r--arch/powerpc/include/asm/uaccess.h41
-rw-r--r--arch/powerpc/include/asm/word-at-a-time.h41
-rw-r--r--arch/powerpc/kernel/asm-offsets.c19
-rw-r--r--arch/powerpc/kernel/cpu_setup_fsl_booke.S1
-rw-r--r--arch/powerpc/kernel/exceptions-64s.S12
-rw-r--r--arch/powerpc/kernel/head_44x.S23
-rw-r--r--arch/powerpc/kernel/head_booke.h69
-rw-r--r--arch/powerpc/kernel/head_fsl_booke.S98
-rw-r--r--arch/powerpc/kernel/idle_power7.S7
-rw-r--r--arch/powerpc/kernel/ppc_ksyms.c6
-rw-r--r--arch/powerpc/kernel/time.c3
-rw-r--r--arch/powerpc/kvm/44x.c12
-rw-r--r--arch/powerpc/kvm/44x_emulate.c51
-rw-r--r--arch/powerpc/kvm/Kconfig28
-rw-r--r--arch/powerpc/kvm/Makefile17
-rw-r--r--arch/powerpc/kvm/book3s.c7
-rw-r--r--arch/powerpc/kvm/book3s_64_mmu_hv.c31
-rw-r--r--arch/powerpc/kvm/book3s_64_slb.S2
-rw-r--r--arch/powerpc/kvm/book3s_64_vio.c150
-rw-r--r--arch/powerpc/kvm/book3s_64_vio_hv.c3
-rw-r--r--arch/powerpc/kvm/book3s_emulate.c106
-rw-r--r--arch/powerpc/kvm/book3s_hv.c467
-rw-r--r--arch/powerpc/kvm/book3s_hv_interrupts.S9
-rw-r--r--arch/powerpc/kvm/book3s_hv_rmhandlers.S185
-rw-r--r--arch/powerpc/kvm/book3s_pr.c59
-rw-r--r--arch/powerpc/kvm/book3s_pr_papr.c101
-rw-r--r--arch/powerpc/kvm/book3s_segment.S13
-rw-r--r--arch/powerpc/kvm/booke.c471
-rw-r--r--arch/powerpc/kvm/booke.h62
-rw-r--r--arch/powerpc/kvm/booke_emulate.c118
-rw-r--r--arch/powerpc/kvm/booke_interrupts.S8
-rw-r--r--arch/powerpc/kvm/bookehv_interrupts.S597
-rw-r--r--arch/powerpc/kvm/e500.c372
-rw-r--r--arch/powerpc/kvm/e500.h306
-rw-r--r--arch/powerpc/kvm/e500_emulate.c210
-rw-r--r--arch/powerpc/kvm/e500_tlb.c666
-rw-r--r--arch/powerpc/kvm/e500_tlb.h174
-rw-r--r--arch/powerpc/kvm/e500mc.c342
-rw-r--r--arch/powerpc/kvm/emulate.c197
-rw-r--r--arch/powerpc/kvm/powerpc.c94
-rw-r--r--arch/powerpc/kvm/timing.h6
-rw-r--r--arch/powerpc/lib/string.S45
-rw-r--r--arch/powerpc/platforms/cell/spufs/inode.c2
-rw-r--r--arch/s390/hypfs/inode.c2
-rw-r--r--arch/s390/include/asm/kvm.h5
-rw-r--r--arch/s390/include/asm/kvm_host.h1
-rw-r--r--arch/s390/include/asm/kvm_para.h5
-rw-r--r--arch/s390/include/asm/sclp.h1
-rw-r--r--arch/s390/kvm/diag.c29
-rw-r--r--arch/s390/kvm/intercept.c1
-rw-r--r--arch/s390/kvm/kvm-s390.c87
-rw-r--r--arch/s390/kvm/kvm-s390.h1
-rw-r--r--arch/s390/kvm/priv.c31
-rw-r--r--arch/score/include/asm/kvm_para.h1
-rw-r--r--arch/sh/include/asm/kvm_para.h1
-rw-r--r--arch/sparc/Kconfig1
-rw-r--r--arch/sparc/include/asm/Kbuild1
-rw-r--r--arch/sparc/include/asm/kvm_para.h1
-rw-r--r--arch/sparc/include/asm/uaccess_32.h22
-rw-r--r--arch/sparc/include/asm/uaccess_64.h8
-rw-r--r--arch/sparc/lib/Makefile1
-rw-r--r--arch/sparc/lib/ksyms.c2
-rw-r--r--arch/sparc/lib/strlen_user_32.S109
-rw-r--r--arch/sparc/lib/strlen_user_64.S97
-rw-r--r--arch/tile/Kconfig43
-rw-r--r--arch/tile/Makefile7
-rw-r--r--arch/tile/include/arch/spr_def_32.h56
-rw-r--r--arch/tile/include/arch/spr_def_64.h43
-rw-r--r--arch/tile/include/asm/Kbuild2
-rw-r--r--arch/tile/include/asm/atomic_32.h10
-rw-r--r--arch/tile/include/asm/bitops.h12
-rw-r--r--arch/tile/include/asm/byteorder.h20
-rw-r--r--arch/tile/include/asm/cachectl.h42
-rw-r--r--arch/tile/include/asm/compat.h3
-rw-r--r--arch/tile/include/asm/elf.h5
-rw-r--r--arch/tile/include/asm/futex.h143
-rw-r--r--arch/tile/include/asm/hardwall.h18
-rw-r--r--arch/tile/include/asm/hugetlb.h21
-rw-r--r--arch/tile/include/asm/irqflags.h34
-rw-r--r--arch/tile/include/asm/kexec.h12
-rw-r--r--arch/tile/include/asm/kvm_para.h1
-rw-r--r--arch/tile/include/asm/mmu.h2
-rw-r--r--arch/tile/include/asm/mmu_context.h8
-rw-r--r--arch/tile/include/asm/module.h40
-rw-r--r--arch/tile/include/asm/page.h18
-rw-r--r--arch/tile/include/asm/pgalloc.h92
-rw-r--r--arch/tile/include/asm/pgtable.h111
-rw-r--r--arch/tile/include/asm/pgtable_32.h40
-rw-r--r--arch/tile/include/asm/pgtable_64.h57
-rw-r--r--arch/tile/include/asm/processor.h17
-rw-r--r--arch/tile/include/asm/setup.h10
-rw-r--r--arch/tile/include/asm/syscalls.h3
-rw-r--r--arch/tile/include/asm/tlbflush.h17
-rw-r--r--arch/tile/include/asm/uaccess.h222
-rw-r--r--arch/tile/include/asm/unistd.h4
-rw-r--r--arch/tile/include/hv/drv_xgbe_intf.h2
-rw-r--r--arch/tile/include/hv/hypervisor.h325
-rw-r--r--arch/tile/kernel/Makefile3
-rw-r--r--arch/tile/kernel/entry.S3
-rw-r--r--arch/tile/kernel/hardwall.c754
-rw-r--r--arch/tile/kernel/head_32.S8
-rw-r--r--arch/tile/kernel/head_64.S22
-rw-r--r--arch/tile/kernel/hvglue.lds3
-rw-r--r--arch/tile/kernel/intvec_64.S80
-rw-r--r--arch/tile/kernel/machine_kexec.c42
-rw-r--r--arch/tile/kernel/module.c12
-rw-r--r--arch/tile/kernel/proc.c1
-rw-r--r--arch/tile/kernel/process.c16
-rw-r--r--arch/tile/kernel/relocate_kernel_32.S (renamed from arch/tile/kernel/relocate_kernel.S)0
-rw-r--r--arch/tile/kernel/relocate_kernel_64.S260
-rw-r--r--arch/tile/kernel/setup.c169
-rw-r--r--arch/tile/kernel/single_step.c16
-rw-r--r--arch/tile/kernel/smp.c2
-rw-r--r--arch/tile/kernel/sys.c10
-rw-r--r--arch/tile/kernel/sysfs.c8
-rw-r--r--arch/tile/kernel/tlb.c11
-rw-r--r--arch/tile/kernel/traps.c30
-rw-r--r--arch/tile/lib/atomic_32.c47
-rw-r--r--arch/tile/lib/exports.c8
-rw-r--r--arch/tile/lib/memchr_64.c8
-rw-r--r--arch/tile/lib/memcpy_64.c23
-rw-r--r--arch/tile/lib/memcpy_tile64.c8
-rw-r--r--arch/tile/lib/strchr_64.c15
-rw-r--r--arch/tile/lib/string-endian.h33
-rw-r--r--arch/tile/lib/strlen_64.c11
-rw-r--r--arch/tile/lib/usercopy_32.S76
-rw-r--r--arch/tile/lib/usercopy_64.S49
-rw-r--r--arch/tile/mm/fault.c34
-rw-r--r--arch/tile/mm/homecache.c1
-rw-r--r--arch/tile/mm/hugetlbpage.c285
-rw-r--r--arch/tile/mm/init.c19
-rw-r--r--arch/tile/mm/migrate.h6
-rw-r--r--arch/tile/mm/migrate_32.S36
-rw-r--r--arch/tile/mm/migrate_64.S34
-rw-r--r--arch/tile/mm/pgtable.c40
-rw-r--r--arch/um/Makefile11
-rw-r--r--arch/um/include/asm/kvm_para.h1
-rw-r--r--arch/unicore32/include/asm/kvm_para.h1
-rw-r--r--arch/x86/Kbuild2
-rw-r--r--arch/x86/Kconfig3
-rw-r--r--arch/x86/include/asm/acpi.h9
-rw-r--r--arch/x86/include/asm/dma-contiguous.h13
-rw-r--r--arch/x86/include/asm/dma-mapping.h5
-rw-r--r--arch/x86/include/asm/kvm_emulate.h4
-rw-r--r--arch/x86/include/asm/kvm_host.h13
-rw-r--r--arch/x86/include/asm/kvm_para.h24
-rw-r--r--arch/x86/include/asm/pgtable-3level.h50
-rw-r--r--arch/x86/include/asm/processor.h7
-rw-r--r--arch/x86/include/asm/pvclock-abi.h1
-rw-r--r--arch/x86/include/asm/realmode.h62
-rw-r--r--arch/x86/include/asm/sta2x11.h12
-rw-r--r--arch/x86/include/asm/trampoline.h39
-rw-r--r--arch/x86/include/asm/uaccess.h4
-rw-r--r--arch/x86/include/asm/uaccess_32.h17
-rw-r--r--arch/x86/include/asm/uaccess_64.h3
-rw-r--r--arch/x86/include/asm/word-at-a-time.h32
-rw-r--r--arch/x86/include/asm/xen/events.h1
-rw-r--r--arch/x86/include/asm/xen/page.h1
-rw-r--r--arch/x86/kernel/Makefile2
-rw-r--r--arch/x86/kernel/acpi/Makefile9
-rw-r--r--arch/x86/kernel/acpi/realmode/.gitignore3
-rw-r--r--arch/x86/kernel/acpi/realmode/Makefile59
-rw-r--r--arch/x86/kernel/acpi/realmode/bioscall.S1
-rw-r--r--arch/x86/kernel/acpi/realmode/copy.S1
-rw-r--r--arch/x86/kernel/acpi/realmode/regs.c1
-rw-r--r--arch/x86/kernel/acpi/realmode/video-bios.c1
-rw-r--r--arch/x86/kernel/acpi/realmode/video-mode.c1
-rw-r--r--arch/x86/kernel/acpi/realmode/video-vesa.c1
-rw-r--r--arch/x86/kernel/acpi/realmode/video-vga.c1
-rw-r--r--arch/x86/kernel/acpi/realmode/wakeup.lds.S62
-rw-r--r--arch/x86/kernel/acpi/sleep.c33
-rw-r--r--arch/x86/kernel/acpi/sleep.h2
-rw-r--r--arch/x86/kernel/acpi/wakeup_rm.S12
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-apei.c3
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce-severity.c26
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce.c24
-rw-r--r--arch/x86/kernel/e820.c53
-rw-r--r--arch/x86/kernel/head32.c1
-rw-r--r--arch/x86/kernel/head64.c1
-rw-r--r--arch/x86/kernel/head_32.S5
-rw-r--r--arch/x86/kernel/head_64.S4
-rw-r--r--arch/x86/kernel/kvmclock.c20
-rw-r--r--arch/x86/kernel/mpparse.c11
-rw-r--r--arch/x86/kernel/pci-dma.c18
-rw-r--r--arch/x86/kernel/pci-nommu.c8
-rw-r--r--arch/x86/kernel/reboot.c25
-rw-r--r--arch/x86/kernel/setup.c24
-rw-r--r--arch/x86/kernel/smpboot.c18
-rw-r--r--arch/x86/kernel/tboot.c7
-rw-r--r--arch/x86/kernel/trampoline.c42
-rw-r--r--arch/x86/kernel/trampoline_32.S83
-rw-r--r--arch/x86/kernel/vmlinux.lds.S12
-rw-r--r--arch/x86/kvm/Kconfig1
-rw-r--r--arch/x86/kvm/cpuid.c5
-rw-r--r--arch/x86/kvm/emulate.c293
-rw-r--r--arch/x86/kvm/i8254.c31
-rw-r--r--arch/x86/kvm/i8254.h7
-rw-r--r--arch/x86/kvm/lapic.c31
-rw-r--r--arch/x86/kvm/mmu.c345
-rw-r--r--arch/x86/kvm/mmu_audit.c10
-rw-r--r--arch/x86/kvm/paging_tmpl.h2
-rw-r--r--arch/x86/kvm/svm.c9
-rw-r--r--arch/x86/kvm/vmx.c41
-rw-r--r--arch/x86/kvm/x86.c280
-rw-r--r--arch/x86/kvm/x86.h2
-rw-r--r--arch/x86/lib/usercopy.c97
-rw-r--r--arch/x86/lib/usercopy_32.c41
-rw-r--r--arch/x86/lib/usercopy_64.c48
-rw-r--r--arch/x86/mm/init.c16
-rw-r--r--arch/x86/mm/numa.c32
-rw-r--r--arch/x86/mm/numa_emulation.c4
-rw-r--r--arch/x86/mm/pat.c42
-rw-r--r--arch/x86/mm/srat.c5
-rw-r--r--arch/x86/pci/xen.c4
-rw-r--r--arch/x86/realmode/Makefile18
-rw-r--r--arch/x86/realmode/init.c115
-rw-r--r--arch/x86/realmode/rm/.gitignore3
-rw-r--r--arch/x86/realmode/rm/Makefile82
-rw-r--r--arch/x86/realmode/rm/bioscall.S1
-rw-r--r--arch/x86/realmode/rm/copy.S1
-rw-r--r--arch/x86/realmode/rm/header.S41
-rw-r--r--arch/x86/realmode/rm/realmode.h21
-rw-r--r--arch/x86/realmode/rm/realmode.lds.S76
-rw-r--r--arch/x86/realmode/rm/reboot_32.S (renamed from arch/x86/kernel/reboot_32.S)89
-rw-r--r--arch/x86/realmode/rm/regs.c1
-rw-r--r--arch/x86/realmode/rm/stack.S19
-rw-r--r--arch/x86/realmode/rm/trampoline_32.S74
-rw-r--r--arch/x86/realmode/rm/trampoline_64.S (renamed from arch/x86/kernel/trampoline_64.S)148
-rw-r--r--arch/x86/realmode/rm/trampoline_common.S7
-rw-r--r--arch/x86/realmode/rm/video-bios.c1
-rw-r--r--arch/x86/realmode/rm/video-mode.c1
-rw-r--r--arch/x86/realmode/rm/video-vesa.c1
-rw-r--r--arch/x86/realmode/rm/video-vga.c1
-rw-r--r--arch/x86/realmode/rm/wakemain.c (renamed from arch/x86/kernel/acpi/realmode/wakemain.c)3
-rw-r--r--arch/x86/realmode/rm/wakeup.h (renamed from arch/x86/kernel/acpi/realmode/wakeup.h)10
-rw-r--r--arch/x86/realmode/rm/wakeup_asm.S (renamed from arch/x86/kernel/acpi/realmode/wakeup.S)131
-rw-r--r--arch/x86/realmode/rmpiggy.S20
-rw-r--r--arch/x86/tools/relocs.c7
-rw-r--r--arch/x86/xen/debugfs.c104
-rw-r--r--arch/x86/xen/debugfs.h4
-rw-r--r--arch/x86/xen/enlighten.c13
-rw-r--r--arch/x86/xen/mmu.c23
-rw-r--r--arch/x86/xen/p2m.c104
-rw-r--r--arch/x86/xen/setup.c171
-rw-r--r--arch/x86/xen/smp.c112
-rw-r--r--arch/x86/xen/smp.h12
-rw-r--r--arch/x86/xen/spinlock.c12
-rw-r--r--arch/x86/xen/xen-ops.h1
-rw-r--r--arch/xtensa/include/asm/kvm_para.h1
-rw-r--r--drivers/Makefile2
-rw-r--r--drivers/acpi/bgrt.c1
-rw-r--r--drivers/acpi/sleep.c6
-rw-r--r--drivers/amba/Makefile4
-rw-r--r--drivers/amba/tegra-ahb.c293
-rw-r--r--drivers/ata/sata_mv.c40
-rw-r--r--drivers/base/Kconfig89
-rw-r--r--drivers/base/Makefile1
-rw-r--r--drivers/base/dma-buf.c99
-rw-r--r--drivers/base/dma-coherent.c42
-rw-r--r--drivers/base/dma-contiguous.c401
-rw-r--r--drivers/base/node.c8
-rw-r--r--drivers/clk/Kconfig12
-rw-r--r--drivers/clk/Makefile5
-rw-r--r--drivers/clk/clk-divider.c68
-rw-r--r--drivers/clk/clk-fixed-factor.c95
-rw-r--r--drivers/clk/clk-fixed-rate.c49
-rw-r--r--drivers/clk/clk-gate.c104
-rw-r--r--drivers/clk/clk-mux.c27
-rw-r--r--drivers/clk/clk.c279
-rw-r--r--drivers/clk/mxs/Makefile8
-rw-r--r--drivers/clk/mxs/clk-div.c110
-rw-r--r--drivers/clk/mxs/clk-frac.c139
-rw-r--r--drivers/clk/mxs/clk-imx23.c205
-rw-r--r--drivers/clk/mxs/clk-imx28.c338
-rw-r--r--drivers/clk/mxs/clk-pll.c116
-rw-r--r--drivers/clk/mxs/clk-ref.c154
-rw-r--r--drivers/clk/mxs/clk.c28
-rw-r--r--drivers/clk/mxs/clk.h66
-rw-r--r--drivers/clk/spear/Makefile10
-rw-r--r--drivers/clk/spear/clk-aux-synth.c198
-rw-r--r--drivers/clk/spear/clk-frac-synth.c165
-rw-r--r--drivers/clk/spear/clk-gpt-synth.c154
-rw-r--r--drivers/clk/spear/clk-vco-pll.c363
-rw-r--r--drivers/clk/spear/clk.c36
-rw-r--r--drivers/clk/spear/clk.h134
-rw-r--r--drivers/clk/spear/spear1310_clock.c1106
-rw-r--r--drivers/clk/spear/spear1340_clock.c964
-rw-r--r--drivers/clk/spear/spear3xx_clock.c612
-rw-r--r--drivers/clk/spear/spear6xx_clock.c342
-rw-r--r--drivers/crypto/mv_cesa.c14
-rw-r--r--drivers/dma/Kconfig1
-rw-r--r--drivers/dma/amba-pl08x.c52
-rw-r--r--drivers/dma/at_hdmac.c15
-rw-r--r--drivers/dma/at_hdmac_regs.h21
-rw-r--r--drivers/dma/coh901318.c2
-rw-r--r--drivers/dma/coh901318_lli.c4
-rw-r--r--drivers/dma/dw_dmac.c26
-rw-r--r--drivers/dma/ep93xx_dma.c117
-rw-r--r--drivers/dma/imx-dma.c12
-rw-r--r--drivers/dma/imx-sdma.c108
-rw-r--r--drivers/dma/intel_mid_dma.c8
-rw-r--r--drivers/dma/ipu/ipu_idmac.c6
-rw-r--r--drivers/dma/mv_xor.c15
-rw-r--r--drivers/dma/mv_xor.h1
-rw-r--r--drivers/dma/mxs-dma.c194
-rw-r--r--drivers/dma/pch_dma.c2
-rw-r--r--drivers/dma/pl330.c1
-rw-r--r--drivers/dma/ste_dma40.c2
-rw-r--r--drivers/edac/amd64_edac.c200
-rw-r--r--drivers/edac/amd76x_edac.c42
-rw-r--r--drivers/edac/cell_edac.c42
-rw-r--r--drivers/edac/cpc925_edac.c91
-rw-r--r--drivers/edac/e752x_edac.c116
-rw-r--r--drivers/edac/e7xxx_edac.c86
-rw-r--r--drivers/edac/edac_core.h47
-rw-r--r--drivers/edac/edac_device.c27
-rw-r--r--drivers/edac/edac_mc.c716
-rw-r--r--drivers/edac/edac_mc_sysfs.c70
-rw-r--r--drivers/edac/edac_module.h2
-rw-r--r--drivers/edac/edac_pci.c6
-rw-r--r--drivers/edac/i3000_edac.c49
-rw-r--r--drivers/edac/i3200_edac.c56
-rw-r--r--drivers/edac/i5000_edac.c236
-rw-r--r--drivers/edac/i5100_edac.c106
-rw-r--r--drivers/edac/i5400_edac.c265
-rw-r--r--drivers/edac/i7300_edac.c115
-rw-r--r--drivers/edac/i7core_edac.c270
-rw-r--r--drivers/edac/i82443bxgx_edac.c41
-rw-r--r--drivers/edac/i82860_edac.c55
-rw-r--r--drivers/edac/i82875p_edac.c51
-rw-r--r--drivers/edac/i82975x_edac.c58
-rw-r--r--drivers/edac/mpc85xx_edac.c37
-rw-r--r--drivers/edac/mv64x60_edac.c47
-rw-r--r--drivers/edac/pasemi_edac.c49
-rw-r--r--drivers/edac/ppc4xx_edac.c50
-rw-r--r--drivers/edac/r82600_edac.c40
-rw-r--r--drivers/edac/sb_edac.c212
-rw-r--r--drivers/edac/tile_edac.c33
-rw-r--r--drivers/edac/x38_edac.c52
-rw-r--r--drivers/gpio/Kconfig48
-rw-r--r--drivers/gpio/Makefile4
-rw-r--r--drivers/gpio/gpio-ich.c419
-rw-r--r--drivers/gpio/gpio-mm-lantiq.c158
-rw-r--r--drivers/gpio/gpio-mxs.c156
-rw-r--r--drivers/gpio/gpio-samsung.c11
-rw-r--r--drivers/gpio/gpio-sch.c8
-rw-r--r--drivers/gpio/gpio-sta2x11.c435
-rw-r--r--drivers/gpio/gpio-stp-xway.c301
-rw-r--r--drivers/gpio/gpio-tps65910.c188
-rw-r--r--drivers/gpio/gpio-wm831x.c6
-rw-r--r--drivers/i2c/Kconfig1
-rw-r--r--drivers/i2c/busses/Kconfig15
-rw-r--r--drivers/i2c/busses/Makefile1
-rw-r--r--drivers/i2c/busses/i2c-davinci.c2
-rw-r--r--drivers/i2c/busses/i2c-designware-core.c31
-rw-r--r--drivers/i2c/busses/i2c-designware-core.h5
-rw-r--r--drivers/i2c/busses/i2c-designware-platdrv.c33
-rw-r--r--drivers/i2c/busses/i2c-eg20t.c246
-rw-r--r--drivers/i2c/busses/i2c-gpio.c7
-rw-r--r--drivers/i2c/busses/i2c-imx.c2
-rw-r--r--drivers/i2c/busses/i2c-ixp2000.c157
-rw-r--r--drivers/i2c/busses/i2c-mpc.c30
-rw-r--r--drivers/i2c/busses/i2c-mxs.c22
-rw-r--r--drivers/i2c/busses/i2c-ocores.c3
-rw-r--r--drivers/i2c/busses/i2c-pca-platform.c2
-rw-r--r--drivers/i2c/busses/i2c-pxa.c5
-rw-r--r--drivers/i2c/busses/i2c-s3c2410.c109
-rw-r--r--drivers/i2c/busses/i2c-sh_mobile.c11
-rw-r--r--drivers/i2c/busses/i2c-tegra.c24
-rw-r--r--drivers/i2c/busses/i2c-versatile.c9
-rw-r--r--drivers/i2c/busses/i2c-xiic.c23
-rw-r--r--drivers/i2c/i2c-core.c17
-rw-r--r--drivers/i2c/i2c-mux.c42
-rw-r--r--drivers/i2c/muxes/Kconfig6
-rw-r--r--drivers/i2c/muxes/Makefile6
-rw-r--r--drivers/i2c/muxes/i2c-mux-gpio.c (renamed from drivers/i2c/muxes/gpio-i2cmux.c)42
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca9541.c (renamed from drivers/i2c/muxes/pca9541.c)3
-rw-r--r--drivers/i2c/muxes/i2c-mux-pca954x.c (renamed from drivers/i2c/muxes/pca954x.c)2
-rw-r--r--drivers/input/keyboard/pxa27x_keypad.c52
-rw-r--r--drivers/input/misc/wm831x-on.c2
-rw-r--r--drivers/input/touchscreen/wm831x-ts.c9
-rw-r--r--drivers/iommu/Kconfig21
-rw-r--r--drivers/iommu/Makefile1
-rw-r--r--drivers/iommu/exynos-iommu.c1076
-rw-r--r--drivers/iommu/intel-iommu.c40
-rw-r--r--drivers/leds/Kconfig29
-rw-r--r--drivers/leds/Makefile3
-rw-r--r--drivers/leds/led-class.c21
-rw-r--r--drivers/leds/leds-da9052.c214
-rw-r--r--drivers/leds/leds-lm3530.c100
-rw-r--r--drivers/leds/leds-lm3533.c785
-rw-r--r--drivers/leds/leds-lp5521.c12
-rw-r--r--drivers/leds/leds-mc13783.c2
-rw-r--r--drivers/leds/leds-pca955x.c95
-rw-r--r--drivers/leds/ledtrig-backlight.c4
-rw-r--r--drivers/leds/ledtrig-gpio.c4
-rw-r--r--drivers/leds/ledtrig-heartbeat.c32
-rw-r--r--drivers/leds/ledtrig-timer.c54
-rw-r--r--drivers/leds/ledtrig-transient.c237
-rw-r--r--drivers/media/video/mx3_camera.c4
-rw-r--r--drivers/mfd/Kconfig76
-rw-r--r--drivers/mfd/Makefile10
-rw-r--r--drivers/mfd/ab8500-core.c423
-rw-r--r--drivers/mfd/ab8500-debugfs.c6
-rw-r--r--drivers/mfd/ab8500-gpadc.c8
-rw-r--r--drivers/mfd/ab8500-i2c.c128
-rw-r--r--drivers/mfd/ab8500-sysctrl.c6
-rw-r--r--drivers/mfd/anatop-mfd.c35
-rw-r--r--drivers/mfd/asic3.c33
-rw-r--r--drivers/mfd/cs5535-mfd.c13
-rw-r--r--drivers/mfd/da9052-core.c140
-rw-r--r--drivers/mfd/da9052-i2c.c72
-rw-r--r--drivers/mfd/da9052-spi.c19
-rw-r--r--drivers/mfd/db8500-prcmu.c35
-rw-r--r--drivers/mfd/intel_msic.c31
-rw-r--r--drivers/mfd/janz-cmodio.c17
-rw-r--r--drivers/mfd/lm3533-core.c667
-rw-r--r--drivers/mfd/lm3533-ctrlbank.c148
-rw-r--r--drivers/mfd/lpc_ich.c888
-rw-r--r--drivers/mfd/lpc_sch.c26
-rw-r--r--drivers/mfd/max77693-irq.c309
-rw-r--r--drivers/mfd/max77693.c249
-rw-r--r--drivers/mfd/mc13xxx-core.c239
-rw-r--r--drivers/mfd/mc13xxx-i2c.c128
-rw-r--r--drivers/mfd/mc13xxx-spi.c140
-rw-r--r--drivers/mfd/mc13xxx.h45
-rw-r--r--drivers/mfd/pcf50633-core.c36
-rw-r--r--drivers/mfd/rc5t583.c8
-rw-r--r--drivers/mfd/rdc321x-southbridge.c13
-rw-r--r--drivers/mfd/s5m-core.c6
-rw-r--r--drivers/mfd/sta2x11-mfd.c467
-rw-r--r--drivers/mfd/stmpe-spi.c1
-rw-r--r--drivers/mfd/tps65090.c33
-rw-r--r--drivers/mfd/tps65217.c17
-rw-r--r--drivers/mfd/tps65910-irq.c130
-rw-r--r--drivers/mfd/tps65910.c205
-rw-r--r--drivers/mfd/twl4030-irq.c1
-rw-r--r--drivers/mfd/twl6040-core.c120
-rw-r--r--drivers/mfd/twl6040-irq.c32
-rw-r--r--drivers/mfd/vx855.c12
-rw-r--r--drivers/mfd/wm831x-auxadc.c6
-rw-r--r--drivers/mfd/wm831x-core.c45
-rw-r--r--drivers/mfd/wm831x-irq.c148
-rw-r--r--drivers/mfd/wm8350-core.c31
-rw-r--r--drivers/mfd/wm8350-i2c.c61
-rw-r--r--drivers/mfd/wm8400-core.c250
-rw-r--r--drivers/mfd/wm8994-core.c25
-rw-r--r--drivers/mfd/wm8994-regmap.c1
-rw-r--r--drivers/misc/ab8500-pwm.c6
-rw-r--r--drivers/mmc/card/block.c22
-rw-r--r--drivers/mmc/card/queue.c6
-rw-r--r--drivers/mmc/core/bus.c2
-rw-r--r--drivers/mmc/core/cd-gpio.c3
-rw-r--r--drivers/mmc/core/core.c18
-rw-r--r--drivers/mmc/core/mmc.c119
-rw-r--r--drivers/mmc/core/sdio.c2
-rw-r--r--drivers/mmc/core/sdio_irq.c11
-rw-r--r--drivers/mmc/host/Kconfig17
-rw-r--r--drivers/mmc/host/Makefile1
-rw-r--r--drivers/mmc/host/atmel-mci.c469
-rw-r--r--drivers/mmc/host/davinci_mmc.c1
-rw-r--r--drivers/mmc/host/dw_mmc.c18
-rw-r--r--drivers/mmc/host/imxmmc.c1169
-rw-r--r--drivers/mmc/host/imxmmc.h64
-rw-r--r--drivers/mmc/host/mmci.c65
-rw-r--r--drivers/mmc/host/mvsdio.c14
-rw-r--r--drivers/mmc/host/mxcmmc.c39
-rw-r--r--drivers/mmc/host/mxs-mmc.c197
-rw-r--r--drivers/mmc/host/omap.c48
-rw-r--r--drivers/mmc/host/omap_hsmmc.c86
-rw-r--r--drivers/mmc/host/sdhci-esdhc-imx.c44
-rw-r--r--drivers/mmc/host/sdhci-pltfm.c8
-rw-r--r--drivers/mmc/host/sdhci-spear.c82
-rw-r--r--drivers/mmc/host/sdhci-tegra.c26
-rw-r--r--drivers/mmc/host/sdhci.c4
-rw-r--r--drivers/mtd/maps/lantiq-flash.c76
-rw-r--r--drivers/mtd/nand/mxc_nand.c6
-rw-r--r--drivers/mtd/nand/orion_nand.c18
-rw-r--r--drivers/net/cris/eth_v10.c1
-rw-r--r--drivers/net/ethernet/freescale/fec.c35
-rw-r--r--drivers/net/ethernet/marvell/mv643xx_eth.c42
-rw-r--r--drivers/net/ethernet/ti/Kconfig2
-rw-r--r--drivers/of/of_i2c.c16
-rw-r--r--drivers/of/of_pci_irq.c2
-rw-r--r--drivers/pci/pci.c2
-rw-r--r--drivers/pinctrl/spear/Kconfig10
-rw-r--r--drivers/pinctrl/spear/Makefile2
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear.h251
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1310.c2198
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear1340.c1989
-rw-r--r--drivers/pinctrl/spear/pinctrl-spear3xx.c103
-rw-r--r--drivers/platform/x86/toshiba_acpi.c1
-rw-r--r--drivers/power/wm831x_power.c21
-rw-r--r--drivers/regulator/anatop-regulator.c18
-rw-r--r--drivers/regulator/tps65910-regulator.c82
-rw-r--r--drivers/regulator/wm831x-dcdc.c24
-rw-r--r--drivers/regulator/wm831x-isink.c4
-rw-r--r--drivers/regulator/wm831x-ldo.c10
-rw-r--r--drivers/rtc/Kconfig42
-rw-r--r--drivers/rtc/Makefile2
-rw-r--r--drivers/rtc/rtc-ds1307.c20
-rw-r--r--drivers/rtc/rtc-ep93xx.c24
-rw-r--r--drivers/rtc/rtc-imxdi.c6
-rw-r--r--drivers/rtc/rtc-lpc32xx.c12
-rw-r--r--drivers/rtc/rtc-m41t93.c46
-rw-r--r--drivers/rtc/rtc-pcf8563.c44
-rw-r--r--drivers/rtc/rtc-pl031.c14
-rw-r--r--drivers/rtc/rtc-s3c.c2
-rw-r--r--drivers/rtc/rtc-spear.c10
-rw-r--r--drivers/rtc/rtc-tegra.c50
-rw-r--r--drivers/rtc/rtc-wm831x.c2
-rw-r--r--drivers/s390/char/sclp_cmd.c12
-rw-r--r--drivers/spi/Kconfig2
-rw-r--r--drivers/spi/spi-imx.c30
-rw-r--r--drivers/spi/spi-orion.c30
-rw-r--r--drivers/staging/android/ashmem.c8
-rw-r--r--drivers/tty/pty.c2
-rw-r--r--drivers/tty/serial/imx.c38
-rw-r--r--drivers/tty/serial/lantiq.c83
-rw-r--r--drivers/tty/serial/sb1250-duart.c1
-rw-r--r--drivers/tty/serial/zs.c1
-rw-r--r--drivers/tty/tty_ldisc.c41
-rw-r--r--drivers/usb/host/ehci-mxc.c62
-rw-r--r--drivers/usb/host/ehci-orion.c16
-rw-r--r--drivers/usb/host/ehci-tegra.c5
-rw-r--r--drivers/video/backlight/Kconfig12
-rw-r--r--drivers/video/backlight/Makefile1
-rw-r--r--drivers/video/backlight/adp5520_bl.c4
-rw-r--r--drivers/video/backlight/adp8860_bl.c28
-rw-r--r--drivers/video/backlight/adp8870_bl.c28
-rw-r--r--drivers/video/backlight/ams369fg06.c16
-rw-r--r--drivers/video/backlight/apple_bl.c21
-rw-r--r--drivers/video/backlight/backlight.c11
-rw-r--r--drivers/video/backlight/corgi_lcd.c12
-rw-r--r--drivers/video/backlight/cr_bllcd.c9
-rw-r--r--drivers/video/backlight/da903x_bl.c1
-rw-r--r--drivers/video/backlight/generic_bl.c6
-rw-r--r--drivers/video/backlight/ili9320.c9
-rw-r--r--drivers/video/backlight/jornada720_bl.c14
-rw-r--r--drivers/video/backlight/jornada720_lcd.c8
-rw-r--r--drivers/video/backlight/l4f00242t03.c27
-rw-r--r--drivers/video/backlight/lcd.c20
-rw-r--r--drivers/video/backlight/ld9040.c15
-rw-r--r--drivers/video/backlight/lm3533_bl.c423
-rw-r--r--drivers/video/backlight/lms283gf05.c9
-rw-r--r--drivers/video/backlight/ltv350qv.c24
-rw-r--r--drivers/video/backlight/omap1_bl.c4
-rw-r--r--drivers/video/backlight/pcf50633-backlight.c1
-rw-r--r--drivers/video/backlight/progear_bl.c6
-rw-r--r--drivers/video/backlight/s6e63m0.c16
-rw-r--r--drivers/video/backlight/tdo24m.c21
-rw-r--r--drivers/video/backlight/tosa_bl.c11
-rw-r--r--drivers/video/backlight/tosa_lcd.c8
-rw-r--r--drivers/video/backlight/wm831x_bl.c1
-rw-r--r--drivers/video/fbmem.c21
-rw-r--r--drivers/video/imxfb.c50
-rw-r--r--drivers/video/omap2/displays/panel-acx565akm.c1
-rw-r--r--drivers/w1/masters/mxc_w1.c4
-rw-r--r--drivers/watchdog/Kconfig1
-rw-r--r--drivers/watchdog/iTCO_vendor.h6
-rw-r--r--drivers/watchdog/iTCO_vendor_support.c43
-rw-r--r--drivers/watchdog/iTCO_wdt.c529
-rw-r--r--drivers/watchdog/imx2_wdt.c2
-rw-r--r--drivers/watchdog/lantiq_wdt.c56
-rw-r--r--drivers/watchdog/orion_wdt.c16
-rw-r--r--drivers/xen/Makefile2
-rw-r--r--drivers/xen/acpi.c62
-rw-r--r--drivers/xen/events.c5
-rw-r--r--drivers/xen/grant-table.c125
-rw-r--r--drivers/xen/xen-acpi-processor.c1
-rw-r--r--drivers/xen/xen-selfballoon.c34
-rw-r--r--drivers/xen/xenbus/xenbus_comms.c6
-rw-r--r--drivers/xen/xenbus/xenbus_comms.h1
-rw-r--r--drivers/xen/xenbus/xenbus_dev_backend.c51
-rw-r--r--fs/9p/vfs_inode.c2
-rw-r--r--fs/affs/inode.c2
-rw-r--r--fs/afs/inode.c2
-rw-r--r--fs/autofs4/inode.c2
-rw-r--r--fs/bad_inode.c1
-rw-r--r--fs/bfs/inode.c2
-rw-r--r--fs/binfmt_misc.c2
-rw-r--r--fs/block_dev.c2
-rw-r--r--fs/btrfs/inode.c2
-rw-r--r--fs/cifs/Kconfig20
-rw-r--r--fs/cifs/Makefile4
-rw-r--r--fs/cifs/README5
-rw-r--r--fs/cifs/cifs_debug.c56
-rw-r--r--fs/cifs/cifs_debug.h4
-rw-r--r--fs/cifs/cifsfs.c25
-rw-r--r--fs/cifs/cifsglob.h107
-rw-r--r--fs/cifs/cifsproto.h19
-rw-r--r--fs/cifs/cifssmb.c181
-rw-r--r--fs/cifs/connect.c178
-rw-r--r--fs/cifs/file.c683
-rw-r--r--fs/cifs/ioctl.c8
-rw-r--r--fs/cifs/misc.c66
-rw-r--r--fs/cifs/readdir.c15
-rw-r--r--fs/cifs/smb1ops.c154
-rw-r--r--fs/cifs/smb2ops.c27
-rw-r--r--fs/cifs/transport.c76
-rw-r--r--fs/coda/inode.c2
-rw-r--r--fs/debugfs/file.c128
-rw-r--r--fs/ecryptfs/super.c2
-rw-r--r--fs/exofs/Kbuild2
-rw-r--r--fs/exofs/exofs.h14
-rw-r--r--fs/exofs/inode.c4
-rw-r--r--fs/exofs/super.c16
-rw-r--r--fs/exofs/sys.c200
-rw-r--r--fs/ext2/balloc.c4
-rw-r--r--fs/ext2/ialloc.c2
-rw-r--r--fs/ext2/inode.c2
-rw-r--r--fs/ext2/super.c18
-rw-r--r--fs/ext2/xattr.c1
-rw-r--r--fs/ext3/dir.c167
-rw-r--r--fs/ext3/ext3.h6
-rw-r--r--fs/ext3/hash.c4
-rw-r--r--fs/ext3/ialloc.c20
-rw-r--r--fs/ext3/inode.c6
-rw-r--r--fs/ext3/super.c6
-rw-r--r--fs/ext4/super.c8
-rw-r--r--fs/fat/inode.c2
-rw-r--r--fs/freevxfs/vxfs_inode.c2
-rw-r--r--fs/fs-writeback.c336
-rw-r--r--fs/fuse/inode.c2
-rw-r--r--fs/gfs2/super.c2
-rw-r--r--fs/hfs/inode.c2
-rw-r--r--fs/hfsplus/super.c2
-rw-r--r--fs/hostfs/hostfs_kern.c2
-rw-r--r--fs/hpfs/inode.c2
-rw-r--r--fs/hppfs/hppfs.c2
-rw-r--r--fs/hugetlbfs/inode.c2
-rw-r--r--fs/inode.c15
-rw-r--r--fs/jbd/checkpoint.c23
-rw-r--r--fs/jbd/commit.c21
-rw-r--r--fs/jbd/journal.c206
-rw-r--r--fs/jbd/transaction.c2
-rw-r--r--fs/jffs2/fs.c2
-rw-r--r--fs/jfs/inode.c2
-rw-r--r--fs/logfs/readwrite.c2
-rw-r--r--fs/minix/inode.c2
-rw-r--r--fs/namei.c22
-rw-r--r--fs/ncpfs/inode.c2
-rw-r--r--fs/nfs/Kconfig11
-rw-r--r--fs/nfs/Makefile5
-rw-r--r--fs/nfs/blocklayout/blocklayout.c90
-rw-r--r--fs/nfs/blocklayout/blocklayoutdev.c2
-rw-r--r--fs/nfs/client.c268
-rw-r--r--fs/nfs/delegation.c16
-rw-r--r--fs/nfs/delegation.h1
-rw-r--r--fs/nfs/dir.c39
-rw-r--r--fs/nfs/direct.c746
-rw-r--r--fs/nfs/file.c8
-rw-r--r--fs/nfs/fscache.c15
-rw-r--r--fs/nfs/fscache.h10
-rw-r--r--fs/nfs/getroot.c85
-rw-r--r--fs/nfs/idmap.c30
-rw-r--r--fs/nfs/inode.c122
-rw-r--r--fs/nfs/internal.h135
-rw-r--r--fs/nfs/namespace.c103
-rw-r--r--fs/nfs/netns.h5
-rw-r--r--fs/nfs/nfs2xdr.c5
-rw-r--r--fs/nfs/nfs3proc.c27
-rw-r--r--fs/nfs/nfs3xdr.c112
-rw-r--r--fs/nfs/nfs4_fs.h23
-rw-r--r--fs/nfs/nfs4filelayout.c688
-rw-r--r--fs/nfs/nfs4filelayout.h63
-rw-r--r--fs/nfs/nfs4filelayoutdev.c102
-rw-r--r--fs/nfs/nfs4namespace.c55
-rw-r--r--fs/nfs/nfs4proc.c537
-rw-r--r--fs/nfs/nfs4renewd.c2
-rw-r--r--fs/nfs/nfs4state.c225
-rw-r--r--fs/nfs/nfs4xdr.c399
-rw-r--r--fs/nfs/objlayout/objio_osd.c18
-rw-r--r--fs/nfs/objlayout/objlayout.c19
-rw-r--r--fs/nfs/pagelist.c61
-rw-r--r--fs/nfs/pnfs.c352
-rw-r--r--fs/nfs/pnfs.h127
-rw-r--r--fs/nfs/proc.c21
-rw-r--r--fs/nfs/read.c437
-rw-r--r--fs/nfs/super.c760
-rw-r--r--fs/nfs/write.c809
-rw-r--r--fs/nilfs2/inode.c4
-rw-r--r--fs/ntfs/inode.c2
-rw-r--r--fs/ocfs2/dlmfs/dlmfs.c2
-rw-r--r--fs/ocfs2/inode.c2
-rw-r--r--fs/omfs/inode.c2
-rw-r--r--fs/proc/base.c5
-rw-r--r--fs/proc/inode.c2
-rw-r--r--fs/proc/task_mmu.c2
-rw-r--r--fs/pstore/inode.c2
-rw-r--r--fs/quota/dquot.c32
-rw-r--r--fs/reiserfs/inode.c4
-rw-r--r--fs/reiserfs/super.c6
-rw-r--r--fs/sysfs/inode.c2
-rw-r--r--fs/sysv/inode.c2
-rw-r--r--fs/ubifs/super.c2
-rw-r--r--fs/udf/inode.c2
-rw-r--r--fs/ufs/inode.c2
-rw-r--r--fs/xfs/xfs_super.c2
-rw-r--r--include/asm-generic/dma-coherent.h4
-rw-r--r--include/asm-generic/dma-contiguous.h28
-rw-r--r--include/asm-generic/kvm_para.h22
-rw-r--r--include/asm-generic/pgtable.h27
-rw-r--r--include/asm-generic/word-at-a-time.h52
-rw-r--r--include/linux/Kbuild1
-rw-r--r--include/linux/amba/pl08x.h3
-rw-r--r--include/linux/apple_bl.h2
-rw-r--r--include/linux/bootmem.h3
-rw-r--r--include/linux/bug.h7
-rw-r--r--include/linux/clk-private.h99
-rw-r--r--include/linux/clk-provider.h120
-rw-r--r--include/linux/clk.h6
-rw-r--r--include/linux/compaction.h19
-rw-r--r--include/linux/debugfs.h11
-rw-r--r--include/linux/device.h4
-rw-r--r--include/linux/dma-buf.h33
-rw-r--r--include/linux/dma-contiguous.h110
-rw-r--r--include/linux/dmaengine.h6
-rw-r--r--include/linux/edac.h182
-rw-r--r--include/linux/fb.h4
-rw-r--r--include/linux/fs.h14
-rw-r--r--include/linux/fsl/mxs-dma.h12
-rw-r--r--include/linux/gfp.h12
-rw-r--r--include/linux/huge_mm.h2
-rw-r--r--include/linux/hugetlb.h8
-rw-r--r--include/linux/i2c-mux-gpio.h (renamed from include/linux/gpio-i2cmux.h)14
-rw-r--r--include/linux/i2c-mux.h3
-rw-r--r--include/linux/i2c.h1
-rw-r--r--include/linux/jbd.h18
-rw-r--r--include/linux/kallsyms.h7
-rw-r--r--include/linux/kernel-page-flags.h4
-rw-r--r--include/linux/kvm.h42
-rw-r--r--include/linux/kvm_host.h55
-rw-r--r--include/linux/lcd.h10
-rw-r--r--include/linux/led-lm3530.h2
-rw-r--r--include/linux/leds.h2
-rw-r--r--include/linux/memcontrol.h69
-rw-r--r--include/linux/mempolicy.h9
-rw-r--r--include/linux/mfd/abx500/ab8500.h18
-rw-r--r--include/linux/mfd/anatop.h4
-rw-r--r--include/linux/mfd/asic3.h2
-rw-r--r--include/linux/mfd/da9052/da9052.h19
-rw-r--r--include/linux/mfd/lm3533.h104
-rw-r--r--include/linux/mfd/lpc_ich.h48
-rw-r--r--include/linux/mfd/max77693-private.h227
-rw-r--r--include/linux/mfd/max77693.h36
-rw-r--r--include/linux/mfd/sta2x11-mfd.h324
-rw-r--r--include/linux/mfd/stmpe.h2
-rw-r--r--include/linux/mfd/tps65910.h49
-rw-r--r--include/linux/mfd/twl6040.h2
-rw-r--r--include/linux/mfd/wm831x/core.h12
-rw-r--r--include/linux/mfd/wm8350/core.h9
-rw-r--r--include/linux/mfd/wm8400-private.h14
-rw-r--r--include/linux/mfd/wm8994/core.h1
-rw-r--r--include/linux/mfd/wm8994/registers.h3
-rw-r--r--include/linux/mm.h6
-rw-r--r--include/linux/mm_inline.h24
-rw-r--r--include/linux/mm_types.h11
-rw-r--r--include/linux/mmc/card.h4
-rw-r--r--include/linux/mmc/dw_mmc.h1
-rw-r--r--include/linux/mmc/host.h2
-rw-r--r--include/linux/mmc/mmc.h60
-rw-r--r--include/linux/mmc/mxs-mmc.h (renamed from arch/arm/mach-mxs/include/mach/mmc.h)7
-rw-r--r--include/linux/mmdebug.h2
-rw-r--r--include/linux/mmzone.h100
-rw-r--r--include/linux/mv643xx_eth.h1
-rw-r--r--include/linux/nfs4.h13
-rw-r--r--include/linux/nfs_fs.h31
-rw-r--r--include/linux/nfs_fs_sb.h17
-rw-r--r--include/linux/nfs_page.h20
-rw-r--r--include/linux/nfs_xdr.h210
-rw-r--r--include/linux/of_i2c.h4
-rw-r--r--include/linux/of_pci.h2
-rw-r--r--include/linux/oom.h5
-rw-r--r--include/linux/page-isolation.h18
-rw-r--r--include/linux/pagemap.h8
-rw-r--r--include/linux/pci.h5
-rw-r--r--include/linux/pci_ids.h1
-rw-r--r--include/linux/res_counter.h5
-rw-r--r--include/linux/rmap.h2
-rw-r--r--include/linux/rtc.h3
-rw-r--r--include/linux/rtc/ds1307.h22
-rw-r--r--include/linux/spi/orion_spi.h17
-rw-r--r--include/linux/stmp_device.h20
-rw-r--r--include/linux/swap.h56
-rw-r--r--include/linux/writeback.h10
-rw-r--r--include/net/sock.h22
-rw-r--r--include/trace/events/jbd.h39
-rw-r--r--include/trace/events/vmscan.h122
-rw-r--r--include/trace/events/writeback.h36
-rw-r--r--include/xen/acpi.h58
-rw-r--r--include/xen/events.h3
-rw-r--r--include/xen/grant_table.h2
-rw-r--r--include/xen/xenbus_dev.h3
-rw-r--r--ipc/mqueue.c2
-rw-r--r--kernel/cgroup.c20
-rw-r--r--kernel/fork.c12
-rw-r--r--kernel/kallsyms.c32
-rw-r--r--kernel/res_counter.c10
-rw-r--r--kernel/watchdog.c12
-rw-r--r--lib/Kconfig6
-rw-r--r--lib/Makefile3
-rw-r--r--lib/bitmap.c12
-rw-r--r--lib/list_debug.c3
-rw-r--r--lib/radix-tree.c15
-rw-r--r--lib/spinlock_debug.c2
-rw-r--r--lib/stmp_device.c80
-rw-r--r--lib/string_helpers.c8
-rw-r--r--lib/strncpy_from_user.c47
-rw-r--r--lib/strnlen_user.c138
-rw-r--r--lib/swiotlb.c8
-rw-r--r--lib/test-kstrtox.c4
-rw-r--r--lib/vsprintf.c14
-rw-r--r--mm/Kconfig12
-rw-r--r--mm/Makefile12
-rw-r--r--mm/bootmem.c134
-rw-r--r--mm/compaction.c547
-rw-r--r--mm/filemap.c39
-rw-r--r--mm/huge_memory.c29
-rw-r--r--mm/hugetlb.c33
-rw-r--r--mm/internal.h45
-rw-r--r--mm/madvise.c15
-rw-r--r--mm/memblock.c42
-rw-r--r--mm/memcontrol.c642
-rw-r--r--mm/memory-failure.c10
-rw-r--r--mm/memory.c20
-rw-r--r--mm/memory_hotplug.c20
-rw-r--r--mm/mempolicy.c36
-rw-r--r--mm/mmap.c53
-rw-r--r--mm/mmzone.c14
-rw-r--r--mm/nobootmem.c112
-rw-r--r--mm/oom_kill.c44
-rw-r--r--mm/page-writeback.c3
-rw-r--r--mm/page_alloc.c485
-rw-r--r--mm/page_isolation.c15
-rw-r--r--mm/pgtable-generic.c4
-rw-r--r--mm/readahead.c40
-rw-r--r--mm/rmap.c6
-rw-r--r--mm/shmem.c515
-rw-r--r--mm/sparse.c25
-rw-r--r--mm/swap.c129
-rw-r--r--mm/swapfile.c33
-rw-r--r--mm/thrash.c155
-rw-r--r--mm/truncate.c25
-rw-r--r--mm/vmalloc.c7
-rw-r--r--mm/vmscan.c738
-rw-r--r--mm/vmstat.c13
-rw-r--r--net/ipv4/tcp_memcontrol.c34
-rw-r--r--net/sunrpc/clnt.c2
-rw-r--r--net/sunrpc/rpc_pipe.c10
-rw-r--r--net/sunrpc/rpcb_clnt.c2
-rw-r--r--net/sunrpc/xprt.c7
-rw-r--r--scripts/coccinelle/misc/ifaddr.cocci35
-rw-r--r--scripts/coccinelle/misc/noderef.cocci65
-rwxr-xr-xscripts/config11
-rw-r--r--scripts/kconfig/conf.c22
-rw-r--r--scripts/link-vmlinux.sh221
-rw-r--r--scripts/package/builddeb2
-rw-r--r--sound/core/pcm_lib.c23
-rw-r--r--sound/pci/hda/hda_codec.c66
-rw-r--r--sound/pci/hda/hda_codec.h3
-rw-r--r--sound/pci/hda/hda_intel.c314
-rw-r--r--sound/pci/hda/patch_realtek.c38
-rw-r--r--sound/soc/kirkwood/kirkwood-i2s.c13
-rw-r--r--sound/soc/kirkwood/kirkwood.h1
-rw-r--r--sound/soc/omap/Kconfig7
-rw-r--r--sound/soc/omap/Makefile4
-rw-r--r--sound/soc/omap/mcbsp.c115
-rw-r--r--sound/soc/omap/mcbsp.h8
-rw-r--r--sound/soc/omap/omap-abe-twl6040.c68
-rw-r--r--sound/soc/omap/omap-dmic.c8
-rw-r--r--sound/soc/omap/omap-hdmi-card.c87
-rw-r--r--sound/soc/omap/omap-hdmi.c238
-rw-r--r--sound/soc/omap/omap-hdmi.h4
-rw-r--r--sound/soc/omap/omap-mcbsp.c45
-rw-r--r--sound/soc/omap/omap-mcpdm.c8
-rw-r--r--sound/soc/omap/omap4-hdmi-card.c121
-rw-r--r--tools/vm/page-types.c50
-rw-r--r--virt/kvm/Kconfig3
-rw-r--r--virt/kvm/ioapic.c10
-rw-r--r--virt/kvm/ioapic.h1
-rw-r--r--virt/kvm/irq_comm.c14
-rw-r--r--virt/kvm/kvm_main.c132
1600 files changed, 64655 insertions, 40393 deletions
diff --git a/Documentation/ABI/testing/sysfs-bus-i2c-devices-lm3533 b/Documentation/ABI/testing/sysfs-bus-i2c-devices-lm3533
new file mode 100644
index 000000000000..1b62230b33b9
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-bus-i2c-devices-lm3533
@@ -0,0 +1,15 @@
1What: /sys/bus/i2c/devices/.../output_hvled[n]
2Date: April 2012
3KernelVersion: 3.5
4Contact: Johan Hovold <jhovold@gmail.com>
5Description:
6 Set the controlling backlight device for high-voltage current
7 sink HVLED[n] (n = 1, 2) (0, 1).
8
9What: /sys/bus/i2c/devices/.../output_lvled[n]
10Date: April 2012
11KernelVersion: 3.5
12Contact: Johan Hovold <jhovold@gmail.com>
13Description:
14 Set the controlling led device for low-voltage current sink
15 LVLED[n] (n = 1..5) (0..3).
diff --git a/Documentation/ABI/testing/sysfs-class-backlight-driver-lm3533 b/Documentation/ABI/testing/sysfs-class-backlight-driver-lm3533
new file mode 100644
index 000000000000..77cf7ac949af
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-backlight-driver-lm3533
@@ -0,0 +1,48 @@
1What: /sys/class/backlight/<backlight>/als_channel
2Date: May 2012
3KernelVersion: 3.5
4Contact: Johan Hovold <jhovold@gmail.com>
5Description:
6 Get the ALS output channel used as input in
7 ALS-current-control mode (0, 1), where
8
9 0 - out_current0 (backlight 0)
10 1 - out_current1 (backlight 1)
11
12What: /sys/class/backlight/<backlight>/als_en
13Date: May 2012
14KernelVersion: 3.5
15Contact: Johan Hovold <jhovold@gmail.com>
16Description:
17 Enable ALS-current-control mode (0, 1).
18
19What: /sys/class/backlight/<backlight>/id
20Date: April 2012
21KernelVersion: 3.5
22Contact: Johan Hovold <jhovold@gmail.com>
23Description:
24 Get the id of this backlight (0, 1).
25
26What: /sys/class/backlight/<backlight>/linear
27Date: April 2012
28KernelVersion: 3.5
29Contact: Johan Hovold <jhovold@gmail.com>
30Description:
31 Set the brightness-mapping mode (0, 1), where
32
33 0 - exponential mode
34 1 - linear mode
35
36What: /sys/class/backlight/<backlight>/pwm
37Date: April 2012
38KernelVersion: 3.5
39Contact: Johan Hovold <jhovold@gmail.com>
40Description:
41 Set the PWM-input control mask (5 bits), where
42
43 bit 5 - PWM-input enabled in Zone 4
44 bit 4 - PWM-input enabled in Zone 3
45 bit 3 - PWM-input enabled in Zone 2
46 bit 2 - PWM-input enabled in Zone 1
47 bit 1 - PWM-input enabled in Zone 0
48 bit 0 - PWM-input enabled
diff --git a/Documentation/ABI/testing/sysfs-class-led-driver-lm3533 b/Documentation/ABI/testing/sysfs-class-led-driver-lm3533
new file mode 100644
index 000000000000..620ebb3b9baa
--- /dev/null
+++ b/Documentation/ABI/testing/sysfs-class-led-driver-lm3533
@@ -0,0 +1,65 @@
1What: /sys/class/leds/<led>/als_channel
2Date: May 2012
3KernelVersion: 3.5
4Contact: Johan Hovold <jhovold@gmail.com>
5Description:
6 Set the ALS output channel to use as input in
7 ALS-current-control mode (1, 2), where
8
9 1 - out_current1
10 2 - out_current2
11
12What: /sys/class/leds/<led>/als_en
13Date: May 2012
14KernelVersion: 3.5
15Contact: Johan Hovold <jhovold@gmail.com>
16Description:
17 Enable ALS-current-control mode (0, 1).
18
19What: /sys/class/leds/<led>/falltime
20What: /sys/class/leds/<led>/risetime
21Date: April 2012
22KernelVersion: 3.5
23Contact: Johan Hovold <jhovold@gmail.com>
24Description:
25 Set the pattern generator fall and rise times (0..7), where
26
27 0 - 2048 us
28 1 - 262 ms
29 2 - 524 ms
30 3 - 1.049 s
31 4 - 2.097 s
32 5 - 4.194 s
33 6 - 8.389 s
34 7 - 16.78 s
35
36What: /sys/class/leds/<led>/id
37Date: April 2012
38KernelVersion: 3.5
39Contact: Johan Hovold <jhovold@gmail.com>
40Description:
41 Get the id of this led (0..3).
42
43What: /sys/class/leds/<led>/linear
44Date: April 2012
45KernelVersion: 3.5
46Contact: Johan Hovold <jhovold@gmail.com>
47Description:
48 Set the brightness-mapping mode (0, 1), where
49
50 0 - exponential mode
51 1 - linear mode
52
53What: /sys/class/leds/<led>/pwm
54Date: April 2012
55KernelVersion: 3.5
56Contact: Johan Hovold <jhovold@gmail.com>
57Description:
58 Set the PWM-input control mask (5 bits), where
59
60 bit 5 - PWM-input enabled in Zone 4
61 bit 4 - PWM-input enabled in Zone 3
62 bit 3 - PWM-input enabled in Zone 2
63 bit 2 - PWM-input enabled in Zone 1
64 bit 1 - PWM-input enabled in Zone 0
65 bit 0 - PWM-input enabled
diff --git a/Documentation/SubmittingPatches b/Documentation/SubmittingPatches
index 4468ce24427c..c379a2a6949f 100644
--- a/Documentation/SubmittingPatches
+++ b/Documentation/SubmittingPatches
@@ -150,7 +150,8 @@ be able to justify all violations that remain in your patch.
150 150
151Look through the MAINTAINERS file and the source code, and determine 151Look through the MAINTAINERS file and the source code, and determine
152if your change applies to a specific subsystem of the kernel, with 152if your change applies to a specific subsystem of the kernel, with
153an assigned maintainer. If so, e-mail that person. 153an assigned maintainer. If so, e-mail that person. The script
154scripts/get_maintainer.pl can be very useful at this step.
154 155
155If no maintainer is listed, or the maintainer does not respond, send 156If no maintainer is listed, or the maintainer does not respond, send
156your patch to the primary Linux kernel developer's mailing list, 157your patch to the primary Linux kernel developer's mailing list,
diff --git a/Documentation/arm/SPEAr/overview.txt b/Documentation/arm/SPEAr/overview.txt
index 28a9af953b9d..57aae7765c74 100644
--- a/Documentation/arm/SPEAr/overview.txt
+++ b/Documentation/arm/SPEAr/overview.txt
@@ -8,9 +8,8 @@ Introduction
8 weblink : http://www.st.com/spear 8 weblink : http://www.st.com/spear
9 9
10 The ST Microelectronics SPEAr range of ARM9/CortexA9 System-on-Chip CPUs are 10 The ST Microelectronics SPEAr range of ARM9/CortexA9 System-on-Chip CPUs are
11 supported by the 'spear' platform of ARM Linux. Currently SPEAr300, 11 supported by the 'spear' platform of ARM Linux. Currently SPEAr1310,
12 SPEAr310, SPEAr320 and SPEAr600 SOCs are supported. Support for the SPEAr13XX 12 SPEAr1340, SPEAr300, SPEAr310, SPEAr320 and SPEAr600 SOCs are supported.
13 series is in progress.
14 13
15 Hierarchy in SPEAr is as follows: 14 Hierarchy in SPEAr is as follows:
16 15
@@ -26,33 +25,36 @@ Introduction
26 - SPEAr600 (SOC) 25 - SPEAr600 (SOC)
27 - SPEAr600 Evaluation Board 26 - SPEAr600 Evaluation Board
28 - SPEAr13XX (13XX SOC series, based on ARM CORTEXA9) 27 - SPEAr13XX (13XX SOC series, based on ARM CORTEXA9)
29 - SPEAr1300 (SOC) 28 - SPEAr1310 (SOC)
29 - SPEAr1310 Evaluation Board
30 - SPEAr1340 (SOC)
31 - SPEAr1340 Evaluation Board
30 32
31 Configuration 33 Configuration
32 ------------- 34 -------------
33 35
34 A generic configuration is provided for each machine, and can be used as the 36 A generic configuration is provided for each machine, and can be used as the
35 default by 37 default by
36 make spear600_defconfig 38 make spear13xx_defconfig
37 make spear300_defconfig 39 make spear3xx_defconfig
38 make spear310_defconfig 40 make spear6xx_defconfig
39 make spear320_defconfig
40 41
41 Layout 42 Layout
42 ------ 43 ------
43 44
44 The common files for multiple machine families (SPEAr3XX, SPEAr6XX and 45 The common files for multiple machine families (SPEAr3xx, SPEAr6xx and
45 SPEAr13XX) are located in the platform code contained in arch/arm/plat-spear 46 SPEAr13xx) are located in the platform code contained in arch/arm/plat-spear
46 with headers in plat/. 47 with headers in plat/.
47 48
48 Each machine series have a directory with name arch/arm/mach-spear followed by 49 Each machine series have a directory with name arch/arm/mach-spear followed by
49 series name. Like mach-spear3xx, mach-spear6xx and mach-spear13xx. 50 series name. Like mach-spear3xx, mach-spear6xx and mach-spear13xx.
50 51
51 Common file for machines of spear3xx family is mach-spear3xx/spear3xx.c and for 52 Common file for machines of spear3xx family is mach-spear3xx/spear3xx.c, for
52 spear6xx is mach-spear6xx/spear6xx.c. mach-spear* also contain soc/machine 53 spear6xx is mach-spear6xx/spear6xx.c and for spear13xx family is
53 specific files, like spear300.c, spear310.c, spear320.c and spear600.c. 54 mach-spear13xx/spear13xx.c. mach-spear* also contain soc/machine specific
54 mach-spear* doesn't contains board specific files as they fully support 55 files, like spear1310.c, spear1340.c spear300.c, spear310.c, spear320.c and
55 Flattened Device Tree. 56 spear600.c. mach-spear* doesn't contains board specific files as they fully
57 support Flattened Device Tree.
56 58
57 59
58 Document Author 60 Document Author
diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt
index 9b1067afb224..dd88540bb995 100644
--- a/Documentation/cgroups/memory.txt
+++ b/Documentation/cgroups/memory.txt
@@ -184,12 +184,14 @@ behind this approach is that a cgroup that aggressively uses a shared
184page will eventually get charged for it (once it is uncharged from 184page will eventually get charged for it (once it is uncharged from
185the cgroup that brought it in -- this will happen on memory pressure). 185the cgroup that brought it in -- this will happen on memory pressure).
186 186
187But see section 8.2: when moving a task to another cgroup, its pages may
188be recharged to the new cgroup, if move_charge_at_immigrate has been chosen.
189
187Exception: If CONFIG_CGROUP_CGROUP_MEM_RES_CTLR_SWAP is not used. 190Exception: If CONFIG_CGROUP_CGROUP_MEM_RES_CTLR_SWAP is not used.
188When you do swapoff and make swapped-out pages of shmem(tmpfs) to 191When you do swapoff and make swapped-out pages of shmem(tmpfs) to
189be backed into memory in force, charges for pages are accounted against the 192be backed into memory in force, charges for pages are accounted against the
190caller of swapoff rather than the users of shmem. 193caller of swapoff rather than the users of shmem.
191 194
192
1932.4 Swap Extension (CONFIG_CGROUP_MEM_RES_CTLR_SWAP) 1952.4 Swap Extension (CONFIG_CGROUP_MEM_RES_CTLR_SWAP)
194 196
195Swap Extension allows you to record charge for swap. A swapped-in page is 197Swap Extension allows you to record charge for swap. A swapped-in page is
@@ -374,14 +376,15 @@ cgroup might have some charge associated with it, even though all
374tasks have migrated away from it. (because we charge against pages, not 376tasks have migrated away from it. (because we charge against pages, not
375against tasks.) 377against tasks.)
376 378
377Such charges are freed or moved to their parent. At moving, both of RSS 379We move the stats to root (if use_hierarchy==0) or parent (if
378and CACHES are moved to parent. 380use_hierarchy==1), and no change on the charge except uncharging
379rmdir() may return -EBUSY if freeing/moving fails. See 5.1 also. 381from the child.
380 382
381Charges recorded in swap information is not updated at removal of cgroup. 383Charges recorded in swap information is not updated at removal of cgroup.
382Recorded information is discarded and a cgroup which uses swap (swapcache) 384Recorded information is discarded and a cgroup which uses swap (swapcache)
383will be charged as a new owner of it. 385will be charged as a new owner of it.
384 386
387About use_hierarchy, see Section 6.
385 388
3865. Misc. interfaces. 3895. Misc. interfaces.
387 390
@@ -394,13 +397,15 @@ will be charged as a new owner of it.
394 397
395 Almost all pages tracked by this memory cgroup will be unmapped and freed. 398 Almost all pages tracked by this memory cgroup will be unmapped and freed.
396 Some pages cannot be freed because they are locked or in-use. Such pages are 399 Some pages cannot be freed because they are locked or in-use. Such pages are
397 moved to parent and this cgroup will be empty. This may return -EBUSY if 400 moved to parent(if use_hierarchy==1) or root (if use_hierarchy==0) and this
398 VM is too busy to free/move all pages immediately. 401 cgroup will be empty.
399 402
400 Typical use case of this interface is that calling this before rmdir(). 403 Typical use case of this interface is that calling this before rmdir().
401 Because rmdir() moves all pages to parent, some out-of-use page caches can be 404 Because rmdir() moves all pages to parent, some out-of-use page caches can be
402 moved to the parent. If you want to avoid that, force_empty will be useful. 405 moved to the parent. If you want to avoid that, force_empty will be useful.
403 406
407 About use_hierarchy, see Section 6.
408
4045.2 stat file 4095.2 stat file
405 410
406memory.stat file includes following statistics 411memory.stat file includes following statistics
@@ -430,17 +435,10 @@ hierarchical_memory_limit - # of bytes of memory limit with regard to hierarchy
430hierarchical_memsw_limit - # of bytes of memory+swap limit with regard to 435hierarchical_memsw_limit - # of bytes of memory+swap limit with regard to
431 hierarchy under which memory cgroup is. 436 hierarchy under which memory cgroup is.
432 437
433total_cache - sum of all children's "cache" 438total_<counter> - # hierarchical version of <counter>, which in
434total_rss - sum of all children's "rss" 439 addition to the cgroup's own value includes the
435total_mapped_file - sum of all children's "cache" 440 sum of all hierarchical children's values of
436total_pgpgin - sum of all children's "pgpgin" 441 <counter>, i.e. total_cache
437total_pgpgout - sum of all children's "pgpgout"
438total_swap - sum of all children's "swap"
439total_inactive_anon - sum of all children's "inactive_anon"
440total_active_anon - sum of all children's "active_anon"
441total_inactive_file - sum of all children's "inactive_file"
442total_active_file - sum of all children's "active_file"
443total_unevictable - sum of all children's "unevictable"
444 442
445# The following additional stats are dependent on CONFIG_DEBUG_VM. 443# The following additional stats are dependent on CONFIG_DEBUG_VM.
446 444
@@ -622,8 +620,7 @@ memory cgroup.
622 bit | what type of charges would be moved ? 620 bit | what type of charges would be moved ?
623 -----+------------------------------------------------------------------------ 621 -----+------------------------------------------------------------------------
624 0 | A charge of an anonymous page(or swap of it) used by the target task. 622 0 | A charge of an anonymous page(or swap of it) used by the target task.
625 | Those pages and swaps must be used only by the target task. You must 623 | You must enable Swap Extension(see 2.4) to enable move of swap charges.
626 | enable Swap Extension(see 2.4) to enable move of swap charges.
627 -----+------------------------------------------------------------------------ 624 -----+------------------------------------------------------------------------
628 1 | A charge of file pages(normal file, tmpfs file(e.g. ipc shared memory) 625 1 | A charge of file pages(normal file, tmpfs file(e.g. ipc shared memory)
629 | and swaps of tmpfs file) mmapped by the target task. Unlike the case of 626 | and swaps of tmpfs file) mmapped by the target task. Unlike the case of
@@ -636,8 +633,6 @@ memory cgroup.
636 633
6378.3 TODO 6348.3 TODO
638 635
639- Implement madvise(2) to let users decide the vma to be moved or not to be
640 moved.
641- All of moving charge operations are done under cgroup_mutex. It's not good 636- All of moving charge operations are done under cgroup_mutex. It's not good
642 behavior to hold the mutex too long, so we may need some trick. 637 behavior to hold the mutex too long, so we may need some trick.
643 638
diff --git a/Documentation/cgroups/resource_counter.txt b/Documentation/cgroups/resource_counter.txt
index f3c4ec3626a2..0c4a344e78fa 100644
--- a/Documentation/cgroups/resource_counter.txt
+++ b/Documentation/cgroups/resource_counter.txt
@@ -92,6 +92,14 @@ to work with it.
92 92
93 The _locked routines imply that the res_counter->lock is taken. 93 The _locked routines imply that the res_counter->lock is taken.
94 94
95 f. void res_counter_uncharge_until
96 (struct res_counter *rc, struct res_counter *top,
 97 unsigned long val)
98
 99 Almost the same as res_counter_uncharge() but propagation of uncharge
 100 stops when rc == top. This is useful when killing a res_counter in a
 101 child cgroup.
102
95 2.1 Other accounting routines 103 2.1 Other accounting routines
96 104
97 There are more routines that may help you with common needs, like 105 There are more routines that may help you with common needs, like
diff --git a/Documentation/cris/README b/Documentation/cris/README
index d9b086869a60..8dbdb1a44429 100644
--- a/Documentation/cris/README
+++ b/Documentation/cris/README
@@ -1,38 +1,34 @@
1Linux 2.4 on the CRIS architecture 1Linux on the CRIS architecture
2================================== 2==============================
3$Id: README,v 1.7 2001/04/19 12:38:32 bjornw Exp $
4 3
5This is a port of Linux 2.4 to Axis Communications ETRAX 100LX embedded 4This is a port of Linux to Axis Communications ETRAX 100LX,
6network CPU. For more information about CRIS and ETRAX please see further 5ETRAX FS and ARTPEC-3 embedded network CPUs.
7below. 6
7For more information about CRIS and ETRAX please see further below.
8 8
9In order to compile this you need a version of gcc with support for the 9In order to compile this you need a version of gcc with support for the
10ETRAX chip family. Please see this link for more information on how to 10ETRAX chip family. Please see this link for more information on how to
11download the compiler and other tools useful when building and booting 11download the compiler and other tools useful when building and booting
12software for the ETRAX platform: 12software for the ETRAX platform:
13 13
14http://developer.axis.com/doc/software/devboard_lx/install-howto.html 14http://developer.axis.com/wiki/doku.php?id=axis:install-howto-2_20
15
16<more specific information should come in this document later>
17 15
18What is CRIS ? 16What is CRIS ?
19-------------- 17--------------
20 18
21CRIS is an acronym for 'Code Reduced Instruction Set'. It is the CPU 19CRIS is an acronym for 'Code Reduced Instruction Set'. It is the CPU
22architecture in Axis Communication AB's range of embedded network CPU's, 20architecture in Axis Communication AB's range of embedded network CPU's,
23called ETRAX. The latest CPU is called ETRAX 100LX, where LX stands for 21called ETRAX.
24'Linux' because the chip was designed to be a good host for the Linux
25operating system.
26 22
27The ETRAX 100LX chip 23The ETRAX 100LX chip
28-------------------- 24--------------------
29 25
30For reference, please see the press-release: 26For reference, please see the following link:
31 27
32http://www.axis.com/news/us/001101_etrax.htm 28http://www.axis.com/products/dev_etrax_100lx/index.htm
33 29
34The ETRAX 100LX is a 100 MIPS processor with 8kB cache, MMU, and a very broad 30The ETRAX 100LX is a 100 MIPS processor with 8kB cache, MMU, and a very broad
35range of built-in interfaces, all with modern scatter/gather DMA. 31range of built-in interfaces, all with modern scatter/gather DMA.
36 32
37Memory interfaces: 33Memory interfaces:
38 34
@@ -51,20 +47,28 @@ I/O interfaces:
51 * SCSI 47 * SCSI
52 * two parallel-ports 48 * two parallel-ports
53 * two generic 8-bit ports 49 * two generic 8-bit ports
54 50
55 (not all interfaces are available at the same time due to chip pin 51 (not all interfaces are available at the same time due to chip pin
56 multiplexing) 52 multiplexing)
57 53
58The previous version of the ETRAX, the ETRAX 100, sits in almost all of 54ETRAX 100LX is CRISv10 architecture.
59Axis shipping thin-servers like the Axis 2100 web camera or the ETRAX 100 55
60developer-board. It lacks an MMU so the Linux we run on that is a version 56
61of uClinux (Linux 2.0 without MM-support) ported to the CRIS architecture. 57The ETRAX FS and ARTPEC-3 chips
62The new Linux 2.4 port has full MM and needs a CPU with an MMU, so it will 58-------------------------------
63not run on the ETRAX 100.
64 59
65A version of the Axis developer-board with ETRAX 100LX (running Linux 60The ETRAX FS is a 200MHz 32-bit RISC processor with on-chip 16kB
662.4) is now available. For more information please see developer.axis.com. 61I-cache and 16kB D-cache and with a wide range of device interfaces
62including multiple high speed serial ports and an integrated USB 1.1 PHY.
67 63
64The ARTPEC-3 is a variant of the ETRAX FS with additional IO-units
65used by the Axis Communications network cameras.
66
67See below link for more information:
68
69http://www.axis.com/products/dev_etrax_fs/index.htm
70
71ETRAX FS and ARTPEC-3 are both CRISv32 architectures.
68 72
69Bootlog 73Bootlog
70------- 74-------
@@ -182,10 +186,6 @@ SwapFree: 0 kB
182-rwxr-xr-x 1 342 100 16252 Jan 01 00:00 telnetd 186-rwxr-xr-x 1 342 100 16252 Jan 01 00:00 telnetd
183 187
184 188
185(All programs are statically linked to the libc at this point - we have not ported the
186 shared libraries yet)
187
188
189 189
190 190
191 191
diff --git a/Documentation/devicetree/bindings/arm/fsl.txt b/Documentation/devicetree/bindings/arm/fsl.txt
index bfbc771a65f8..ac9e7516756e 100644
--- a/Documentation/devicetree/bindings/arm/fsl.txt
+++ b/Documentation/devicetree/bindings/arm/fsl.txt
@@ -1,6 +1,14 @@
1Freescale i.MX Platforms Device Tree Bindings 1Freescale i.MX Platforms Device Tree Bindings
2----------------------------------------------- 2-----------------------------------------------
3 3
4i.MX23 Evaluation Kit
5Required root node properties:
6 - compatible = "fsl,imx23-evk", "fsl,imx23";
7
8i.MX28 Evaluation Kit
9Required root node properties:
10 - compatible = "fsl,imx28-evk", "fsl,imx28";
11
4i.MX51 Babbage Board 12i.MX51 Babbage Board
5Required root node properties: 13Required root node properties:
6 - compatible = "fsl,imx51-babbage", "fsl,imx51"; 14 - compatible = "fsl,imx51-babbage", "fsl,imx51";
@@ -29,6 +37,10 @@ i.MX6 Quad SABRE Lite Board
29Required root node properties: 37Required root node properties:
30 - compatible = "fsl,imx6q-sabrelite", "fsl,imx6q"; 38 - compatible = "fsl,imx6q-sabrelite", "fsl,imx6q";
31 39
40i.MX6 Quad SABRE Smart Device Board
41Required root node properties:
42 - compatible = "fsl,imx6q-sabresd", "fsl,imx6q";
43
32Generic i.MX boards 44Generic i.MX boards
33------------------- 45-------------------
34 46
diff --git a/Documentation/devicetree/bindings/arm/samsung/interrupt-combiner.txt b/Documentation/devicetree/bindings/arm/samsung/interrupt-combiner.txt
new file mode 100644
index 000000000000..f2f2171e530e
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/samsung/interrupt-combiner.txt
@@ -0,0 +1,52 @@
1* Samsung Exynos Interrupt Combiner Controller
2
 3Samsung's Exynos4 architecture includes an interrupt combiner controller which
4can combine interrupt sources as a group and provide a single interrupt request
 5for the group. The interrupt request from each group is connected to a parent
6interrupt controller, such as GIC in case of Exynos4210.
7
 8The interrupt combiner controller consists of multiple combiners. Up to eight
9interrupt sources can be connected to a combiner. The combiner outputs one
10combined interrupt for its eight interrupt sources. The combined interrupt
11is usually connected to a parent interrupt controller.
12
13A single node in the device tree is used to describe the interrupt combiner
14controller module (which includes multiple combiners). A combiner in the
15interrupt controller module shares config/control registers with other
16combiners. For example, a 32-bit interrupt enable/disable config register
 17can accommodate up to 4 interrupt combiners (with each combiner supporting
 18up to 8 interrupt sources).
19
20Required properties:
21- compatible: should be "samsung,exynos4210-combiner".
22- interrupt-controller: Identifies the node as an interrupt controller.
23- #interrupt-cells: should be <2>. The meaning of the cells are
24 * First Cell: Combiner Group Number.
25 * Second Cell: Interrupt number within the group.
26- reg: Base address and size of interrupt combiner registers.
27- interrupts: The list of interrupts generated by the combiners which are then
28 connected to a parent interrupt controller. The format of the interrupt
 29 specifier depends on the interrupt parent controller.
30
31Optional properties:
32- samsung,combiner-nr: The number of interrupt combiners supported. If this
33 property is not specified, the default number of combiners is assumed
34 to be 16.
 35- interrupt-parent: phandle of the parent interrupt controller, if not
36 inherited from the parent node.
37
38
39Example:
40
 41 The following is an example from the Exynos4210 SoC dtsi file.
42
43 combiner:interrupt-controller@10440000 {
44 compatible = "samsung,exynos4210-combiner";
45 interrupt-controller;
46 #interrupt-cells = <2>;
47 reg = <0x10440000 0x1000>;
48 interrupts = <0 0 0>, <0 1 0>, <0 2 0>, <0 3 0>,
49 <0 4 0>, <0 5 0>, <0 6 0>, <0 7 0>,
50 <0 8 0>, <0 9 0>, <0 10 0>, <0 11 0>,
51 <0 12 0>, <0 13 0>, <0 14 0>, <0 15 0>;
52 };
diff --git a/Documentation/devicetree/bindings/arm/spear-timer.txt b/Documentation/devicetree/bindings/arm/spear-timer.txt
new file mode 100644
index 000000000000..c0017221cf55
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/spear-timer.txt
@@ -0,0 +1,18 @@
1* SPEAr ARM Timer
2
3** Timer node required properties:
4
5- compatible : Should be:
6 "st,spear-timer"
7- reg: Address range of the timer registers
8- interrupt-parent: Should be the phandle for the interrupt controller
9 that services interrupts for this device
10- interrupt: Should contain the timer interrupt number
11
12Example:
13
14 timer@f0000000 {
15 compatible = "st,spear-timer";
16 reg = <0xf0000000 0x400>;
17 interrupts = <2>;
18 };
diff --git a/Documentation/devicetree/bindings/arm/spear.txt b/Documentation/devicetree/bindings/arm/spear.txt
index aa5f355cc947..0d42949df6c2 100644
--- a/Documentation/devicetree/bindings/arm/spear.txt
+++ b/Documentation/devicetree/bindings/arm/spear.txt
@@ -2,25 +2,25 @@ ST SPEAr Platforms Device Tree Bindings
2--------------------------------------- 2---------------------------------------
3 3
4Boards with the ST SPEAr600 SoC shall have the following properties: 4Boards with the ST SPEAr600 SoC shall have the following properties:
5
6Required root node property: 5Required root node property:
7
8compatible = "st,spear600"; 6compatible = "st,spear600";
9 7
10Boards with the ST SPEAr300 SoC shall have the following properties: 8Boards with the ST SPEAr300 SoC shall have the following properties:
11
12Required root node property: 9Required root node property:
13
14compatible = "st,spear300"; 10compatible = "st,spear300";
15 11
16Boards with the ST SPEAr310 SoC shall have the following properties: 12Boards with the ST SPEAr310 SoC shall have the following properties:
17
18Required root node property: 13Required root node property:
19
20compatible = "st,spear310"; 14compatible = "st,spear310";
21 15
22Boards with the ST SPEAr320 SoC shall have the following properties: 16Boards with the ST SPEAr320 SoC shall have the following properties:
17Required root node property:
18compatible = "st,spear320";
23 19
20Boards with the ST SPEAr1310 SoC shall have the following properties:
24Required root node property: 21Required root node property:
22compatible = "st,spear1310";
25 23
26compatible = "st,spear320"; 24Boards with the ST SPEAr1340 SoC shall have the following properties:
25Required root node property:
26compatible = "st,spear1340";
diff --git a/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt
new file mode 100644
index 000000000000..234406d41c12
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/tegra/nvidia,tegra20-ahb.txt
@@ -0,0 +1,11 @@
1NVIDIA Tegra AHB
2
3Required properties:
4- compatible : "nvidia,tegra20-ahb" or "nvidia,tegra30-ahb"
 5- reg : Should contain 1 register range (address and length)
6
7Example:
8 ahb: ahb@6000c004 {
9 compatible = "nvidia,tegra20-ahb";
10 reg = <0x6000c004 0x10c>; /* AHB Arbitration + Gizmo Controller */
11 };
diff --git a/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
new file mode 100644
index 000000000000..ded0398d3bdc
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
@@ -0,0 +1,19 @@
1* Freescale MXS DMA
2
3Required properties:
4- compatible : Should be "fsl,<chip>-dma-apbh" or "fsl,<chip>-dma-apbx"
5- reg : Should contain registers location and length
6
7Supported chips:
8imx23, imx28.
9
10Examples:
11dma-apbh@80004000 {
12 compatible = "fsl,imx28-dma-apbh";
13 reg = <0x80004000 0x2000>;
14};
15
16dma-apbx@80024000 {
17 compatible = "fsl,imx28-dma-apbx";
18 reg = <0x80024000 0x2000>;
19};
diff --git a/Documentation/devicetree/bindings/dma/snps-dma.txt b/Documentation/devicetree/bindings/dma/snps-dma.txt
new file mode 100644
index 000000000000..c0d85dbcada5
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/snps-dma.txt
@@ -0,0 +1,17 @@
1* Synopsys Designware DMA Controller
2
3Required properties:
4- compatible: "snps,dma-spear1340"
5- reg: Address range of the DMAC registers
6- interrupt-parent: Should be the phandle for the interrupt controller
7 that services interrupts for this device
8- interrupts: Should contain the DMAC interrupt number
9
10Example:
11
12 dma@fc000000 {
13 compatible = "snps,dma-spear1340";
14 reg = <0xfc000000 0x1000>;
15 interrupt-parent = <&vic1>;
16 interrupts = <12>;
17 };
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mm-lantiq.txt b/Documentation/devicetree/bindings/gpio/gpio-mm-lantiq.txt
new file mode 100644
index 000000000000..f93d51478d5a
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-mm-lantiq.txt
@@ -0,0 +1,38 @@
1Lantiq SoC External Bus memory mapped GPIO controller
2
3By attaching hardware latches to the EBU it is possible to create output
4only gpios. This driver configures a special memory address, which when
5written to outputs 16 bit to the latches.
6
7The node describing the memory mapped GPIOs needs to be a child of the node
8describing the "lantiq,localbus".
9
10Required properties:
11- compatible : Should be "lantiq,gpio-mm"
12- reg : Address and length of the register set for the device
13- #gpio-cells : Should be two. The first cell is the pin number and
14 the second cell is used to specify optional parameters (currently
15 unused).
16- gpio-controller : Marks the device node as a gpio controller.
17
18Optional properties:
19- lantiq,shadow : The default value that we shall assume as already set on the
20 shift register cascade.
21
22Example:
23
24localbus@0 {
25 #address-cells = <2>;
26 #size-cells = <1>;
27 ranges = <0 0 0x0 0x3ffffff /* addrsel0 */
28 1 0 0x4000000 0x4000010>; /* addrsel1 */
29 compatible = "lantiq,localbus", "simple-bus";
30
31 gpio_mm0: gpio@4000000 {
32 compatible = "lantiq,gpio-mm";
33 reg = <1 0x0 0x10>;
34 gpio-controller;
35 #gpio-cells = <2>;
36 lantiq,shadow = <0x77f>;
37 };
38};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-mxs.txt b/Documentation/devicetree/bindings/gpio/gpio-mxs.txt
new file mode 100644
index 000000000000..0c35673f7a3e
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-mxs.txt
@@ -0,0 +1,87 @@
1* Freescale MXS GPIO controller
2
3The Freescale MXS GPIO controller is part of MXS PIN controller. The
4GPIOs are organized in port/bank. Each port consists of 32 GPIOs.
5
6As the GPIO controller is embedded in the PIN controller and all the
7GPIO ports share the same IO space with PIN controller, the GPIO node
8will be represented as sub-nodes of MXS pinctrl node.
9
10Required properties for GPIO node:
11- compatible : Should be "fsl,<soc>-gpio". The supported SoCs include
12 imx23 and imx28.
13- interrupts : Should be the port interrupt shared by all 32 pins.
14- gpio-controller : Marks the device node as a gpio controller.
15- #gpio-cells : Should be two. The first cell is the pin number and
16 the second cell is used to specify optional parameters (currently
17 unused).
18- interrupt-controller: Marks the device node as an interrupt controller.
19- #interrupt-cells : Should be 2. The first cell is the GPIO number.
20 The second cell bits[3:0] is used to specify trigger type and level flags:
21 1 = low-to-high edge triggered.
22 2 = high-to-low edge triggered.
23 4 = active high level-sensitive.
24 8 = active low level-sensitive.
25
26Note: Each GPIO port should have an alias correctly numbered in "aliases"
27node.
28
29Examples:
30
31aliases {
32 gpio0 = &gpio0;
33 gpio1 = &gpio1;
34 gpio2 = &gpio2;
35 gpio3 = &gpio3;
36 gpio4 = &gpio4;
37};
38
39pinctrl@80018000 {
40 compatible = "fsl,imx28-pinctrl", "simple-bus";
41 reg = <0x80018000 0x2000>;
42
43 gpio0: gpio@0 {
44 compatible = "fsl,imx28-gpio";
45 interrupts = <127>;
46 gpio-controller;
47 #gpio-cells = <2>;
48 interrupt-controller;
49 #interrupt-cells = <2>;
50 };
51
52 gpio1: gpio@1 {
53 compatible = "fsl,imx28-gpio";
54 interrupts = <126>;
55 gpio-controller;
56 #gpio-cells = <2>;
57 interrupt-controller;
58 #interrupt-cells = <2>;
59 };
60
61 gpio2: gpio@2 {
62 compatible = "fsl,imx28-gpio";
63 interrupts = <125>;
64 gpio-controller;
65 #gpio-cells = <2>;
66 interrupt-controller;
67 #interrupt-cells = <2>;
68 };
69
70 gpio3: gpio@3 {
71 compatible = "fsl,imx28-gpio";
72 interrupts = <124>;
73 gpio-controller;
74 #gpio-cells = <2>;
75 interrupt-controller;
76 #interrupt-cells = <2>;
77 };
78
79 gpio4: gpio@4 {
80 compatible = "fsl,imx28-gpio";
81 interrupts = <123>;
82 gpio-controller;
83 #gpio-cells = <2>;
84 interrupt-controller;
85 #interrupt-cells = <2>;
86 };
87};
diff --git a/Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt b/Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt
new file mode 100644
index 000000000000..854de130a971
--- /dev/null
+++ b/Documentation/devicetree/bindings/gpio/gpio-stp-xway.txt
@@ -0,0 +1,42 @@
1Lantiq SoC Serial To Parallel (STP) GPIO controller
2
3The Serial To Parallel (STP) is found on MIPS based Lantiq SoCs. It is a
4peripheral controller used to drive external shift register cascades. At most
53 groups of 8 bits can be driven. The hardware is able to allow the DSL modem
6to drive the 2 LSBs of the cascade automatically.
7
8
9Required properties:
10- compatible : Should be "lantiq,gpio-stp-xway"
11- reg : Address and length of the register set for the device
12- #gpio-cells : Should be two. The first cell is the pin number and
13 the second cell is used to specify optional parameters (currently
14 unused).
15- gpio-controller : Marks the device node as a gpio controller.
16
17Optional properties:
18- lantiq,shadow : The default value that we shall assume as already set on the
19 shift register cascade.
20- lantiq,groups : Set the 3 bit mask to select which of the 3 groups are enabled
21 in the shift register cascade.
22- lantiq,dsl : The dsl core can control the 2 LSBs of the gpio cascade. This 2 bit
23 property can enable this feature.
24- lantiq,phy1 : The gphy1 core can control 3 bits of the gpio cascade.
25- lantiq,phy2 : The gphy2 core can control 3 bits of the gpio cascade.
26- lantiq,rising : use rising instead of falling edge for the shift register
27
28Example:
29
30gpio1: stp@E100BB0 {
31 compatible = "lantiq,gpio-stp-xway";
32 reg = <0xE100BB0 0x40>;
33 #gpio-cells = <2>;
34 gpio-controller;
35
36 lantiq,shadow = <0xffff>;
37 lantiq,groups = <0x7>;
38 lantiq,dsl = <0x3>;
39 lantiq,phy1 = <0x7>;
40 lantiq,phy2 = <0x7>;
41 /* lantiq,rising; */
42};
diff --git a/Documentation/devicetree/bindings/i2c/i2c-mxs.txt b/Documentation/devicetree/bindings/i2c/i2c-mxs.txt
new file mode 100644
index 000000000000..1bfc02de1b0c
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/i2c-mxs.txt
@@ -0,0 +1,16 @@
1* Freescale MXS Inter IC (I2C) Controller
2
3Required properties:
4- compatible: Should be "fsl,<chip>-i2c"
5- reg: Should contain registers location and length
6- interrupts: Should contain ERROR and DMA interrupts
7
8Examples:
9
10i2c0: i2c@80058000 {
11 #address-cells = <1>;
12 #size-cells = <0>;
13 compatible = "fsl,imx28-i2c";
14 reg = <0x80058000 0x2000>;
15 interrupts = <111 68>;
16};
diff --git a/Documentation/devicetree/bindings/i2c/mux.txt b/Documentation/devicetree/bindings/i2c/mux.txt
new file mode 100644
index 000000000000..af84cce5cd7b
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/mux.txt
@@ -0,0 +1,60 @@
1Common i2c bus multiplexer/switch properties.
2
3An i2c bus multiplexer/switch will have several child busses that are
4numbered uniquely in a device dependent manner. The nodes for an i2c bus
5multiplexer/switch will have one child node for each child
6bus.
7
8Required properties:
9- #address-cells = <1>;
10- #size-cells = <0>;
11
12Required properties for child nodes:
13- #address-cells = <1>;
14- #size-cells = <0>;
15- reg : The sub-bus number.
16
17Optional properties for child nodes:
18- Other properties specific to the multiplexer/switch hardware.
19- Child nodes conforming to i2c bus binding
20
21
22Example :
23
24 /*
25 An NXP pca9548 8 channel I2C multiplexer at address 0x70
26 with two NXP pca8574 GPIO expanders attached, one each to
27 ports 3 and 4.
28 */
29
30 mux@70 {
31 compatible = "nxp,pca9548";
32 reg = <0x70>;
33 #address-cells = <1>;
34 #size-cells = <0>;
35
36 i2c@3 {
37 #address-cells = <1>;
38 #size-cells = <0>;
39 reg = <3>;
40
41 gpio1: gpio@38 {
42 compatible = "nxp,pca8574";
43 reg = <0x38>;
44 #gpio-cells = <2>;
45 gpio-controller;
46 };
47 };
48 i2c@4 {
49 #address-cells = <1>;
50 #size-cells = <0>;
51 reg = <4>;
52
53 gpio2: gpio@38 {
54 compatible = "nxp,pca8574";
55 reg = <0x38>;
56 #gpio-cells = <2>;
57 gpio-controller;
58 };
59 };
60 };
diff --git a/Documentation/devicetree/bindings/i2c/samsung-i2c.txt b/Documentation/devicetree/bindings/i2c/samsung-i2c.txt
index 38832c712919..b6cb5a12c672 100644
--- a/Documentation/devicetree/bindings/i2c/samsung-i2c.txt
+++ b/Documentation/devicetree/bindings/i2c/samsung-i2c.txt
@@ -6,14 +6,18 @@ Required properties:
6 - compatible: value should be either of the following. 6 - compatible: value should be either of the following.
7 (a) "samsung, s3c2410-i2c", for i2c compatible with s3c2410 i2c. 7 (a) "samsung, s3c2410-i2c", for i2c compatible with s3c2410 i2c.
8 (b) "samsung, s3c2440-i2c", for i2c compatible with s3c2440 i2c. 8 (b) "samsung, s3c2440-i2c", for i2c compatible with s3c2440 i2c.
9 (c) "samsung, s3c2440-hdmiphy-i2c", for s3c2440-like i2c used
10 inside HDMIPHY block found on several samsung SoCs
9 - reg: physical base address of the controller and length of memory mapped 11 - reg: physical base address of the controller and length of memory mapped
10 region. 12 region.
11 - interrupts: interrupt number to the cpu. 13 - interrupts: interrupt number to the cpu.
12 - samsung,i2c-sda-delay: Delay (in ns) applied to data line (SDA) edges. 14 - samsung,i2c-sda-delay: Delay (in ns) applied to data line (SDA) edges.
13 - gpios: The order of the gpios should be the following: <SDA, SCL>.
14 The gpio specifier depends on the gpio controller.
15 15
16Optional properties: 16Optional properties:
17 - gpios: The order of the gpios should be the following: <SDA, SCL>.
18 The gpio specifier depends on the gpio controller. Required in all
19 cases except for "samsung,s3c2440-hdmiphy-i2c" whose input/output
20 lines are permanently wired to the respective client
17 - samsung,i2c-slave-addr: Slave address in multi-master environment. If not 21 - samsung,i2c-slave-addr: Slave address in multi-master environment. If not
18 specified, default value is 0. 22 specified, default value is 0.
19 - samsung,i2c-max-bus-freq: Desired frequency in Hz of the bus. If not 23 - samsung,i2c-max-bus-freq: Desired frequency in Hz of the bus. If not
diff --git a/Documentation/devicetree/bindings/i2c/xiic.txt b/Documentation/devicetree/bindings/i2c/xiic.txt
new file mode 100644
index 000000000000..ceabbe91ae44
--- /dev/null
+++ b/Documentation/devicetree/bindings/i2c/xiic.txt
@@ -0,0 +1,22 @@
1Xilinx IIC controller:
2
3Required properties:
4- compatible : Must be "xlnx,xps-iic-2.00.a"
5- reg : IIC register location and length
6- interrupts : IIC controller interrupt
7- #address-cells = <1>
8- #size-cells = <0>
9
10Optional properties:
11- Child nodes conforming to i2c bus binding
12
13Example:
14
15 axi_iic_0: i2c@40800000 {
16 compatible = "xlnx,xps-iic-2.00.a";
17 interrupts = < 1 2 >;
18 reg = < 0x40800000 0x10000 >;
19
20 #size-cells = <0>;
21 #address-cells = <1>;
22 };
diff --git a/Documentation/devicetree/bindings/mfd/da9052-i2c.txt b/Documentation/devicetree/bindings/mfd/da9052-i2c.txt
new file mode 100644
index 000000000000..1857f4a6b9a9
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/da9052-i2c.txt
@@ -0,0 +1,60 @@
1* Dialog DA9052/53 Power Management Integrated Circuit (PMIC)
2
3Required properties:
4- compatible : Should be "dlg,da9052", "dlg,da9053-aa",
5 "dlg,da9053-ab", or "dlg,da9053-bb"
6
7Sub-nodes:
8- regulators : Contain the regulator nodes. The DA9052/53 regulators are
9 bound using their names as listed below:
10
11 buck0 : regulator BUCK0
12 buck1 : regulator BUCK1
13 buck2 : regulator BUCK2
14 buck3 : regulator BUCK3
15 ldo4 : regulator LDO4
16 ldo5 : regulator LDO5
17 ldo6 : regulator LDO6
18 ldo7 : regulator LDO7
19 ldo8 : regulator LDO8
20 ldo9 : regulator LDO9
21 ldo10 : regulator LDO10
22 ldo11 : regulator LDO11
23 ldo12 : regulator LDO12
24 ldo13 : regulator LDO13
25
26 The bindings details of individual regulator device can be found in:
27 Documentation/devicetree/bindings/regulator/regulator.txt
28
29Examples:
30
31i2c@63fc8000 { /* I2C1 */
32 status = "okay";
33
34 pmic: dialog@48 {
35 compatible = "dlg,da9053-aa";
36 reg = <0x48>;
37
38 regulators {
39 buck0 {
40 regulator-min-microvolt = <500000>;
41 regulator-max-microvolt = <2075000>;
42 };
43
44 buck1 {
45 regulator-min-microvolt = <500000>;
46 regulator-max-microvolt = <2075000>;
47 };
48
49 buck2 {
50 regulator-min-microvolt = <925000>;
51 regulator-max-microvolt = <2500000>;
52 };
53
54 buck3 {
55 regulator-min-microvolt = <925000>;
56 regulator-max-microvolt = <2500000>;
57 };
58 };
59 };
60};
diff --git a/Documentation/devicetree/bindings/mfd/tps65910.txt b/Documentation/devicetree/bindings/mfd/tps65910.txt
new file mode 100644
index 000000000000..645f5eaadb3f
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/tps65910.txt
@@ -0,0 +1,133 @@
1TPS65910 Power Management Integrated Circuit
2
3Required properties:
4- compatible: "ti,tps65910" or "ti,tps65911"
5- reg: I2C slave address
6- interrupts: the interrupt outputs of the controller
7- #gpio-cells: number of cells to describe a GPIO, this should be 2.
8 The first cell is the GPIO number.
9 The second cell is used to specify additional options <unused>.
10- gpio-controller: mark the device as a GPIO controller
11- #interrupt-cells: the number of cells to describe an IRQ, this should be 2.
12 The first cell is the IRQ number.
13 The second cell is the flags, encoded as the trigger masks from
14 Documentation/devicetree/bindings/interrupts.txt
15- regulators: This is the list of child nodes that specify the regulator
16 initialization data for defined regulators. Not all regulators for the given
17 device need to be present. The definition for each of these nodes is defined
18 using the standard binding for regulators found at
19 Documentation/devicetree/bindings/regulator/regulator.txt.
20
21 The valid names for regulators are:
22 tps65910: vrtc, vio, vdd1, vdd2, vdd3, vdig1, vdig2, vpll, vdac, vaux1,
23 vaux2, vaux33, vmmc
24 tps65911: vrtc, vio, vdd1, vdd3, vddctrl, ldo1, ldo2, ldo3, ldo4, ldo5,
25 ldo6, ldo7, ldo8
26
27Optional properties:
28- ti,vmbch-threshold: (tps65911) main battery charged threshold
29 comparator. (see VMBCH_VSEL in TPS65910 datasheet)
30- ti,vmbch2-threshold: (tps65911) main battery discharged threshold
31 comparator. (see VMBCH_VSEL in TPS65910 datasheet)
32- ti,en-gpio-sleep: enable sleep control for gpios
33 There should be 9 entries here, one for each gpio.
34
35Regulator Optional properties:
36- ti,regulator-ext-sleep-control: enable external sleep
37 control through external inputs [0 (not enabled), 1 (EN1), 2 (EN2) or 4(EN3)]
38 If this property is not defined, it defaults to 0 (not enabled).
39
40Example:
41
42 pmu: tps65910@d2 {
43 compatible = "ti,tps65910";
44 reg = <0xd2>;
45 interrupt-parent = <&intc>;
46 interrupts = < 0 118 0x04 >;
47
48 #gpio-cells = <2>;
49 gpio-controller;
50
51 #interrupt-cells = <2>;
52 interrupt-controller;
53
54 ti,vmbch-threshold = <0>;
55 ti,vmbch2-threshold = <0>;
56
57 ti,en-gpio-sleep = <0 0 1 0 0 0 0 0 0>;
58
59 regulators {
60 vdd1_reg: vdd1 {
61 regulator-min-microvolt = < 600000>;
62 regulator-max-microvolt = <1500000>;
63 regulator-always-on;
64 regulator-boot-on;
65 ti,regulator-ext-sleep-control = <0>;
66 };
67 vdd2_reg: vdd2 {
68 regulator-min-microvolt = < 600000>;
69 regulator-max-microvolt = <1500000>;
70 regulator-always-on;
71 regulator-boot-on;
72 ti,regulator-ext-sleep-control = <4>;
73 };
74 vddctrl_reg: vddctrl {
75 regulator-min-microvolt = < 600000>;
76 regulator-max-microvolt = <1400000>;
77 regulator-always-on;
78 regulator-boot-on;
79 ti,regulator-ext-sleep-control = <0>;
80 };
81 vio_reg: vio {
82 regulator-min-microvolt = <1500000>;
83 regulator-max-microvolt = <1800000>;
84 regulator-always-on;
85 regulator-boot-on;
86 ti,regulator-ext-sleep-control = <1>;
87 };
88 ldo1_reg: ldo1 {
89 regulator-min-microvolt = <1000000>;
90 regulator-max-microvolt = <3300000>;
91 ti,regulator-ext-sleep-control = <0>;
92 };
93 ldo2_reg: ldo2 {
94 regulator-min-microvolt = <1050000>;
95 regulator-max-microvolt = <1050000>;
96 ti,regulator-ext-sleep-control = <0>;
97 };
98 ldo3_reg: ldo3 {
99 regulator-min-microvolt = <1000000>;
100 regulator-max-microvolt = <3300000>;
101 ti,regulator-ext-sleep-control = <0>;
102 };
103 ldo4_reg: ldo4 {
104 regulator-min-microvolt = <1000000>;
105 regulator-max-microvolt = <3300000>;
106 regulator-always-on;
107 ti,regulator-ext-sleep-control = <0>;
108 };
109 ldo5_reg: ldo5 {
110 regulator-min-microvolt = <1000000>;
111 regulator-max-microvolt = <3300000>;
112 ti,regulator-ext-sleep-control = <0>;
113 };
114 ldo6_reg: ldo6 {
115 regulator-min-microvolt = <1200000>;
116 regulator-max-microvolt = <1200000>;
117 ti,regulator-ext-sleep-control = <0>;
118 };
119 ldo7_reg: ldo7 {
120 regulator-min-microvolt = <1200000>;
121 regulator-max-microvolt = <1200000>;
122 regulator-always-on;
123 regulator-boot-on;
124 ti,regulator-ext-sleep-control = <1>;
125 };
126 ldo8_reg: ldo8 {
127 regulator-min-microvolt = <1000000>;
128 regulator-max-microvolt = <3300000>;
129 regulator-always-on;
130 ti,regulator-ext-sleep-control = <1>;
131 };
132 };
133 };
diff --git a/Documentation/devicetree/bindings/mfd/twl6040.txt b/Documentation/devicetree/bindings/mfd/twl6040.txt
new file mode 100644
index 000000000000..bc67c6f424aa
--- /dev/null
+++ b/Documentation/devicetree/bindings/mfd/twl6040.txt
@@ -0,0 +1,62 @@
1Texas Instruments TWL6040 family
2
3The TWL6040s are 8-channel high quality low-power audio codecs providing audio
4and vibra functionality on OMAP4+ platforms.
5They are connected to the host processor via i2c for commands, McPDM for audio
6data and commands.
7
8Required properties:
9- compatible : Must be "ti,twl6040";
10- reg: must be 0x4b for i2c address
11- interrupts: twl6040 has one interrupt line connected to the main SoC
12- interrupt-parent: The parent interrupt controller
13- twl6040,audpwron-gpio: Power on GPIO line for the twl6040
14
15- vio-supply: Regulator for the twl6040 VIO supply
16- v2v1-supply: Regulator for the twl6040 V2V1 supply
17
18Optional properties, nodes:
19- enable-active-high: To power on the twl6040 during boot.
20
21Vibra functionality
22Required properties:
23- vddvibl-supply: Regulator for the left vibra motor
24- vddvibr-supply: Regulator for the right vibra motor
25- vibra { }: Configuration section for vibra parameters containing the following
26 properties:
27- ti,vibldrv-res: Resistance parameter for left driver
28- ti,vibrdrv-res: Resistance parameter for right driver
29- ti,viblmotor-res: Resistance parameter for left motor
30- ti,vibrmotor-res: Resistance parameter for right motor
31
32Optional properties within vibra { } section:
33- vddvibl_uV: If the vddvibl default voltage need to be changed
34- vddvibr_uV: If the vddvibr default voltage need to be changed
35
36Example:
37&i2c1 {
38 twl6040: twl@4b {
39 compatible = "ti,twl6040";
40 reg = <0x4b>;
41
42 interrupts = <0 119 4>;
43 interrupt-parent = <&gic>;
44 twl6040,audpwron-gpio = <&gpio4 31 0>;
45
46 vio-supply = <&v1v8>;
47 v2v1-supply = <&v2v1>;
48 enable-active-high;
49
50 /* regulators for vibra motor */
51 vddvibl-supply = <&vbat>;
52 vddvibr-supply = <&vbat>;
53
54 vibra {
55 /* Vibra driver, motor resistance parameters */
56 ti,vibldrv-res = <8>;
57 ti,vibrdrv-res = <3>;
58 ti,viblmotor-res = <10>;
59 ti,vibrmotor-res = <10>;
60 };
61 };
62};
diff --git a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
index 64bcb8be973c..0d93b4b0e0e3 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-esdhc.txt
@@ -11,9 +11,11 @@ Required properties:
11 - interrupt-parent : interrupt source phandle. 11 - interrupt-parent : interrupt source phandle.
12 - clock-frequency : specifies eSDHC base clock frequency. 12 - clock-frequency : specifies eSDHC base clock frequency.
13 - sdhci,wp-inverted : (optional) specifies that eSDHC controller 13 - sdhci,wp-inverted : (optional) specifies that eSDHC controller
14 reports inverted write-protect state; 14 reports inverted write-protect state; New devices should use
15 the generic "wp-inverted" property.
15 - sdhci,1-bit-only : (optional) specifies that a controller can 16 - sdhci,1-bit-only : (optional) specifies that a controller can
16 only handle 1-bit data transfers. 17 only handle 1-bit data transfers. New devices should use the
18 generic "bus-width = <1>" property.
17 - sdhci,auto-cmd12: (optional) specifies that a controller can 19 - sdhci,auto-cmd12: (optional) specifies that a controller can
18 only handle auto CMD12. 20 only handle auto CMD12.
19 21
diff --git a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
index ab22fe6e73ab..c7e404b3ef05 100644
--- a/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
+++ b/Documentation/devicetree/bindings/mmc/fsl-imx-esdhc.txt
@@ -9,7 +9,7 @@ Required properties:
9- interrupts : Should contain eSDHC interrupt 9- interrupts : Should contain eSDHC interrupt
10 10
11Optional properties: 11Optional properties:
12- fsl,card-wired : Indicate the card is wired to host permanently 12- non-removable : Indicate the card is wired to host permanently
13- fsl,cd-internal : Indicate to use controller internal card detection 13- fsl,cd-internal : Indicate to use controller internal card detection
14- fsl,wp-internal : Indicate to use controller internal write protection 14- fsl,wp-internal : Indicate to use controller internal write protection
15- cd-gpios : Specify GPIOs for card detection 15- cd-gpios : Specify GPIOs for card detection
diff --git a/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
index 89a0084df2f7..d64aea5a4203 100644
--- a/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
+++ b/Documentation/devicetree/bindings/mmc/mmc-spi-slot.txt
@@ -10,7 +10,8 @@ Required properties:
10 10
11Optional properties: 11Optional properties:
12- gpios : may specify GPIOs in this order: Card-Detect GPIO, 12- gpios : may specify GPIOs in this order: Card-Detect GPIO,
13 Write-Protect GPIO. 13 Write-Protect GPIO. Note that this does not follow the
14 binding from mmc.txt, for historic reasons.
14- interrupts : the interrupt of a card detect interrupt. 15- interrupts : the interrupt of a card detect interrupt.
15- interrupt-parent : the phandle for the interrupt controller that 16- interrupt-parent : the phandle for the interrupt controller that
16 services interrupts for this device. 17 services interrupts for this device.
diff --git a/Documentation/devicetree/bindings/mmc/mmc.txt b/Documentation/devicetree/bindings/mmc/mmc.txt
new file mode 100644
index 000000000000..6e70dcde0a71
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/mmc.txt
@@ -0,0 +1,27 @@
1These properties are common to multiple MMC host controllers. Any host
2that requires the respective functionality should implement them using
3these definitions.
4
5Required properties:
6- bus-width: Number of data lines, can be <1>, <4>, or <8>
7
8Optional properties:
9- cd-gpios : Specify GPIOs for card detection, see gpio binding
10- wp-gpios : Specify GPIOs for write protection, see gpio binding
11- cd-inverted: when present, polarity on the cd gpio line is inverted
12- wp-inverted: when present, polarity on the wp gpio line is inverted
13- non-removable: non-removable slot (like eMMC)
14- max-frequency: maximum operating clock frequency
15
16Example:
17
18sdhci@ab000000 {
19 compatible = "sdhci";
20 reg = <0xab000000 0x200>;
21 interrupts = <23>;
22 bus-width = <4>;
23 cd-gpios = <&gpio 69 0>;
24 cd-inverted;
25 wp-gpios = <&gpio 70 0>;
26 max-frequency = <50000000>;
27};
diff --git a/Documentation/devicetree/bindings/mmc/mmci.txt b/Documentation/devicetree/bindings/mmc/mmci.txt
new file mode 100644
index 000000000000..14a81d526118
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/mmci.txt
@@ -0,0 +1,19 @@
1* ARM PrimeCell MultiMedia Card Interface (MMCI) PL180/1
2
3The ARM PrimeCell MMCI PL180 and PL181 provide an interface for
4reading and writing to MultiMedia and SD cards alike.
5
6Required properties:
7- compatible : contains "arm,pl18x", "arm,primecell".
8- reg : contains pl18x registers and length.
9- interrupts : contains the device IRQ(s).
10- arm,primecell-periphid : contains the PrimeCell Peripheral ID.
11
12Optional properties:
13- wp-gpios : contains any write protect (ro) gpios
14- cd-gpios : contains any card detection gpios
15- cd-inverted : indicates whether the cd gpio is inverted
16- max-frequency : contains the maximum operating frequency
17- bus-width : number of data lines, can be <1>, <4>, or <8>
18- mmc-cap-mmc-highspeed : indicates whether MMC is high speed capable
19- mmc-cap-sd-highspeed : indicates whether SD is high speed capable
diff --git a/Documentation/devicetree/bindings/mmc/mxs-mmc.txt b/Documentation/devicetree/bindings/mmc/mxs-mmc.txt
new file mode 100644
index 000000000000..14d870a9e3db
--- /dev/null
+++ b/Documentation/devicetree/bindings/mmc/mxs-mmc.txt
@@ -0,0 +1,25 @@
1* Freescale MXS MMC controller
2
3The Freescale MXS Synchronous Serial Ports (SSP) can act as a MMC controller
4to support MMC, SD, and SDIO types of memory cards.
5
6Required properties:
7- compatible: Should be "fsl,<chip>-mmc". The supported chips include
8 imx23 and imx28.
9- reg: Should contain registers location and length
10- interrupts: Should contain ERROR and DMA interrupts
11- fsl,ssp-dma-channel: APBH DMA channel for the SSP
12- bus-width: Number of data lines, can be <1>, <4>, or <8>
13
14Optional properties:
15- wp-gpios: Specify GPIOs for write protection
16
17Examples:
18
19ssp0: ssp@80010000 {
20 compatible = "fsl,imx28-mmc";
21 reg = <0x80010000 0x2000>;
22 interrupts = <96 82>;
23 fsl,ssp-dma-channel = <0>;
24 bus-width = <8>;
25};
diff --git a/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt b/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt
index 7e51154679a6..f77c3031607f 100644
--- a/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt
+++ b/Documentation/devicetree/bindings/mmc/nvidia-sdhci.txt
@@ -7,12 +7,12 @@ Required properties:
7- compatible : Should be "nvidia,<chip>-sdhci" 7- compatible : Should be "nvidia,<chip>-sdhci"
8- reg : Should contain SD/MMC registers location and length 8- reg : Should contain SD/MMC registers location and length
9- interrupts : Should contain SD/MMC interrupt 9- interrupts : Should contain SD/MMC interrupt
10- bus-width : Number of data lines, can be <1>, <4>, or <8>
10 11
11Optional properties: 12Optional properties:
12- cd-gpios : Specify GPIOs for card detection 13- cd-gpios : Specify GPIOs for card detection
13- wp-gpios : Specify GPIOs for write protection 14- wp-gpios : Specify GPIOs for write protection
14- power-gpios : Specify GPIOs for power control 15- power-gpios : Specify GPIOs for power control
15- support-8bit : Boolean, indicates if 8-bit mode should be used.
16 16
17Example: 17Example:
18 18
@@ -23,5 +23,5 @@ sdhci@c8000200 {
23 cd-gpios = <&gpio 69 0>; /* gpio PI5 */ 23 cd-gpios = <&gpio 69 0>; /* gpio PI5 */
24 wp-gpios = <&gpio 57 0>; /* gpio PH1 */ 24 wp-gpios = <&gpio 57 0>; /* gpio PH1 */
25 power-gpios = <&gpio 155 0>; /* gpio PT3 */ 25 power-gpios = <&gpio 155 0>; /* gpio PT3 */
26 support-8bit; 26 bus-width = <8>;
27}; 27};
diff --git a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
index dbd4368ab8cc..8a53958c9a9f 100644
--- a/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
+++ b/Documentation/devicetree/bindings/mmc/ti-omap-hsmmc.txt
@@ -15,7 +15,7 @@ Optional properties:
15ti,dual-volt: boolean, supports dual voltage cards 15ti,dual-volt: boolean, supports dual voltage cards
16<supply-name>-supply: phandle to the regulator device tree node 16<supply-name>-supply: phandle to the regulator device tree node
17"supply-name" examples are "vmmc", "vmmc_aux" etc 17"supply-name" examples are "vmmc", "vmmc_aux" etc
18ti,bus-width: Number of data lines, default assumed is 1 if the property is missing. 18bus-width: Number of data lines, default assumed is 1 if the property is missing.
19cd-gpios: GPIOs for card detection 19cd-gpios: GPIOs for card detection
20wp-gpios: GPIOs for write protection 20wp-gpios: GPIOs for write protection
21ti,non-removable: non-removable slot (like eMMC) 21ti,non-removable: non-removable slot (like eMMC)
@@ -27,7 +27,7 @@ Example:
27 reg = <0x4809c000 0x400>; 27 reg = <0x4809c000 0x400>;
28 ti,hwmods = "mmc1"; 28 ti,hwmods = "mmc1";
29 ti,dual-volt; 29 ti,dual-volt;
30 ti,bus-width = <4>; 30 bus-width = <4>;
31 vmmc-supply = <&vmmc>; /* phandle to regulator node */ 31 vmmc-supply = <&vmmc>; /* phandle to regulator node */
32 ti,non-removable; 32 ti,non-removable;
33 }; 33 };
diff --git a/Documentation/devicetree/bindings/net/fsl-fec.txt b/Documentation/devicetree/bindings/net/fsl-fec.txt
index de439517dff0..7ab9e1a2d8be 100644
--- a/Documentation/devicetree/bindings/net/fsl-fec.txt
+++ b/Documentation/devicetree/bindings/net/fsl-fec.txt
@@ -14,7 +14,7 @@ Optional properties:
14 14
15Example: 15Example:
16 16
17fec@83fec000 { 17ethernet@83fec000 {
18 compatible = "fsl,imx51-fec", "fsl,imx27-fec"; 18 compatible = "fsl,imx51-fec", "fsl,imx27-fec";
19 reg = <0x83fec000 0x4000>; 19 reg = <0x83fec000 0x4000>;
20 interrupts = <87>; 20 interrupts = <87>;
diff --git a/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt b/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt
index 3664d37e6799..b4480d5c3aca 100644
--- a/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt
+++ b/Documentation/devicetree/bindings/pinctrl/pinctrl_spear.txt
@@ -4,6 +4,8 @@ Required properties:
4- compatible : "st,spear300-pinmux" 4- compatible : "st,spear300-pinmux"
5 : "st,spear310-pinmux" 5 : "st,spear310-pinmux"
6 : "st,spear320-pinmux" 6 : "st,spear320-pinmux"
7 : "st,spear1310-pinmux"
8 : "st,spear1340-pinmux"
7- reg : Address range of the pinctrl registers 9- reg : Address range of the pinctrl registers
8- st,pinmux-mode: Mandatory for SPEAr300 and SPEAr320 and invalid for others. 10- st,pinmux-mode: Mandatory for SPEAr300 and SPEAr320 and invalid for others.
9 - Its values for SPEAr300: 11 - Its values for SPEAr300:
@@ -89,6 +91,37 @@ For SPEAr320 machines:
89 "rmii0_1_grp", "i2c1_8_9_grp", "i2c1_98_99_grp", "i2c2_0_1_grp", 91 "rmii0_1_grp", "i2c1_8_9_grp", "i2c1_98_99_grp", "i2c2_0_1_grp",
90 "i2c2_2_3_grp", "i2c2_19_20_grp", "i2c2_75_76_grp", "i2c2_96_97_grp" 92 "i2c2_2_3_grp", "i2c2_19_20_grp", "i2c2_75_76_grp", "i2c2_96_97_grp"
91 93
94For SPEAr1310 machines:
95 "i2c0_grp", "ssp0_grp", "ssp0_cs0_grp", "ssp0_cs1_2_grp", "i2s0_grp",
96 "i2s1_grp", "clcd_grp", "clcd_high_res_grp", "arm_gpio_grp",
97 "smi_2_chips_grp", "smi_4_chips_grp", "gmii_grp", "rgmii_grp",
98 "smii_0_1_2_grp", "ras_mii_txclk_grp", "nand_8bit_grp",
99 "nand_16bit_grp", "nand_4_chips_grp", "keyboard_6x6_grp",
100 "keyboard_rowcol6_8_grp", "uart0_grp", "uart0_modem_grp",
101 "gpt0_tmr0_grp", "gpt0_tmr1_grp", "gpt1_tmr0_grp", "gpt1_tmr1_grp",
102 "sdhci_grp", "cf_grp", "xd_grp", "touch_xy_grp",
103 "uart1_disable_i2c_grp", "uart1_disable_sd_grp", "uart2_3_grp",
104 "uart4_grp", "uart5_grp", "rs485_0_1_tdm_0_1_grp", "i2c_1_2_grp",
105 "i2c3_dis_smi_clcd_grp", "i2c3_dis_sd_i2s0_grp", "i2c_4_5_dis_smi_grp",
106 "i2c4_dis_sd_grp", "i2c5_dis_sd_grp", "i2c_6_7_dis_kbd_grp",
107 "i2c6_dis_sd_grp", "i2c7_dis_sd_grp", "can0_dis_nor_grp",
108 "can0_dis_sd_grp", "can1_dis_sd_grp", "can1_dis_kbd_grp", "pcie0_grp",
109 "pcie1_grp", "pcie2_grp", "sata0_grp", "sata1_grp", "sata2_grp",
110 "ssp1_dis_kbd_grp", "ssp1_dis_sd_grp", "gpt64_grp"
111
112For SPEAr1340 machines:
113 "pads_as_gpio_grp", "fsmc_8bit_grp", "fsmc_16bit_grp", "fsmc_pnor_grp",
114 "keyboard_row_col_grp", "keyboard_col5_grp", "spdif_in_grp",
115 "spdif_out_grp", "gpt_0_1_grp", "pwm0_grp", "pwm1_grp", "pwm2_grp",
116 "pwm3_grp", "vip_mux_grp", "vip_mux_cam0_grp", "vip_mux_cam1_grp",
117 "vip_mux_cam2_grp", "vip_mux_cam3_grp", "cam0_grp", "cam1_grp",
118 "cam2_grp", "cam3_grp", "smi_grp", "ssp0_grp", "ssp0_cs1_grp",
119 "ssp0_cs2_grp", "ssp0_cs3_grp", "uart0_grp", "uart0_enh_grp",
120 "uart1_grp", "i2s_in_grp", "i2s_out_grp", "gmii_grp", "rgmii_grp",
121 "rmii_grp", "sgmii_grp", "i2c0_grp", "i2c1_grp", "cec0_grp", "cec1_grp",
122 "sdhci_grp", "cf_grp", "xd_grp", "clcd_grp", "arm_trace_grp",
123 "miphy_dbg_grp", "pcie_grp", "sata_grp"
124
92Valid values for function names are: 125Valid values for function names are:
93For All SPEAr3xx machines: 126For All SPEAr3xx machines:
94 "firda", "i2c0", "ssp_cs", "ssp0", "mii0", "gpio0", "uart0_ext", 127 "firda", "i2c0", "ssp_cs", "ssp0", "mii0", "gpio0", "uart0_ext",
@@ -106,3 +139,17 @@ For SPEAr320 machines:
106 "uart2", "uart3", "uart4", "uart5", "uart6", "rs485", "touchscreen", 139 "uart2", "uart3", "uart4", "uart5", "uart6", "rs485", "touchscreen",
107 "can0", "can1", "pwm0_1", "pwm2", "pwm3", "ssp1", "ssp2", "mii2", 140 "can0", "can1", "pwm0_1", "pwm2", "pwm3", "ssp1", "ssp2", "mii2",
108 "mii0_1", "i2c1", "i2c2" 141 "mii0_1", "i2c1", "i2c2"
142
143
144For SPEAr1310 machines:
145 "i2c0", "ssp0", "i2s0", "i2s1", "clcd", "arm_gpio", "smi", "gmii",
146 "rgmii", "smii_0_1_2", "ras_mii_txclk", "nand", "keyboard", "uart0",
147 "gpt0", "gpt1", "sdhci", "cf", "xd", "touchscreen", "uart1", "uart2_3",
148 "uart4", "uart5", "rs485_0_1_tdm_0_1", "i2c_1_2", "i2c3_i2s1",
149 "i2c_4_5", "i2c_6_7", "can0", "can1", "pci", "sata", "ssp1", "gpt64"
150
151For SPEAr1340 machines:
152 "pads_as_gpio", "fsmc", "keyboard", "spdif_in", "spdif_out", "gpt_0_1",
153 "pwm", "vip", "cam0", "cam1", "cam2", "cam3", "smi", "ssp0", "uart0",
154 "uart1", "i2s", "gmac", "i2c0", "i2c1", "cec0", "cec1", "sdhci", "cf",
155 "xd", "clcd", "arm_trace", "miphy_dbg", "pcie", "sata"
diff --git a/Documentation/devicetree/bindings/rtc/lpc32xx-rtc.txt b/Documentation/devicetree/bindings/rtc/lpc32xx-rtc.txt
new file mode 100644
index 000000000000..a87a1e9bc060
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/lpc32xx-rtc.txt
@@ -0,0 +1,15 @@
1* NXP LPC32xx SoC Real Time Clock controller
2
3Required properties:
4- compatible: must be "nxp,lpc3220-rtc"
5- reg: physical base address of the controller and length of memory mapped
6 region.
7- interrupts: The RTC interrupt
8
9Example:
10
11 rtc@40024000 {
12 compatible = "nxp,lpc3220-rtc";
13 reg = <0x40024000 0x1000>;
14 interrupts = <52 0>;
15 };
diff --git a/Documentation/devicetree/bindings/rtc/spear-rtc.txt b/Documentation/devicetree/bindings/rtc/spear-rtc.txt
new file mode 100644
index 000000000000..ca67ac62108e
--- /dev/null
+++ b/Documentation/devicetree/bindings/rtc/spear-rtc.txt
@@ -0,0 +1,17 @@
1* SPEAr RTC
2
3Required properties:
4- compatible : "st,spear600-rtc"
5- reg : Address range of the rtc registers
6- interrupt-parent: Should be the phandle for the interrupt controller
7 that services interrupts for this device
8- interrupt: Should contain the rtc interrupt number
9
10Example:
11
12 rtc@fc000000 {
13 compatible = "st,spear600-rtc";
14 reg = <0xfc000000 0x1000>;
15 interrupt-parent = <&vic1>;
16 interrupts = <12>;
17 };
diff --git a/Documentation/devicetree/bindings/sound/omap-dmic.txt b/Documentation/devicetree/bindings/sound/omap-dmic.txt
new file mode 100644
index 000000000000..fd8105f18978
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/omap-dmic.txt
@@ -0,0 +1,21 @@
1* Texas Instruments OMAP4+ Digital Microphone Module
2
3Required properties:
4- compatible: "ti,omap4-dmic"
5- reg: Register location and size as an array:
6 <MPU access base address, size>,
7 <L3 interconnect address, size>;
8- interrupts: Interrupt number for DMIC
9- interrupt-parent: The parent interrupt controller
10- ti,hwmods: Name of the hwmod associated with OMAP dmic IP
11
12Example:
13
14dmic: dmic@4012e000 {
15 compatible = "ti,omap4-dmic";
16 reg = <0x4012e000 0x7f>, /* MPU private access */
17 <0x4902e000 0x7f>; /* L3 Interconnect */
18 interrupts = <0 114 0x4>;
19 interrupt-parent = <&gic>;
20 ti,hwmods = "dmic";
21};
diff --git a/Documentation/devicetree/bindings/sound/omap-mcpdm.txt b/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
new file mode 100644
index 000000000000..0741dff048dd
--- /dev/null
+++ b/Documentation/devicetree/bindings/sound/omap-mcpdm.txt
@@ -0,0 +1,21 @@
1* Texas Instruments OMAP4+ McPDM
2
3Required properties:
4- compatible: "ti,omap4-mcpdm"
5- reg: Register location and size as an array:
6 <MPU access base address, size>,
7 <L3 interconnect address, size>;
8- interrupts: Interrupt number for McPDM
9- interrupt-parent: The parent interrupt controller
10- ti,hwmods: Name of the hwmod associated to the McPDM
11
12Example:
13
14mcpdm: mcpdm@40132000 {
15 compatible = "ti,omap4-mcpdm";
16 reg = <0x40132000 0x7f>, /* MPU private access */
17 <0x49032000 0x7f>; /* L3 Interconnect */
18 interrupts = <0 112 0x4>;
19 interrupt-parent = <&gic>;
20 ti,hwmods = "mcpdm";
21};
diff --git a/Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt b/Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt
index a9c0406280e8..b462d0c54823 100644
--- a/Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt
+++ b/Documentation/devicetree/bindings/tty/serial/fsl-imx-uart.txt
@@ -11,7 +11,7 @@ Optional properties:
11 11
12Example: 12Example:
13 13
14uart@73fbc000 { 14serial@73fbc000 {
15 compatible = "fsl,imx51-uart", "fsl,imx21-uart"; 15 compatible = "fsl,imx51-uart", "fsl,imx21-uart";
16 reg = <0x73fbc000 0x4000>; 16 reg = <0x73fbc000 0x4000>;
17 interrupts = <31>; 17 interrupts = <31>;
diff --git a/Documentation/devicetree/bindings/usb/tegra-usb.txt b/Documentation/devicetree/bindings/usb/tegra-usb.txt
index 007005ddbe12..e9b005dc7625 100644
--- a/Documentation/devicetree/bindings/usb/tegra-usb.txt
+++ b/Documentation/devicetree/bindings/usb/tegra-usb.txt
@@ -12,6 +12,9 @@ Required properties :
12 - nvidia,vbus-gpio : If present, specifies a gpio that needs to be 12 - nvidia,vbus-gpio : If present, specifies a gpio that needs to be
13 activated for the bus to be powered. 13 activated for the bus to be powered.
14 14
15Required properties for phy_type == ulpi:
16 - nvidia,phy-reset-gpio : The GPIO used to reset the PHY.
17
15Optional properties: 18Optional properties:
16 - dr_mode : dual role mode. Indicates the working mode for 19 - dr_mode : dual role mode. Indicates the working mode for
17 nvidia,tegra20-ehci compatible controllers. Can be "host", "peripheral", 20 nvidia,tegra20-ehci compatible controllers. Can be "host", "peripheral",
diff --git a/Documentation/dma-buf-sharing.txt b/Documentation/dma-buf-sharing.txt
index 3bbd5c51605a..ad86fb86c9a0 100644
--- a/Documentation/dma-buf-sharing.txt
+++ b/Documentation/dma-buf-sharing.txt
@@ -29,13 +29,6 @@ The buffer-user
29 in memory, mapped into its own address space, so it can access the same area 29 in memory, mapped into its own address space, so it can access the same area
30 of memory. 30 of memory.
31 31
32*IMPORTANT*: [see https://lkml.org/lkml/2011/12/20/211 for more details]
33For this first version, A buffer shared using the dma_buf sharing API:
34- *may* be exported to user space using "mmap" *ONLY* by exporter, outside of
35 this framework.
36- with this new iteration of the dma-buf api cpu access from the kernel has been
37 enable, see below for the details.
38
39dma-buf operations for device dma only 32dma-buf operations for device dma only
40-------------------------------------- 33--------------------------------------
41 34
@@ -300,6 +293,17 @@ Access to a dma_buf from the kernel context involves three steps:
300 Note that these calls need to always succeed. The exporter needs to complete 293 Note that these calls need to always succeed. The exporter needs to complete
301 any preparations that might fail in begin_cpu_access. 294 any preparations that might fail in begin_cpu_access.
302 295
296 For some cases the overhead of kmap can be too high, a vmap interface
297 is introduced. This interface should be used very carefully, as vmalloc
 298 space is a limited resource on many architectures.
299
300 Interfaces:
301 void *dma_buf_vmap(struct dma_buf *dmabuf)
302 void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
303
304 The vmap call can fail if there is no vmap support in the exporter, or if it
305 runs out of vmalloc space. Fallback to kmap should be implemented.
306
3033. Finish access 3073. Finish access
304 308
305 When the importer is done accessing the range specified in begin_cpu_access, 309 When the importer is done accessing the range specified in begin_cpu_access,
@@ -313,6 +317,83 @@ Access to a dma_buf from the kernel context involves three steps:
313 enum dma_data_direction dir); 317 enum dma_data_direction dir);
314 318
315 319
320Direct Userspace Access/mmap Support
321------------------------------------
322
 323Being able to mmap an exported dma-buf buffer object has 2 main use-cases:
324- CPU fallback processing in a pipeline and
325- supporting existing mmap interfaces in importers.
326
3271. CPU fallback processing in a pipeline
328
329 In many processing pipelines it is sometimes required that the cpu can access
330 the data in a dma-buf (e.g. for thumbnail creation, snapshots, ...). To avoid
331 the need to handle this specially in userspace frameworks for buffer sharing
332 it's ideal if the dma_buf fd itself can be used to access the backing storage
333 from userspace using mmap.
334
335 Furthermore Android's ION framework already supports this (and is otherwise
336 rather similar to dma-buf from a userspace consumer side with using fds as
337 handles, too). So it's beneficial to support this in a similar fashion on
338 dma-buf to have a good transition path for existing Android userspace.
339
340 No special interfaces, userspace simply calls mmap on the dma-buf fd.
341
 3422. Supporting existing mmap interfaces in importers
343
344 Similar to the motivation for kernel cpu access it is again important that
345 the userspace code of a given importing subsystem can use the same interfaces
 346 with an imported dma-buf buffer object as with a native buffer object. This is
347 especially important for drm where the userspace part of contemporary OpenGL,
348 X, and other drivers is huge, and reworking them to use a different way to
349 mmap a buffer rather invasive.
350
351 The assumption in the current dma-buf interfaces is that redirecting the
352 initial mmap is all that's needed. A survey of some of the existing
353 subsystems shows that no driver seems to do any nefarious thing like syncing
354 up with outstanding asynchronous processing on the device or allocating
355 special resources at fault time. So hopefully this is good enough, since
356 adding interfaces to intercept pagefaults and allow pte shootdowns would
357 increase the complexity quite a bit.
358
359 Interface:
360 int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
361 unsigned long);
362
363 If the importing subsystem simply provides a special-purpose mmap call to set
364 up a mapping in userspace, calling do_mmap with dma_buf->file will equally
365 achieve that for a dma-buf object.
366
3673. Implementation notes for exporters
368
369 Because dma-buf buffers have invariant size over their lifetime, the dma-buf
370 core checks whether a vma is too large and rejects such mappings. The
371 exporter hence does not need to duplicate this check.
372
373 Because existing importing subsystems might presume coherent mappings for
374 userspace, the exporter needs to set up a coherent mapping. If that's not
375 possible, it needs to fake coherency by manually shooting down ptes when
376 leaving the cpu domain and flushing caches at fault time. Note that all the
377 dma_buf files share the same anon inode, hence the exporter needs to replace
 378 the dma_buf file stored in vma->vm_file with its own if pte shootdown is
 379 required. This is because the kernel uses the underlying inode's address_space
380 for vma tracking (and hence pte tracking at shootdown time with
381 unmap_mapping_range).
382
383 If the above shootdown dance turns out to be too expensive in certain
384 scenarios, we can extend dma-buf with a more explicit cache tracking scheme
385 for userspace mappings. But the current assumption is that using mmap is
386 always a slower path, so some inefficiencies should be acceptable.
387
388 Exporters that shoot down mappings (for any reasons) shall not do any
389 synchronization at fault time with outstanding device operations.
390 Synchronization is an orthogonal issue to sharing the backing storage of a
 391 buffer and hence should not be handled by dma-buf itself. This is explicitly
392 mentioned here because many people seem to want something like this, but if
393 different exporters handle this differently, buffer sharing can fail in
 394 interesting ways depending upon the exporter (if userspace starts depending
395 upon this implicit synchronization).
396
316Miscellaneous notes 397Miscellaneous notes
317------------------- 398-------------------
318 399
@@ -336,6 +417,20 @@ Miscellaneous notes
336 the exporting driver to create a dmabuf fd must provide a way to let 417 the exporting driver to create a dmabuf fd must provide a way to let
337 userspace control setting of O_CLOEXEC flag passed in to dma_buf_fd(). 418 userspace control setting of O_CLOEXEC flag passed in to dma_buf_fd().
338 419
420- If an exporter needs to manually flush caches and hence needs to fake
421 coherency for mmap support, it needs to be able to zap all the ptes pointing
422 at the backing storage. Now linux mm needs a struct address_space associated
423 with the struct file stored in vma->vm_file to do that with the function
424 unmap_mapping_range. But the dma_buf framework only backs every dma_buf fd
425 with the anon_file struct file, i.e. all dma_bufs share the same file.
426
427 Hence exporters need to setup their own file (and address_space) association
428 by setting vma->vm_file and adjusting vma->vm_pgoff in the dma_buf mmap
429 callback. In the specific case of a gem driver the exporter could use the
430 shmem file already provided by gem (and set vm_pgoff = 0). Exporters can then
431 zap ptes by unmapping the corresponding range of the struct address_space
432 associated with their own file.
433
339References: 434References:
340[1] struct dma_buf_ops in include/linux/dma-buf.h 435[1] struct dma_buf_ops in include/linux/dma-buf.h
341[2] All interfaces mentioned above defined in include/linux/dma-buf.h 436[2] All interfaces mentioned above defined in include/linux/dma-buf.h
diff --git a/Documentation/feature-removal-schedule.txt b/Documentation/feature-removal-schedule.txt
index 50d82ae09e2a..ebaffe208ccb 100644
--- a/Documentation/feature-removal-schedule.txt
+++ b/Documentation/feature-removal-schedule.txt
@@ -588,3 +588,21 @@ Why: Remount currently allows changing bound subsystems and
588 replaced with conventional fsnotify. 588 replaced with conventional fsnotify.
589 589
590---------------------------- 590----------------------------
591
592What: KVM debugfs statistics
593When: 2013
594Why: KVM tracepoints provide mostly equivalent information in a much more
595 flexible fashion.
596
597----------------------------
598
599What: at91-mci driver ("CONFIG_MMC_AT91")
600When: 3.7
601Why: There are two mci drivers: at91-mci and atmel-mci. The PDC support
602 was added to atmel-mci as a first step to support more chips.
603 Then at91-mci was kept only for old IP versions (on at91rm9200 and
604 at91sam9261). The support of these IP versions has just been added
605 to atmel-mci, so atmel-mci can be used for all chips.
606Who: Ludovic Desroches <ludovic.desroches@atmel.com>
607
608----------------------------
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 4fca82e5276e..d449e632e6a0 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -60,7 +60,6 @@ ata *);
60 ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t); 60 ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
61 ssize_t (*listxattr) (struct dentry *, char *, size_t); 61 ssize_t (*listxattr) (struct dentry *, char *, size_t);
62 int (*removexattr) (struct dentry *, const char *); 62 int (*removexattr) (struct dentry *, const char *);
63 void (*truncate_range)(struct inode *, loff_t, loff_t);
64 int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len); 63 int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, u64 len);
65 64
66locking rules: 65locking rules:
@@ -87,7 +86,6 @@ setxattr: yes
87getxattr: no 86getxattr: no
88listxattr: no 87listxattr: no
89removexattr: yes 88removexattr: yes
90truncate_range: yes
91fiemap: no 89fiemap: no
92 Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on 90 Additionally, ->rmdir(), ->unlink() and ->rename() have ->i_mutex on
93victim. 91victim.
diff --git a/Documentation/filesystems/ext3.txt b/Documentation/filesystems/ext3.txt
index b100adc38adb..293855e95000 100644
--- a/Documentation/filesystems/ext3.txt
+++ b/Documentation/filesystems/ext3.txt
@@ -59,9 +59,9 @@ commit=nrsec (*) Ext3 can be told to sync all its data and metadata
59 Setting it to very large values will improve 59 Setting it to very large values will improve
60 performance. 60 performance.
61 61
62barrier=<0(*)|1> This enables/disables the use of write barriers in 62barrier=<0|1(*)> This enables/disables the use of write barriers in
63barrier the jbd code. barrier=0 disables, barrier=1 enables. 63barrier (*) the jbd code. barrier=0 disables, barrier=1 enables.
64nobarrier (*) This also requires an IO stack which can support 64nobarrier This also requires an IO stack which can support
65 barriers, and if jbd gets an error on a barrier 65 barriers, and if jbd gets an error on a barrier
66 write, it will disable again with a warning. 66 write, it will disable again with a warning.
67 Write barriers enforce proper on-disk ordering 67 Write barriers enforce proper on-disk ordering
diff --git a/Documentation/filesystems/porting b/Documentation/filesystems/porting
index 74acd9618819..8c91d1057d9a 100644
--- a/Documentation/filesystems/porting
+++ b/Documentation/filesystems/porting
@@ -297,7 +297,8 @@ in the beginning of ->setattr unconditionally.
297be used instead. It gets called whenever the inode is evicted, whether it has 297be used instead. It gets called whenever the inode is evicted, whether it has
298remaining links or not. Caller does *not* evict the pagecache or inode-associated 298remaining links or not. Caller does *not* evict the pagecache or inode-associated
299metadata buffers; getting rid of those is responsibility of method, as it had 299metadata buffers; getting rid of those is responsibility of method, as it had
300been for ->delete_inode(). 300been for ->delete_inode(). Caller makes sure async writeback cannot be running
301for the inode while (or after) ->evict_inode() is called.
301 302
302 ->drop_inode() returns int now; it's called on final iput() with 303 ->drop_inode() returns int now; it's called on final iput() with
303inode->i_lock held and it returns true if filesystems wants the inode to be 304inode->i_lock held and it returns true if filesystems wants the inode to be
@@ -306,14 +307,11 @@ updated appropriately. generic_delete_inode() is also alive and it consists
306simply of return 1. Note that all actual eviction work is done by caller after 307simply of return 1. Note that all actual eviction work is done by caller after
307->drop_inode() returns. 308->drop_inode() returns.
308 309
309 clear_inode() is gone; use end_writeback() instead. As before, it must 310 As before, clear_inode() must be called exactly once on each call of
310be called exactly once on each call of ->evict_inode() (as it used to be for 311->evict_inode() (as it used to be for each call of ->delete_inode()). Unlike
311each call of ->delete_inode()). Unlike before, if you are using inode-associated 312before, if you are using inode-associated metadata buffers (i.e.
312metadata buffers (i.e. mark_buffer_dirty_inode()), it's your responsibility to 313mark_buffer_dirty_inode()), it's your responsibility to call
313call invalidate_inode_buffers() before end_writeback(). 314invalidate_inode_buffers() before clear_inode().
314 No async writeback (and thus no calls of ->write_inode()) will happen
315after end_writeback() returns, so actions that should not overlap with ->write_inode()
316(e.g. freeing on-disk inode if i_nlink is 0) ought to be done after that call.
317 315
318 NOTE: checking i_nlink in the beginning of ->write_inode() and bailing out 316 NOTE: checking i_nlink in the beginning of ->write_inode() and bailing out
319if it's zero is not *and* *never* *had* *been* enough. Final unlink() and iput() 317if it's zero is not *and* *never* *had* *been* enough. Final unlink() and iput()
diff --git a/Documentation/filesystems/proc.txt b/Documentation/filesystems/proc.txt
index ef088e55ab2e..912af6ce5626 100644
--- a/Documentation/filesystems/proc.txt
+++ b/Documentation/filesystems/proc.txt
@@ -743,6 +743,7 @@ Committed_AS: 100056 kB
743VmallocTotal: 112216 kB 743VmallocTotal: 112216 kB
744VmallocUsed: 428 kB 744VmallocUsed: 428 kB
745VmallocChunk: 111088 kB 745VmallocChunk: 111088 kB
746AnonHugePages: 49152 kB
746 747
747 MemTotal: Total usable ram (i.e. physical ram minus a few reserved 748 MemTotal: Total usable ram (i.e. physical ram minus a few reserved
748 bits and the kernel binary code) 749 bits and the kernel binary code)
@@ -776,6 +777,7 @@ VmallocChunk: 111088 kB
776 Dirty: Memory which is waiting to get written back to the disk 777 Dirty: Memory which is waiting to get written back to the disk
777 Writeback: Memory which is actively being written back to the disk 778 Writeback: Memory which is actively being written back to the disk
778 AnonPages: Non-file backed pages mapped into userspace page tables 779 AnonPages: Non-file backed pages mapped into userspace page tables
780AnonHugePages: Non-file backed huge pages mapped into userspace page tables
779 Mapped: files which have been mmaped, such as libraries 781 Mapped: files which have been mmaped, such as libraries
780 Slab: in-kernel data structures cache 782 Slab: in-kernel data structures cache
781SReclaimable: Part of Slab, that might be reclaimed, such as caches 783SReclaimable: Part of Slab, that might be reclaimed, such as caches
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 0d0492028082..ef19f91a0f12 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -363,7 +363,6 @@ struct inode_operations {
363 ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t); 363 ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
364 ssize_t (*listxattr) (struct dentry *, char *, size_t); 364 ssize_t (*listxattr) (struct dentry *, char *, size_t);
365 int (*removexattr) (struct dentry *, const char *); 365 int (*removexattr) (struct dentry *, const char *);
366 void (*truncate_range)(struct inode *, loff_t, loff_t);
367}; 366};
368 367
369Again, all methods are called without any locks being held, unless 368Again, all methods are called without any locks being held, unless
@@ -472,9 +471,6 @@ otherwise noted.
472 removexattr: called by the VFS to remove an extended attribute from 471 removexattr: called by the VFS to remove an extended attribute from
473 a file. This method is called by removexattr(2) system call. 472 a file. This method is called by removexattr(2) system call.
474 473
475 truncate_range: a method provided by the underlying filesystem to truncate a
476 range of blocks , i.e. punch a hole somewhere in a file.
477
478 474
479The Address Space Object 475The Address Space Object
480======================== 476========================
@@ -760,7 +756,7 @@ struct file_operations
760---------------------- 756----------------------
761 757
762This describes how the VFS can manipulate an open file. As of kernel 758This describes how the VFS can manipulate an open file. As of kernel
7632.6.22, the following members are defined: 7593.5, the following members are defined:
764 760
765struct file_operations { 761struct file_operations {
766 struct module *owner; 762 struct module *owner;
@@ -790,6 +786,8 @@ struct file_operations {
790 int (*flock) (struct file *, int, struct file_lock *); 786 int (*flock) (struct file *, int, struct file_lock *);
791 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, size_t, unsigned int); 787 ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, size_t, unsigned int);
792 ssize_t (*splice_read)(struct file *, struct pipe_inode_info *, size_t, unsigned int); 788 ssize_t (*splice_read)(struct file *, struct pipe_inode_info *, size_t, unsigned int);
789 int (*setlease)(struct file *, long arg, struct file_lock **);
790 long (*fallocate)(struct file *, int mode, loff_t offset, loff_t len);
793}; 791};
794 792
795Again, all methods are called without any locks being held, unless 793Again, all methods are called without any locks being held, unless
@@ -858,6 +856,11 @@ otherwise noted.
858 splice_read: called by the VFS to splice data from file to a pipe. This 856 splice_read: called by the VFS to splice data from file to a pipe. This
859 method is used by the splice(2) system call 857 method is used by the splice(2) system call
860 858
859 setlease: called by the VFS to set or release a file lock lease.
860 setlease has the file_lock_lock held and must not sleep.
861
862 fallocate: called by the VFS to preallocate blocks or punch a hole.
863
861Note that the file operations are implemented by the specific 864Note that the file operations are implemented by the specific
862filesystem in which the inode resides. When opening a device node 865filesystem in which the inode resides. When opening a device node
863(character or block special) most filesystems will call special 866(character or block special) most filesystems will call special
diff --git a/Documentation/i2c/muxes/gpio-i2cmux b/Documentation/i2c/muxes/i2c-mux-gpio
index 811cd78d4cdc..bd9b2299b739 100644
--- a/Documentation/i2c/muxes/gpio-i2cmux
+++ b/Documentation/i2c/muxes/i2c-mux-gpio
@@ -1,11 +1,11 @@
1Kernel driver gpio-i2cmux 1Kernel driver i2c-gpio-mux
2 2
3Author: Peter Korsgaard <peter.korsgaard@barco.com> 3Author: Peter Korsgaard <peter.korsgaard@barco.com>
4 4
5Description 5Description
6----------- 6-----------
7 7
8gpio-i2cmux is an i2c mux driver providing access to I2C bus segments 8i2c-gpio-mux is an i2c mux driver providing access to I2C bus segments
9from a master I2C bus and a hardware MUX controlled through GPIO pins. 9from a master I2C bus and a hardware MUX controlled through GPIO pins.
10 10
11E.G.: 11E.G.:
@@ -26,16 +26,16 @@ according to the settings of the GPIO pins 1..N.
26Usage 26Usage
27----- 27-----
28 28
29gpio-i2cmux uses the platform bus, so you need to provide a struct 29i2c-gpio-mux uses the platform bus, so you need to provide a struct
30platform_device with the platform_data pointing to a struct 30platform_device with the platform_data pointing to a struct
31gpio_i2cmux_platform_data with the I2C adapter number of the master 31gpio_i2cmux_platform_data with the I2C adapter number of the master
32bus, the number of bus segments to create and the GPIO pins used 32bus, the number of bus segments to create and the GPIO pins used
33to control it. See include/linux/gpio-i2cmux.h for details. 33to control it. See include/linux/i2c-gpio-mux.h for details.
34 34
35E.G. something like this for a MUX providing 4 bus segments 35E.G. something like this for a MUX providing 4 bus segments
36controlled through 3 GPIO pins: 36controlled through 3 GPIO pins:
37 37
38#include <linux/gpio-i2cmux.h> 38#include <linux/i2c-gpio-mux.h>
39#include <linux/platform_device.h> 39#include <linux/platform_device.h>
40 40
41static const unsigned myboard_gpiomux_gpios[] = { 41static const unsigned myboard_gpiomux_gpios[] = {
@@ -57,7 +57,7 @@ static struct gpio_i2cmux_platform_data myboard_i2cmux_data = {
57}; 57};
58 58
59static struct platform_device myboard_i2cmux = { 59static struct platform_device myboard_i2cmux = {
60 .name = "gpio-i2cmux", 60 .name = "i2c-gpio-mux",
61 .id = 0, 61 .id = 0,
62 .dev = { 62 .dev = {
63 .platform_data = &myboard_i2cmux_data, 63 .platform_data = &myboard_i2cmux_data,
diff --git a/Documentation/initrd.txt b/Documentation/initrd.txt
index 1ba84f3584e3..4e1839ccb555 100644
--- a/Documentation/initrd.txt
+++ b/Documentation/initrd.txt
@@ -362,5 +362,5 @@ Resources
362 http://www.almesberger.net/cv/papers/ols2k-9.ps.gz 362 http://www.almesberger.net/cv/papers/ols2k-9.ps.gz
363[2] newlib package (experimental), with initrd example 363[2] newlib package (experimental), with initrd example
364 http://sources.redhat.com/newlib/ 364 http://sources.redhat.com/newlib/
365[3] Brouwer, Andries; "util-linux: Miscellaneous utilities for Linux" 365[3] util-linux: Miscellaneous utilities for Linux
366 ftp://ftp.win.tue.nl/pub/linux-local/utils/util-linux/ 366 http://www.kernel.org/pub/linux/utils/util-linux/
diff --git a/Documentation/kbuild/kbuild.txt b/Documentation/kbuild/kbuild.txt
index 68e32bb6bd80..6466704d47b5 100644
--- a/Documentation/kbuild/kbuild.txt
+++ b/Documentation/kbuild/kbuild.txt
@@ -50,6 +50,10 @@ LDFLAGS_MODULE
50-------------------------------------------------- 50--------------------------------------------------
51Additional options used for $(LD) when linking modules. 51Additional options used for $(LD) when linking modules.
52 52
53LDFLAGS_vmlinux
54--------------------------------------------------
55Additional options passed to final link of vmlinux.
56
53KBUILD_VERBOSE 57KBUILD_VERBOSE
54-------------------------------------------------- 58--------------------------------------------------
55Set the kbuild verbosity. Can be assigned same values as "V=...". 59Set the kbuild verbosity. Can be assigned same values as "V=...".
@@ -214,3 +218,18 @@ KBUILD_BUILD_USER, KBUILD_BUILD_HOST
214These two variables allow to override the user@host string displayed during 218These two variables allow to override the user@host string displayed during
215boot and in /proc/version. The default value is the output of the commands 219boot and in /proc/version. The default value is the output of the commands
216whoami and host, respectively. 220whoami and host, respectively.
221
222KBUILD_LDS
223--------------------------------------------------
224The linker script with full path. Assigned by the top-level Makefile.
225
226KBUILD_VMLINUX_INIT
227--------------------------------------------------
228All object files for the init (first) part of vmlinux.
229Files specified with KBUILD_VMLINUX_INIT are linked first.
230
231KBUILD_VMLINUX_MAIN
232--------------------------------------------------
233All object files for the main part of vmlinux.
234KBUILD_VMLINUX_INIT and KBUILD_VMLINUX_MAIN together specify
235all the object files used to link vmlinux.
diff --git a/Documentation/kbuild/kconfig.txt b/Documentation/kbuild/kconfig.txt
index 9d5f2a90dca9..a09f1a6a830c 100644
--- a/Documentation/kbuild/kconfig.txt
+++ b/Documentation/kbuild/kconfig.txt
@@ -53,15 +53,15 @@ KCONFIG_ALLCONFIG
53-------------------------------------------------- 53--------------------------------------------------
54(partially based on lkml email from/by Rob Landley, re: miniconfig) 54(partially based on lkml email from/by Rob Landley, re: miniconfig)
55-------------------------------------------------- 55--------------------------------------------------
56The allyesconfig/allmodconfig/allnoconfig/randconfig variants can 56The allyesconfig/allmodconfig/allnoconfig/randconfig variants can also
57also use the environment variable KCONFIG_ALLCONFIG as a flag or a 57use the environment variable KCONFIG_ALLCONFIG as a flag or a filename
58filename that contains config symbols that the user requires to be 58that contains config symbols that the user requires to be set to a
59set to a specific value. If KCONFIG_ALLCONFIG is used without a 59specific value. If KCONFIG_ALLCONFIG is used without a filename where
60filename, "make *config" checks for a file named 60KCONFIG_ALLCONFIG == "" or KCONFIG_ALLCONFIG == "1", "make *config"
61"all{yes/mod/no/def/random}.config" (corresponding to the *config command 61checks for a file named "all{yes/mod/no/def/random}.config"
62that was used) for symbol values that are to be forced. If this file 62(corresponding to the *config command that was used) for symbol values
63is not found, it checks for a file named "all.config" to contain forced 63that are to be forced. If this file is not found, it checks for a
64values. 64file named "all.config" to contain forced values.
65 65
66This enables you to create "miniature" config (miniconfig) or custom 66This enables you to create "miniature" config (miniconfig) or custom
67config files containing just the config symbols that you are interested 67config files containing just the config symbols that you are interested
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index b69cfdc12112..b40b413db88e 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -397,8 +397,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
397 atkbd.softrepeat= [HW] 397 atkbd.softrepeat= [HW]
398 Use software keyboard repeat 398 Use software keyboard repeat
399 399
400 autotest [IA-64]
401
402 baycom_epp= [HW,AX25] 400 baycom_epp= [HW,AX25]
403 Format: <io>,<mode> 401 Format: <io>,<mode>
404 402
@@ -508,6 +506,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
508 Also note the kernel might malfunction if you disable 506 Also note the kernel might malfunction if you disable
509 some critical bits. 507 some critical bits.
510 508
509 cma=nn[MG] [ARM,KNL]
510 Sets the size of kernel global memory area for contiguous
511 memory allocations. For more information, see
512 include/linux/dma-contiguous.h
513
511 cmo_free_hint= [PPC] Format: { yes | no } 514 cmo_free_hint= [PPC] Format: { yes | no }
512 Specify whether pages are marked as being inactive 515 Specify whether pages are marked as being inactive
513 when they are freed. This is used in CMO environments 516 when they are freed. This is used in CMO environments
@@ -515,6 +518,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
515 a hypervisor. 518 a hypervisor.
516 Default: yes 519 Default: yes
517 520
521 coherent_pool=nn[KMG] [ARM,KNL]
522 Sets the size of memory pool for coherent, atomic dma
523 allocations if Contiguous Memory Allocator (CMA) is used.
524
518 code_bytes [X86] How many bytes of object code to print 525 code_bytes [X86] How many bytes of object code to print
519 in an oops report. 526 in an oops report.
520 Range: 0 - 8192 527 Range: 0 - 8192
@@ -1444,8 +1451,6 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
1444 devices can be requested on-demand with the 1451 devices can be requested on-demand with the
1445 /dev/loop-control interface. 1452 /dev/loop-control interface.
1446 1453
1447 mcatest= [IA-64]
1448
1449 mce [X86-32] Machine Check Exception 1454 mce [X86-32] Machine Check Exception
1450 1455
1451 mce=option [X86-64] See Documentation/x86/x86_64/boot-options.txt 1456 mce=option [X86-64] See Documentation/x86/x86_64/boot-options.txt
diff --git a/Documentation/leds/ledtrig-transient.txt b/Documentation/leds/ledtrig-transient.txt
new file mode 100644
index 000000000000..3bd38b487df1
--- /dev/null
+++ b/Documentation/leds/ledtrig-transient.txt
@@ -0,0 +1,152 @@
1LED Transient Trigger
2=====================
3
4The leds timer trigger does not currently have an interface to activate
5a one shot timer. The current support allows for setting two timers, one for
6specifying how long a state is to be on, and the second for how long the state
7is to be off. The delay_on value specifies the time period an LED should stay
8in on state, followed by a delay_off value that specifies how long the LED
9should stay in off state. The on and off cycle repeats until the trigger
10gets deactivated. There is no provision for one time activation to implement
11features that require an on or off state to be held just once and then stay in
12the original state forever.
13
14Without one shot timer interface, user space can still use timer trigger to
15set a timer to hold a state, however when user space application crashes or
16goes away without deactivating the timer, the hardware will be left in that
17state permanently.
18
19As a specific example of this use-case, let's look at vibrate feature on
20phones. Vibrate function on phones is implemented using PWM pins on SoC or
21PMIC. There is a need to activate one shot timer to control the vibrate
22feature, to prevent user space crashes leaving the phone in vibrate mode
23permanently causing the battery to drain.
24
25Transient trigger addresses the need for one shot timer activation. The
26transient trigger can be enabled and disabled just like the other leds
27triggers.
28
29When an led class device driver registers itself, it can specify all leds
30triggers it supports and a default trigger. During registration, activation
31routine for the default trigger gets called. During registration of an led
32class device, the LED state does not change.
33
34When the driver unregisters, deactivation routine for the currently active
35trigger will be called, and LED state is changed to LED_OFF.
36
37Driver suspend changes the LED state to LED_OFF and resume doesn't change
38the state. Please note that there is no explicit interaction between the
39suspend and resume actions and the currently enabled trigger. LED state
40changes are suspended while the driver is in suspend state. Any timers
41that are active at the time driver gets suspended, continue to run, without
42being able to actually change the LED state. Once driver is resumed, triggers
43start functioning again.
44
45LED state changes are controlled using brightness which is a common led
46class device property. When brightness is set to 0 from user space via
47echo 0 > brightness, it will result in deactivating the current trigger.
48
49Transient trigger uses standard register and unregister interfaces. During
50trigger registration, for each led class device that specifies this trigger
51as its default trigger, trigger activation routine will get called. During
52registration, the LED state does not change, unless there is another trigger
53active, in which case LED state changes to LED_OFF.
54
55During trigger unregistration, LED state gets changed to LED_OFF.
56
57Transient trigger activation routine doesn't change the LED state. It
58creates its properties and does its initialization. Transient trigger
59deactivation routine will cancel any timer that is active before it cleans
60up and removes the properties it created. It will restore the LED state to
61non-transient state. When driver gets suspended, irrespective of the transient
62state, the LED state changes to LED_OFF.
63
64Transient trigger can be enabled and disabled from user space on led class
65devices, that support this trigger as shown below:
66
67echo transient > trigger
68echo none > trigger
69
70NOTE: Add a new property trigger state to control the state.
71
72This trigger exports three properties, activate, state, and duration. When
73transient trigger is activated these properties are set to default values.
74
75- duration allows setting timer value in msecs. The initial value is 0.
76- activate allows activating and deactivating the timer specified by
77 duration as needed. The initial and default value is 0. This will allow
78 duration to be set after trigger activation.
79- state allows user to specify a transient state to be held for the specified
80 duration.
81
82 activate - one shot timer activate mechanism.
83 1 when activated, 0 when deactivated.
84 default value is zero when transient trigger is enabled,
85 to allow duration to be set.
86
87 activate state indicates a timer with a value of specified
88 duration running.
89 deactivated state indicates that there is no active timer
90 running.
91
92 duration - one shot timer value. When activate is set, duration value
93 is used to start a timer that runs once. This value doesn't
94 get changed by the trigger unless user does a set via
95 echo new_value > duration
96
97 state - transient state to be held. It has two values 0 or 1. 0 maps
98 to LED_OFF and 1 maps to LED_FULL. The specified state is
99 held for the duration of the one shot timer and then the
100 state gets changed to the non-transient state which is the
101 inverse of transient state.
102 If state = LED_FULL, when the timer runs out the state will
103 go back to LED_OFF.
104 If state = LED_OFF, when the timer runs out the state will
105 go back to LED_FULL.
106 Please note that current LED state is not checked prior to
107 changing the state to the specified state.
108 Driver could map these values to inverted depending on the
109 default states it defines for the LED in its brightness_set()
110 interface which is called from the led brightness_set()
111 interfaces to control the LED state.
112
113When timer expires activate goes back to deactivated state, duration is left
114at the set value to be used when activate is set at a future time. This will
115allow user app to set the time once and activate it to run it once for the
116specified value as needed. When timer expires, state is restored to the
117non-transient state which is the inverse of the transient state.
118
119 echo 1 > activate - starts timer = duration when duration is not 0.
120 echo 0 > activate - cancels currently running timer.
121 echo n > duration - stores timer value to be used upon next
122 activate. Currently active timer if
123 any, continues to run for the specified time.
124 echo 0 > duration - stores timer value to be used upon next
125 activate. Currently active timer if any,
126 continues to run for the specified time.
127 echo 1 > state - stores desired transient state LED_FULL to be
128 held for the specified duration.
129 echo 0 > state - stores desired transient state LED_OFF to be
130 held for the specified duration.
131
132What is not supported:
133======================
134- Timer activation is one shot and extending and/or shortening the timer
135 is not supported.
136
137Example use-case 1:
138 echo transient > trigger
139 echo n > duration
140 echo 1 > state
141repeat the following step as needed:
142 echo 1 > activate - start timer = duration to run once
143 echo 1 > activate - start timer = duration to run once
144 echo none > trigger
145
146This trigger is intended to be used for the following example use cases:
147 - Control of vibrate (phones, tablets etc.) hardware by user space app.
148 - Use of LED by user space app as activity indicator.
149 - Use of LED by user space app as a kind of watchdog indicator -- as
150 long as the app is alive, it can keep the LED illuminated, if it dies
151 the LED will be extinguished automatically.
152 - Use by any user space app that needs a transient GPIO output.
diff --git a/Documentation/virtual/kvm/api.txt b/Documentation/virtual/kvm/api.txt
index 6386f8c0482e..930126698a0f 100644
--- a/Documentation/virtual/kvm/api.txt
+++ b/Documentation/virtual/kvm/api.txt
@@ -2,6 +2,7 @@ The Definitive KVM (Kernel-based Virtual Machine) API Documentation
2=================================================================== 2===================================================================
3 3
41. General description 41. General description
5----------------------
5 6
6The kvm API is a set of ioctls that are issued to control various aspects 7The kvm API is a set of ioctls that are issued to control various aspects
7of a virtual machine. The ioctls belong to three classes 8of a virtual machine. The ioctls belong to three classes
@@ -23,7 +24,9 @@ of a virtual machine. The ioctls belong to three classes
23 Only run vcpu ioctls from the same thread that was used to create the 24 Only run vcpu ioctls from the same thread that was used to create the
24 vcpu. 25 vcpu.
25 26
27
262. File descriptors 282. File descriptors
29-------------------
27 30
28The kvm API is centered around file descriptors. An initial 31The kvm API is centered around file descriptors. An initial
29open("/dev/kvm") obtains a handle to the kvm subsystem; this handle 32open("/dev/kvm") obtains a handle to the kvm subsystem; this handle
@@ -41,7 +44,9 @@ not cause harm to the host, their actual behavior is not guaranteed by
41the API. The only supported use is one virtual machine per process, 44the API. The only supported use is one virtual machine per process,
42and one vcpu per thread. 45and one vcpu per thread.
43 46
47
443. Extensions 483. Extensions
49-------------
45 50
46As of Linux 2.6.22, the KVM ABI has been stabilized: no backward 51As of Linux 2.6.22, the KVM ABI has been stabilized: no backward
47incompatible change are allowed. However, there is an extension 52incompatible change are allowed. However, there is an extension
@@ -53,7 +58,9 @@ Instead, kvm defines extension identifiers and a facility to query
53whether a particular extension identifier is available. If it is, a 58whether a particular extension identifier is available. If it is, a
54set of ioctls is available for application use. 59set of ioctls is available for application use.
55 60
61
564. API description 624. API description
63------------------
57 64
58This section describes ioctls that can be used to control kvm guests. 65This section describes ioctls that can be used to control kvm guests.
59For each ioctl, the following information is provided along with a 66For each ioctl, the following information is provided along with a
@@ -75,6 +82,7 @@ description:
75 Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL) 82 Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL)
76 are not detailed, but errors with specific meanings are. 83 are not detailed, but errors with specific meanings are.
77 84
85
784.1 KVM_GET_API_VERSION 864.1 KVM_GET_API_VERSION
79 87
80Capability: basic 88Capability: basic
@@ -90,6 +98,7 @@ supported. Applications should refuse to run if KVM_GET_API_VERSION
90returns a value other than 12. If this check passes, all ioctls 98returns a value other than 12. If this check passes, all ioctls
91described as 'basic' will be available. 99described as 'basic' will be available.
92 100
101
934.2 KVM_CREATE_VM 1024.2 KVM_CREATE_VM
94 103
95Capability: basic 104Capability: basic
@@ -109,6 +118,7 @@ In order to create user controlled virtual machines on S390, check
109KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL as 118KVM_CAP_S390_UCONTROL and use the flag KVM_VM_S390_UCONTROL as
110privileged user (CAP_SYS_ADMIN). 119privileged user (CAP_SYS_ADMIN).
111 120
121
1124.3 KVM_GET_MSR_INDEX_LIST 1224.3 KVM_GET_MSR_INDEX_LIST
113 123
114Capability: basic 124Capability: basic
@@ -135,6 +145,7 @@ Note: if kvm indicates support for MCE (KVM_CAP_MCE), then the MCE bank MSRs are
135not returned in the MSR list, as different vcpus can have a different number 145not returned in the MSR list, as different vcpus can have a different number
136of banks, as set via the KVM_X86_SETUP_MCE ioctl. 146of banks, as set via the KVM_X86_SETUP_MCE ioctl.
137 147
148
1384.4 KVM_CHECK_EXTENSION 1494.4 KVM_CHECK_EXTENSION
139 150
140Capability: basic 151Capability: basic
@@ -149,6 +160,7 @@ receives an integer that describes the extension availability.
149Generally 0 means no and 1 means yes, but some extensions may report 160Generally 0 means no and 1 means yes, but some extensions may report
150additional information in the integer return value. 161additional information in the integer return value.
151 162
163
1524.5 KVM_GET_VCPU_MMAP_SIZE 1644.5 KVM_GET_VCPU_MMAP_SIZE
153 165
154Capability: basic 166Capability: basic
@@ -161,6 +173,7 @@ The KVM_RUN ioctl (cf.) communicates with userspace via a shared
161memory region. This ioctl returns the size of that region. See the 173memory region. This ioctl returns the size of that region. See the
162KVM_RUN documentation for details. 174KVM_RUN documentation for details.
163 175
176
1644.6 KVM_SET_MEMORY_REGION 1774.6 KVM_SET_MEMORY_REGION
165 178
166Capability: basic 179Capability: basic
@@ -171,6 +184,7 @@ Returns: 0 on success, -1 on error
171 184
172This ioctl is obsolete and has been removed. 185This ioctl is obsolete and has been removed.
173 186
187
1744.7 KVM_CREATE_VCPU 1884.7 KVM_CREATE_VCPU
175 189
176Capability: basic 190Capability: basic
@@ -223,6 +237,7 @@ machines, the resulting vcpu fd can be memory mapped at page offset
223KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of the virtual 237KVM_S390_SIE_PAGE_OFFSET in order to obtain a memory map of the virtual
224cpu's hardware control block. 238cpu's hardware control block.
225 239
240
2264.8 KVM_GET_DIRTY_LOG (vm ioctl) 2414.8 KVM_GET_DIRTY_LOG (vm ioctl)
227 242
228Capability: basic 243Capability: basic
@@ -246,6 +261,7 @@ since the last call to this ioctl. Bit 0 is the first page in the
246memory slot. Ensure the entire structure is cleared to avoid padding 261memory slot. Ensure the entire structure is cleared to avoid padding
247issues. 262issues.
248 263
264
2494.9 KVM_SET_MEMORY_ALIAS 2654.9 KVM_SET_MEMORY_ALIAS
250 266
251Capability: basic 267Capability: basic
@@ -256,6 +272,7 @@ Returns: 0 (success), -1 (error)
256 272
257This ioctl is obsolete and has been removed. 273This ioctl is obsolete and has been removed.
258 274
275
2594.10 KVM_RUN 2764.10 KVM_RUN
260 277
261Capability: basic 278Capability: basic
@@ -272,6 +289,7 @@ obtained by mmap()ing the vcpu fd at offset 0, with the size given by
272KVM_GET_VCPU_MMAP_SIZE. The parameter block is formatted as a 'struct 289KVM_GET_VCPU_MMAP_SIZE. The parameter block is formatted as a 'struct
273kvm_run' (see below). 290kvm_run' (see below).
274 291
292
2754.11 KVM_GET_REGS 2934.11 KVM_GET_REGS
276 294
277Capability: basic 295Capability: basic
@@ -292,6 +310,7 @@ struct kvm_regs {
292 __u64 rip, rflags; 310 __u64 rip, rflags;
293}; 311};
294 312
313
2954.12 KVM_SET_REGS 3144.12 KVM_SET_REGS
296 315
297Capability: basic 316Capability: basic
@@ -304,6 +323,7 @@ Writes the general purpose registers into the vcpu.
304 323
305See KVM_GET_REGS for the data structure. 324See KVM_GET_REGS for the data structure.
306 325
326
3074.13 KVM_GET_SREGS 3274.13 KVM_GET_SREGS
308 328
309Capability: basic 329Capability: basic
@@ -331,6 +351,7 @@ interrupt_bitmap is a bitmap of pending external interrupts. At most
331one bit may be set. This interrupt has been acknowledged by the APIC 351one bit may be set. This interrupt has been acknowledged by the APIC
332but not yet injected into the cpu core. 352but not yet injected into the cpu core.
333 353
354
3344.14 KVM_SET_SREGS 3554.14 KVM_SET_SREGS
335 356
336Capability: basic 357Capability: basic
@@ -342,6 +363,7 @@ Returns: 0 on success, -1 on error
342Writes special registers into the vcpu. See KVM_GET_SREGS for the 363Writes special registers into the vcpu. See KVM_GET_SREGS for the
343data structures. 364data structures.
344 365
366
3454.15 KVM_TRANSLATE 3674.15 KVM_TRANSLATE
346 368
347Capability: basic 369Capability: basic
@@ -365,6 +387,7 @@ struct kvm_translation {
365 __u8 pad[5]; 387 __u8 pad[5];
366}; 388};
367 389
390
3684.16 KVM_INTERRUPT 3914.16 KVM_INTERRUPT
369 392
370Capability: basic 393Capability: basic
@@ -413,6 +436,7 @@ c) KVM_INTERRUPT_SET_LEVEL
413Note that any value for 'irq' other than the ones stated above is invalid 436Note that any value for 'irq' other than the ones stated above is invalid
414and incurs unexpected behavior. 437and incurs unexpected behavior.
415 438
439
4164.17 KVM_DEBUG_GUEST 4404.17 KVM_DEBUG_GUEST
417 441
418Capability: basic 442Capability: basic
@@ -423,6 +447,7 @@ Returns: -1 on error
423 447
424Support for this has been removed. Use KVM_SET_GUEST_DEBUG instead. 448Support for this has been removed. Use KVM_SET_GUEST_DEBUG instead.
425 449
450
4264.18 KVM_GET_MSRS 4514.18 KVM_GET_MSRS
427 452
428Capability: basic 453Capability: basic
@@ -451,6 +476,7 @@ Application code should set the 'nmsrs' member (which indicates the
451size of the entries array) and the 'index' member of each array entry. 476size of the entries array) and the 'index' member of each array entry.
452kvm will fill in the 'data' member. 477kvm will fill in the 'data' member.
453 478
479
4544.19 KVM_SET_MSRS 4804.19 KVM_SET_MSRS
455 481
456Capability: basic 482Capability: basic
@@ -466,6 +492,7 @@ Application code should set the 'nmsrs' member (which indicates the
466size of the entries array), and the 'index' and 'data' members of each 492size of the entries array), and the 'index' and 'data' members of each
467array entry. 493array entry.
468 494
495
4694.20 KVM_SET_CPUID 4964.20 KVM_SET_CPUID
470 497
471Capability: basic 498Capability: basic
@@ -494,6 +521,7 @@ struct kvm_cpuid {
494 struct kvm_cpuid_entry entries[0]; 521 struct kvm_cpuid_entry entries[0];
495}; 522};
496 523
524
4974.21 KVM_SET_SIGNAL_MASK 5254.21 KVM_SET_SIGNAL_MASK
498 526
499Capability: basic 527Capability: basic
@@ -516,6 +544,7 @@ struct kvm_signal_mask {
516 __u8 sigset[0]; 544 __u8 sigset[0];
517}; 545};
518 546
547
5194.22 KVM_GET_FPU 5484.22 KVM_GET_FPU
520 549
521Capability: basic 550Capability: basic
@@ -541,6 +570,7 @@ struct kvm_fpu {
541 __u32 pad2; 570 __u32 pad2;
542}; 571};
543 572
573
5444.23 KVM_SET_FPU 5744.23 KVM_SET_FPU
545 575
546Capability: basic 576Capability: basic
@@ -566,6 +596,7 @@ struct kvm_fpu {
566 __u32 pad2; 596 __u32 pad2;
567}; 597};
568 598
599
5694.24 KVM_CREATE_IRQCHIP 6004.24 KVM_CREATE_IRQCHIP
570 601
571Capability: KVM_CAP_IRQCHIP 602Capability: KVM_CAP_IRQCHIP
@@ -579,6 +610,7 @@ ioapic, a virtual PIC (two PICs, nested), and sets up future vcpus to have a
579local APIC. IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23 610local APIC. IRQ routing for GSIs 0-15 is set to both PIC and IOAPIC; GSI 16-23
580only go to the IOAPIC. On ia64, an IOSAPIC is created. 611only go to the IOAPIC. On ia64, an IOSAPIC is created.
581 612
613
5824.25 KVM_IRQ_LINE 6144.25 KVM_IRQ_LINE
583 615
584Capability: KVM_CAP_IRQCHIP 616Capability: KVM_CAP_IRQCHIP
@@ -600,6 +632,7 @@ struct kvm_irq_level {
600 __u32 level; /* 0 or 1 */ 632 __u32 level; /* 0 or 1 */
601}; 633};
602 634
635
6034.26 KVM_GET_IRQCHIP 6364.26 KVM_GET_IRQCHIP
604 637
605Capability: KVM_CAP_IRQCHIP 638Capability: KVM_CAP_IRQCHIP
@@ -621,6 +654,7 @@ struct kvm_irqchip {
621 } chip; 654 } chip;
622}; 655};
623 656
657
6244.27 KVM_SET_IRQCHIP 6584.27 KVM_SET_IRQCHIP
625 659
626Capability: KVM_CAP_IRQCHIP 660Capability: KVM_CAP_IRQCHIP
@@ -642,6 +676,7 @@ struct kvm_irqchip {
642 } chip; 676 } chip;
643}; 677};
644 678
679
6454.28 KVM_XEN_HVM_CONFIG 6804.28 KVM_XEN_HVM_CONFIG
646 681
647Capability: KVM_CAP_XEN_HVM 682Capability: KVM_CAP_XEN_HVM
@@ -666,6 +701,7 @@ struct kvm_xen_hvm_config {
666 __u8 pad2[30]; 701 __u8 pad2[30];
667}; 702};
668 703
704
6694.29 KVM_GET_CLOCK 7054.29 KVM_GET_CLOCK
670 706
671Capability: KVM_CAP_ADJUST_CLOCK 707Capability: KVM_CAP_ADJUST_CLOCK
@@ -684,6 +720,7 @@ struct kvm_clock_data {
684 __u32 pad[9]; 720 __u32 pad[9];
685}; 721};
686 722
723
6874.30 KVM_SET_CLOCK 7244.30 KVM_SET_CLOCK
688 725
689Capability: KVM_CAP_ADJUST_CLOCK 726Capability: KVM_CAP_ADJUST_CLOCK
@@ -702,6 +739,7 @@ struct kvm_clock_data {
702 __u32 pad[9]; 739 __u32 pad[9];
703}; 740};
704 741
742
7054.31 KVM_GET_VCPU_EVENTS 7434.31 KVM_GET_VCPU_EVENTS
706 744
707Capability: KVM_CAP_VCPU_EVENTS 745Capability: KVM_CAP_VCPU_EVENTS
@@ -741,6 +779,7 @@ struct kvm_vcpu_events {
741KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that 779KVM_VCPUEVENT_VALID_SHADOW may be set in the flags field to signal that
742interrupt.shadow contains a valid state. Otherwise, this field is undefined. 780interrupt.shadow contains a valid state. Otherwise, this field is undefined.
743 781
782
7444.32 KVM_SET_VCPU_EVENTS 7834.32 KVM_SET_VCPU_EVENTS
745 784
746Capability: KVM_CAP_VCPU_EVENTS 785Capability: KVM_CAP_VCPU_EVENTS
@@ -767,6 +806,7 @@ If KVM_CAP_INTR_SHADOW is available, KVM_VCPUEVENT_VALID_SHADOW can be set in
767the flags field to signal that interrupt.shadow contains a valid state and 806the flags field to signal that interrupt.shadow contains a valid state and
768shall be written into the VCPU. 807shall be written into the VCPU.
769 808
809
7704.33 KVM_GET_DEBUGREGS 8104.33 KVM_GET_DEBUGREGS
771 811
772Capability: KVM_CAP_DEBUGREGS 812Capability: KVM_CAP_DEBUGREGS
@@ -785,6 +825,7 @@ struct kvm_debugregs {
785 __u64 reserved[9]; 825 __u64 reserved[9];
786}; 826};
787 827
828
7884.34 KVM_SET_DEBUGREGS 8294.34 KVM_SET_DEBUGREGS
789 830
790Capability: KVM_CAP_DEBUGREGS 831Capability: KVM_CAP_DEBUGREGS
@@ -798,6 +839,7 @@ Writes debug registers into the vcpu.
798See KVM_GET_DEBUGREGS for the data structure. The flags field is unused 839See KVM_GET_DEBUGREGS for the data structure. The flags field is unused
799yet and must be cleared on entry. 840yet and must be cleared on entry.
800 841
842
8014.35 KVM_SET_USER_MEMORY_REGION 8434.35 KVM_SET_USER_MEMORY_REGION
802 844
803Capability: KVM_CAP_USER_MEM 845Capability: KVM_CAP_USER_MEM
@@ -844,6 +886,7 @@ It is recommended to use this API instead of the KVM_SET_MEMORY_REGION ioctl.
844The KVM_SET_MEMORY_REGION does not allow fine grained control over memory 886The KVM_SET_MEMORY_REGION does not allow fine grained control over memory
845allocation and is deprecated. 887allocation and is deprecated.
846 888
889
8474.36 KVM_SET_TSS_ADDR 8904.36 KVM_SET_TSS_ADDR
848 891
849Capability: KVM_CAP_SET_TSS_ADDR 892Capability: KVM_CAP_SET_TSS_ADDR
@@ -862,6 +905,7 @@ This ioctl is required on Intel-based hosts. This is needed on Intel hardware
862because of a quirk in the virtualization implementation (see the internals 905because of a quirk in the virtualization implementation (see the internals
863documentation when it pops into existence). 906documentation when it pops into existence).
864 907
908
8654.37 KVM_ENABLE_CAP 9094.37 KVM_ENABLE_CAP
866 910
867Capability: KVM_CAP_ENABLE_CAP 911Capability: KVM_CAP_ENABLE_CAP
@@ -897,6 +941,7 @@ function properly, this is the place to put them.
897 __u8 pad[64]; 941 __u8 pad[64];
898}; 942};
899 943
944
9004.38 KVM_GET_MP_STATE 9454.38 KVM_GET_MP_STATE
901 946
902Capability: KVM_CAP_MP_STATE 947Capability: KVM_CAP_MP_STATE
@@ -927,6 +972,7 @@ Possible values are:
927This ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel 972This ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel
928irqchip, the multiprocessing state must be maintained by userspace. 973irqchip, the multiprocessing state must be maintained by userspace.
929 974
975
9304.39 KVM_SET_MP_STATE 9764.39 KVM_SET_MP_STATE
931 977
932Capability: KVM_CAP_MP_STATE 978Capability: KVM_CAP_MP_STATE
@@ -941,6 +987,7 @@ arguments.
941This ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel 987This ioctl is only useful after KVM_CREATE_IRQCHIP. Without an in-kernel
942irqchip, the multiprocessing state must be maintained by userspace. 988irqchip, the multiprocessing state must be maintained by userspace.
943 989
990
9444.40 KVM_SET_IDENTITY_MAP_ADDR 9914.40 KVM_SET_IDENTITY_MAP_ADDR
945 992
946Capability: KVM_CAP_SET_IDENTITY_MAP_ADDR 993Capability: KVM_CAP_SET_IDENTITY_MAP_ADDR
@@ -959,6 +1006,7 @@ This ioctl is required on Intel-based hosts. This is needed on Intel hardware
959because of a quirk in the virtualization implementation (see the internals 1006because of a quirk in the virtualization implementation (see the internals
960documentation when it pops into existence). 1007documentation when it pops into existence).
961 1008
1009
9624.41 KVM_SET_BOOT_CPU_ID 10104.41 KVM_SET_BOOT_CPU_ID
963 1011
964Capability: KVM_CAP_SET_BOOT_CPU_ID 1012Capability: KVM_CAP_SET_BOOT_CPU_ID
@@ -971,6 +1019,7 @@ Define which vcpu is the Bootstrap Processor (BSP). Values are the same
971as the vcpu id in KVM_CREATE_VCPU. If this ioctl is not called, the default 1019as the vcpu id in KVM_CREATE_VCPU. If this ioctl is not called, the default
972is vcpu 0. 1020is vcpu 0.
973 1021
1022
9744.42 KVM_GET_XSAVE 10234.42 KVM_GET_XSAVE
975 1024
976Capability: KVM_CAP_XSAVE 1025Capability: KVM_CAP_XSAVE
@@ -985,6 +1034,7 @@ struct kvm_xsave {
985 1034
986This ioctl would copy current vcpu's xsave struct to the userspace. 1035This ioctl would copy current vcpu's xsave struct to the userspace.
987 1036
1037
9884.43 KVM_SET_XSAVE 10384.43 KVM_SET_XSAVE
989 1039
990Capability: KVM_CAP_XSAVE 1040Capability: KVM_CAP_XSAVE
@@ -999,6 +1049,7 @@ struct kvm_xsave {
999 1049
1000This ioctl would copy userspace's xsave struct to the kernel. 1050This ioctl would copy userspace's xsave struct to the kernel.
1001 1051
1052
10024.44 KVM_GET_XCRS 10534.44 KVM_GET_XCRS
1003 1054
1004Capability: KVM_CAP_XCRS 1055Capability: KVM_CAP_XCRS
@@ -1022,6 +1073,7 @@ struct kvm_xcrs {
1022 1073
1023This ioctl would copy current vcpu's xcrs to the userspace. 1074This ioctl would copy current vcpu's xcrs to the userspace.
1024 1075
1076
10254.45 KVM_SET_XCRS 10774.45 KVM_SET_XCRS
1026 1078
1027Capability: KVM_CAP_XCRS 1079Capability: KVM_CAP_XCRS
@@ -1045,6 +1097,7 @@ struct kvm_xcrs {
1045 1097
1046This ioctl would set vcpu's xcr to the value userspace specified. 1098This ioctl would set vcpu's xcr to the value userspace specified.
1047 1099
1100
10484.46 KVM_GET_SUPPORTED_CPUID 11014.46 KVM_GET_SUPPORTED_CPUID
1049 1102
1050Capability: KVM_CAP_EXT_CPUID 1103Capability: KVM_CAP_EXT_CPUID
@@ -1119,6 +1172,7 @@ support. Instead it is reported via
1119if that returns true and you use KVM_CREATE_IRQCHIP, or if you emulate the 1172if that returns true and you use KVM_CREATE_IRQCHIP, or if you emulate the
1120feature in userspace, then you can enable the feature for KVM_SET_CPUID2. 1173feature in userspace, then you can enable the feature for KVM_SET_CPUID2.
1121 1174
1175
11224.47 KVM_PPC_GET_PVINFO 11764.47 KVM_PPC_GET_PVINFO
1123 1177
1124Capability: KVM_CAP_PPC_GET_PVINFO 1178Capability: KVM_CAP_PPC_GET_PVINFO
@@ -1142,6 +1196,7 @@ of 4 instructions that make up a hypercall.
1142If any additional field gets added to this structure later on, a bit for that 1196If any additional field gets added to this structure later on, a bit for that
1143additional piece of information will be set in the flags bitmap. 1197additional piece of information will be set in the flags bitmap.
1144 1198
1199
11454.48 KVM_ASSIGN_PCI_DEVICE 12004.48 KVM_ASSIGN_PCI_DEVICE
1146 1201
1147Capability: KVM_CAP_DEVICE_ASSIGNMENT 1202Capability: KVM_CAP_DEVICE_ASSIGNMENT
@@ -1185,6 +1240,7 @@ Only PCI header type 0 devices with PCI BAR resources are supported by
1185device assignment. The user requesting this ioctl must have read/write 1240device assignment. The user requesting this ioctl must have read/write
1186access to the PCI sysfs resource files associated with the device. 1241access to the PCI sysfs resource files associated with the device.
1187 1242
1243
11884.49 KVM_DEASSIGN_PCI_DEVICE 12444.49 KVM_DEASSIGN_PCI_DEVICE
1189 1245
1190Capability: KVM_CAP_DEVICE_DEASSIGNMENT 1246Capability: KVM_CAP_DEVICE_DEASSIGNMENT
@@ -1198,6 +1254,7 @@ Ends PCI device assignment, releasing all associated resources.
1198See KVM_CAP_DEVICE_ASSIGNMENT for the data structure. Only assigned_dev_id is 1254See KVM_CAP_DEVICE_ASSIGNMENT for the data structure. Only assigned_dev_id is
1199used in kvm_assigned_pci_dev to identify the device. 1255used in kvm_assigned_pci_dev to identify the device.
1200 1256
1257
12014.50 KVM_ASSIGN_DEV_IRQ 12584.50 KVM_ASSIGN_DEV_IRQ
1202 1259
1203Capability: KVM_CAP_ASSIGN_DEV_IRQ 1260Capability: KVM_CAP_ASSIGN_DEV_IRQ
@@ -1231,6 +1288,7 @@ The following flags are defined:
1231It is not valid to specify multiple types per host or guest IRQ. However, the 1288It is not valid to specify multiple types per host or guest IRQ. However, the
1232IRQ type of host and guest can differ or can even be null. 1289IRQ type of host and guest can differ or can even be null.
1233 1290
1291
12344.51 KVM_DEASSIGN_DEV_IRQ 12924.51 KVM_DEASSIGN_DEV_IRQ
1235 1293
1236Capability: KVM_CAP_ASSIGN_DEV_IRQ 1294Capability: KVM_CAP_ASSIGN_DEV_IRQ
@@ -1245,6 +1303,7 @@ See KVM_ASSIGN_DEV_IRQ for the data structure. The target device is specified
1245by assigned_dev_id, flags must correspond to the IRQ type specified on 1303by assigned_dev_id, flags must correspond to the IRQ type specified on
1246KVM_ASSIGN_DEV_IRQ. Partial deassignment of host or guest IRQ is allowed. 1304KVM_ASSIGN_DEV_IRQ. Partial deassignment of host or guest IRQ is allowed.
1247 1305
1306
12484.52 KVM_SET_GSI_ROUTING 13074.52 KVM_SET_GSI_ROUTING
1249 1308
1250Capability: KVM_CAP_IRQ_ROUTING 1309Capability: KVM_CAP_IRQ_ROUTING
@@ -1293,6 +1352,7 @@ struct kvm_irq_routing_msi {
1293 __u32 pad; 1352 __u32 pad;
1294}; 1353};
1295 1354
1355
12964.53 KVM_ASSIGN_SET_MSIX_NR 13564.53 KVM_ASSIGN_SET_MSIX_NR
1297 1357
1298Capability: KVM_CAP_DEVICE_MSIX 1358Capability: KVM_CAP_DEVICE_MSIX
@@ -1314,6 +1374,7 @@ struct kvm_assigned_msix_nr {
1314 1374
1315#define KVM_MAX_MSIX_PER_DEV 256 1375#define KVM_MAX_MSIX_PER_DEV 256
1316 1376
1377
13174.54 KVM_ASSIGN_SET_MSIX_ENTRY 13784.54 KVM_ASSIGN_SET_MSIX_ENTRY
1318 1379
1319Capability: KVM_CAP_DEVICE_MSIX 1380Capability: KVM_CAP_DEVICE_MSIX
@@ -1332,7 +1393,8 @@ struct kvm_assigned_msix_entry {
1332 __u16 padding[3]; 1393 __u16 padding[3];
1333}; 1394};
1334 1395
13354.54 KVM_SET_TSC_KHZ 1396
13974.55 KVM_SET_TSC_KHZ
1336 1398
1337Capability: KVM_CAP_TSC_CONTROL 1399Capability: KVM_CAP_TSC_CONTROL
1338Architectures: x86 1400Architectures: x86
@@ -1343,7 +1405,8 @@ Returns: 0 on success, -1 on error
1343Specifies the tsc frequency for the virtual machine. The unit of the 1405Specifies the tsc frequency for the virtual machine. The unit of the
1344frequency is KHz. 1406frequency is KHz.
1345 1407
13464.55 KVM_GET_TSC_KHZ 1408
14094.56 KVM_GET_TSC_KHZ
1347 1410
1348Capability: KVM_CAP_GET_TSC_KHZ 1411Capability: KVM_CAP_GET_TSC_KHZ
1349Architectures: x86 1412Architectures: x86
@@ -1355,7 +1418,8 @@ Returns the tsc frequency of the guest. The unit of the return value is
1355KHz. If the host has unstable tsc this ioctl returns -EIO instead as an 1418KHz. If the host has unstable tsc this ioctl returns -EIO instead as an
1356error. 1419error.
1357 1420
13584.56 KVM_GET_LAPIC 1421
14224.57 KVM_GET_LAPIC
1359 1423
1360Capability: KVM_CAP_IRQCHIP 1424Capability: KVM_CAP_IRQCHIP
1361Architectures: x86 1425Architectures: x86
@@ -1371,7 +1435,8 @@ struct kvm_lapic_state {
1371Reads the Local APIC registers and copies them into the input argument. The 1435Reads the Local APIC registers and copies them into the input argument. The
1372data format and layout are the same as documented in the architecture manual. 1436data format and layout are the same as documented in the architecture manual.
1373 1437
13744.57 KVM_SET_LAPIC 1438
14394.58 KVM_SET_LAPIC
1375 1440
1376Capability: KVM_CAP_IRQCHIP 1441Capability: KVM_CAP_IRQCHIP
1377Architectures: x86 1442Architectures: x86
@@ -1387,7 +1452,8 @@ struct kvm_lapic_state {
1387Copies the input argument into the Local APIC registers. The data format 1452Copies the input argument into the Local APIC registers. The data format
1388and layout are the same as documented in the architecture manual. 1453and layout are the same as documented in the architecture manual.
1389 1454
13904.58 KVM_IOEVENTFD 1455
14564.59 KVM_IOEVENTFD
1391 1457
1392Capability: KVM_CAP_IOEVENTFD 1458Capability: KVM_CAP_IOEVENTFD
1393Architectures: all 1459Architectures: all
@@ -1417,7 +1483,8 @@ The following flags are defined:
1417If datamatch flag is set, the event will be signaled only if the written value 1483If datamatch flag is set, the event will be signaled only if the written value
1418to the registered address is equal to datamatch in struct kvm_ioeventfd. 1484to the registered address is equal to datamatch in struct kvm_ioeventfd.
1419 1485
14204.59 KVM_DIRTY_TLB 1486
14874.60 KVM_DIRTY_TLB
1421 1488
1422Capability: KVM_CAP_SW_TLB 1489Capability: KVM_CAP_SW_TLB
1423Architectures: ppc 1490Architectures: ppc
@@ -1449,7 +1516,8 @@ The "num_dirty" field is a performance hint for KVM to determine whether it
1449should skip processing the bitmap and just invalidate everything. It must 1516should skip processing the bitmap and just invalidate everything. It must
1450be set to the number of set bits in the bitmap. 1517be set to the number of set bits in the bitmap.
1451 1518
14524.60 KVM_ASSIGN_SET_INTX_MASK 1519
15204.61 KVM_ASSIGN_SET_INTX_MASK
1453 1521
1454Capability: KVM_CAP_PCI_2_3 1522Capability: KVM_CAP_PCI_2_3
1455Architectures: x86 1523Architectures: x86
@@ -1482,6 +1550,7 @@ See KVM_ASSIGN_DEV_IRQ for the data structure. The target device is specified
1482by assigned_dev_id. In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is 1550by assigned_dev_id. In the flags field, only KVM_DEV_ASSIGN_MASK_INTX is
1483evaluated. 1551evaluated.
1484 1552
1553
14854.62 KVM_CREATE_SPAPR_TCE 15544.62 KVM_CREATE_SPAPR_TCE
1486 1555
1487Capability: KVM_CAP_SPAPR_TCE 1556Capability: KVM_CAP_SPAPR_TCE
@@ -1517,6 +1586,7 @@ the entries written by kernel-handled H_PUT_TCE calls, and also lets
1517userspace update the TCE table directly which is useful in some 1586userspace update the TCE table directly which is useful in some
1518circumstances. 1587circumstances.
1519 1588
1589
15204.63 KVM_ALLOCATE_RMA 15904.63 KVM_ALLOCATE_RMA
1521 1591
1522Capability: KVM_CAP_PPC_RMA 1592Capability: KVM_CAP_PPC_RMA
@@ -1549,6 +1619,7 @@ is supported; 2 if the processor requires all virtual machines to have
1549an RMA, or 1 if the processor can use an RMA but doesn't require it, 1619an RMA, or 1 if the processor can use an RMA but doesn't require it,
1550because it supports the Virtual RMA (VRMA) facility. 1620because it supports the Virtual RMA (VRMA) facility.
1551 1621
1622
15524.64 KVM_NMI 16234.64 KVM_NMI
1553 1624
1554Capability: KVM_CAP_USER_NMI 1625Capability: KVM_CAP_USER_NMI
@@ -1574,6 +1645,7 @@ following algorithm:
1574Some guests configure the LINT1 NMI input to cause a panic, aiding in 1645Some guests configure the LINT1 NMI input to cause a panic, aiding in
1575debugging. 1646debugging.
1576 1647
1648
15774.65 KVM_S390_UCAS_MAP 16494.65 KVM_S390_UCAS_MAP
1578 1650
1579Capability: KVM_CAP_S390_UCONTROL 1651Capability: KVM_CAP_S390_UCONTROL
@@ -1593,6 +1665,7 @@ This ioctl maps the memory at "user_addr" with the length "length" to
1593the vcpu's address space starting at "vcpu_addr". All parameters need to 1665the vcpu's address space starting at "vcpu_addr". All parameters need to
1594be aligned by 1 megabyte. 1666be aligned by 1 megabyte.
1595 1667
1668
15964.66 KVM_S390_UCAS_UNMAP 16694.66 KVM_S390_UCAS_UNMAP
1597 1670
1598Capability: KVM_CAP_S390_UCONTROL 1671Capability: KVM_CAP_S390_UCONTROL
@@ -1612,6 +1685,7 @@ This ioctl unmaps the memory in the vcpu's address space starting at
1612"vcpu_addr" with the length "length". The field "user_addr" is ignored. 1685"vcpu_addr" with the length "length". The field "user_addr" is ignored.
1613All parameters need to be aligned by 1 megabyte. 1686All parameters need to be aligned by 1 megabyte.
1614 1687
1688
16154.67 KVM_S390_VCPU_FAULT 16894.67 KVM_S390_VCPU_FAULT
1616 1690
1617Capability: KVM_CAP_S390_UCONTROL 1691Capability: KVM_CAP_S390_UCONTROL
@@ -1628,6 +1702,7 @@ table upfront. This is useful to handle validity intercepts for user
1628controlled virtual machines to fault in the virtual cpu's lowcore pages 1702controlled virtual machines to fault in the virtual cpu's lowcore pages
1629prior to calling the KVM_RUN ioctl. 1703prior to calling the KVM_RUN ioctl.
1630 1704
1705
16314.68 KVM_SET_ONE_REG 17064.68 KVM_SET_ONE_REG
1632 1707
1633Capability: KVM_CAP_ONE_REG 1708Capability: KVM_CAP_ONE_REG
@@ -1653,6 +1728,7 @@ registers, find a list below:
1653 | | 1728 | |
1654 PPC | KVM_REG_PPC_HIOR | 64 1729 PPC | KVM_REG_PPC_HIOR | 64
1655 1730
1731
16564.69 KVM_GET_ONE_REG 17324.69 KVM_GET_ONE_REG
1657 1733
1658Capability: KVM_CAP_ONE_REG 1734Capability: KVM_CAP_ONE_REG
@@ -1669,7 +1745,193 @@ at the memory location pointed to by "addr".
1669The list of registers accessible using this interface is identical to the 1745The list of registers accessible using this interface is identical to the
1670list in 4.68. 1746list in 4.68.
1671 1747
1748
17494.70 KVM_KVMCLOCK_CTRL
1750
1751Capability: KVM_CAP_KVMCLOCK_CTRL
1752Architectures: Any that implement pvclocks (currently x86 only)
1753Type: vcpu ioctl
1754Parameters: None
1755Returns: 0 on success, -1 on error
1756
1757This signals to the host kernel that the specified guest is being paused by
1758userspace. The host will set a flag in the pvclock structure that is checked
1759from the soft lockup watchdog. The flag is part of the pvclock structure that
1760is shared between guest and host, specifically the second bit of the flags
1761field of the pvclock_vcpu_time_info structure. It will be set exclusively by
1762the host and read/cleared exclusively by the guest. The guest operation of
1763checking and clearing the flag must be an atomic operation so
1764load-link/store-conditional, or equivalent must be used. There are two cases
1765where the guest will clear the flag: when the soft lockup watchdog timer resets
1766itself or when a soft lockup is detected. This ioctl can be called any time
1767after pausing the vcpu, but before it is resumed.
1768
1769
17704.71 KVM_SIGNAL_MSI
1771
1772Capability: KVM_CAP_SIGNAL_MSI
1773Architectures: x86
1774Type: vm ioctl
1775Parameters: struct kvm_msi (in)
1776Returns: >0 on delivery, 0 if guest blocked the MSI, and -1 on error
1777
1778Directly inject a MSI message. Only valid with in-kernel irqchip that handles
1779MSI messages.
1780
1781struct kvm_msi {
1782 __u32 address_lo;
1783 __u32 address_hi;
1784 __u32 data;
1785 __u32 flags;
1786 __u8 pad[16];
1787};
1788
1789No flags are defined so far. The corresponding field must be 0.
1790
1791
17924.72 KVM_CREATE_PIT2
1793
1794Capability: KVM_CAP_PIT2
1795Architectures: x86
1796Type: vm ioctl
1797Parameters: struct kvm_pit_config (in)
1798Returns: 0 on success, -1 on error
1799
1800Creates an in-kernel device model for the i8254 PIT. This call is only valid
1801after enabling in-kernel irqchip support via KVM_CREATE_IRQCHIP. The following
1802parameters have to be passed:
1803
1804struct kvm_pit_config {
1805	__u32 flags;
1806	__u32 pad[15];
1807};
1808
1809Valid flags are:
1810
1811#define KVM_PIT_SPEAKER_DUMMY 1 /* emulate speaker port stub */
1812
1813PIT timer interrupts may use a per-VM kernel thread for injection. If it
1814exists, this thread will have a name of the following pattern:
1815
1816kvm-pit/<owner-process-pid>
1817
1818When running a guest with elevated priorities, the scheduling parameters of
1819this thread may have to be adjusted accordingly.
1820
1821This IOCTL replaces the obsolete KVM_CREATE_PIT.
1822
1823
18244.73 KVM_GET_PIT2
1825
1826Capability: KVM_CAP_PIT_STATE2
1827Architectures: x86
1828Type: vm ioctl
1829Parameters: struct kvm_pit_state2 (out)
1830Returns: 0 on success, -1 on error
1831
1832Retrieves the state of the in-kernel PIT model. Only valid after
1833KVM_CREATE_PIT2. The state is returned in the following structure:
1834
1835struct kvm_pit_state2 {
1836	struct kvm_pit_channel_state channels[3];
1837	__u32 flags;
1838	__u32 reserved[9];
1839};
1840
1841Valid flags are:
1842
1843/* disable PIT in HPET legacy mode */
1844#define KVM_PIT_FLAGS_HPET_LEGACY 0x00000001
1845
1846This IOCTL replaces the obsolete KVM_GET_PIT.
1847
1848
18494.74 KVM_SET_PIT2
1850
1851Capability: KVM_CAP_PIT_STATE2
1852Architectures: x86
1853Type: vm ioctl
1854Parameters: struct kvm_pit_state2 (in)
1855Returns: 0 on success, -1 on error
1856
1857Sets the state of the in-kernel PIT model. Only valid after KVM_CREATE_PIT2.
1858See KVM_GET_PIT2 for details on struct kvm_pit_state2.
1859
1860This IOCTL replaces the obsolete KVM_SET_PIT.
1861
1862
18634.75 KVM_PPC_GET_SMMU_INFO
1864
1865Capability: KVM_CAP_PPC_GET_SMMU_INFO
1866Architectures: powerpc
1867Type: vm ioctl
1868Parameters: None
1869Returns: 0 on success, -1 on error
1870
1871This populates and returns a structure describing the features of
1872the "Server" class MMU emulation supported by KVM.
1873This can in turn be used by userspace to generate the appropriate
1874device-tree properties for the guest operating system.
1875
1876The structure contains some global information, followed by an
1877array of supported segment page sizes:
1878
1879 struct kvm_ppc_smmu_info {
1880 __u64 flags;
1881 __u32 slb_size;
1882 __u32 pad;
1883 struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
1884 };
1885
1886The supported flags are:
1887
1888 - KVM_PPC_PAGE_SIZES_REAL:
1889 When that flag is set, guest page sizes must "fit" the backing
1890 store page sizes. When not set, any page size in the list can
1891 be used regardless of how they are backed by userspace.
1892
1893 - KVM_PPC_1T_SEGMENTS
1894 The emulated MMU supports 1T segments in addition to the
1895 standard 256M ones.
1896
1897The "slb_size" field indicates how many SLB entries are supported
1898
1899The "sps" array contains 8 entries indicating the supported base
1900page sizes for a segment in increasing order. Each entry is defined
1901as follows:
1902
1903 struct kvm_ppc_one_seg_page_size {
1904 __u32 page_shift; /* Base page shift of segment (or 0) */
1905 __u32 slb_enc; /* SLB encoding for BookS */
1906 struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
1907 };
1908
1909An entry with a "page_shift" of 0 is unused. Because the array is
1910organized in increasing order, a lookup can stop when encountering
1911such an entry.
1912
1913The "slb_enc" field provides the encoding to use in the SLB for the
1914page size. The bits are in positions such that the value can directly
1915be OR'ed into the "vsid" argument of the slbmte instruction.
1916
1917The "enc" array is a list which for each of those segment base page
1918size provides the list of supported actual page sizes (which can be
1919only larger or equal to the base page size), along with the
1920corresponding encoding in the hash PTE. Similarly, the array is
19218 entries sorted by increasing sizes and an entry with a "0" shift
1922is an empty entry and a terminator:
1923
1924 struct kvm_ppc_one_page_size {
1925 __u32 page_shift; /* Page shift (or 0) */
1926 __u32 pte_enc; /* Encoding in the HPTE (>>12) */
1927 };
1928
1929The "pte_enc" field provides a value that can be OR'ed into the hash
1930PTE's RPN field (ie, it needs to be shifted left by 12 to OR it
1931into the hash PTE second double word).
1932
16725. The kvm_run structure 19335. The kvm_run structure
1934------------------------
1673 1935
1674Application code obtains a pointer to the kvm_run structure by 1936Application code obtains a pointer to the kvm_run structure by
1675mmap()ing a vcpu fd. From that point, application code can control 1937mmap()ing a vcpu fd. From that point, application code can control
@@ -1910,7 +2172,9 @@ and usually define the validity of a groups of registers. (e.g. one bit
1910 2172
1911}; 2173};
1912 2174
2175
19136. Capabilities that can be enabled 21766. Capabilities that can be enabled
2177-----------------------------------
1914 2178
1915There are certain capabilities that change the behavior of the virtual CPU when 2179There are certain capabilities that change the behavior of the virtual CPU when
1916enabled. To enable them, please see section 4.37. Below you can find a list of 2180enabled. To enable them, please see section 4.37. Below you can find a list of
@@ -1926,6 +2190,7 @@ The following information is provided along with the description:
1926 Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL) 2190 Returns: the return value. General error numbers (EBADF, ENOMEM, EINVAL)
1927 are not detailed, but errors with specific meanings are. 2191 are not detailed, but errors with specific meanings are.
1928 2192
2193
19296.1 KVM_CAP_PPC_OSI 21946.1 KVM_CAP_PPC_OSI
1930 2195
1931Architectures: ppc 2196Architectures: ppc
@@ -1939,6 +2204,7 @@ between the guest and the host.
1939 2204
1940When this capability is enabled, KVM_EXIT_OSI can occur. 2205When this capability is enabled, KVM_EXIT_OSI can occur.
1941 2206
2207
19426.2 KVM_CAP_PPC_PAPR 22086.2 KVM_CAP_PPC_PAPR
1943 2209
1944Architectures: ppc 2210Architectures: ppc
@@ -1957,6 +2223,7 @@ HTAB invisible to the guest.
1957 2223
1958When this capability is enabled, KVM_EXIT_PAPR_HCALL can occur. 2224When this capability is enabled, KVM_EXIT_PAPR_HCALL can occur.
1959 2225
2226
19606.3 KVM_CAP_SW_TLB 22276.3 KVM_CAP_SW_TLB
1961 2228
1962Architectures: ppc 2229Architectures: ppc
diff --git a/Documentation/virtual/kvm/cpuid.txt b/Documentation/virtual/kvm/cpuid.txt
index 882068538c9c..83afe65d4966 100644
--- a/Documentation/virtual/kvm/cpuid.txt
+++ b/Documentation/virtual/kvm/cpuid.txt
@@ -10,11 +10,15 @@ a guest.
10KVM cpuid functions are: 10KVM cpuid functions are:
11 11
12function: KVM_CPUID_SIGNATURE (0x40000000) 12function: KVM_CPUID_SIGNATURE (0x40000000)
13returns : eax = 0, 13returns : eax = 0x40000001,
14 ebx = 0x4b4d564b, 14 ebx = 0x4b4d564b,
15 ecx = 0x564b4d56, 15 ecx = 0x564b4d56,
16 edx = 0x4d. 16 edx = 0x4d.
17Note that this value in ebx, ecx and edx corresponds to the string "KVMKVMKVM". 17Note that this value in ebx, ecx and edx corresponds to the string "KVMKVMKVM".
18The value in eax corresponds to the maximum cpuid function present in this leaf,
19and will be updated if more functions are added in the future.
20Note also that old hosts set eax value to 0x0. This should
21be interpreted as if the value was 0x40000001.
18This function queries the presence of KVM cpuid leafs. 22This function queries the presence of KVM cpuid leafs.
19 23
20 24
diff --git a/Documentation/virtual/kvm/msr.txt b/Documentation/virtual/kvm/msr.txt
index 50317809113d..96b41bd97523 100644
--- a/Documentation/virtual/kvm/msr.txt
+++ b/Documentation/virtual/kvm/msr.txt
@@ -109,6 +109,10 @@ MSR_KVM_SYSTEM_TIME_NEW: 0x4b564d01
109 0 | 24 | multiple cpus are guaranteed to 109 0 | 24 | multiple cpus are guaranteed to
110 | | be monotonic 110 | | be monotonic
111 ------------------------------------------------------------- 111 -------------------------------------------------------------
112 | | guest vcpu has been paused by
113 1 | N/A | the host
114 | | See 4.70 in api.txt
115 -------------------------------------------------------------
112 116
113 Availability of this MSR must be checked via bit 3 in 0x40000001 cpuid 117 Availability of this MSR must be checked via bit 3 in 0x40000001 cpuid
114 leaf prior to usage. 118 leaf prior to usage.
diff --git a/Documentation/vm/transhuge.txt b/Documentation/vm/transhuge.txt
index 29bdf62aac09..f734bb2a78dc 100644
--- a/Documentation/vm/transhuge.txt
+++ b/Documentation/vm/transhuge.txt
@@ -166,6 +166,68 @@ behavior. So to make them effective you need to restart any
166application that could have been using hugepages. This also applies to 166application that could have been using hugepages. This also applies to
167the regions registered in khugepaged. 167the regions registered in khugepaged.
168 168
169== Monitoring usage ==
170
171The number of transparent huge pages currently used by the system is
172available by reading the AnonHugePages field in /proc/meminfo. To
173identify what applications are using transparent huge pages, it is
174necessary to read /proc/PID/smaps and count the AnonHugePages fields
175for each mapping. Note that reading the smaps file is expensive and
176reading it frequently will incur overhead.
177
178There are a number of counters in /proc/vmstat that may be used to
179monitor how successfully the system is providing huge pages for use.
180
181thp_fault_alloc is incremented every time a huge page is successfully
182 allocated to handle a page fault. This applies to both the
183 first time a page is faulted and for COW faults.
184
185thp_collapse_alloc is incremented by khugepaged when it has found
186 a range of pages to collapse into one huge page and has
187 successfully allocated a new huge page to store the data.
188
189thp_fault_fallback is incremented if a page fault fails to allocate
190 a huge page and instead falls back to using small pages.
191
192thp_collapse_alloc_failed is incremented if khugepaged found a range
193 of pages that should be collapsed into one huge page but failed
194 the allocation.
195
196thp_split is incremented every time a huge page is split into base
197 pages. This can happen for a variety of reasons but a common
198 reason is that a huge page is old and is being reclaimed.
199
200As the system ages, allocating huge pages may be expensive as the
201system uses memory compaction to copy data around memory to free a
202huge page for use. There are some counters in /proc/vmstat to help
203monitor this overhead.
204
205compact_stall is incremented every time a process stalls to run
206 memory compaction so that a huge page is free for use.
207
208compact_success is incremented if the system compacted memory and
209 freed a huge page for use.
210
211compact_fail is incremented if the system tries to compact memory
212 but failed.
213
214compact_pages_moved is incremented each time a page is moved. If
215 this value is increasing rapidly, it implies that the system
216 is copying a lot of data to satisfy the huge page allocation.
217 It is possible that the cost of copying exceeds any savings
218 from reduced TLB misses.
219
220compact_pagemigrate_failed is incremented when the underlying mechanism
221 for moving a page failed.
222
223compact_blocks_moved is incremented each time memory compaction examines
224 a huge page aligned range of pages.
225
226It is possible to establish how long the stalls were using the function
227tracer to record how long was spent in __alloc_pages_nodemask and
228using the mm_page_alloc tracepoint to identify which allocations were
229for huge pages.
230
169== get_user_pages and follow_page == 231== get_user_pages and follow_page ==
170 232
171get_user_pages and follow_page if run on a hugepage, will return the 233get_user_pages and follow_page if run on a hugepage, will return the
diff --git a/MAINTAINERS b/MAINTAINERS
index 150a29f3cd33..a246490c95eb 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -1905,6 +1905,16 @@ F: Documentation/filesystems/coda.txt
1905F: fs/coda/ 1905F: fs/coda/
1906F: include/linux/coda*.h 1906F: include/linux/coda*.h
1907 1907
1908COMMON CLK FRAMEWORK
1909M: Mike Turquette <mturquette@ti.com>
1910M: Mike Turquette <mturquette@linaro.org>
1911L: linux-arm-kernel@lists.infradead.org (same as CLK API & CLKDEV)
1912T: git git://git.linaro.org/people/mturquette/linux.git
1913S: Maintained
1914F: drivers/clk/clk.c
1915F: drivers/clk/clk-*
1916F: include/linux/clk-pr*
1917
1908COMMON INTERNET FILE SYSTEM (CIFS) 1918COMMON INTERNET FILE SYSTEM (CIFS)
1909M: Steve French <sfrench@samba.org> 1919M: Steve French <sfrench@samba.org>
1910L: linux-cifs@vger.kernel.org 1920L: linux-cifs@vger.kernel.org
@@ -2978,9 +2988,9 @@ GENERIC GPIO I2C MULTIPLEXER DRIVER
2978M: Peter Korsgaard <peter.korsgaard@barco.com> 2988M: Peter Korsgaard <peter.korsgaard@barco.com>
2979L: linux-i2c@vger.kernel.org 2989L: linux-i2c@vger.kernel.org
2980S: Supported 2990S: Supported
2981F: drivers/i2c/muxes/gpio-i2cmux.c 2991F: drivers/i2c/muxes/i2c-mux-gpio.c
2982F: include/linux/gpio-i2cmux.h 2992F: include/linux/i2c-mux-gpio.h
2983F: Documentation/i2c/muxes/gpio-i2cmux 2993F: Documentation/i2c/muxes/i2c-mux-gpio
2984 2994
2985GENERIC HDLC (WAN) DRIVERS 2995GENERIC HDLC (WAN) DRIVERS
2986M: Krzysztof Halasa <khc@pm.waw.pl> 2996M: Krzysztof Halasa <khc@pm.waw.pl>
@@ -3222,10 +3232,8 @@ F: include/linux/clockchips.h
3222F: include/linux/hrtimer.h 3232F: include/linux/hrtimer.h
3223 3233
3224HIGH-SPEED SCC DRIVER FOR AX.25 3234HIGH-SPEED SCC DRIVER FOR AX.25
3225M: Klaus Kudielka <klaus.kudielka@ieee.org>
3226L: linux-hams@vger.kernel.org 3235L: linux-hams@vger.kernel.org
3227W: http://www.nt.tuwien.ac.at/~kkudielk/Linux/ 3236S: Orphan
3228S: Maintained
3229F: drivers/net/hamradio/dmascc.c 3237F: drivers/net/hamradio/dmascc.c
3230F: drivers/net/hamradio/scc.c 3238F: drivers/net/hamradio/scc.c
3231 3239
@@ -3372,6 +3380,12 @@ W: http://www.developer.ibm.com/welcome/netfinity/serveraid.html
3372S: Supported 3380S: Supported
3373F: drivers/scsi/ips.* 3381F: drivers/scsi/ips.*
3374 3382
3383ICH LPC AND GPIO DRIVER
3384M: Peter Tyser <ptyser@xes-inc.com>
3385S: Maintained
3386F: drivers/mfd/lpc_ich.c
3387F: drivers/gpio/gpio-ich.c
3388
3375IDE SUBSYSTEM 3389IDE SUBSYSTEM
3376M: "David S. Miller" <davem@davemloft.net> 3390M: "David S. Miller" <davem@davemloft.net>
3377L: linux-ide@vger.kernel.org 3391L: linux-ide@vger.kernel.org
@@ -4495,12 +4509,6 @@ L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
4495S: Maintained 4509S: Maintained
4496F: drivers/mmc/host/imxmmc.* 4510F: drivers/mmc/host/imxmmc.*
4497 4511
4498MOUSE AND MISC DEVICES [GENERAL]
4499M: Alessandro Rubini <rubini@ipvvis.unipv.it>
4500S: Maintained
4501F: drivers/input/mouse/
4502F: include/linux/gpio_mouse.h
4503
4504MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD 4512MOXA SMARTIO/INDUSTIO/INTELLIO SERIAL CARD
4505M: Jiri Slaby <jirislaby@gmail.com> 4513M: Jiri Slaby <jirislaby@gmail.com>
4506S: Maintained 4514S: Maintained
@@ -5138,7 +5146,7 @@ PCA9541 I2C BUS MASTER SELECTOR DRIVER
5138M: Guenter Roeck <guenter.roeck@ericsson.com> 5146M: Guenter Roeck <guenter.roeck@ericsson.com>
5139L: linux-i2c@vger.kernel.org 5147L: linux-i2c@vger.kernel.org
5140S: Maintained 5148S: Maintained
5141F: drivers/i2c/muxes/pca9541.c 5149F: drivers/i2c/muxes/i2c-mux-pca9541.c
5142 5150
5143PCA9564/PCA9665 I2C BUS DRIVER 5151PCA9564/PCA9665 I2C BUS DRIVER
5144M: Wolfram Sang <w.sang@pengutronix.de> 5152M: Wolfram Sang <w.sang@pengutronix.de>
@@ -6340,14 +6348,25 @@ F: include/linux/compiler.h
6340 6348
6341SPEAR PLATFORM SUPPORT 6349SPEAR PLATFORM SUPPORT
6342M: Viresh Kumar <viresh.kumar@st.com> 6350M: Viresh Kumar <viresh.kumar@st.com>
6351M: Shiraz Hashim <shiraz.hashim@st.com>
6343L: spear-devel@list.st.com 6352L: spear-devel@list.st.com
6344L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 6353L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6345W: http://www.st.com/spear 6354W: http://www.st.com/spear
6346S: Maintained 6355S: Maintained
6347F: arch/arm/plat-spear/ 6356F: arch/arm/plat-spear/
6348 6357
6358SPEAR13XX MACHINE SUPPORT
6359M: Viresh Kumar <viresh.kumar@st.com>
6360M: Shiraz Hashim <shiraz.hashim@st.com>
6361L: spear-devel@list.st.com
6362L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6363W: http://www.st.com/spear
6364S: Maintained
6365F: arch/arm/mach-spear13xx/
6366
6349SPEAR3XX MACHINE SUPPORT 6367SPEAR3XX MACHINE SUPPORT
6350M: Viresh Kumar <viresh.kumar@st.com> 6368M: Viresh Kumar <viresh.kumar@st.com>
6369M: Shiraz Hashim <shiraz.hashim@st.com>
6351L: spear-devel@list.st.com 6370L: spear-devel@list.st.com
6352L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 6371L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6353W: http://www.st.com/spear 6372W: http://www.st.com/spear
@@ -6356,6 +6375,8 @@ F: arch/arm/mach-spear3xx/
6356 6375
6357SPEAR6XX MACHINE SUPPORT 6376SPEAR6XX MACHINE SUPPORT
6358M: Rajeev Kumar <rajeev-dlh.kumar@st.com> 6377M: Rajeev Kumar <rajeev-dlh.kumar@st.com>
6378M: Shiraz Hashim <shiraz.hashim@st.com>
6379M: Viresh Kumar <viresh.kumar@st.com>
6359L: spear-devel@list.st.com 6380L: spear-devel@list.st.com
6360L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 6381L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6361W: http://www.st.com/spear 6382W: http://www.st.com/spear
@@ -6368,9 +6389,7 @@ L: spear-devel@list.st.com
6368L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) 6389L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
6369W: http://www.st.com/spear 6390W: http://www.st.com/spear
6370S: Maintained 6391S: Maintained
6371F: arch/arm/mach-spear*/clock.c 6392F: drivers/clk/spear/
6372F: arch/arm/plat-spear/clock.c
6373F: arch/arm/plat-spear/include/plat/clock.h
6374 6393
6375SPI SUBSYSTEM 6394SPI SUBSYSTEM
6376M: Grant Likely <grant.likely@secretlab.ca> 6395M: Grant Likely <grant.likely@secretlab.ca>
diff --git a/Makefile b/Makefile
index b62c1e09444a..dda21c3efc7b 100644
--- a/Makefile
+++ b/Makefile
@@ -231,10 +231,6 @@ endif
231# Where to locate arch specific headers 231# Where to locate arch specific headers
232hdr-arch := $(SRCARCH) 232hdr-arch := $(SRCARCH)
233 233
234ifeq ($(ARCH),m68knommu)
235 hdr-arch := m68k
236endif
237
238KCONFIG_CONFIG ?= .config 234KCONFIG_CONFIG ?= .config
239export KCONFIG_CONFIG 235export KCONFIG_CONFIG
240 236
@@ -341,7 +337,6 @@ AWK = awk
341GENKSYMS = scripts/genksyms/genksyms 337GENKSYMS = scripts/genksyms/genksyms
342INSTALLKERNEL := installkernel 338INSTALLKERNEL := installkernel
343DEPMOD = /sbin/depmod 339DEPMOD = /sbin/depmod
344KALLSYMS = scripts/kallsyms
345PERL = perl 340PERL = perl
346CHECK = sparse 341CHECK = sparse
347 342
@@ -739,197 +734,21 @@ libs-y1 := $(patsubst %/, %/lib.a, $(libs-y))
739libs-y2 := $(patsubst %/, %/built-in.o, $(libs-y)) 734libs-y2 := $(patsubst %/, %/built-in.o, $(libs-y))
740libs-y := $(libs-y1) $(libs-y2) 735libs-y := $(libs-y1) $(libs-y2)
741 736
742# Build vmlinux 737# Externally visible symbols (used by link-vmlinux.sh)
743# --------------------------------------------------------------------------- 738export KBUILD_VMLINUX_INIT := $(head-y) $(init-y)
744# vmlinux is built from the objects selected by $(vmlinux-init) and 739export KBUILD_VMLINUX_MAIN := $(core-y) $(libs-y) $(drivers-y) $(net-y)
745# $(vmlinux-main). Most are built-in.o files from top-level directories 740export KBUILD_LDS := arch/$(SRCARCH)/kernel/vmlinux.lds
746# in the kernel tree, others are specified in arch/$(ARCH)/Makefile. 741export LDFLAGS_vmlinux
747# Ordering when linking is important, and $(vmlinux-init) must be first.
748#
749# vmlinux
750# ^
751# |
752# +-< $(vmlinux-init)
753# | +--< init/version.o + more
754# |
755# +--< $(vmlinux-main)
756# | +--< driver/built-in.o mm/built-in.o + more
757# |
758# +-< kallsyms.o (see description in CONFIG_KALLSYMS section)
759#
760# vmlinux version (uname -v) cannot be updated during normal
761# descending-into-subdirs phase since we do not yet know if we need to
762# update vmlinux.
763# Therefore this step is delayed until just before final link of vmlinux -
764# except in the kallsyms case where it is done just before adding the
765# symbols to the kernel.
766#
767# System.map is generated to document addresses of all kernel symbols
768
769vmlinux-init := $(head-y) $(init-y)
770vmlinux-main := $(core-y) $(libs-y) $(drivers-y) $(net-y)
771vmlinux-all := $(vmlinux-init) $(vmlinux-main)
772vmlinux-lds := arch/$(SRCARCH)/kernel/vmlinux.lds
773export KBUILD_VMLINUX_OBJS := $(vmlinux-all)
774
775# Rule to link vmlinux - also used during CONFIG_KALLSYMS
776# May be overridden by arch/$(ARCH)/Makefile
777quiet_cmd_vmlinux__ ?= LD $@
778 cmd_vmlinux__ ?= $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux) -o $@ \
779 -T $(vmlinux-lds) $(vmlinux-init) \
780 --start-group $(vmlinux-main) --end-group \
781 $(filter-out $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o FORCE ,$^)
782
783# Generate new vmlinux version
784quiet_cmd_vmlinux_version = GEN .version
785 cmd_vmlinux_version = set -e; \
786 if [ ! -r .version ]; then \
787 rm -f .version; \
788 echo 1 >.version; \
789 else \
790 mv .version .old_version; \
791 expr 0$$(cat .old_version) + 1 >.version; \
792 fi; \
793 $(MAKE) $(build)=init
794
795# Generate System.map
796quiet_cmd_sysmap = SYSMAP
797 cmd_sysmap = $(CONFIG_SHELL) $(srctree)/scripts/mksysmap
798
799# Sort exception table at build time
800quiet_cmd_sortextable = SORTEX
801 cmd_sortextable = $(objtree)/scripts/sortextable
802
803# Link of vmlinux
804# If CONFIG_KALLSYMS is set .version is already updated
805# Generate System.map and verify that the content is consistent
806# Use + in front of the vmlinux_version rule to silent warning with make -j2
807# First command is ':' to allow us to use + in front of the rule
808define rule_vmlinux__
809 :
810 $(if $(CONFIG_KALLSYMS),,+$(call cmd,vmlinux_version))
811
812 $(call cmd,vmlinux__)
813 $(Q)echo 'cmd_$@ := $(cmd_vmlinux__)' > $(@D)/.$(@F).cmd
814
815 $(if $(CONFIG_BUILDTIME_EXTABLE_SORT), \
816 $(Q)$(if $($(quiet)cmd_sortextable), \
817 echo ' $($(quiet)cmd_sortextable) vmlinux' &&) \
818 $(cmd_sortextable) vmlinux)
819
820
821 $(Q)$(if $($(quiet)cmd_sysmap), \
822 echo ' $($(quiet)cmd_sysmap) System.map' &&) \
823 $(cmd_sysmap) $@ System.map; \
824 if [ $$? -ne 0 ]; then \
825 rm -f $@; \
826 /bin/false; \
827 fi;
828 $(verify_kallsyms)
829endef
830
831
832ifdef CONFIG_KALLSYMS
833# Generate section listing all symbols and add it into vmlinux $(kallsyms.o)
834# It's a three stage process:
835# o .tmp_vmlinux1 has all symbols and sections, but __kallsyms is
836# empty
837# Running kallsyms on that gives us .tmp_kallsyms1.o with
838# the right size - vmlinux version (uname -v) is updated during this step
839# o .tmp_vmlinux2 now has a __kallsyms section of the right size,
840# but due to the added section, some addresses have shifted.
841# From here, we generate a correct .tmp_kallsyms2.o
842# o The correct .tmp_kallsyms2.o is linked into the final vmlinux.
843# o Verify that the System.map from vmlinux matches the map from
844# .tmp_vmlinux2, just in case we did not generate kallsyms correctly.
845# o If 'make KALLSYMS_EXTRA_PASS=1" was used, do an extra pass using
846# .tmp_vmlinux3 and .tmp_kallsyms3.o. This is only meant as a
847# temporary bypass to allow the kernel to be built while the
848# maintainers work out what went wrong with kallsyms.
849
850last_kallsyms := 2
851
852ifdef KALLSYMS_EXTRA_PASS
853ifneq ($(KALLSYMS_EXTRA_PASS),0)
854last_kallsyms := 3
855endif
856endif
857
858kallsyms.o := .tmp_kallsyms$(last_kallsyms).o
859
860define verify_kallsyms
861 $(Q)$(if $($(quiet)cmd_sysmap), \
862 echo ' $($(quiet)cmd_sysmap) .tmp_System.map' &&) \
863 $(cmd_sysmap) .tmp_vmlinux$(last_kallsyms) .tmp_System.map
864 $(Q)cmp -s System.map .tmp_System.map || \
865 (echo Inconsistent kallsyms data; \
866 echo This is a bug - please report about it; \
867 echo Try "make KALLSYMS_EXTRA_PASS=1" as a workaround; \
868 rm .tmp_kallsyms* ; /bin/false )
869endef
870
871# Update vmlinux version before link
872# Use + in front of this rule to silent warning about make -j1
873# First command is ':' to allow us to use + in front of this rule
874cmd_ksym_ld = $(cmd_vmlinux__)
875define rule_ksym_ld
876 :
877 +$(call cmd,vmlinux_version)
878 $(call cmd,vmlinux__)
879 $(Q)echo 'cmd_$@ := $(cmd_vmlinux__)' > $(@D)/.$(@F).cmd
880endef
881
882# Generate .S file with all kernel symbols
883quiet_cmd_kallsyms = KSYM $@
884 cmd_kallsyms = $(NM) -n $< | $(KALLSYMS) \
885 $(if $(CONFIG_KALLSYMS_ALL),--all-symbols) > $@
886 742
887.tmp_kallsyms1.o .tmp_kallsyms2.o .tmp_kallsyms3.o: %.o: %.S scripts FORCE 743vmlinux-deps := $(KBUILD_LDS) $(KBUILD_VMLINUX_INIT) $(KBUILD_VMLINUX_MAIN)
888 $(call if_changed_dep,as_o_S)
889 744
890.tmp_kallsyms%.S: .tmp_vmlinux% $(KALLSYMS) 745# Final link of vmlinux
891 $(call cmd,kallsyms) 746 cmd_link-vmlinux = $(CONFIG_SHELL) $< $(LD) $(LDFLAGS) $(LDFLAGS_vmlinux)
747quiet_cmd_link-vmlinux = LINK $@
892 748
893# .tmp_vmlinux1 must be complete except kallsyms, so update vmlinux version 749# Include targets which we want to
894.tmp_vmlinux1: $(vmlinux-lds) $(vmlinux-all) FORCE 750# execute if the rest of the kernel build went well.
895 $(call if_changed_rule,ksym_ld) 751vmlinux: scripts/link-vmlinux.sh $(vmlinux-deps) FORCE
896
897.tmp_vmlinux2: $(vmlinux-lds) $(vmlinux-all) .tmp_kallsyms1.o FORCE
898 $(call if_changed,vmlinux__)
899
900.tmp_vmlinux3: $(vmlinux-lds) $(vmlinux-all) .tmp_kallsyms2.o FORCE
901 $(call if_changed,vmlinux__)
902
903# Needs to visit scripts/ before $(KALLSYMS) can be used.
904$(KALLSYMS): scripts ;
905
906# Generate some data for debugging strange kallsyms problems
907debug_kallsyms: .tmp_map$(last_kallsyms)
908
909.tmp_map%: .tmp_vmlinux% FORCE
910 ($(OBJDUMP) -h $< | $(AWK) '/^ +[0-9]/{print $$4 " 0 " $$2}'; $(NM) $<) | sort > $@
911
912.tmp_map3: .tmp_map2
913
914.tmp_map2: .tmp_map1
915
916endif # ifdef CONFIG_KALLSYMS
917
918# Do modpost on a prelinked vmlinux. The finally linked vmlinux has
919# relevant sections renamed as per the linker script.
920quiet_cmd_vmlinux-modpost = LD $@
921 cmd_vmlinux-modpost = $(LD) $(LDFLAGS) -r -o $@ \
922 $(vmlinux-init) --start-group $(vmlinux-main) --end-group \
923 $(filter-out $(vmlinux-init) $(vmlinux-main) FORCE ,$^)
924define rule_vmlinux-modpost
925 :
926 +$(call cmd,vmlinux-modpost)
927 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost $@
928 $(Q)echo 'cmd_$@ := $(cmd_vmlinux-modpost)' > $(dot-target).cmd
929endef
930
931# vmlinux image - including updated kernel symbols
932vmlinux: $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o $(kallsyms.o) FORCE
933ifdef CONFIG_HEADERS_CHECK 752ifdef CONFIG_HEADERS_CHECK
934 $(Q)$(MAKE) -f $(srctree)/Makefile headers_check 753 $(Q)$(MAKE) -f $(srctree)/Makefile headers_check
935endif 754endif
@@ -939,22 +758,11 @@ endif
939ifdef CONFIG_BUILD_DOCSRC 758ifdef CONFIG_BUILD_DOCSRC
940 $(Q)$(MAKE) $(build)=Documentation 759 $(Q)$(MAKE) $(build)=Documentation
941endif 760endif
942 $(call vmlinux-modpost) 761 +$(call if_changed,link-vmlinux)
943 $(call if_changed_rule,vmlinux__)
944 $(Q)rm -f .old_version
945
946# build vmlinux.o first to catch section mismatch errors early
947ifdef CONFIG_KALLSYMS
948.tmp_vmlinux1: vmlinux.o
949endif
950
951modpost-init := $(filter-out init/built-in.o, $(vmlinux-init))
952vmlinux.o: $(modpost-init) $(vmlinux-main) FORCE
953 $(call if_changed_rule,vmlinux-modpost)
954 762
955# The actual objects are generated when descending, 763# The actual objects are generated when descending,
956# make sure no implicit rule kicks in 764# make sure no implicit rule kicks in
957$(sort $(vmlinux-init) $(vmlinux-main)) $(vmlinux-lds): $(vmlinux-dirs) ; 765$(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
958 766
959# Handle descending into subdirectories listed in $(vmlinux-dirs) 767# Handle descending into subdirectories listed in $(vmlinux-dirs)
960# Preset locale variables to speed up the build process. Limit locale 768# Preset locale variables to speed up the build process. Limit locale
@@ -1181,8 +989,6 @@ endif # CONFIG_MODULES
1181 989
1182# Directories & files removed with 'make clean' 990# Directories & files removed with 'make clean'
1183CLEAN_DIRS += $(MODVERDIR) 991CLEAN_DIRS += $(MODVERDIR)
1184CLEAN_FILES += vmlinux System.map \
1185 .tmp_kallsyms* .tmp_version .tmp_vmlinux* .tmp_System.map
1186 992
1187# Directories & files removed with 'make mrproper' 993# Directories & files removed with 'make mrproper'
1188MRPROPER_DIRS += include/config usr/include include/generated \ 994MRPROPER_DIRS += include/config usr/include include/generated \
@@ -1428,6 +1234,7 @@ scripts: ;
1428endif # KBUILD_EXTMOD 1234endif # KBUILD_EXTMOD
1429 1235
1430clean: $(clean-dirs) 1236clean: $(clean-dirs)
1237 $(Q)$(CONFIG_SHELL) $(srctree)/scripts/link-vmlinux.sh clean
1431 $(call cmd,rmdirs) 1238 $(call cmd,rmdirs)
1432 $(call cmd,rmfiles) 1239 $(call cmd,rmfiles)
1433 @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \ 1240 @find $(if $(KBUILD_EXTMOD), $(KBUILD_EXTMOD), .) $(RCS_FIND_IGNORE) \
@@ -1568,14 +1375,6 @@ quiet_cmd_depmod = DEPMOD $(KERNELRELEASE)
1568cmd_crmodverdir = $(Q)mkdir -p $(MODVERDIR) \ 1375cmd_crmodverdir = $(Q)mkdir -p $(MODVERDIR) \
1569 $(if $(KBUILD_MODULES),; rm -f $(MODVERDIR)/*) 1376 $(if $(KBUILD_MODULES),; rm -f $(MODVERDIR)/*)
1570 1377
1571a_flags = -Wp,-MD,$(depfile) $(KBUILD_AFLAGS) $(AFLAGS_KERNEL) \
1572 $(KBUILD_AFLAGS_KERNEL) \
1573 $(NOSTDINC_FLAGS) $(LINUXINCLUDE) $(KBUILD_CPPFLAGS) \
1574 $(modkern_aflags) $(EXTRA_AFLAGS) $(AFLAGS_$(basetarget).o)
1575
1576quiet_cmd_as_o_S = AS $@
1577cmd_as_o_S = $(CC) $(a_flags) -c -o $@ $<
1578
1579# read all saved command lines 1378# read all saved command lines
1580 1379
1581targets := $(wildcard $(sort $(targets))) 1380targets := $(wildcard $(sort $(targets)))
diff --git a/arch/Kconfig b/arch/Kconfig
index e9a910876cda..8c3d957fa8e2 100644
--- a/arch/Kconfig
+++ b/arch/Kconfig
@@ -159,6 +159,9 @@ config HAVE_ARCH_TRACEHOOK
159config HAVE_DMA_ATTRS 159config HAVE_DMA_ATTRS
160 bool 160 bool
161 161
162config HAVE_DMA_CONTIGUOUS
163 bool
164
162config USE_GENERIC_SMP_HELPERS 165config USE_GENERIC_SMP_HELPERS
163 bool 166 bool
164 167
diff --git a/arch/alpha/include/asm/kvm_para.h b/arch/alpha/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/alpha/include/asm/kvm_para.h
@@ -0,0 +1 @@
#include <asm-generic/kvm_para.h>
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 5458aa9db067..5e7601301b41 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -5,6 +5,9 @@ config ARM
5 select HAVE_AOUT 5 select HAVE_AOUT
6 select HAVE_DMA_API_DEBUG 6 select HAVE_DMA_API_DEBUG
7 select HAVE_IDE if PCI || ISA || PCMCIA 7 select HAVE_IDE if PCI || ISA || PCMCIA
8 select HAVE_DMA_ATTRS
9 select HAVE_DMA_CONTIGUOUS if (CPU_V6 || CPU_V6K || CPU_V7)
10 select CMA if (CPU_V6 || CPU_V6K || CPU_V7)
8 select HAVE_MEMBLOCK 11 select HAVE_MEMBLOCK
9 select RTC_LIB 12 select RTC_LIB
10 select SYS_SUPPORTS_APM_EMULATION 13 select SYS_SUPPORTS_APM_EMULATION
@@ -54,6 +57,14 @@ config ARM
54config ARM_HAS_SG_CHAIN 57config ARM_HAS_SG_CHAIN
55 bool 58 bool
56 59
60config NEED_SG_DMA_LENGTH
61 bool
62
63config ARM_DMA_USE_IOMMU
64 select NEED_SG_DMA_LENGTH
65 select ARM_HAS_SG_CHAIN
66 bool
67
57config HAVE_PWM 68config HAVE_PWM
58 bool 69 bool
59 70
@@ -445,8 +456,10 @@ config ARCH_MXS
445 select ARCH_REQUIRE_GPIOLIB 456 select ARCH_REQUIRE_GPIOLIB
446 select CLKDEV_LOOKUP 457 select CLKDEV_LOOKUP
447 select CLKSRC_MMIO 458 select CLKSRC_MMIO
459 select COMMON_CLK
448 select HAVE_CLK_PREPARE 460 select HAVE_CLK_PREPARE
449 select PINCTRL 461 select PINCTRL
462 select USE_OF
450 help 463 help
451 Support for Freescale MXS-based family of processors 464 Support for Freescale MXS-based family of processors
452 465
@@ -936,6 +949,7 @@ config PLAT_SPEAR
936 select ARM_AMBA 949 select ARM_AMBA
937 select ARCH_REQUIRE_GPIOLIB 950 select ARCH_REQUIRE_GPIOLIB
938 select CLKDEV_LOOKUP 951 select CLKDEV_LOOKUP
952 select COMMON_CLK
939 select CLKSRC_MMIO 953 select CLKSRC_MMIO
940 select GENERIC_CLOCKEVENTS 954 select GENERIC_CLOCKEVENTS
941 select HAVE_CLK 955 select HAVE_CLK
@@ -1040,7 +1054,6 @@ source "arch/arm/mach-sa1100/Kconfig"
1040 1054
1041source "arch/arm/plat-samsung/Kconfig" 1055source "arch/arm/plat-samsung/Kconfig"
1042source "arch/arm/plat-s3c24xx/Kconfig" 1056source "arch/arm/plat-s3c24xx/Kconfig"
1043source "arch/arm/plat-s5p/Kconfig"
1044 1057
1045source "arch/arm/plat-spear/Kconfig" 1058source "arch/arm/plat-spear/Kconfig"
1046 1059
@@ -1091,6 +1104,7 @@ config PLAT_ORION
1091 bool 1104 bool
1092 select CLKSRC_MMIO 1105 select CLKSRC_MMIO
1093 select GENERIC_IRQ_CHIP 1106 select GENERIC_IRQ_CHIP
1107 select COMMON_CLK
1094 1108
1095config PLAT_PXA 1109config PLAT_PXA
1096 bool 1110 bool
diff --git a/arch/arm/Kconfig.debug b/arch/arm/Kconfig.debug
index 85348a09d655..01a134141216 100644
--- a/arch/arm/Kconfig.debug
+++ b/arch/arm/Kconfig.debug
@@ -103,6 +103,35 @@ choice
103 Say Y here if you want the debug print routines to direct 103 Say Y here if you want the debug print routines to direct
104 their output to the second serial port on these devices. 104 their output to the second serial port on these devices.
105 105
106 config DEBUG_DAVINCI_DA8XX_UART1
107 bool "Kernel low-level debugging on DaVinci DA8XX using UART1"
108 depends on ARCH_DAVINCI_DA8XX
109 help
110 Say Y here if you want the debug print routines to direct
111 their output to UART1 serial port on DaVinci DA8XX devices.
112
113 config DEBUG_DAVINCI_DA8XX_UART2
114 bool "Kernel low-level debugging on DaVinci DA8XX using UART2"
115 depends on ARCH_DAVINCI_DA8XX
116 help
117 Say Y here if you want the debug print routines to direct
118 their output to UART2 serial port on DaVinci DA8XX devices.
119
120 config DEBUG_DAVINCI_DMx_UART0
121 bool "Kernel low-level debugging on DaVinci DMx using UART0"
122 depends on ARCH_DAVINCI_DMx
123 help
124 Say Y here if you want the debug print routines to direct
125 their output to UART0 serial port on DaVinci DMx devices.
126
127 config DEBUG_DAVINCI_TNETV107X_UART1
128 bool "Kernel low-level debugging on DaVinci TNETV107x using UART1"
129 depends on ARCH_DAVINCI_TNETV107X
130 help
131 Say Y here if you want the debug print routines to direct
132 their output to UART1 serial port on DaVinci TNETV107X
133 devices.
134
106 config DEBUG_DC21285_PORT 135 config DEBUG_DC21285_PORT
107 bool "Kernel low-level debugging messages via footbridge serial port" 136 bool "Kernel low-level debugging messages via footbridge serial port"
108 depends on FOOTBRIDGE 137 depends on FOOTBRIDGE
@@ -180,6 +209,14 @@ choice
180 Say Y here if you want kernel low-level debugging support 209 Say Y here if you want kernel low-level debugging support
181 on i.MX50 or i.MX53. 210 on i.MX50 or i.MX53.
182 211
212 config DEBUG_IMX6Q_UART2
213 bool "i.MX6Q Debug UART2"
214 depends on SOC_IMX6Q
215 help
216 Say Y here if you want kernel low-level debugging support
217 on i.MX6Q UART2. This is correct for e.g. the SabreLite
218 board.
219
183 config DEBUG_IMX6Q_UART4 220 config DEBUG_IMX6Q_UART4
184 bool "i.MX6Q Debug UART4" 221 bool "i.MX6Q Debug UART4"
185 depends on SOC_IMX6Q 222 depends on SOC_IMX6Q
diff --git a/arch/arm/Makefile b/arch/arm/Makefile
index 157900da8782..0298b00fe241 100644
--- a/arch/arm/Makefile
+++ b/arch/arm/Makefile
@@ -160,9 +160,7 @@ machine-$(CONFIG_ARCH_MXS) := mxs
160machine-$(CONFIG_ARCH_NETX) := netx 160machine-$(CONFIG_ARCH_NETX) := netx
161machine-$(CONFIG_ARCH_NOMADIK) := nomadik 161machine-$(CONFIG_ARCH_NOMADIK) := nomadik
162machine-$(CONFIG_ARCH_OMAP1) := omap1 162machine-$(CONFIG_ARCH_OMAP1) := omap1
163machine-$(CONFIG_ARCH_OMAP2) := omap2 163machine-$(CONFIG_ARCH_OMAP2PLUS) := omap2
164machine-$(CONFIG_ARCH_OMAP3) := omap2
165machine-$(CONFIG_ARCH_OMAP4) := omap2
166machine-$(CONFIG_ARCH_ORION5X) := orion5x 164machine-$(CONFIG_ARCH_ORION5X) := orion5x
167machine-$(CONFIG_ARCH_PICOXCELL) := picoxcell 165machine-$(CONFIG_ARCH_PICOXCELL) := picoxcell
168machine-$(CONFIG_ARCH_PNX4008) := pnx4008 166machine-$(CONFIG_ARCH_PNX4008) := pnx4008
@@ -188,6 +186,8 @@ machine-$(CONFIG_ARCH_VEXPRESS) := vexpress
188machine-$(CONFIG_ARCH_VT8500) := vt8500 186machine-$(CONFIG_ARCH_VT8500) := vt8500
189machine-$(CONFIG_ARCH_W90X900) := w90x900 187machine-$(CONFIG_ARCH_W90X900) := w90x900
190machine-$(CONFIG_FOOTBRIDGE) := footbridge 188machine-$(CONFIG_FOOTBRIDGE) := footbridge
189machine-$(CONFIG_MACH_SPEAR1310) := spear13xx
190machine-$(CONFIG_MACH_SPEAR1340) := spear13xx
191machine-$(CONFIG_MACH_SPEAR300) := spear3xx 191machine-$(CONFIG_MACH_SPEAR300) := spear3xx
192machine-$(CONFIG_MACH_SPEAR310) := spear3xx 192machine-$(CONFIG_MACH_SPEAR310) := spear3xx
193machine-$(CONFIG_MACH_SPEAR320) := spear3xx 193machine-$(CONFIG_MACH_SPEAR320) := spear3xx
@@ -205,7 +205,7 @@ plat-$(CONFIG_PLAT_NOMADIK) := nomadik
205plat-$(CONFIG_PLAT_ORION) := orion 205plat-$(CONFIG_PLAT_ORION) := orion
206plat-$(CONFIG_PLAT_PXA) := pxa 206plat-$(CONFIG_PLAT_PXA) := pxa
207plat-$(CONFIG_PLAT_S3C24XX) := s3c24xx samsung 207plat-$(CONFIG_PLAT_S3C24XX) := s3c24xx samsung
208plat-$(CONFIG_PLAT_S5P) := s5p samsung 208plat-$(CONFIG_PLAT_S5P) := samsung
209plat-$(CONFIG_PLAT_SPEAR) := spear 209plat-$(CONFIG_PLAT_SPEAR) := spear
210plat-$(CONFIG_PLAT_VERSATILE) := versatile 210plat-$(CONFIG_PLAT_VERSATILE) := versatile
211 211
diff --git a/arch/arm/boot/dts/exynos5250-smdk5250.dts b/arch/arm/boot/dts/exynos5250-smdk5250.dts
index 399d17b231d2..49945cc1bc7d 100644
--- a/arch/arm/boot/dts/exynos5250-smdk5250.dts
+++ b/arch/arm/boot/dts/exynos5250-smdk5250.dts
@@ -23,4 +23,52 @@
23 chosen { 23 chosen {
24 bootargs = "root=/dev/ram0 rw ramdisk=8192 console=ttySAC1,115200"; 24 bootargs = "root=/dev/ram0 rw ramdisk=8192 console=ttySAC1,115200";
25 }; 25 };
26
27 i2c@12C60000 {
28 samsung,i2c-sda-delay = <100>;
29 samsung,i2c-max-bus-freq = <20000>;
30 gpios = <&gpb3 0 2 3 0>,
31 <&gpb3 1 2 3 0>;
32
33 eeprom@50 {
34 compatible = "samsung,s524ad0xd1";
35 reg = <0x50>;
36 };
37 };
38
39 i2c@12C70000 {
40 samsung,i2c-sda-delay = <100>;
41 samsung,i2c-max-bus-freq = <20000>;
42 gpios = <&gpb3 2 2 3 0>,
43 <&gpb3 3 2 3 0>;
44
45 eeprom@51 {
46 compatible = "samsung,s524ad0xd1";
47 reg = <0x51>;
48 };
49 };
50
51 i2c@12C80000 {
52 status = "disabled";
53 };
54
55 i2c@12C90000 {
56 status = "disabled";
57 };
58
59 i2c@12CA0000 {
60 status = "disabled";
61 };
62
63 i2c@12CB0000 {
64 status = "disabled";
65 };
66
67 i2c@12CC0000 {
68 status = "disabled";
69 };
70
71 i2c@12CD0000 {
72 status = "disabled";
73 };
26}; 74};
diff --git a/arch/arm/boot/dts/exynos5250.dtsi b/arch/arm/boot/dts/exynos5250.dtsi
index dfc433599436..5ca0cdb76413 100644
--- a/arch/arm/boot/dts/exynos5250.dtsi
+++ b/arch/arm/boot/dts/exynos5250.dtsi
@@ -23,11 +23,11 @@
23 compatible = "samsung,exynos5250"; 23 compatible = "samsung,exynos5250";
24 interrupt-parent = <&gic>; 24 interrupt-parent = <&gic>;
25 25
26 gic:interrupt-controller@10490000 { 26 gic:interrupt-controller@10481000 {
27 compatible = "arm,cortex-a9-gic"; 27 compatible = "arm,cortex-a9-gic";
28 #interrupt-cells = <3>; 28 #interrupt-cells = <3>;
29 interrupt-controller; 29 interrupt-controller;
30 reg = <0x10490000 0x1000>, <0x10480000 0x100>; 30 reg = <0x10481000 0x1000>, <0x10482000 0x2000>;
31 }; 31 };
32 32
33 watchdog { 33 watchdog {
@@ -42,30 +42,6 @@
42 interrupts = <0 43 0>, <0 44 0>; 42 interrupts = <0 43 0>, <0 44 0>;
43 }; 43 };
44 44
45 sdhci@12200000 {
46 compatible = "samsung,exynos4210-sdhci";
47 reg = <0x12200000 0x100>;
48 interrupts = <0 75 0>;
49 };
50
51 sdhci@12210000 {
52 compatible = "samsung,exynos4210-sdhci";
53 reg = <0x12210000 0x100>;
54 interrupts = <0 76 0>;
55 };
56
57 sdhci@12220000 {
58 compatible = "samsung,exynos4210-sdhci";
59 reg = <0x12220000 0x100>;
60 interrupts = <0 77 0>;
61 };
62
63 sdhci@12230000 {
64 compatible = "samsung,exynos4210-sdhci";
65 reg = <0x12230000 0x100>;
66 interrupts = <0 78 0>;
67 };
68
69 serial@12C00000 { 45 serial@12C00000 {
70 compatible = "samsung,exynos4210-uart"; 46 compatible = "samsung,exynos4210-uart";
71 reg = <0x12C00000 0x100>; 47 reg = <0x12C00000 0x100>;
@@ -94,48 +70,64 @@
94 compatible = "samsung,s3c2440-i2c"; 70 compatible = "samsung,s3c2440-i2c";
95 reg = <0x12C60000 0x100>; 71 reg = <0x12C60000 0x100>;
96 interrupts = <0 56 0>; 72 interrupts = <0 56 0>;
73 #address-cells = <1>;
74 #size-cells = <0>;
97 }; 75 };
98 76
99 i2c@12C70000 { 77 i2c@12C70000 {
100 compatible = "samsung,s3c2440-i2c"; 78 compatible = "samsung,s3c2440-i2c";
101 reg = <0x12C70000 0x100>; 79 reg = <0x12C70000 0x100>;
102 interrupts = <0 57 0>; 80 interrupts = <0 57 0>;
81 #address-cells = <1>;
82 #size-cells = <0>;
103 }; 83 };
104 84
105 i2c@12C80000 { 85 i2c@12C80000 {
106 compatible = "samsung,s3c2440-i2c"; 86 compatible = "samsung,s3c2440-i2c";
107 reg = <0x12C80000 0x100>; 87 reg = <0x12C80000 0x100>;
108 interrupts = <0 58 0>; 88 interrupts = <0 58 0>;
89 #address-cells = <1>;
90 #size-cells = <0>;
109 }; 91 };
110 92
111 i2c@12C90000 { 93 i2c@12C90000 {
112 compatible = "samsung,s3c2440-i2c"; 94 compatible = "samsung,s3c2440-i2c";
113 reg = <0x12C90000 0x100>; 95 reg = <0x12C90000 0x100>;
114 interrupts = <0 59 0>; 96 interrupts = <0 59 0>;
97 #address-cells = <1>;
98 #size-cells = <0>;
115 }; 99 };
116 100
117 i2c@12CA0000 { 101 i2c@12CA0000 {
118 compatible = "samsung,s3c2440-i2c"; 102 compatible = "samsung,s3c2440-i2c";
119 reg = <0x12CA0000 0x100>; 103 reg = <0x12CA0000 0x100>;
120 interrupts = <0 60 0>; 104 interrupts = <0 60 0>;
105 #address-cells = <1>;
106 #size-cells = <0>;
121 }; 107 };
122 108
123 i2c@12CB0000 { 109 i2c@12CB0000 {
124 compatible = "samsung,s3c2440-i2c"; 110 compatible = "samsung,s3c2440-i2c";
125 reg = <0x12CB0000 0x100>; 111 reg = <0x12CB0000 0x100>;
126 interrupts = <0 61 0>; 112 interrupts = <0 61 0>;
113 #address-cells = <1>;
114 #size-cells = <0>;
127 }; 115 };
128 116
129 i2c@12CC0000 { 117 i2c@12CC0000 {
130 compatible = "samsung,s3c2440-i2c"; 118 compatible = "samsung,s3c2440-i2c";
131 reg = <0x12CC0000 0x100>; 119 reg = <0x12CC0000 0x100>;
132 interrupts = <0 62 0>; 120 interrupts = <0 62 0>;
121 #address-cells = <1>;
122 #size-cells = <0>;
133 }; 123 };
134 124
135 i2c@12CD0000 { 125 i2c@12CD0000 {
136 compatible = "samsung,s3c2440-i2c"; 126 compatible = "samsung,s3c2440-i2c";
137 reg = <0x12CD0000 0x100>; 127 reg = <0x12CD0000 0x100>;
138 interrupts = <0 63 0>; 128 interrupts = <0 63 0>;
129 #address-cells = <1>;
130 #size-cells = <0>;
139 }; 131 };
140 132
141 amba { 133 amba {
@@ -157,13 +149,13 @@
157 interrupts = <0 35 0>; 149 interrupts = <0 35 0>;
158 }; 150 };
159 151
160 mdma0: pdma@10800000 { 152 mdma0: mdma@10800000 {
161 compatible = "arm,pl330", "arm,primecell"; 153 compatible = "arm,pl330", "arm,primecell";
162 reg = <0x10800000 0x1000>; 154 reg = <0x10800000 0x1000>;
163 interrupts = <0 33 0>; 155 interrupts = <0 33 0>;
164 }; 156 };
165 157
166 mdma1: pdma@11C10000 { 158 mdma1: mdma@11C10000 {
167 compatible = "arm,pl330", "arm,primecell"; 159 compatible = "arm,pl330", "arm,primecell";
168 reg = <0x11C10000 0x1000>; 160 reg = <0x11C10000 0x1000>;
169 interrupts = <0 124 0>; 161 interrupts = <0 124 0>;
@@ -242,6 +234,12 @@
242 #gpio-cells = <4>; 234 #gpio-cells = <4>;
243 }; 235 };
244 236
237 gpc4: gpio-controller@114002E0 {
238 compatible = "samsung,exynos4-gpio";
239 reg = <0x114002E0 0x20>;
240 #gpio-cells = <4>;
241 };
242
245 gpd0: gpio-controller@11400160 { 243 gpd0: gpio-controller@11400160 {
246 compatible = "samsung,exynos4-gpio"; 244 compatible = "samsung,exynos4-gpio";
247 reg = <0x11400160 0x20>; 245 reg = <0x11400160 0x20>;
@@ -388,19 +386,19 @@
388 386
389 gpv2: gpio-controller@10D10040 { 387 gpv2: gpio-controller@10D10040 {
390 compatible = "samsung,exynos4-gpio"; 388 compatible = "samsung,exynos4-gpio";
391 reg = <0x10D10040 0x20>; 389 reg = <0x10D10060 0x20>;
392 #gpio-cells = <4>; 390 #gpio-cells = <4>;
393 }; 391 };
394 392
395 gpv3: gpio-controller@10D10060 { 393 gpv3: gpio-controller@10D10060 {
396 compatible = "samsung,exynos4-gpio"; 394 compatible = "samsung,exynos4-gpio";
397 reg = <0x10D10060 0x20>; 395 reg = <0x10D10080 0x20>;
398 #gpio-cells = <4>; 396 #gpio-cells = <4>;
399 }; 397 };
400 398
401 gpv4: gpio-controller@10D10080 { 399 gpv4: gpio-controller@10D10080 {
402 compatible = "samsung,exynos4-gpio"; 400 compatible = "samsung,exynos4-gpio";
403 reg = <0x10D10080 0x20>; 401 reg = <0x10D100C0 0x20>;
404 #gpio-cells = <4>; 402 #gpio-cells = <4>;
405 }; 403 };
406 404
diff --git a/arch/arm/boot/dts/imx23-evk.dts b/arch/arm/boot/dts/imx23-evk.dts
new file mode 100644
index 000000000000..70bffa929b65
--- /dev/null
+++ b/arch/arm/boot/dts/imx23-evk.dts
@@ -0,0 +1,43 @@
1/*
2 * Copyright 2012 Freescale Semiconductor, Inc.
3 *
4 * The code contained herein is licensed under the GNU General Public
5 * License. You may obtain a copy of the GNU General Public License
6 * Version 2 or later at the following locations:
7 *
8 * http://www.opensource.org/licenses/gpl-license.html
9 * http://www.gnu.org/copyleft/gpl.html
10 */
11
12/dts-v1/;
13/include/ "imx23.dtsi"
14
15/ {
16 model = "Freescale i.MX23 Evaluation Kit";
17 compatible = "fsl,imx23-evk", "fsl,imx23";
18
19 memory {
20 reg = <0x40000000 0x08000000>;
21 };
22
23 apb@80000000 {
24 apbh@80000000 {
25 ssp0: ssp@80010000 {
26 compatible = "fsl,imx23-mmc";
27 pinctrl-names = "default";
28 pinctrl-0 = <&mmc0_8bit_pins_a &mmc0_pins_fixup>;
29 bus-width = <8>;
30 wp-gpios = <&gpio1 30 0>;
31 status = "okay";
32 };
33 };
34
35 apbx@80040000 {
36 duart: serial@80070000 {
37 pinctrl-names = "default";
38 pinctrl-0 = <&duart_pins_a>;
39 status = "okay";
40 };
41 };
42 };
43};
diff --git a/arch/arm/boot/dts/imx23.dtsi b/arch/arm/boot/dts/imx23.dtsi
new file mode 100644
index 000000000000..8c5f9994f3fc
--- /dev/null
+++ b/arch/arm/boot/dts/imx23.dtsi
@@ -0,0 +1,295 @@
1/*
2 * Copyright 2012 Freescale Semiconductor, Inc.
3 *
4 * The code contained herein is licensed under the GNU General Public
5 * License. You may obtain a copy of the GNU General Public License
6 * Version 2 or later at the following locations:
7 *
8 * http://www.opensource.org/licenses/gpl-license.html
9 * http://www.gnu.org/copyleft/gpl.html
10 */
11
12/include/ "skeleton.dtsi"
13
14/ {
15 interrupt-parent = <&icoll>;
16
17 aliases {
18 gpio0 = &gpio0;
19 gpio1 = &gpio1;
20 gpio2 = &gpio2;
21 };
22
23 cpus {
24 cpu@0 {
25 compatible = "arm,arm926ejs";
26 };
27 };
28
29 apb@80000000 {
30 compatible = "simple-bus";
31 #address-cells = <1>;
32 #size-cells = <1>;
33 reg = <0x80000000 0x80000>;
34 ranges;
35
36 apbh@80000000 {
37 compatible = "simple-bus";
38 #address-cells = <1>;
39 #size-cells = <1>;
40 reg = <0x80000000 0x40000>;
41 ranges;
42
43 icoll: interrupt-controller@80000000 {
44 compatible = "fsl,imx23-icoll", "fsl,mxs-icoll";
45 interrupt-controller;
46 #interrupt-cells = <1>;
47 reg = <0x80000000 0x2000>;
48 };
49
50 dma-apbh@80004000 {
51 compatible = "fsl,imx23-dma-apbh";
52 reg = <0x80004000 2000>;
53 };
54
55 ecc@80008000 {
56 reg = <0x80008000 2000>;
57 status = "disabled";
58 };
59
60 bch@8000a000 {
61 reg = <0x8000a000 2000>;
62 status = "disabled";
63 };
64
65 gpmi@8000c000 {
66 reg = <0x8000c000 2000>;
67 status = "disabled";
68 };
69
70 ssp0: ssp@80010000 {
71 reg = <0x80010000 2000>;
72 interrupts = <15 14>;
73 fsl,ssp-dma-channel = <1>;
74 status = "disabled";
75 };
76
77 etm@80014000 {
78 reg = <0x80014000 2000>;
79 status = "disabled";
80 };
81
82 pinctrl@80018000 {
83 #address-cells = <1>;
84 #size-cells = <0>;
85 compatible = "fsl,imx23-pinctrl", "simple-bus";
86 reg = <0x80018000 2000>;
87
88 gpio0: gpio@0 {
89 compatible = "fsl,imx23-gpio", "fsl,mxs-gpio";
90 interrupts = <16>;
91 gpio-controller;
92 #gpio-cells = <2>;
93 interrupt-controller;
94 #interrupt-cells = <2>;
95 };
96
97 gpio1: gpio@1 {
98 compatible = "fsl,imx23-gpio", "fsl,mxs-gpio";
99 interrupts = <17>;
100 gpio-controller;
101 #gpio-cells = <2>;
102 interrupt-controller;
103 #interrupt-cells = <2>;
104 };
105
106 gpio2: gpio@2 {
107 compatible = "fsl,imx23-gpio", "fsl,mxs-gpio";
108 interrupts = <18>;
109 gpio-controller;
110 #gpio-cells = <2>;
111 interrupt-controller;
112 #interrupt-cells = <2>;
113 };
114
115 duart_pins_a: duart@0 {
116 reg = <0>;
117 fsl,pinmux-ids = <0x11a2 0x11b2>;
118 fsl,drive-strength = <0>;
119 fsl,voltage = <1>;
120 fsl,pull-up = <0>;
121 };
122
123 mmc0_8bit_pins_a: mmc0-8bit@0 {
124 reg = <0>;
125 fsl,pinmux-ids = <0x2020 0x2030 0x2040
126 0x2050 0x0082 0x0092 0x00a2
127 0x00b2 0x2000 0x2010 0x2060>;
128 fsl,drive-strength = <1>;
129 fsl,voltage = <1>;
130 fsl,pull-up = <1>;
131 };
132
133 mmc0_pins_fixup: mmc0-pins-fixup {
134 fsl,pinmux-ids = <0x2010 0x2060>;
135 fsl,pull-up = <0>;
136 };
137 };
138
139 digctl@8001c000 {
140 reg = <0x8001c000 2000>;
141 status = "disabled";
142 };
143
144 emi@80020000 {
145 reg = <0x80020000 2000>;
146 status = "disabled";
147 };
148
149 dma-apbx@80024000 {
150 compatible = "fsl,imx23-dma-apbx";
151 reg = <0x80024000 2000>;
152 };
153
154 dcp@80028000 {
155 reg = <0x80028000 2000>;
156 status = "disabled";
157 };
158
159 pxp@8002a000 {
160 reg = <0x8002a000 2000>;
161 status = "disabled";
162 };
163
164 ocotp@8002c000 {
165 reg = <0x8002c000 2000>;
166 status = "disabled";
167 };
168
169 axi-ahb@8002e000 {
170 reg = <0x8002e000 2000>;
171 status = "disabled";
172 };
173
174 lcdif@80030000 {
175 reg = <0x80030000 2000>;
176 status = "disabled";
177 };
178
179 ssp1: ssp@80034000 {
180 reg = <0x80034000 2000>;
181 interrupts = <2 20>;
182 fsl,ssp-dma-channel = <2>;
183 status = "disabled";
184 };
185
186 tvenc@80038000 {
187 reg = <0x80038000 2000>;
188 status = "disabled";
189 };
190 };
191
192 apbx@80040000 {
193 compatible = "simple-bus";
194 #address-cells = <1>;
195 #size-cells = <1>;
196 reg = <0x80040000 0x40000>;
197 ranges;
198
199 clkctl@80040000 {
200 reg = <0x80040000 2000>;
201 status = "disabled";
202 };
203
204 saif0: saif@80042000 {
205 reg = <0x80042000 2000>;
206 status = "disabled";
207 };
208
209 power@80044000 {
210 reg = <0x80044000 2000>;
211 status = "disabled";
212 };
213
214 saif1: saif@80046000 {
215 reg = <0x80046000 2000>;
216 status = "disabled";
217 };
218
219 audio-out@80048000 {
220 reg = <0x80048000 2000>;
221 status = "disabled";
222 };
223
224 audio-in@8004c000 {
225 reg = <0x8004c000 2000>;
226 status = "disabled";
227 };
228
229 lradc@80050000 {
230 reg = <0x80050000 2000>;
231 status = "disabled";
232 };
233
234 spdif@80054000 {
235 reg = <0x80054000 2000>;
236 status = "disabled";
237 };
238
239 i2c@80058000 {
240 reg = <0x80058000 2000>;
241 status = "disabled";
242 };
243
244 rtc@8005c000 {
245 reg = <0x8005c000 2000>;
246 status = "disabled";
247 };
248
249 pwm@80064000 {
250 reg = <0x80064000 2000>;
251 status = "disabled";
252 };
253
254 timrot@80068000 {
255 reg = <0x80068000 2000>;
256 status = "disabled";
257 };
258
259 auart0: serial@8006c000 {
260 reg = <0x8006c000 0x2000>;
261 status = "disabled";
262 };
263
264 auart1: serial@8006e000 {
265 reg = <0x8006e000 0x2000>;
266 status = "disabled";
267 };
268
269 duart: serial@80070000 {
270 compatible = "arm,pl011", "arm,primecell";
271 reg = <0x80070000 0x2000>;
272 interrupts = <0>;
273 status = "disabled";
274 };
275
276 usbphy@8007c000 {
277 reg = <0x8007c000 0x2000>;
278 status = "disabled";
279 };
280 };
281 };
282
283 ahb@80080000 {
284 compatible = "simple-bus";
285 #address-cells = <1>;
286 #size-cells = <1>;
287 reg = <0x80080000 0x80000>;
288 ranges;
289
290 usbctrl@80080000 {
291 reg = <0x80080000 0x10000>;
292 status = "disabled";
293 };
294 };
295};
diff --git a/arch/arm/boot/dts/imx27-phytec-phycore.dts b/arch/arm/boot/dts/imx27-phytec-phycore.dts
index a51a08fc2af9..2b0ff60247a4 100644
--- a/arch/arm/boot/dts/imx27-phytec-phycore.dts
+++ b/arch/arm/boot/dts/imx27-phytec-phycore.dts
@@ -27,22 +27,22 @@
27 status = "okay"; 27 status = "okay";
28 }; 28 };
29 29
30 uart@1000a000 { 30 serial@1000a000 {
31 fsl,uart-has-rtscts; 31 fsl,uart-has-rtscts;
32 status = "okay"; 32 status = "okay";
33 }; 33 };
34 34
35 uart@1000b000 { 35 serial@1000b000 {
36 fsl,uart-has-rtscts; 36 fsl,uart-has-rtscts;
37 status = "okay"; 37 status = "okay";
38 }; 38 };
39 39
40 uart@1000c000 { 40 serial@1000c000 {
41 fsl,uart-has-rtscts; 41 fsl,uart-has-rtscts;
42 status = "okay"; 42 status = "okay";
43 }; 43 };
44 44
45 fec@1002b000 { 45 ethernet@1002b000 {
46 status = "okay"; 46 status = "okay";
47 }; 47 };
48 48
diff --git a/arch/arm/boot/dts/imx27.dtsi b/arch/arm/boot/dts/imx27.dtsi
index bc5e7d5ddd54..2b1a166d41f9 100644
--- a/arch/arm/boot/dts/imx27.dtsi
+++ b/arch/arm/boot/dts/imx27.dtsi
@@ -59,28 +59,28 @@
59 status = "disabled"; 59 status = "disabled";
60 }; 60 };
61 61
62 uart1: uart@1000a000 { 62 uart1: serial@1000a000 {
63 compatible = "fsl,imx27-uart", "fsl,imx21-uart"; 63 compatible = "fsl,imx27-uart", "fsl,imx21-uart";
64 reg = <0x1000a000 0x1000>; 64 reg = <0x1000a000 0x1000>;
65 interrupts = <20>; 65 interrupts = <20>;
66 status = "disabled"; 66 status = "disabled";
67 }; 67 };
68 68
69 uart2: uart@1000b000 { 69 uart2: serial@1000b000 {
70 compatible = "fsl,imx27-uart", "fsl,imx21-uart"; 70 compatible = "fsl,imx27-uart", "fsl,imx21-uart";
71 reg = <0x1000b000 0x1000>; 71 reg = <0x1000b000 0x1000>;
72 interrupts = <19>; 72 interrupts = <19>;
73 status = "disabled"; 73 status = "disabled";
74 }; 74 };
75 75
76 uart3: uart@1000c000 { 76 uart3: serial@1000c000 {
77 compatible = "fsl,imx27-uart", "fsl,imx21-uart"; 77 compatible = "fsl,imx27-uart", "fsl,imx21-uart";
78 reg = <0x1000c000 0x1000>; 78 reg = <0x1000c000 0x1000>;
79 interrupts = <18>; 79 interrupts = <18>;
80 status = "disabled"; 80 status = "disabled";
81 }; 81 };
82 82
83 uart4: uart@1000d000 { 83 uart4: serial@1000d000 {
84 compatible = "fsl,imx27-uart", "fsl,imx21-uart"; 84 compatible = "fsl,imx27-uart", "fsl,imx21-uart";
85 reg = <0x1000d000 0x1000>; 85 reg = <0x1000d000 0x1000>;
86 interrupts = <17>; 86 interrupts = <17>;
@@ -183,14 +183,14 @@
183 status = "disabled"; 183 status = "disabled";
184 }; 184 };
185 185
186 uart5: uart@1001b000 { 186 uart5: serial@1001b000 {
187 compatible = "fsl,imx27-uart", "fsl,imx21-uart"; 187 compatible = "fsl,imx27-uart", "fsl,imx21-uart";
188 reg = <0x1001b000 0x1000>; 188 reg = <0x1001b000 0x1000>;
189 interrupts = <49>; 189 interrupts = <49>;
190 status = "disabled"; 190 status = "disabled";
191 }; 191 };
192 192
193 uart6: uart@1001c000 { 193 uart6: serial@1001c000 {
194 compatible = "fsl,imx27-uart", "fsl,imx21-uart"; 194 compatible = "fsl,imx27-uart", "fsl,imx21-uart";
195 reg = <0x1001c000 0x1000>; 195 reg = <0x1001c000 0x1000>;
196 interrupts = <48>; 196 interrupts = <48>;
@@ -206,7 +206,7 @@
206 status = "disabled"; 206 status = "disabled";
207 }; 207 };
208 208
209 fec: fec@1002b000 { 209 fec: ethernet@1002b000 {
210 compatible = "fsl,imx27-fec"; 210 compatible = "fsl,imx27-fec";
211 reg = <0x1002b000 0x4000>; 211 reg = <0x1002b000 0x4000>;
212 interrupts = <50>; 212 interrupts = <50>;
diff --git a/arch/arm/boot/dts/imx28-evk.dts b/arch/arm/boot/dts/imx28-evk.dts
new file mode 100644
index 000000000000..ee520a529cb4
--- /dev/null
+++ b/arch/arm/boot/dts/imx28-evk.dts
@@ -0,0 +1,114 @@
1/*
2 * Copyright 2012 Freescale Semiconductor, Inc.
3 *
4 * The code contained herein is licensed under the GNU General Public
5 * License. You may obtain a copy of the GNU General Public License
6 * Version 2 or later at the following locations:
7 *
8 * http://www.opensource.org/licenses/gpl-license.html
9 * http://www.gnu.org/copyleft/gpl.html
10 */
11
12/dts-v1/;
13/include/ "imx28.dtsi"
14
15/ {
16 model = "Freescale i.MX28 Evaluation Kit";
17 compatible = "fsl,imx28-evk", "fsl,imx28";
18
19 memory {
20 reg = <0x40000000 0x08000000>;
21 };
22
23 apb@80000000 {
24 apbh@80000000 {
25 ssp0: ssp@80010000 {
26 compatible = "fsl,imx28-mmc";
27 pinctrl-names = "default";
28 pinctrl-0 = <&mmc0_8bit_pins_a
29 &mmc0_cd_cfg &mmc0_sck_cfg>;
30 bus-width = <8>;
31 wp-gpios = <&gpio2 12 0>;
32 status = "okay";
33 };
34
35 ssp1: ssp@80012000 {
36 compatible = "fsl,imx28-mmc";
37 bus-width = <8>;
38 wp-gpios = <&gpio0 28 0>;
39 status = "okay";
40 };
41 };
42
43 apbx@80040000 {
44 saif0: saif@80042000 {
45 pinctrl-names = "default";
46 pinctrl-0 = <&saif0_pins_a>;
47 status = "okay";
48 };
49
50 saif1: saif@80046000 {
51 pinctrl-names = "default";
52 pinctrl-0 = <&saif1_pins_a>;
53 fsl,saif-master = <&saif0>;
54 status = "okay";
55 };
56
57 i2c0: i2c@80058000 {
58 pinctrl-names = "default";
59 pinctrl-0 = <&i2c0_pins_a>;
60 status = "okay";
61
62 sgtl5000: codec@0a {
63 compatible = "fsl,sgtl5000";
64 reg = <0x0a>;
65 VDDA-supply = <&reg_3p3v>;
66 VDDIO-supply = <&reg_3p3v>;
67
68 };
69 };
70
71 duart: serial@80074000 {
72 pinctrl-names = "default";
73 pinctrl-0 = <&duart_pins_a>;
74 status = "okay";
75 };
76 };
77 };
78
79 ahb@80080000 {
80 mac0: ethernet@800f0000 {
81 phy-mode = "rmii";
82 pinctrl-names = "default";
83 pinctrl-0 = <&mac0_pins_a>;
84 status = "okay";
85 };
86
87 mac1: ethernet@800f4000 {
88 phy-mode = "rmii";
89 pinctrl-names = "default";
90 pinctrl-0 = <&mac1_pins_a>;
91 status = "okay";
92 };
93 };
94
95 regulators {
96 compatible = "simple-bus";
97
98 reg_3p3v: 3p3v {
99 compatible = "regulator-fixed";
100 regulator-name = "3P3V";
101 regulator-min-microvolt = <3300000>;
102 regulator-max-microvolt = <3300000>;
103 regulator-always-on;
104 };
105 };
106
107 sound {
108 compatible = "fsl,imx28-evk-sgtl5000",
109 "fsl,mxs-audio-sgtl5000";
110 model = "imx28-evk-sgtl5000";
111 saif-controllers = <&saif0 &saif1>;
112 audio-codec = <&sgtl5000>;
113 };
114};
diff --git a/arch/arm/boot/dts/imx28.dtsi b/arch/arm/boot/dts/imx28.dtsi
new file mode 100644
index 000000000000..4634cb861a59
--- /dev/null
+++ b/arch/arm/boot/dts/imx28.dtsi
@@ -0,0 +1,497 @@
1/*
2 * Copyright 2012 Freescale Semiconductor, Inc.
3 *
4 * The code contained herein is licensed under the GNU General Public
5 * License. You may obtain a copy of the GNU General Public License
6 * Version 2 or later at the following locations:
7 *
8 * http://www.opensource.org/licenses/gpl-license.html
9 * http://www.gnu.org/copyleft/gpl.html
10 */
11
12/include/ "skeleton.dtsi"
13
14/ {
15 interrupt-parent = <&icoll>;
16
17 aliases {
18 gpio0 = &gpio0;
19 gpio1 = &gpio1;
20 gpio2 = &gpio2;
21 gpio3 = &gpio3;
22 gpio4 = &gpio4;
23 saif0 = &saif0;
24 saif1 = &saif1;
25 };
26
27 cpus {
28 cpu@0 {
29 compatible = "arm,arm926ejs";
30 };
31 };
32
33 apb@80000000 {
34 compatible = "simple-bus";
35 #address-cells = <1>;
36 #size-cells = <1>;
37 reg = <0x80000000 0x80000>;
38 ranges;
39
40 apbh@80000000 {
41 compatible = "simple-bus";
42 #address-cells = <1>;
43 #size-cells = <1>;
44 reg = <0x80000000 0x3c900>;
45 ranges;
46
47 icoll: interrupt-controller@80000000 {
48 compatible = "fsl,imx28-icoll", "fsl,mxs-icoll";
49 interrupt-controller;
50 #interrupt-cells = <1>;
51 reg = <0x80000000 0x2000>;
52 };
53
54 hsadc@80002000 {
55 reg = <0x80002000 2000>;
56 interrupts = <13 87>;
57 status = "disabled";
58 };
59
60 dma-apbh@80004000 {
61 compatible = "fsl,imx28-dma-apbh";
62 reg = <0x80004000 2000>;
63 };
64
65 perfmon@80006000 {
66 reg = <0x80006000 800>;
67 interrupts = <27>;
68 status = "disabled";
69 };
70
71 bch@8000a000 {
72 reg = <0x8000a000 2000>;
73 interrupts = <41>;
74 status = "disabled";
75 };
76
77 gpmi@8000c000 {
78 reg = <0x8000c000 2000>;
79 interrupts = <42 88>;
80 status = "disabled";
81 };
82
83 ssp0: ssp@80010000 {
84 reg = <0x80010000 2000>;
85 interrupts = <96 82>;
86 fsl,ssp-dma-channel = <0>;
87 status = "disabled";
88 };
89
90 ssp1: ssp@80012000 {
91 reg = <0x80012000 2000>;
92 interrupts = <97 83>;
93 fsl,ssp-dma-channel = <1>;
94 status = "disabled";
95 };
96
97 ssp2: ssp@80014000 {
98 reg = <0x80014000 2000>;
99 interrupts = <98 84>;
100 fsl,ssp-dma-channel = <2>;
101 status = "disabled";
102 };
103
104 ssp3: ssp@80016000 {
105 reg = <0x80016000 2000>;
106 interrupts = <99 85>;
107 fsl,ssp-dma-channel = <3>;
108 status = "disabled";
109 };
110
111 pinctrl@80018000 {
112 #address-cells = <1>;
113 #size-cells = <0>;
114 compatible = "fsl,imx28-pinctrl", "simple-bus";
115 reg = <0x80018000 2000>;
116
117 gpio0: gpio@0 {
118 compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
119 interrupts = <127>;
120 gpio-controller;
121 #gpio-cells = <2>;
122 interrupt-controller;
123 #interrupt-cells = <2>;
124 };
125
126 gpio1: gpio@1 {
127 compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
128 interrupts = <126>;
129 gpio-controller;
130 #gpio-cells = <2>;
131 interrupt-controller;
132 #interrupt-cells = <2>;
133 };
134
135 gpio2: gpio@2 {
136 compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
137 interrupts = <125>;
138 gpio-controller;
139 #gpio-cells = <2>;
140 interrupt-controller;
141 #interrupt-cells = <2>;
142 };
143
144 gpio3: gpio@3 {
145 compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
146 interrupts = <124>;
147 gpio-controller;
148 #gpio-cells = <2>;
149 interrupt-controller;
150 #interrupt-cells = <2>;
151 };
152
153 gpio4: gpio@4 {
154 compatible = "fsl,imx28-gpio", "fsl,mxs-gpio";
155 interrupts = <123>;
156 gpio-controller;
157 #gpio-cells = <2>;
158 interrupt-controller;
159 #interrupt-cells = <2>;
160 };
161
162 duart_pins_a: duart@0 {
163 reg = <0>;
164 fsl,pinmux-ids = <0x3102 0x3112>;
165 fsl,drive-strength = <0>;
166 fsl,voltage = <1>;
167 fsl,pull-up = <0>;
168 };
169
170 mac0_pins_a: mac0@0 {
171 reg = <0>;
172 fsl,pinmux-ids = <0x4000 0x4010 0x4020
173 0x4030 0x4040 0x4060 0x4070
174 0x4080 0x4100>;
175 fsl,drive-strength = <1>;
176 fsl,voltage = <1>;
177 fsl,pull-up = <1>;
178 };
179
180 mac1_pins_a: mac1@0 {
181 reg = <0>;
182 fsl,pinmux-ids = <0x40f1 0x4091 0x40a1
183 0x40e1 0x40b1 0x40c1>;
184 fsl,drive-strength = <1>;
185 fsl,voltage = <1>;
186 fsl,pull-up = <1>;
187 };
188
189 mmc0_8bit_pins_a: mmc0-8bit@0 {
190 reg = <0>;
191 fsl,pinmux-ids = <0x2000 0x2010 0x2020
192 0x2030 0x2040 0x2050 0x2060
193 0x2070 0x2080 0x2090 0x20a0>;
194 fsl,drive-strength = <1>;
195 fsl,voltage = <1>;
196 fsl,pull-up = <1>;
197 };
198
199 mmc0_cd_cfg: mmc0-cd-cfg {
200 fsl,pinmux-ids = <0x2090>;
201 fsl,pull-up = <0>;
202 };
203
204 mmc0_sck_cfg: mmc0-sck-cfg {
205 fsl,pinmux-ids = <0x20a0>;
206 fsl,drive-strength = <2>;
207 fsl,pull-up = <0>;
208 };
209
210 i2c0_pins_a: i2c0@0 {
211 reg = <0>;
212 fsl,pinmux-ids = <0x3180 0x3190>;
213 fsl,drive-strength = <1>;
214 fsl,voltage = <1>;
215 fsl,pull-up = <1>;
216 };
217
218 saif0_pins_a: saif0@0 {
219 reg = <0>;
220 fsl,pinmux-ids =
221 <0x3140 0x3150 0x3160 0x3170>;
222 fsl,drive-strength = <2>;
223 fsl,voltage = <1>;
224 fsl,pull-up = <1>;
225 };
226
227 saif1_pins_a: saif1@0 {
228 reg = <0>;
229 fsl,pinmux-ids = <0x31a0>;
230 fsl,drive-strength = <2>;
231 fsl,voltage = <1>;
232 fsl,pull-up = <1>;
233 };
234 };
235
236 digctl@8001c000 {
237 reg = <0x8001c000 2000>;
238 interrupts = <89>;
239 status = "disabled";
240 };
241
242 etm@80022000 {
243 reg = <0x80022000 2000>;
244 status = "disabled";
245 };
246
247 dma-apbx@80024000 {
248 compatible = "fsl,imx28-dma-apbx";
249 reg = <0x80024000 2000>;
250 };
251
252 dcp@80028000 {
253 reg = <0x80028000 2000>;
254 interrupts = <52 53 54>;
255 status = "disabled";
256 };
257
258 pxp@8002a000 {
259 reg = <0x8002a000 2000>;
260 interrupts = <39>;
261 status = "disabled";
262 };
263
264 ocotp@8002c000 {
265 reg = <0x8002c000 2000>;
266 status = "disabled";
267 };
268
269 axi-ahb@8002e000 {
270 reg = <0x8002e000 2000>;
271 status = "disabled";
272 };
273
274 lcdif@80030000 {
275 reg = <0x80030000 2000>;
276 interrupts = <38 86>;
277 status = "disabled";
278 };
279
280 can0: can@80032000 {
281 reg = <0x80032000 2000>;
282 interrupts = <8>;
283 status = "disabled";
284 };
285
286 can1: can@80034000 {
287 reg = <0x80034000 2000>;
288 interrupts = <9>;
289 status = "disabled";
290 };
291
292 simdbg@8003c000 {
293 reg = <0x8003c000 200>;
294 status = "disabled";
295 };
296
297 simgpmisel@8003c200 {
298 reg = <0x8003c200 100>;
299 status = "disabled";
300 };
301
302 simsspsel@8003c300 {
303 reg = <0x8003c300 100>;
304 status = "disabled";
305 };
306
307 simmemsel@8003c400 {
308 reg = <0x8003c400 100>;
309 status = "disabled";
310 };
311
312 gpiomon@8003c500 {
313 reg = <0x8003c500 100>;
314 status = "disabled";
315 };
316
317 simenet@8003c700 {
318 reg = <0x8003c700 100>;
319 status = "disabled";
320 };
321
322 armjtag@8003c800 {
323 reg = <0x8003c800 100>;
324 status = "disabled";
325 };
326 };
327
328 apbx@80040000 {
329 compatible = "simple-bus";
330 #address-cells = <1>;
331 #size-cells = <1>;
332 reg = <0x80040000 0x40000>;
333 ranges;
334
335 clkctl@80040000 {
336 reg = <0x80040000 2000>;
337 status = "disabled";
338 };
339
340 saif0: saif@80042000 {
341 compatible = "fsl,imx28-saif";
342 reg = <0x80042000 2000>;
343 interrupts = <59 80>;
344 fsl,saif-dma-channel = <4>;
345 status = "disabled";
346 };
347
348 power@80044000 {
349 reg = <0x80044000 2000>;
350 status = "disabled";
351 };
352
353 saif1: saif@80046000 {
354 compatible = "fsl,imx28-saif";
355 reg = <0x80046000 2000>;
356 interrupts = <58 81>;
357 fsl,saif-dma-channel = <5>;
358 status = "disabled";
359 };
360
361 lradc@80050000 {
362 reg = <0x80050000 2000>;
363 status = "disabled";
364 };
365
366 spdif@80054000 {
367 reg = <0x80054000 2000>;
368 interrupts = <45 66>;
369 status = "disabled";
370 };
371
372 rtc@80056000 {
373 reg = <0x80056000 2000>;
374 interrupts = <28 29>;
375 status = "disabled";
376 };
377
378 i2c0: i2c@80058000 {
379 #address-cells = <1>;
380 #size-cells = <0>;
381 compatible = "fsl,imx28-i2c";
382 reg = <0x80058000 2000>;
383 interrupts = <111 68>;
384 status = "disabled";
385 };
386
387 i2c1: i2c@8005a000 {
388 #address-cells = <1>;
389 #size-cells = <0>;
390 compatible = "fsl,imx28-i2c";
391 reg = <0x8005a000 2000>;
392 interrupts = <110 69>;
393 status = "disabled";
394 };
395
396 pwm@80064000 {
397 reg = <0x80064000 2000>;
398 status = "disabled";
399 };
400
401 timrot@80068000 {
402 reg = <0x80068000 2000>;
403 status = "disabled";
404 };
405
406 auart0: serial@8006a000 {
407 reg = <0x8006a000 0x2000>;
408 interrupts = <112 70 71>;
409 status = "disabled";
410 };
411
412 auart1: serial@8006c000 {
413 reg = <0x8006c000 0x2000>;
414 interrupts = <113 72 73>;
415 status = "disabled";
416 };
417
418 auart2: serial@8006e000 {
419 reg = <0x8006e000 0x2000>;
420 interrupts = <114 74 75>;
421 status = "disabled";
422 };
423
424 auart3: serial@80070000 {
425 reg = <0x80070000 0x2000>;
426 interrupts = <115 76 77>;
427 status = "disabled";
428 };
429
430 auart4: serial@80072000 {
431 reg = <0x80072000 0x2000>;
432 interrupts = <116 78 79>;
433 status = "disabled";
434 };
435
436 duart: serial@80074000 {
437 compatible = "arm,pl011", "arm,primecell";
438 reg = <0x80074000 0x1000>;
439 interrupts = <47>;
440 status = "disabled";
441 };
442
443 usbphy0: usbphy@8007c000 {
444 reg = <0x8007c000 0x2000>;
445 status = "disabled";
446 };
447
448 usbphy1: usbphy@8007e000 {
449 reg = <0x8007e000 0x2000>;
450 status = "disabled";
451 };
452 };
453 };
454
455 ahb@80080000 {
456 compatible = "simple-bus";
457 #address-cells = <1>;
458 #size-cells = <1>;
459 reg = <0x80080000 0x80000>;
460 ranges;
461
462 usbctrl0: usbctrl@80080000 {
463 reg = <0x80080000 0x10000>;
464 status = "disabled";
465 };
466
467 usbctrl1: usbctrl@80090000 {
468 reg = <0x80090000 0x10000>;
469 status = "disabled";
470 };
471
472 dflpt@800c0000 {
473 reg = <0x800c0000 0x10000>;
474 status = "disabled";
475 };
476
477 mac0: ethernet@800f0000 {
478 compatible = "fsl,imx28-fec";
479 reg = <0x800f0000 0x4000>;
480 interrupts = <101>;
481 status = "disabled";
482 };
483
484 mac1: ethernet@800f4000 {
485 compatible = "fsl,imx28-fec";
486 reg = <0x800f4000 0x4000>;
487 interrupts = <102>;
488 status = "disabled";
489 };
490
491 switch@800f8000 {
492 reg = <0x800f8000 0x8000>;
493 status = "disabled";
494 };
495
496 };
497};
diff --git a/arch/arm/boot/dts/imx51-babbage.dts b/arch/arm/boot/dts/imx51-babbage.dts
index 9949e6060dee..de065b5976e6 100644
--- a/arch/arm/boot/dts/imx51-babbage.dts
+++ b/arch/arm/boot/dts/imx51-babbage.dts
@@ -17,10 +17,6 @@
17 model = "Freescale i.MX51 Babbage Board"; 17 model = "Freescale i.MX51 Babbage Board";
18 compatible = "fsl,imx51-babbage", "fsl,imx51"; 18 compatible = "fsl,imx51-babbage", "fsl,imx51";
19 19
20 chosen {
21 bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
22 };
23
24 memory { 20 memory {
25 reg = <0x90000000 0x20000000>; 21 reg = <0x90000000 0x20000000>;
26 }; 22 };
@@ -40,7 +36,7 @@
40 status = "okay"; 36 status = "okay";
41 }; 37 };
42 38
43 uart3: uart@7000c000 { 39 uart3: serial@7000c000 {
44 fsl,uart-has-rtscts; 40 fsl,uart-has-rtscts;
45 status = "okay"; 41 status = "okay";
46 }; 42 };
@@ -166,6 +162,11 @@
166 }; 162 };
167 }; 163 };
168 }; 164 };
165
166 ssi2: ssi@70014000 {
167 fsl,mode = "i2s-slave";
168 status = "okay";
169 };
169 }; 170 };
170 171
171 wdog@73f98000 { /* WDOG1 */ 172 wdog@73f98000 { /* WDOG1 */
@@ -177,12 +178,12 @@
177 reg = <0x73fa8000 0x4000>; 178 reg = <0x73fa8000 0x4000>;
178 }; 179 };
179 180
180 uart1: uart@73fbc000 { 181 uart1: serial@73fbc000 {
181 fsl,uart-has-rtscts; 182 fsl,uart-has-rtscts;
182 status = "okay"; 183 status = "okay";
183 }; 184 };
184 185
185 uart2: uart@73fc0000 { 186 uart2: serial@73fc0000 {
186 status = "okay"; 187 status = "okay";
187 }; 188 };
188 }; 189 };
@@ -195,13 +196,20 @@
195 i2c@83fc4000 { /* I2C2 */ 196 i2c@83fc4000 { /* I2C2 */
196 status = "okay"; 197 status = "okay";
197 198
198 codec: sgtl5000@0a { 199 sgtl5000: codec@0a {
199 compatible = "fsl,sgtl5000"; 200 compatible = "fsl,sgtl5000";
200 reg = <0x0a>; 201 reg = <0x0a>;
202 clock-frequency = <26000000>;
203 VDDA-supply = <&vdig_reg>;
204 VDDIO-supply = <&vvideo_reg>;
201 }; 205 };
202 }; 206 };
203 207
204 fec@83fec000 { 208 audmux@83fd0000 {
209 status = "okay";
210 };
211
212 ethernet@83fec000 {
205 phy-mode = "mii"; 213 phy-mode = "mii";
206 status = "okay"; 214 status = "okay";
207 }; 215 };
@@ -218,4 +226,18 @@
218 gpio-key,wakeup; 226 gpio-key,wakeup;
219 }; 227 };
220 }; 228 };
229
230 sound {
231 compatible = "fsl,imx51-babbage-sgtl5000",
232 "fsl,imx-audio-sgtl5000";
233 model = "imx51-babbage-sgtl5000";
234 ssi-controller = <&ssi2>;
235 audio-codec = <&sgtl5000>;
236 audio-routing =
237 "MIC_IN", "Mic Jack",
238 "Mic Jack", "Mic Bias",
239 "Headphone Jack", "HP_OUT";
240 mux-int-port = <2>;
241 mux-ext-port = <3>;
242 };
221}; 243};
diff --git a/arch/arm/boot/dts/imx51.dtsi b/arch/arm/boot/dts/imx51.dtsi
index 6663986fe1c8..bfa65abe8ef2 100644
--- a/arch/arm/boot/dts/imx51.dtsi
+++ b/arch/arm/boot/dts/imx51.dtsi
@@ -86,7 +86,7 @@
86 status = "disabled"; 86 status = "disabled";
87 }; 87 };
88 88
89 uart3: uart@7000c000 { 89 uart3: serial@7000c000 {
90 compatible = "fsl,imx51-uart", "fsl,imx21-uart"; 90 compatible = "fsl,imx51-uart", "fsl,imx21-uart";
91 reg = <0x7000c000 0x4000>; 91 reg = <0x7000c000 0x4000>;
92 interrupts = <33>; 92 interrupts = <33>;
@@ -102,6 +102,15 @@
102 status = "disabled"; 102 status = "disabled";
103 }; 103 };
104 104
105 ssi2: ssi@70014000 {
106 compatible = "fsl,imx51-ssi", "fsl,imx21-ssi";
107 reg = <0x70014000 0x4000>;
108 interrupts = <30>;
109 fsl,fifo-depth = <15>;
110 fsl,ssi-dma-events = <25 24 23 22>; /* TX0 RX0 TX1 RX1 */
111 status = "disabled";
112 };
113
105 esdhc@70020000 { /* ESDHC3 */ 114 esdhc@70020000 { /* ESDHC3 */
106 compatible = "fsl,imx51-esdhc"; 115 compatible = "fsl,imx51-esdhc";
107 reg = <0x70020000 0x4000>; 116 reg = <0x70020000 0x4000>;
@@ -171,14 +180,14 @@
171 status = "disabled"; 180 status = "disabled";
172 }; 181 };
173 182
174 uart1: uart@73fbc000 { 183 uart1: serial@73fbc000 {
175 compatible = "fsl,imx51-uart", "fsl,imx21-uart"; 184 compatible = "fsl,imx51-uart", "fsl,imx21-uart";
176 reg = <0x73fbc000 0x4000>; 185 reg = <0x73fbc000 0x4000>;
177 interrupts = <31>; 186 interrupts = <31>;
178 status = "disabled"; 187 status = "disabled";
179 }; 188 };
180 189
181 uart2: uart@73fc0000 { 190 uart2: serial@73fc0000 {
182 compatible = "fsl,imx51-uart", "fsl,imx21-uart"; 191 compatible = "fsl,imx51-uart", "fsl,imx21-uart";
183 reg = <0x73fc0000 0x4000>; 192 reg = <0x73fc0000 0x4000>;
184 interrupts = <32>; 193 interrupts = <32>;
@@ -235,7 +244,31 @@
235 status = "disabled"; 244 status = "disabled";
236 }; 245 };
237 246
238 fec@83fec000 { 247 ssi1: ssi@83fcc000 {
248 compatible = "fsl,imx51-ssi", "fsl,imx21-ssi";
249 reg = <0x83fcc000 0x4000>;
250 interrupts = <29>;
251 fsl,fifo-depth = <15>;
252 fsl,ssi-dma-events = <29 28 27 26>; /* TX0 RX0 TX1 RX1 */
253 status = "disabled";
254 };
255
256 audmux@83fd0000 {
257 compatible = "fsl,imx51-audmux", "fsl,imx31-audmux";
258 reg = <0x83fd0000 0x4000>;
259 status = "disabled";
260 };
261
262 ssi3: ssi@83fe8000 {
263 compatible = "fsl,imx51-ssi", "fsl,imx21-ssi";
264 reg = <0x83fe8000 0x4000>;
265 interrupts = <96>;
266 fsl,fifo-depth = <15>;
267 fsl,ssi-dma-events = <47 46 37 35>; /* TX0 RX0 TX1 RX1 */
268 status = "disabled";
269 };
270
271 ethernet@83fec000 {
239 compatible = "fsl,imx51-fec", "fsl,imx27-fec"; 272 compatible = "fsl,imx51-fec", "fsl,imx27-fec";
240 reg = <0x83fec000 0x4000>; 273 reg = <0x83fec000 0x4000>;
241 interrupts = <87>; 274 interrupts = <87>;
diff --git a/arch/arm/boot/dts/imx53-ard.dts b/arch/arm/boot/dts/imx53-ard.dts
index 2dccce46ed81..5b8eafcdbeec 100644
--- a/arch/arm/boot/dts/imx53-ard.dts
+++ b/arch/arm/boot/dts/imx53-ard.dts
@@ -17,10 +17,6 @@
17 model = "Freescale i.MX53 Automotive Reference Design Board"; 17 model = "Freescale i.MX53 Automotive Reference Design Board";
18 compatible = "fsl,imx53-ard", "fsl,imx53"; 18 compatible = "fsl,imx53-ard", "fsl,imx53";
19 19
20 chosen {
21 bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
22 };
23
24 memory { 20 memory {
25 reg = <0x70000000 0x40000000>; 21 reg = <0x70000000 0x40000000>;
26 }; 22 };
@@ -44,7 +40,7 @@
44 reg = <0x53fa8000 0x4000>; 40 reg = <0x53fa8000 0x4000>;
45 }; 41 };
46 42
47 uart1: uart@53fbc000 { 43 uart1: serial@53fbc000 {
48 status = "okay"; 44 status = "okay";
49 }; 45 };
50 }; 46 };
diff --git a/arch/arm/boot/dts/imx53-evk.dts b/arch/arm/boot/dts/imx53-evk.dts
index 5bac4aa4800b..9c798034675e 100644
--- a/arch/arm/boot/dts/imx53-evk.dts
+++ b/arch/arm/boot/dts/imx53-evk.dts
@@ -17,10 +17,6 @@
17 model = "Freescale i.MX53 Evaluation Kit"; 17 model = "Freescale i.MX53 Evaluation Kit";
18 compatible = "fsl,imx53-evk", "fsl,imx53"; 18 compatible = "fsl,imx53-evk", "fsl,imx53";
19 19
20 chosen {
21 bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
22 };
23
24 memory { 20 memory {
25 reg = <0x70000000 0x80000000>; 21 reg = <0x70000000 0x80000000>;
26 }; 22 };
@@ -75,7 +71,7 @@
75 reg = <0x53fa8000 0x4000>; 71 reg = <0x53fa8000 0x4000>;
76 }; 72 };
77 73
78 uart1: uart@53fbc000 { 74 uart1: serial@53fbc000 {
79 status = "okay"; 75 status = "okay";
80 }; 76 };
81 }; 77 };
@@ -99,7 +95,7 @@
99 }; 95 };
100 }; 96 };
101 97
102 fec@63fec000 { 98 ethernet@63fec000 {
103 phy-mode = "rmii"; 99 phy-mode = "rmii";
104 phy-reset-gpios = <&gpio7 6 0>; 100 phy-reset-gpios = <&gpio7 6 0>;
105 status = "okay"; 101 status = "okay";
diff --git a/arch/arm/boot/dts/imx53-qsb.dts b/arch/arm/boot/dts/imx53-qsb.dts
index 5c57c8672c36..2d803a9a6949 100644
--- a/arch/arm/boot/dts/imx53-qsb.dts
+++ b/arch/arm/boot/dts/imx53-qsb.dts
@@ -17,10 +17,6 @@
17 model = "Freescale i.MX53 Quick Start Board"; 17 model = "Freescale i.MX53 Quick Start Board";
18 compatible = "fsl,imx53-qsb", "fsl,imx53"; 18 compatible = "fsl,imx53-qsb", "fsl,imx53";
19 19
20 chosen {
21 bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
22 };
23
24 memory { 20 memory {
25 reg = <0x70000000 0x40000000>; 21 reg = <0x70000000 0x40000000>;
26 }; 22 };
@@ -33,6 +29,11 @@
33 status = "okay"; 29 status = "okay";
34 }; 30 };
35 31
32 ssi2: ssi@50014000 {
33 fsl,mode = "i2s-slave";
34 status = "okay";
35 };
36
36 esdhc@50020000 { /* ESDHC3 */ 37 esdhc@50020000 { /* ESDHC3 */
37 cd-gpios = <&gpio3 11 0>; 38 cd-gpios = <&gpio3 11 0>;
38 wp-gpios = <&gpio3 12 0>; 39 wp-gpios = <&gpio3 12 0>;
@@ -49,7 +50,7 @@
49 reg = <0x53fa8000 0x4000>; 50 reg = <0x53fa8000 0x4000>;
50 }; 51 };
51 52
52 uart1: uart@53fbc000 { 53 uart1: serial@53fbc000 {
53 status = "okay"; 54 status = "okay";
54 }; 55 };
55 }; 56 };
@@ -62,9 +63,11 @@
62 i2c@63fc4000 { /* I2C2 */ 63 i2c@63fc4000 { /* I2C2 */
63 status = "okay"; 64 status = "okay";
64 65
65 codec: sgtl5000@0a { 66 sgtl5000: codec@0a {
66 compatible = "fsl,sgtl5000"; 67 compatible = "fsl,sgtl5000";
67 reg = <0x0a>; 68 reg = <0x0a>;
69 VDDA-supply = <&reg_3p2v>;
70 VDDIO-supply = <&reg_3p2v>;
68 }; 71 };
69 }; 72 };
70 73
@@ -77,12 +80,88 @@
77 }; 80 };
78 81
79 pmic: dialog@48 { 82 pmic: dialog@48 {
80 compatible = "dialog,da9053", "dialog,da9052"; 83 compatible = "dlg,da9053-aa", "dlg,da9052";
81 reg = <0x48>; 84 reg = <0x48>;
85
86 regulators {
87 buck0 {
88 regulator-min-microvolt = <500000>;
89 regulator-max-microvolt = <2075000>;
90 };
91
92 buck1 {
93 regulator-min-microvolt = <500000>;
94 regulator-max-microvolt = <2075000>;
95 };
96
97 buck2 {
98 regulator-min-microvolt = <925000>;
99 regulator-max-microvolt = <2500000>;
100 };
101
102 buck3 {
103 regulator-min-microvolt = <925000>;
104 regulator-max-microvolt = <2500000>;
105 };
106
107 ldo4 {
108 regulator-min-microvolt = <600000>;
109 regulator-max-microvolt = <1800000>;
110 };
111
112 ldo5 {
113 regulator-min-microvolt = <600000>;
114 regulator-max-microvolt = <1800000>;
115 };
116
117 ldo6 {
118 regulator-min-microvolt = <1725000>;
119 regulator-max-microvolt = <3300000>;
120 };
121
122 ldo7 {
123 regulator-min-microvolt = <1725000>;
124 regulator-max-microvolt = <3300000>;
125 };
126
127 ldo8 {
128 regulator-min-microvolt = <1200000>;
129 regulator-max-microvolt = <3600000>;
130 };
131
132 ldo9 {
133 regulator-min-microvolt = <1200000>;
134 regulator-max-microvolt = <3600000>;
135 };
136
137 ldo10 {
138 regulator-min-microvolt = <1200000>;
139 regulator-max-microvolt = <3600000>;
140 };
141
142 ldo11 {
143 regulator-min-microvolt = <1200000>;
144 regulator-max-microvolt = <3600000>;
145 };
146
147 ldo12 {
148 regulator-min-microvolt = <1250000>;
149 regulator-max-microvolt = <3650000>;
150 };
151
152 ldo13 {
153 regulator-min-microvolt = <1200000>;
154 regulator-max-microvolt = <3600000>;
155 };
156 };
82 }; 157 };
83 }; 158 };
84 159
85 fec@63fec000 { 160 audmux@63fd0000 {
161 status = "okay";
162 };
163
164 ethernet@63fec000 {
86 phy-mode = "rmii"; 165 phy-mode = "rmii";
87 phy-reset-gpios = <&gpio7 6 0>; 166 phy-reset-gpios = <&gpio7 6 0>;
88 status = "okay"; 167 status = "okay";
@@ -122,4 +201,30 @@
122 linux,default-trigger = "heartbeat"; 201 linux,default-trigger = "heartbeat";
123 }; 202 };
124 }; 203 };
204
205 regulators {
206 compatible = "simple-bus";
207
208 reg_3p2v: 3p2v {
209 compatible = "regulator-fixed";
210 regulator-name = "3P2V";
211 regulator-min-microvolt = <3200000>;
212 regulator-max-microvolt = <3200000>;
213 regulator-always-on;
214 };
215 };
216
217 sound {
218 compatible = "fsl,imx53-qsb-sgtl5000",
219 "fsl,imx-audio-sgtl5000";
220 model = "imx53-qsb-sgtl5000";
221 ssi-controller = <&ssi2>;
222 audio-codec = <&sgtl5000>;
223 audio-routing =
224 "MIC_IN", "Mic Jack",
225 "Mic Jack", "Mic Bias",
226 "Headphone Jack", "HP_OUT";
227 mux-int-port = <2>;
228 mux-ext-port = <5>;
229 };
125}; 230};
diff --git a/arch/arm/boot/dts/imx53-smd.dts b/arch/arm/boot/dts/imx53-smd.dts
index c7ee86c2dfb5..08091029168e 100644
--- a/arch/arm/boot/dts/imx53-smd.dts
+++ b/arch/arm/boot/dts/imx53-smd.dts
@@ -17,10 +17,6 @@
17 model = "Freescale i.MX53 Smart Mobile Reference Design Board"; 17 model = "Freescale i.MX53 Smart Mobile Reference Design Board";
18 compatible = "fsl,imx53-smd", "fsl,imx53"; 18 compatible = "fsl,imx53-smd", "fsl,imx53";
19 19
20 chosen {
21 bootargs = "console=ttymxc0,115200 root=/dev/mmcblk0p3 rootwait";
22 };
23
24 memory { 20 memory {
25 reg = <0x70000000 0x40000000>; 21 reg = <0x70000000 0x40000000>;
26 }; 22 };
@@ -35,11 +31,11 @@
35 }; 31 };
36 32
37 esdhc@50008000 { /* ESDHC2 */ 33 esdhc@50008000 { /* ESDHC2 */
38 fsl,card-wired; 34 non-removable;
39 status = "okay"; 35 status = "okay";
40 }; 36 };
41 37
42 uart3: uart@5000c000 { 38 uart3: serial@5000c000 {
43 fsl,uart-has-rtscts; 39 fsl,uart-has-rtscts;
44 status = "okay"; 40 status = "okay";
45 }; 41 };
@@ -76,7 +72,7 @@
76 }; 72 };
77 73
78 esdhc@50020000 { /* ESDHC3 */ 74 esdhc@50020000 { /* ESDHC3 */
79 fsl,card-wired; 75 non-removable;
80 status = "okay"; 76 status = "okay";
81 }; 77 };
82 }; 78 };
@@ -90,11 +86,11 @@
90 reg = <0x53fa8000 0x4000>; 86 reg = <0x53fa8000 0x4000>;
91 }; 87 };
92 88
93 uart1: uart@53fbc000 { 89 uart1: serial@53fbc000 {
94 status = "okay"; 90 status = "okay";
95 }; 91 };
96 92
97 uart2: uart@53fc0000 { 93 uart2: serial@53fc0000 {
98 status = "okay"; 94 status = "okay";
99 }; 95 };
100 }; 96 };
@@ -142,7 +138,7 @@
142 }; 138 };
143 }; 139 };
144 140
145 fec@63fec000 { 141 ethernet@63fec000 {
146 phy-mode = "rmii"; 142 phy-mode = "rmii";
147 phy-reset-gpios = <&gpio7 6 0>; 143 phy-reset-gpios = <&gpio7 6 0>;
148 status = "okay"; 144 status = "okay";
diff --git a/arch/arm/boot/dts/imx53.dtsi b/arch/arm/boot/dts/imx53.dtsi
index 5dd91b942c91..e3e869470cd3 100644
--- a/arch/arm/boot/dts/imx53.dtsi
+++ b/arch/arm/boot/dts/imx53.dtsi
@@ -88,7 +88,7 @@
88 status = "disabled"; 88 status = "disabled";
89 }; 89 };
90 90
91 uart3: uart@5000c000 { 91 uart3: serial@5000c000 {
92 compatible = "fsl,imx53-uart", "fsl,imx21-uart"; 92 compatible = "fsl,imx53-uart", "fsl,imx21-uart";
93 reg = <0x5000c000 0x4000>; 93 reg = <0x5000c000 0x4000>;
94 interrupts = <33>; 94 interrupts = <33>;
@@ -104,6 +104,15 @@
104 status = "disabled"; 104 status = "disabled";
105 }; 105 };
106 106
107 ssi2: ssi@50014000 {
108 compatible = "fsl,imx53-ssi", "fsl,imx21-ssi";
109 reg = <0x50014000 0x4000>;
110 interrupts = <30>;
111 fsl,fifo-depth = <15>;
112 fsl,ssi-dma-events = <25 24 23 22>; /* TX0 RX0 TX1 RX1 */
113 status = "disabled";
114 };
115
107 esdhc@50020000 { /* ESDHC3 */ 116 esdhc@50020000 { /* ESDHC3 */
108 compatible = "fsl,imx53-esdhc"; 117 compatible = "fsl,imx53-esdhc";
109 reg = <0x50020000 0x4000>; 118 reg = <0x50020000 0x4000>;
@@ -173,14 +182,14 @@
173 status = "disabled"; 182 status = "disabled";
174 }; 183 };
175 184
176 uart1: uart@53fbc000 { 185 uart1: serial@53fbc000 {
177 compatible = "fsl,imx53-uart", "fsl,imx21-uart"; 186 compatible = "fsl,imx53-uart", "fsl,imx21-uart";
178 reg = <0x53fbc000 0x4000>; 187 reg = <0x53fbc000 0x4000>;
179 interrupts = <31>; 188 interrupts = <31>;
180 status = "disabled"; 189 status = "disabled";
181 }; 190 };
182 191
183 uart2: uart@53fc0000 { 192 uart2: serial@53fc0000 {
184 compatible = "fsl,imx53-uart", "fsl,imx21-uart"; 193 compatible = "fsl,imx53-uart", "fsl,imx21-uart";
185 reg = <0x53fc0000 0x4000>; 194 reg = <0x53fc0000 0x4000>;
186 interrupts = <32>; 195 interrupts = <32>;
@@ -226,7 +235,7 @@
226 status = "disabled"; 235 status = "disabled";
227 }; 236 };
228 237
229 uart4: uart@53ff0000 { 238 uart4: serial@53ff0000 {
230 compatible = "fsl,imx53-uart", "fsl,imx21-uart"; 239 compatible = "fsl,imx53-uart", "fsl,imx21-uart";
231 reg = <0x53ff0000 0x4000>; 240 reg = <0x53ff0000 0x4000>;
232 interrupts = <13>; 241 interrupts = <13>;
@@ -241,7 +250,7 @@
241 reg = <0x60000000 0x10000000>; 250 reg = <0x60000000 0x10000000>;
242 ranges; 251 ranges;
243 252
244 uart5: uart@63f90000 { 253 uart5: serial@63f90000 {
245 compatible = "fsl,imx53-uart", "fsl,imx21-uart"; 254 compatible = "fsl,imx53-uart", "fsl,imx21-uart";
246 reg = <0x63f90000 0x4000>; 255 reg = <0x63f90000 0x4000>;
247 interrupts = <86>; 256 interrupts = <86>;
@@ -290,7 +299,31 @@
290 status = "disabled"; 299 status = "disabled";
291 }; 300 };
292 301
293 fec@63fec000 { 302 ssi1: ssi@63fcc000 {
303 compatible = "fsl,imx53-ssi", "fsl,imx21-ssi";
304 reg = <0x63fcc000 0x4000>;
305 interrupts = <29>;
306 fsl,fifo-depth = <15>;
307 fsl,ssi-dma-events = <29 28 27 26>; /* TX0 RX0 TX1 RX1 */
308 status = "disabled";
309 };
310
311 audmux@63fd0000 {
312 compatible = "fsl,imx53-audmux", "fsl,imx31-audmux";
313 reg = <0x63fd0000 0x4000>;
314 status = "disabled";
315 };
316
317 ssi3: ssi@63fe8000 {
318 compatible = "fsl,imx53-ssi", "fsl,imx21-ssi";
319 reg = <0x63fe8000 0x4000>;
320 interrupts = <96>;
321 fsl,fifo-depth = <15>;
322 fsl,ssi-dma-events = <47 46 45 44>; /* TX0 RX0 TX1 RX1 */
323 status = "disabled";
324 };
325
326 ethernet@63fec000 {
294 compatible = "fsl,imx53-fec", "fsl,imx25-fec"; 327 compatible = "fsl,imx53-fec", "fsl,imx25-fec";
295 reg = <0x63fec000 0x4000>; 328 reg = <0x63fec000 0x4000>;
296 interrupts = <87>; 329 interrupts = <87>;
diff --git a/arch/arm/boot/dts/imx6q-arm2.dts b/arch/arm/boot/dts/imx6q-arm2.dts
index ce1c8238c897..db4c6096c562 100644
--- a/arch/arm/boot/dts/imx6q-arm2.dts
+++ b/arch/arm/boot/dts/imx6q-arm2.dts
@@ -17,19 +17,14 @@
17 model = "Freescale i.MX6 Quad Armadillo2 Board"; 17 model = "Freescale i.MX6 Quad Armadillo2 Board";
18 compatible = "fsl,imx6q-arm2", "fsl,imx6q"; 18 compatible = "fsl,imx6q-arm2", "fsl,imx6q";
19 19
20 chosen {
21 bootargs = "console=ttymxc0,115200 root=/dev/mmcblk3p3 rootwait";
22 };
23
24 memory { 20 memory {
25 reg = <0x10000000 0x80000000>; 21 reg = <0x10000000 0x80000000>;
26 }; 22 };
27 23
28 soc { 24 soc {
29 aips-bus@02100000 { /* AIPS2 */ 25 aips-bus@02100000 { /* AIPS2 */
30 enet@02188000 { 26 ethernet@02188000 {
31 phy-mode = "rgmii"; 27 phy-mode = "rgmii";
32 local-mac-address = [00 04 9F 01 1B 61];
33 status = "okay"; 28 status = "okay";
34 }; 29 };
35 30
@@ -37,16 +32,20 @@
37 cd-gpios = <&gpio6 11 0>; 32 cd-gpios = <&gpio6 11 0>;
38 wp-gpios = <&gpio6 14 0>; 33 wp-gpios = <&gpio6 14 0>;
39 vmmc-supply = <&reg_3p3v>; 34 vmmc-supply = <&reg_3p3v>;
35 pinctrl-names = "default";
36 pinctrl-0 = <&pinctrl_usdhc3_1>;
40 status = "okay"; 37 status = "okay";
41 }; 38 };
42 39
43 usdhc@0219c000 { /* uSDHC4 */ 40 usdhc@0219c000 { /* uSDHC4 */
44 fsl,card-wired; 41 non-removable;
45 vmmc-supply = <&reg_3p3v>; 42 vmmc-supply = <&reg_3p3v>;
43 pinctrl-names = "default";
44 pinctrl-0 = <&pinctrl_usdhc4_1>;
46 status = "okay"; 45 status = "okay";
47 }; 46 };
48 47
49 uart4: uart@021f0000 { 48 uart4: serial@021f0000 {
50 status = "okay"; 49 status = "okay";
51 }; 50 };
52 }; 51 };
diff --git a/arch/arm/boot/dts/imx6q-sabrelite.dts b/arch/arm/boot/dts/imx6q-sabrelite.dts
index 4663a4e5a285..e0ec92973e7e 100644
--- a/arch/arm/boot/dts/imx6q-sabrelite.dts
+++ b/arch/arm/boot/dts/imx6q-sabrelite.dts
@@ -22,8 +22,30 @@
22 }; 22 };
23 23
24 soc { 24 soc {
25 aips-bus@02000000 { /* AIPS1 */
26 spba-bus@02000000 {
27 ecspi@02008000 { /* eCSPI1 */
28 fsl,spi-num-chipselects = <1>;
29 cs-gpios = <&gpio3 19 0>;
30 status = "okay";
31
32 flash: m25p80@0 {
33 compatible = "sst,sst25vf016b";
34 spi-max-frequency = <20000000>;
35 reg = <0>;
36 };
37 };
38
39 ssi1: ssi@02028000 {
40 fsl,mode = "i2s-slave";
41 status = "okay";
42 };
43 };
44
45 };
46
25 aips-bus@02100000 { /* AIPS2 */ 47 aips-bus@02100000 { /* AIPS2 */
26 enet@02188000 { 48 ethernet@02188000 {
27 phy-mode = "rgmii"; 49 phy-mode = "rgmii";
28 phy-reset-gpios = <&gpio3 23 0>; 50 phy-reset-gpios = <&gpio3 23 0>;
29 status = "okay"; 51 status = "okay";
@@ -43,13 +65,23 @@
43 status = "okay"; 65 status = "okay";
44 }; 66 };
45 67
46 uart2: uart@021e8000 { 68 audmux@021d8000 {
69 status = "okay";
70 pinctrl-names = "default";
71 pinctrl-0 = <&pinctrl_audmux_1>;
72 };
73
74 uart2: serial@021e8000 {
47 status = "okay"; 75 status = "okay";
76 pinctrl-names = "default";
77 pinctrl-0 = <&pinctrl_serial2_1>;
48 }; 78 };
49 79
50 i2c@021a0000 { /* I2C1 */ 80 i2c@021a0000 { /* I2C1 */
51 status = "okay"; 81 status = "okay";
52 clock-frequency = <100000>; 82 clock-frequency = <100000>;
83 pinctrl-names = "default";
84 pinctrl-0 = <&pinctrl_i2c1_1>;
53 85
54 codec: sgtl5000@0a { 86 codec: sgtl5000@0a {
55 compatible = "fsl,sgtl5000"; 87 compatible = "fsl,sgtl5000";
@@ -80,4 +112,18 @@
80 regulator-always-on; 112 regulator-always-on;
81 }; 113 };
82 }; 114 };
115
116 sound {
117 compatible = "fsl,imx6q-sabrelite-sgtl5000",
118 "fsl,imx-audio-sgtl5000";
119 model = "imx6q-sabrelite-sgtl5000";
120 ssi-controller = <&ssi1>;
121 audio-codec = <&codec>;
122 audio-routing =
123 "MIC_IN", "Mic Jack",
124 "Mic Jack", "Mic Bias",
125 "Headphone Jack", "HP_OUT";
126 mux-int-port = <1>;
127 mux-ext-port = <4>;
128 };
83}; 129};
diff --git a/arch/arm/boot/dts/imx6q-sabresd.dts b/arch/arm/boot/dts/imx6q-sabresd.dts
new file mode 100644
index 000000000000..07509a181178
--- /dev/null
+++ b/arch/arm/boot/dts/imx6q-sabresd.dts
@@ -0,0 +1,53 @@
1/*
2 * Copyright 2012 Freescale Semiconductor, Inc.
3 * Copyright 2011 Linaro Ltd.
4 *
5 * The code contained herein is licensed under the GNU General Public
6 * License. You may obtain a copy of the GNU General Public License
7 * Version 2 or later at the following locations:
8 *
9 * http://www.opensource.org/licenses/gpl-license.html
10 * http://www.gnu.org/copyleft/gpl.html
11 */
12
13/dts-v1/;
14/include/ "imx6q.dtsi"
15
16/ {
17 model = "Freescale i.MX6Q SABRE Smart Device Board";
18 compatible = "fsl,imx6q-sabresd", "fsl,imx6q";
19
20 memory {
21 reg = <0x10000000 0x40000000>;
22 };
23
24 soc {
25
26 aips-bus@02000000 { /* AIPS1 */
27 spba-bus@02000000 {
28 uart1: serial@02020000 {
29 status = "okay";
30 };
31 };
32 };
33
34 aips-bus@02100000 { /* AIPS2 */
35 ethernet@02188000 {
36 phy-mode = "rgmii";
37 status = "okay";
38 };
39
40 usdhc@02194000 { /* uSDHC2 */
41 cd-gpios = <&gpio2 2 0>;
42 wp-gpios = <&gpio2 3 0>;
43 status = "okay";
44 };
45
46 usdhc@02198000 { /* uSDHC3 */
47 cd-gpios = <&gpio2 0 0>;
48 wp-gpios = <&gpio2 1 0>;
49 status = "okay";
50 };
51 };
52 };
53};
diff --git a/arch/arm/boot/dts/imx6q.dtsi b/arch/arm/boot/dts/imx6q.dtsi
index 4905f51a106f..8c90cbac945f 100644
--- a/arch/arm/boot/dts/imx6q.dtsi
+++ b/arch/arm/boot/dts/imx6q.dtsi
@@ -165,7 +165,7 @@
165 status = "disabled"; 165 status = "disabled";
166 }; 166 };
167 167
168 uart1: uart@02020000 { 168 uart1: serial@02020000 {
169 compatible = "fsl,imx6q-uart", "fsl,imx21-uart"; 169 compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
170 reg = <0x02020000 0x4000>; 170 reg = <0x02020000 0x4000>;
171 interrupts = <0 26 0x04>; 171 interrupts = <0 26 0x04>;
@@ -177,19 +177,31 @@
177 interrupts = <0 51 0x04>; 177 interrupts = <0 51 0x04>;
178 }; 178 };
179 179
180 ssi@02028000 { /* SSI1 */ 180 ssi1: ssi@02028000 {
181 compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
181 reg = <0x02028000 0x4000>; 182 reg = <0x02028000 0x4000>;
182 interrupts = <0 46 0x04>; 183 interrupts = <0 46 0x04>;
184 fsl,fifo-depth = <15>;
185 fsl,ssi-dma-events = <38 37>;
186 status = "disabled";
183 }; 187 };
184 188
185 ssi@0202c000 { /* SSI2 */ 189 ssi2: ssi@0202c000 {
190 compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
186 reg = <0x0202c000 0x4000>; 191 reg = <0x0202c000 0x4000>;
187 interrupts = <0 47 0x04>; 192 interrupts = <0 47 0x04>;
193 fsl,fifo-depth = <15>;
194 fsl,ssi-dma-events = <42 41>;
195 status = "disabled";
188 }; 196 };
189 197
190 ssi@02030000 { /* SSI3 */ 198 ssi3: ssi@02030000 {
199 compatible = "fsl,imx6q-ssi","fsl,imx21-ssi";
191 reg = <0x02030000 0x4000>; 200 reg = <0x02030000 0x4000>;
192 interrupts = <0 48 0x04>; 201 interrupts = <0 48 0x04>;
202 fsl,fifo-depth = <15>;
203 fsl,ssi-dma-events = <46 45>;
204 status = "disabled";
193 }; 205 };
194 206
195 asrc@02034000 { 207 asrc@02034000 {
@@ -346,6 +358,90 @@
346 compatible = "fsl,imx6q-anatop"; 358 compatible = "fsl,imx6q-anatop";
347 reg = <0x020c8000 0x1000>; 359 reg = <0x020c8000 0x1000>;
348 interrupts = <0 49 0x04 0 54 0x04 0 127 0x04>; 360 interrupts = <0 49 0x04 0 54 0x04 0 127 0x04>;
361
362 regulator-1p1@110 {
363 compatible = "fsl,anatop-regulator";
364 regulator-name = "vdd1p1";
365 regulator-min-microvolt = <800000>;
366 regulator-max-microvolt = <1375000>;
367 regulator-always-on;
368 anatop-reg-offset = <0x110>;
369 anatop-vol-bit-shift = <8>;
370 anatop-vol-bit-width = <5>;
371 anatop-min-bit-val = <4>;
372 anatop-min-voltage = <800000>;
373 anatop-max-voltage = <1375000>;
374 };
375
376 regulator-3p0@120 {
377 compatible = "fsl,anatop-regulator";
378 regulator-name = "vdd3p0";
379 regulator-min-microvolt = <2800000>;
380 regulator-max-microvolt = <3150000>;
381 regulator-always-on;
382 anatop-reg-offset = <0x120>;
383 anatop-vol-bit-shift = <8>;
384 anatop-vol-bit-width = <5>;
385 anatop-min-bit-val = <0>;
386 anatop-min-voltage = <2625000>;
387 anatop-max-voltage = <3400000>;
388 };
389
390 regulator-2p5@130 {
391 compatible = "fsl,anatop-regulator";
392 regulator-name = "vdd2p5";
393 regulator-min-microvolt = <2000000>;
394 regulator-max-microvolt = <2750000>;
395 regulator-always-on;
396 anatop-reg-offset = <0x130>;
397 anatop-vol-bit-shift = <8>;
398 anatop-vol-bit-width = <5>;
399 anatop-min-bit-val = <0>;
400 anatop-min-voltage = <2000000>;
401 anatop-max-voltage = <2750000>;
402 };
403
404 regulator-vddcore@140 {
405 compatible = "fsl,anatop-regulator";
406 regulator-name = "cpu";
407 regulator-min-microvolt = <725000>;
408 regulator-max-microvolt = <1450000>;
409 regulator-always-on;
410 anatop-reg-offset = <0x140>;
411 anatop-vol-bit-shift = <0>;
412 anatop-vol-bit-width = <5>;
413 anatop-min-bit-val = <1>;
414 anatop-min-voltage = <725000>;
415 anatop-max-voltage = <1450000>;
416 };
417
418 regulator-vddpu@140 {
419 compatible = "fsl,anatop-regulator";
420 regulator-name = "vddpu";
421 regulator-min-microvolt = <725000>;
422 regulator-max-microvolt = <1450000>;
423 regulator-always-on;
424 anatop-reg-offset = <0x140>;
425 anatop-vol-bit-shift = <9>;
426 anatop-vol-bit-width = <5>;
427 anatop-min-bit-val = <1>;
428 anatop-min-voltage = <725000>;
429 anatop-max-voltage = <1450000>;
430 };
431
432 regulator-vddsoc@140 {
433 compatible = "fsl,anatop-regulator";
434 regulator-name = "vddsoc";
435 regulator-min-microvolt = <725000>;
436 regulator-max-microvolt = <1450000>;
437 regulator-always-on;
438 anatop-reg-offset = <0x140>;
439 anatop-vol-bit-shift = <18>;
440 anatop-vol-bit-width = <5>;
441 anatop-min-bit-val = <1>;
442 anatop-min-voltage = <725000>;
443 anatop-max-voltage = <1450000>;
444 };
349 }; 445 };
350 446
351 usbphy@020c9000 { /* USBPHY1 */ 447 usbphy@020c9000 { /* USBPHY1 */
@@ -386,7 +482,62 @@
386 }; 482 };
387 483
388 iomuxc@020e0000 { 484 iomuxc@020e0000 {
485 compatible = "fsl,imx6q-iomuxc";
389 reg = <0x020e0000 0x4000>; 486 reg = <0x020e0000 0x4000>;
487
488 /* shared pinctrl settings */
489 audmux {
490 pinctrl_audmux_1: audmux-1 {
491 fsl,pins = <18 0x80000000 /* MX6Q_PAD_SD2_DAT0__AUDMUX_AUD4_RXD */
492 1586 0x80000000 /* MX6Q_PAD_SD2_DAT3__AUDMUX_AUD4_TXC */
493 11 0x80000000 /* MX6Q_PAD_SD2_DAT2__AUDMUX_AUD4_TXD */
494 3 0x80000000>; /* MX6Q_PAD_SD2_DAT1__AUDMUX_AUD4_TXFS */
495 };
496 };
497
498 i2c1 {
499 pinctrl_i2c1_1: i2c1grp-1 {
500 fsl,pins = <137 0x4001b8b1 /* MX6Q_PAD_EIM_D21__I2C1_SCL */
501 196 0x4001b8b1>; /* MX6Q_PAD_EIM_D28__I2C1_SDA */
502 };
503 };
504
505 serial2 {
506 pinctrl_serial2_1: serial2grp-1 {
507 fsl,pins = <183 0x1b0b1 /* MX6Q_PAD_EIM_D26__UART2_TXD */
508 191 0x1b0b1>; /* MX6Q_PAD_EIM_D27__UART2_RXD */
509 };
510 };
511
512 usdhc3 {
513 pinctrl_usdhc3_1: usdhc3grp-1 {
514 fsl,pins = <1273 0x17059 /* MX6Q_PAD_SD3_CMD__USDHC3_CMD */
515 1281 0x10059 /* MX6Q_PAD_SD3_CLK__USDHC3_CLK */
516 1289 0x17059 /* MX6Q_PAD_SD3_DAT0__USDHC3_DAT0 */
517 1297 0x17059 /* MX6Q_PAD_SD3_DAT1__USDHC3_DAT1 */
518 1305 0x17059 /* MX6Q_PAD_SD3_DAT2__USDHC3_DAT2 */
519 1312 0x17059 /* MX6Q_PAD_SD3_DAT3__USDHC3_DAT3 */
520 1265 0x17059 /* MX6Q_PAD_SD3_DAT4__USDHC3_DAT4 */
521 1257 0x17059 /* MX6Q_PAD_SD3_DAT5__USDHC3_DAT5 */
522 1249 0x17059 /* MX6Q_PAD_SD3_DAT6__USDHC3_DAT6 */
523 1241 0x17059>; /* MX6Q_PAD_SD3_DAT7__USDHC3_DAT7 */
524 };
525 };
526
527 usdhc4 {
528 pinctrl_usdhc4_1: usdhc4grp-1 {
529 fsl,pins = <1386 0x17059 /* MX6Q_PAD_SD4_CMD__USDHC4_CMD */
530 1392 0x10059 /* MX6Q_PAD_SD4_CLK__USDHC4_CLK */
531 1462 0x17059 /* MX6Q_PAD_SD4_DAT0__USDHC4_DAT0 */
532 1470 0x17059 /* MX6Q_PAD_SD4_DAT1__USDHC4_DAT1 */
533 1478 0x17059 /* MX6Q_PAD_SD4_DAT2__USDHC4_DAT2 */
534 1486 0x17059 /* MX6Q_PAD_SD4_DAT3__USDHC4_DAT3 */
535 1493 0x17059 /* MX6Q_PAD_SD4_DAT4__USDHC4_DAT4 */
536 1501 0x17059 /* MX6Q_PAD_SD4_DAT5__USDHC4_DAT5 */
537 1509 0x17059 /* MX6Q_PAD_SD4_DAT6__USDHC4_DAT6 */
538 1517 0x17059>; /* MX6Q_PAD_SD4_DAT7__USDHC4_DAT7 */
539 };
540 };
390 }; 541 };
391 542
392 dcic@020e4000 { /* DCIC1 */ 543 dcic@020e4000 { /* DCIC1 */
@@ -422,7 +573,7 @@
422 reg = <0x0217c000 0x4000>; 573 reg = <0x0217c000 0x4000>;
423 }; 574 };
424 575
425 enet@02188000 { 576 ethernet@02188000 {
426 compatible = "fsl,imx6q-fec"; 577 compatible = "fsl,imx6q-fec";
427 reg = <0x02188000 0x4000>; 578 reg = <0x02188000 0x4000>;
428 interrupts = <0 118 0x04 0 119 0x04>; 579 interrupts = <0 118 0x04 0 119 0x04>;
@@ -527,7 +678,9 @@
527 }; 678 };
528 679
529 audmux@021d8000 { 680 audmux@021d8000 {
681 compatible = "fsl,imx6q-audmux", "fsl,imx31-audmux";
530 reg = <0x021d8000 0x4000>; 682 reg = <0x021d8000 0x4000>;
683 status = "disabled";
531 }; 684 };
532 685
533 mipi@021dc000 { /* MIPI-CSI */ 686 mipi@021dc000 { /* MIPI-CSI */
@@ -543,28 +696,28 @@
543 interrupts = <0 18 0x04>; 696 interrupts = <0 18 0x04>;
544 }; 697 };
545 698
546 uart2: uart@021e8000 { 699 uart2: serial@021e8000 {
547 compatible = "fsl,imx6q-uart", "fsl,imx21-uart"; 700 compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
548 reg = <0x021e8000 0x4000>; 701 reg = <0x021e8000 0x4000>;
549 interrupts = <0 27 0x04>; 702 interrupts = <0 27 0x04>;
550 status = "disabled"; 703 status = "disabled";
551 }; 704 };
552 705
553 uart3: uart@021ec000 { 706 uart3: serial@021ec000 {
554 compatible = "fsl,imx6q-uart", "fsl,imx21-uart"; 707 compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
555 reg = <0x021ec000 0x4000>; 708 reg = <0x021ec000 0x4000>;
556 interrupts = <0 28 0x04>; 709 interrupts = <0 28 0x04>;
557 status = "disabled"; 710 status = "disabled";
558 }; 711 };
559 712
560 uart4: uart@021f0000 { 713 uart4: serial@021f0000 {
561 compatible = "fsl,imx6q-uart", "fsl,imx21-uart"; 714 compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
562 reg = <0x021f0000 0x4000>; 715 reg = <0x021f0000 0x4000>;
563 interrupts = <0 29 0x04>; 716 interrupts = <0 29 0x04>;
564 status = "disabled"; 717 status = "disabled";
565 }; 718 };
566 719
567 uart5: uart@021f4000 { 720 uart5: serial@021f4000 {
568 compatible = "fsl,imx6q-uart", "fsl,imx21-uart"; 721 compatible = "fsl,imx6q-uart", "fsl,imx21-uart";
569 reg = <0x021f4000 0x4000>; 722 reg = <0x021f4000 0x4000>;
570 interrupts = <0 30 0x04>; 723 interrupts = <0 30 0x04>;
diff --git a/arch/arm/boot/dts/omap3-beagle.dts b/arch/arm/boot/dts/omap3-beagle.dts
index 8c756be4d7ad..5b4506c0a8c4 100644
--- a/arch/arm/boot/dts/omap3-beagle.dts
+++ b/arch/arm/boot/dts/omap3-beagle.dts
@@ -57,7 +57,7 @@
57&mmc1 { 57&mmc1 {
58 vmmc-supply = <&vmmc1>; 58 vmmc-supply = <&vmmc1>;
59 vmmc_aux-supply = <&vsim>; 59 vmmc_aux-supply = <&vsim>;
60 ti,bus-width = <8>; 60 bus-width = <8>;
61}; 61};
62 62
63&mmc2 { 63&mmc2 {
diff --git a/arch/arm/boot/dts/omap4-panda.dts b/arch/arm/boot/dts/omap4-panda.dts
index e671361bc791..1efe0c587985 100644
--- a/arch/arm/boot/dts/omap4-panda.dts
+++ b/arch/arm/boot/dts/omap4-panda.dts
@@ -70,7 +70,7 @@
70 70
71&mmc1 { 71&mmc1 {
72 vmmc-supply = <&vmmc>; 72 vmmc-supply = <&vmmc>;
73 ti,bus-width = <8>; 73 bus-width = <8>;
74}; 74};
75 75
76&mmc2 { 76&mmc2 {
@@ -87,5 +87,5 @@
87 87
88&mmc5 { 88&mmc5 {
89 ti,non-removable; 89 ti,non-removable;
90 ti,bus-width = <4>; 90 bus-width = <4>;
91}; 91};
diff --git a/arch/arm/boot/dts/omap4-sdp.dts b/arch/arm/boot/dts/omap4-sdp.dts
index e5eeb6f9c6e6..d08c4d137280 100644
--- a/arch/arm/boot/dts/omap4-sdp.dts
+++ b/arch/arm/boot/dts/omap4-sdp.dts
@@ -137,12 +137,12 @@
137 137
138&mmc1 { 138&mmc1 {
139 vmmc-supply = <&vmmc>; 139 vmmc-supply = <&vmmc>;
140 ti,bus-width = <8>; 140 bus-width = <8>;
141}; 141};
142 142
143&mmc2 { 143&mmc2 {
144 vmmc-supply = <&vaux1>; 144 vmmc-supply = <&vaux1>;
145 ti,bus-width = <8>; 145 bus-width = <8>;
146 ti,non-removable; 146 ti,non-removable;
147}; 147};
148 148
@@ -155,6 +155,6 @@
155}; 155};
156 156
157&mmc5 { 157&mmc5 {
158 ti,bus-width = <4>; 158 bus-width = <4>;
159 ti,non-removable; 159 ti,non-removable;
160}; 160};
diff --git a/arch/arm/boot/dts/spear1310-evb.dts b/arch/arm/boot/dts/spear1310-evb.dts
new file mode 100644
index 000000000000..8314e4171884
--- /dev/null
+++ b/arch/arm/boot/dts/spear1310-evb.dts
@@ -0,0 +1,292 @@
1/*
 2 * DTS file for SPEAr1310 Evaluation Board
3 *
4 * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
5 *
6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License
8 * Version 2 or later at the following locations:
9 *
10 * http://www.opensource.org/licenses/gpl-license.html
11 * http://www.gnu.org/copyleft/gpl.html
12 */
13
14/dts-v1/;
15/include/ "spear1310.dtsi"
16
17/ {
18 model = "ST SPEAr1310 Evaluation Board";
19 compatible = "st,spear1310-evb", "st,spear1310";
20 #address-cells = <1>;
21 #size-cells = <1>;
22
23 memory {
24 reg = <0 0x40000000>;
25 };
26
27 ahb {
28 pinmux@e0700000 {
29 pinctrl-names = "default";
30 pinctrl-0 = <&state_default>;
31
32 state_default: pinmux {
33 i2c0-pmx {
34 st,pins = "i2c0_grp";
35 st,function = "i2c0";
36 };
37 i2s1 {
38 st,pins = "i2s1_grp";
39 st,function = "i2s1";
40 };
41 gpio {
42 st,pins = "arm_gpio_grp";
43 st,function = "arm_gpio";
44 };
45 eth {
46 st,pins = "gmii_grp";
47 st,function = "gmii";
48 };
49 ssp0 {
50 st,pins = "ssp0_grp";
51 st,function = "ssp0";
52 };
53 kbd {
54 st,pins = "keyboard_6x6_grp";
55 st,function = "keyboard";
56 };
57 sdhci {
58 st,pins = "sdhci_grp";
59 st,function = "sdhci";
60 };
61 smi-pmx {
62 st,pins = "smi_2_chips_grp";
63 st,function = "smi";
64 };
65 uart0 {
66 st,pins = "uart0_grp";
67 st,function = "uart0";
68 };
69 rs485 {
70 st,pins = "rs485_0_1_tdm_0_1_grp";
71 st,function = "rs485_0_1_tdm_0_1";
72 };
73 i2c1_2 {
74 st,pins = "i2c_1_2_grp";
75 st,function = "i2c_1_2";
76 };
77 pci {
78 st,pins = "pcie0_grp","pcie1_grp",
79 "pcie2_grp";
80 st,function = "pci";
81 };
82 smii {
83 st,pins = "smii_0_1_2_grp";
84 st,function = "smii_0_1_2";
85 };
86 nand {
87 st,pins = "nand_8bit_grp",
88 "nand_16bit_grp";
89 st,function = "nand";
90 };
91 };
92 };
93
94 ahci@b1000000 {
95 status = "okay";
96 };
97
98 cf@b2800000 {
99 status = "okay";
100 };
101
102 dma@ea800000 {
103 status = "okay";
104 };
105
106 dma@eb000000 {
107 status = "okay";
108 };
109
110 fsmc: flash@b0000000 {
111 status = "okay";
112 };
113
114 gmac0: eth@e2000000 {
115 status = "okay";
116 };
117
118 sdhci@b3000000 {
119 status = "okay";
120 };
121
122 smi: flash@ea000000 {
123 status = "okay";
124 clock-rate=<50000000>;
125
126 flash@e6000000 {
127 #address-cells = <1>;
128 #size-cells = <1>;
129 reg = <0xe6000000 0x800000>;
130 st,smi-fast-mode;
131
132 partition@0 {
133 label = "xloader";
134 reg = <0x0 0x10000>;
135 };
136 partition@10000 {
137 label = "u-boot";
138 reg = <0x10000 0x40000>;
139 };
140 partition@50000 {
141 label = "linux";
142 reg = <0x50000 0x2c0000>;
143 };
144 partition@310000 {
145 label = "rootfs";
146 reg = <0x310000 0x4f0000>;
147 };
148 };
149 };
150
151 spi0: spi@e0100000 {
152 status = "okay";
153 };
154
155 ehci@e4800000 {
156 status = "okay";
157 };
158
159 ehci@e5800000 {
160 status = "okay";
161 };
162
163 ohci@e4000000 {
164 status = "okay";
165 };
166
167 ohci@e5000000 {
168 status = "okay";
169 };
170
171 apb {
172 adc@e0080000 {
173 status = "okay";
174 };
175
176 gpio0: gpio@e0600000 {
177 status = "okay";
178 };
179
180 gpio1: gpio@e0680000 {
181 status = "okay";
182 };
183
184 i2c0: i2c@e0280000 {
185 status = "okay";
186 };
187
188 i2c1: i2c@5cd00000 {
189 status = "okay";
190 };
191
192 kbd@e0300000 {
193 linux,keymap = < 0x00000001
194 0x00010002
195 0x00020003
196 0x00030004
197 0x00040005
198 0x00050006
199 0x00060007
200 0x00070008
201 0x00080009
202 0x0100000a
203 0x0101000c
204 0x0102000d
205 0x0103000e
206 0x0104000f
207 0x01050010
208 0x01060011
209 0x01070012
210 0x01080013
211 0x02000014
212 0x02010015
213 0x02020016
214 0x02030017
215 0x02040018
216 0x02050019
217 0x0206001a
218 0x0207001b
219 0x0208001c
220 0x0300001d
221 0x0301001e
222 0x0302001f
223 0x03030020
224 0x03040021
225 0x03050022
226 0x03060023
227 0x03070024
228 0x03080025
229 0x04000026
230 0x04010027
231 0x04020028
232 0x04030029
233 0x0404002a
234 0x0405002b
235 0x0406002c
236 0x0407002d
237 0x0408002e
238 0x0500002f
239 0x05010030
240 0x05020031
241 0x05030032
242 0x05040033
243 0x05050034
244 0x05060035
245 0x05070036
246 0x05080037
247 0x06000038
248 0x06010039
249 0x0602003a
250 0x0603003b
251 0x0604003c
252 0x0605003d
253 0x0606003e
254 0x0607003f
255 0x06080040
256 0x07000041
257 0x07010042
258 0x07020043
259 0x07030044
260 0x07040045
261 0x07050046
262 0x07060047
263 0x07070048
264 0x07080049
265 0x0800004a
266 0x0801004b
267 0x0802004c
268 0x0803004d
269 0x0804004e
270 0x0805004f
271 0x08060050
272 0x08070051
273 0x08080052 >;
274 autorepeat;
275 st,mode = <0>;
276 status = "okay";
277 };
278
279 rtc@e0580000 {
280 status = "okay";
281 };
282
283 serial@e0000000 {
284 status = "okay";
285 };
286
287 wdt@ec800620 {
288 status = "okay";
289 };
290 };
291 };
292};
diff --git a/arch/arm/boot/dts/spear1310.dtsi b/arch/arm/boot/dts/spear1310.dtsi
new file mode 100644
index 000000000000..9e61da404d57
--- /dev/null
+++ b/arch/arm/boot/dts/spear1310.dtsi
@@ -0,0 +1,184 @@
1/*
2 * DTS file for all SPEAr1310 SoCs
3 *
4 * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
5 *
6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License
8 * Version 2 or later at the following locations:
9 *
10 * http://www.opensource.org/licenses/gpl-license.html
11 * http://www.gnu.org/copyleft/gpl.html
12 */
13
14/include/ "spear13xx.dtsi"
15
16/ {
17 compatible = "st,spear1310";
18
19 ahb {
20 ahci@b1000000 {
21 compatible = "snps,spear-ahci";
22 reg = <0xb1000000 0x10000>;
23 interrupts = <0 68 0x4>;
24 status = "disabled";
25 };
26
27 ahci@b1800000 {
28 compatible = "snps,spear-ahci";
29 reg = <0xb1800000 0x10000>;
30 interrupts = <0 69 0x4>;
31 status = "disabled";
32 };
33
34 ahci@b4000000 {
35 compatible = "snps,spear-ahci";
36 reg = <0xb4000000 0x10000>;
37 interrupts = <0 70 0x4>;
38 status = "disabled";
39 };
40
41 gmac1: eth@5c400000 {
42 compatible = "st,spear600-gmac";
43 reg = <0x5c400000 0x8000>;
44 interrupts = <0 95 0x4>;
45 interrupt-names = "macirq";
46 status = "disabled";
47 };
48
49 gmac2: eth@5c500000 {
50 compatible = "st,spear600-gmac";
51 reg = <0x5c500000 0x8000>;
52 interrupts = <0 96 0x4>;
53 interrupt-names = "macirq";
54 status = "disabled";
55 };
56
57 gmac3: eth@5c600000 {
58 compatible = "st,spear600-gmac";
59 reg = <0x5c600000 0x8000>;
60 interrupts = <0 97 0x4>;
61 interrupt-names = "macirq";
62 status = "disabled";
63 };
64
65 gmac4: eth@5c700000 {
66 compatible = "st,spear600-gmac";
67 reg = <0x5c700000 0x8000>;
68 interrupts = <0 98 0x4>;
69 interrupt-names = "macirq";
70 status = "disabled";
71 };
72
73 spi1: spi@5d400000 {
74 compatible = "arm,pl022", "arm,primecell";
75 reg = <0x5d400000 0x1000>;
76 interrupts = <0 99 0x4>;
77 status = "disabled";
78 };
79
80 apb {
81 i2c1: i2c@5cd00000 {
82 #address-cells = <1>;
83 #size-cells = <0>;
84 compatible = "snps,designware-i2c";
85 reg = <0x5cd00000 0x1000>;
86 interrupts = <0 87 0x4>;
87 status = "disabled";
88 };
89
90 i2c2: i2c@5ce00000 {
91 #address-cells = <1>;
92 #size-cells = <0>;
93 compatible = "snps,designware-i2c";
94 reg = <0x5ce00000 0x1000>;
95 interrupts = <0 88 0x4>;
96 status = "disabled";
97 };
98
99 i2c3: i2c@5cf00000 {
100 #address-cells = <1>;
101 #size-cells = <0>;
102 compatible = "snps,designware-i2c";
103 reg = <0x5cf00000 0x1000>;
104 interrupts = <0 89 0x4>;
105 status = "disabled";
106 };
107
108 i2c4: i2c@5d000000 {
109 #address-cells = <1>;
110 #size-cells = <0>;
111 compatible = "snps,designware-i2c";
112 reg = <0x5d000000 0x1000>;
113 interrupts = <0 90 0x4>;
114 status = "disabled";
115 };
116
117 i2c5: i2c@5d100000 {
118 #address-cells = <1>;
119 #size-cells = <0>;
120 compatible = "snps,designware-i2c";
121 reg = <0x5d100000 0x1000>;
122 interrupts = <0 91 0x4>;
123 status = "disabled";
124 };
125
126 i2c6: i2c@5d200000 {
127 #address-cells = <1>;
128 #size-cells = <0>;
129 compatible = "snps,designware-i2c";
130 reg = <0x5d200000 0x1000>;
131 interrupts = <0 92 0x4>;
132 status = "disabled";
133 };
134
135 i2c7: i2c@5d300000 {
136 #address-cells = <1>;
137 #size-cells = <0>;
138 compatible = "snps,designware-i2c";
139 reg = <0x5d300000 0x1000>;
140 interrupts = <0 93 0x4>;
141 status = "disabled";
142 };
143
144 serial@5c800000 {
145 compatible = "arm,pl011", "arm,primecell";
146 reg = <0x5c800000 0x1000>;
147 interrupts = <0 82 0x4>;
148 status = "disabled";
149 };
150
151 serial@5c900000 {
152 compatible = "arm,pl011", "arm,primecell";
153 reg = <0x5c900000 0x1000>;
154 interrupts = <0 83 0x4>;
155 status = "disabled";
156 };
157
158 serial@5ca00000 {
159 compatible = "arm,pl011", "arm,primecell";
160 reg = <0x5ca00000 0x1000>;
161 interrupts = <0 84 0x4>;
162 status = "disabled";
163 };
164
165 serial@5cb00000 {
166 compatible = "arm,pl011", "arm,primecell";
167 reg = <0x5cb00000 0x1000>;
168 interrupts = <0 85 0x4>;
169 status = "disabled";
170 };
171
172 serial@5cc00000 {
173 compatible = "arm,pl011", "arm,primecell";
174 reg = <0x5cc00000 0x1000>;
175 interrupts = <0 86 0x4>;
176 status = "disabled";
177 };
178
179 thermal@e07008c4 {
180 st,thermal-flags = <0x7000>;
181 };
182 };
183 };
184};
diff --git a/arch/arm/boot/dts/spear1340-evb.dts b/arch/arm/boot/dts/spear1340-evb.dts
new file mode 100644
index 000000000000..0d8472e5ab9f
--- /dev/null
+++ b/arch/arm/boot/dts/spear1340-evb.dts
@@ -0,0 +1,308 @@
1/*
 2 * DTS file for SPEAr1340 Evaluation Board
3 *
4 * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
5 *
6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License
8 * Version 2 or later at the following locations:
9 *
10 * http://www.opensource.org/licenses/gpl-license.html
11 * http://www.gnu.org/copyleft/gpl.html
12 */
13
14/dts-v1/;
15/include/ "spear1340.dtsi"
16
17/ {
18 model = "ST SPEAr1340 Evaluation Board";
19 compatible = "st,spear1340-evb", "st,spear1340";
20 #address-cells = <1>;
21 #size-cells = <1>;
22
23 memory {
24 reg = <0 0x40000000>;
25 };
26
27 ahb {
28 pinmux@e0700000 {
29 pinctrl-names = "default";
30 pinctrl-0 = <&state_default>;
31
32 state_default: pinmux {
33 pads_as_gpio {
34 st,pins = "pads_as_gpio_grp";
35 st,function = "pads_as_gpio";
36 };
37 fsmc {
38 st,pins = "fsmc_8bit_grp";
39 st,function = "fsmc";
40 };
41 kbd {
42 st,pins = "keyboard_row_col_grp",
43 "keyboard_col5_grp";
44 st,function = "keyboard";
45 };
46 uart0 {
47 st,pins = "uart0_grp", "uart0_enh_grp";
48 st,function = "uart0";
49 };
50 i2c0-pmx {
51 st,pins = "i2c0_grp";
52 st,function = "i2c0";
53 };
54 i2c1-pmx {
55 st,pins = "i2c1_grp";
56 st,function = "i2c1";
57 };
58 spdif-in {
59 st,pins = "spdif_in_grp";
60 st,function = "spdif_in";
61 };
62 spdif-out {
63 st,pins = "spdif_out_grp";
64 st,function = "spdif_out";
65 };
66 ssp0 {
67 st,pins = "ssp0_grp", "ssp0_cs1_grp",
68 "ssp0_cs3_grp";
69 st,function = "ssp0";
70 };
71 pwm {
72 st,pins = "pwm2_grp", "pwm3_grp";
73 st,function = "pwm";
74 };
75 smi-pmx {
76 st,pins = "smi_grp";
77 st,function = "smi";
78 };
79 i2s {
80 st,pins = "i2s_in_grp", "i2s_out_grp";
81 st,function = "i2s";
82 };
83 gmac {
84 st,pins = "gmii_grp", "rgmii_grp";
85 st,function = "gmac";
86 };
87 cam3 {
88 st,pins = "cam3_grp";
89 st,function = "cam3";
90 };
91 cec0 {
92 st,pins = "cec0_grp";
93 st,function = "cec0";
94 };
95 cec1 {
96 st,pins = "cec1_grp";
97 st,function = "cec1";
98 };
99 sdhci {
100 st,pins = "sdhci_grp";
101 st,function = "sdhci";
102 };
103 clcd {
104 st,pins = "clcd_grp";
105 st,function = "clcd";
106 };
107 sata {
108 st,pins = "sata_grp";
109 st,function = "sata";
110 };
111 };
112 };
113
114 dma@ea800000 {
115 status = "okay";
116 };
117
118 dma@eb000000 {
119 status = "okay";
120 };
121
122 fsmc: flash@b0000000 {
123 status = "okay";
124 };
125
126 gmac0: eth@e2000000 {
127 status = "okay";
128 };
129
130 sdhci@b3000000 {
131 status = "okay";
132 };
133
134 smi: flash@ea000000 {
135 status = "okay";
136 clock-rate=<50000000>;
137
138 flash@e6000000 {
139 #address-cells = <1>;
140 #size-cells = <1>;
141 reg = <0xe6000000 0x800000>;
142 st,smi-fast-mode;
143
144 partition@0 {
145 label = "xloader";
146 reg = <0x0 0x10000>;
147 };
148 partition@10000 {
149 label = "u-boot";
150 reg = <0x10000 0x40000>;
151 };
152 partition@50000 {
153 label = "linux";
154 reg = <0x50000 0x2c0000>;
155 };
156 partition@310000 {
157 label = "rootfs";
158 reg = <0x310000 0x4f0000>;
159 };
160 };
161 };
162
163 spi0: spi@e0100000 {
164 status = "okay";
165 };
166
167 ehci@e4800000 {
168 status = "okay";
169 };
170
171 ehci@e5800000 {
172 status = "okay";
173 };
174
175 ohci@e4000000 {
176 status = "okay";
177 };
178
179 ohci@e5000000 {
180 status = "okay";
181 };
182
183 apb {
184 adc@e0080000 {
185 status = "okay";
186 };
187
188 gpio0: gpio@e0600000 {
189 status = "okay";
190 };
191
192 gpio1: gpio@e0680000 {
193 status = "okay";
194 };
195
196 i2c0: i2c@e0280000 {
197 status = "okay";
198 };
199
200 i2c1: i2c@b4000000 {
201 status = "okay";
202 };
203
204 kbd@e0300000 {
205 linux,keymap = < 0x00000001
206 0x00010002
207 0x00020003
208 0x00030004
209 0x00040005
210 0x00050006
211 0x00060007
212 0x00070008
213 0x00080009
214 0x0100000a
215 0x0101000c
216 0x0102000d
217 0x0103000e
218 0x0104000f
219 0x01050010
220 0x01060011
221 0x01070012
222 0x01080013
223 0x02000014
224 0x02010015
225 0x02020016
226 0x02030017
227 0x02040018
228 0x02050019
229 0x0206001a
230 0x0207001b
231 0x0208001c
232 0x0300001d
233 0x0301001e
234 0x0302001f
235 0x03030020
236 0x03040021
237 0x03050022
238 0x03060023
239 0x03070024
240 0x03080025
241 0x04000026
242 0x04010027
243 0x04020028
244 0x04030029
245 0x0404002a
246 0x0405002b
247 0x0406002c
248 0x0407002d
249 0x0408002e
250 0x0500002f
251 0x05010030
252 0x05020031
253 0x05030032
254 0x05040033
255 0x05050034
256 0x05060035
257 0x05070036
258 0x05080037
259 0x06000038
260 0x06010039
261 0x0602003a
262 0x0603003b
263 0x0604003c
264 0x0605003d
265 0x0606003e
266 0x0607003f
267 0x06080040
268 0x07000041
269 0x07010042
270 0x07020043
271 0x07030044
272 0x07040045
273 0x07050046
274 0x07060047
275 0x07070048
276 0x07080049
277 0x0800004a
278 0x0801004b
279 0x0802004c
280 0x0803004d
281 0x0804004e
282 0x0805004f
283 0x08060050
284 0x08070051
285 0x08080052 >;
286 autorepeat;
287 st,mode = <0>;
288 status = "okay";
289 };
290
291 rtc@e0580000 {
292 status = "okay";
293 };
294
295 serial@e0000000 {
296 status = "okay";
297 };
298
299 serial@b4100000 {
300 status = "okay";
301 };
302
303 wdt@ec800620 {
304 status = "okay";
305 };
306 };
307 };
308};
diff --git a/arch/arm/boot/dts/spear1340.dtsi b/arch/arm/boot/dts/spear1340.dtsi
new file mode 100644
index 000000000000..a26fc47a55e8
--- /dev/null
+++ b/arch/arm/boot/dts/spear1340.dtsi
@@ -0,0 +1,56 @@
1/*
2 * DTS file for all SPEAr1340 SoCs
3 *
4 * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
5 *
6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License
8 * Version 2 or later at the following locations:
9 *
10 * http://www.opensource.org/licenses/gpl-license.html
11 * http://www.gnu.org/copyleft/gpl.html
12 */
13
14/include/ "spear13xx.dtsi"
15
16/ {
17 compatible = "st,spear1340";
18
19 ahb {
20 ahci@b1000000 {
21 compatible = "snps,spear-ahci";
22 reg = <0xb1000000 0x10000>;
23 interrupts = <0 72 0x4>;
24 status = "disabled";
25 };
26
27 spi1: spi@5d400000 {
28 compatible = "arm,pl022", "arm,primecell";
29 reg = <0x5d400000 0x1000>;
30 interrupts = <0 99 0x4>;
31 status = "disabled";
32 };
33
34 apb {
35 i2c1: i2c@b4000000 {
36 #address-cells = <1>;
37 #size-cells = <0>;
38 compatible = "snps,designware-i2c";
39 reg = <0xb4000000 0x1000>;
40 interrupts = <0 104 0x4>;
41 status = "disabled";
42 };
43
44 serial@b4100000 {
45 compatible = "arm,pl011", "arm,primecell";
46 reg = <0xb4100000 0x1000>;
47 interrupts = <0 105 0x4>;
48 status = "disabled";
49 };
50
51 thermal@e07008c4 {
52 st,thermal-flags = <0x2a00>;
53 };
54 };
55 };
56};
diff --git a/arch/arm/boot/dts/spear13xx.dtsi b/arch/arm/boot/dts/spear13xx.dtsi
new file mode 100644
index 000000000000..1f8e1e1481df
--- /dev/null
+++ b/arch/arm/boot/dts/spear13xx.dtsi
@@ -0,0 +1,262 @@
1/*
2 * DTS file for all SPEAr13xx SoCs
3 *
4 * Copyright 2012 Viresh Kumar <viresh.kumar@st.com>
5 *
6 * The code contained herein is licensed under the GNU General Public
7 * License. You may obtain a copy of the GNU General Public License
8 * Version 2 or later at the following locations:
9 *
10 * http://www.opensource.org/licenses/gpl-license.html
11 * http://www.gnu.org/copyleft/gpl.html
12 */
13
14/include/ "skeleton.dtsi"
15
16/ {
17 interrupt-parent = <&gic>;
18
19 cpus {
20 #address-cells = <1>;
21 #size-cells = <0>;
22
23 cpu@0 {
24 compatible = "arm,cortex-a9";
25 reg = <0>;
26 next-level-cache = <&L2>;
27 };
28
29 cpu@1 {
30 compatible = "arm,cortex-a9";
31 reg = <1>;
32 next-level-cache = <&L2>;
33 };
34 };
35
36 gic: interrupt-controller@ec801000 {
37 compatible = "arm,cortex-a9-gic";
38 interrupt-controller;
39 #interrupt-cells = <3>;
40 reg = < 0xec801000 0x1000 >,
41 < 0xec800100 0x0100 >;
42 };
43
44 pmu {
45 compatible = "arm,cortex-a9-pmu";
46 interrupts = <0 8 0x04
47 0 9 0x04>;
48 };
49
50 L2: l2-cache {
51 compatible = "arm,pl310-cache";
52 reg = <0xed000000 0x1000>;
53 cache-unified;
54 cache-level = <2>;
55 };
56
57 memory {
58 name = "memory";
59 device_type = "memory";
60 reg = <0 0x40000000>;
61 };
62
63 chosen {
64 bootargs = "console=ttyAMA0,115200";
65 };
66
67 ahb {
68 #address-cells = <1>;
69 #size-cells = <1>;
70 compatible = "simple-bus";
71 ranges = <0x50000000 0x50000000 0x10000000
72 0xb0000000 0xb0000000 0x10000000
73 0xe0000000 0xe0000000 0x10000000>;
74
75 sdhci@b3000000 {
76 compatible = "st,sdhci-spear";
77 reg = <0xb3000000 0x100>;
78 interrupts = <0 28 0x4>;
79 status = "disabled";
80 };
81
82 cf@b2800000 {
83 compatible = "arasan,cf-spear1340";
84 reg = <0xb2800000 0x100>;
85 interrupts = <0 29 0x4>;
86 status = "disabled";
87 };
88
89 dma@ea800000 {
90 compatible = "snps,dma-spear1340";
91 reg = <0xea800000 0x1000>;
92 interrupts = <0 19 0x4>;
93 status = "disabled";
94 };
95
96 dma@eb000000 {
97 compatible = "snps,dma-spear1340";
98 reg = <0xeb000000 0x1000>;
99 interrupts = <0 59 0x4>;
100 status = "disabled";
101 };
102
103 fsmc: flash@b0000000 {
104 compatible = "st,spear600-fsmc-nand";
105 #address-cells = <1>;
106 #size-cells = <1>;
107 reg = <0xb0000000 0x1000 /* FSMC Register */
108 0xb0800000 0x0010>; /* NAND Base */
109 reg-names = "fsmc_regs", "nand_data";
110 interrupts = <0 20 0x4
111 0 21 0x4
112 0 22 0x4
113 0 23 0x4>;
114 st,ale-off = <0x20000>;
115 st,cle-off = <0x10000>;
116 status = "disabled";
117 };
118
119 gmac0: eth@e2000000 {
120 compatible = "st,spear600-gmac";
121 reg = <0xe2000000 0x8000>;
122 interrupts = <0 23 0x4
123 0 24 0x4>;
124 interrupt-names = "macirq", "eth_wake_irq";
125 status = "disabled";
126 };
127
128 smi: flash@ea000000 {
129 compatible = "st,spear600-smi";
130 #address-cells = <1>;
131 #size-cells = <1>;
132 reg = <0xea000000 0x1000>;
133 interrupts = <0 30 0x4>;
134 status = "disabled";
135 };
136
137 spi0: spi@e0100000 {
138 compatible = "arm,pl022", "arm,primecell";
139 reg = <0xe0100000 0x1000>;
140 interrupts = <0 31 0x4>;
141 status = "disabled";
142 };
143
144 ehci@e4800000 {
145 compatible = "st,spear600-ehci", "usb-ehci";
146 reg = <0xe4800000 0x1000>;
147 interrupts = <0 64 0x4>;
148 status = "disabled";
149 };
150
151 ehci@e5800000 {
152 compatible = "st,spear600-ehci", "usb-ehci";
153 reg = <0xe5800000 0x1000>;
154 interrupts = <0 66 0x4>;
155 status = "disabled";
156 };
157
158 ohci@e4000000 {
159 compatible = "st,spear600-ohci", "usb-ohci";
160 reg = <0xe4000000 0x1000>;
161 interrupts = <0 65 0x4>;
162 status = "disabled";
163 };
164
165 ohci@e5000000 {
166 compatible = "st,spear600-ohci", "usb-ohci";
167 reg = <0xe5000000 0x1000>;
168 interrupts = <0 67 0x4>;
169 status = "disabled";
170 };
171
172 apb {
173 #address-cells = <1>;
174 #size-cells = <1>;
175 compatible = "simple-bus";
176 ranges = <0x50000000 0x50000000 0x10000000
177 0xb0000000 0xb0000000 0x10000000
178 0xe0000000 0xe0000000 0x10000000>;
179
180 gpio0: gpio@e0600000 {
181 compatible = "arm,pl061", "arm,primecell";
182 reg = <0xe0600000 0x1000>;
183 interrupts = <0 24 0x4>;
184 gpio-controller;
185 #gpio-cells = <2>;
186 interrupt-controller;
187 #interrupt-cells = <2>;
188 status = "disabled";
189 };
190
191 gpio1: gpio@e0680000 {
192 compatible = "arm,pl061", "arm,primecell";
193 reg = <0xe0680000 0x1000>;
194 interrupts = <0 25 0x4>;
195 gpio-controller;
196 #gpio-cells = <2>;
197 interrupt-controller;
198 #interrupt-cells = <2>;
199 status = "disabled";
200 };
201
202 kbd@e0300000 {
203 compatible = "st,spear300-kbd";
204 reg = <0xe0300000 0x1000>;
205 status = "disabled";
206 };
207
208 i2c0: i2c@e0280000 {
209 #address-cells = <1>;
210 #size-cells = <0>;
211 compatible = "snps,designware-i2c";
212 reg = <0xe0280000 0x1000>;
213 interrupts = <0 41 0x4>;
214 status = "disabled";
215 };
216
217 rtc@e0580000 {
218 compatible = "st,spear-rtc";
219 reg = <0xe0580000 0x1000>;
220 interrupts = <0 36 0x4>;
221 status = "disabled";
222 };
223
224 serial@e0000000 {
225 compatible = "arm,pl011", "arm,primecell";
226 reg = <0xe0000000 0x1000>;
227 interrupts = <0 36 0x4>;
228 status = "disabled";
229 };
230
231 adc@e0080000 {
232 compatible = "st,spear600-adc";
233 reg = <0xe0080000 0x1000>;
234 interrupts = <0 44 0x4>;
235 status = "disabled";
236 };
237
238 timer@e0380000 {
239 compatible = "st,spear-timer";
240 reg = <0xe0380000 0x400>;
241 interrupts = <0 37 0x4>;
242 };
243
244 timer@ec800600 {
245 compatible = "arm,cortex-a9-twd-timer";
246 reg = <0xec800600 0x20>;
247 interrupts = <1 13 0x301>;
248 };
249
250 wdt@ec800620 {
251 compatible = "arm,cortex-a9-twd-wdt";
252 reg = <0xec800620 0x20>;
253 status = "disabled";
254 };
255
256 thermal@e07008c4 {
257 compatible = "st,thermal-spear1340";
258 reg = <0xe07008c4 0x4>;
259 };
260 };
261 };
262};
diff --git a/arch/arm/boot/dts/spear300-evb.dts b/arch/arm/boot/dts/spear300-evb.dts
index 910e264b87c0..fc82b1a26458 100644
--- a/arch/arm/boot/dts/spear300-evb.dts
+++ b/arch/arm/boot/dts/spear300-evb.dts
@@ -87,6 +87,31 @@
87 87
88 smi: flash@fc000000 { 88 smi: flash@fc000000 {
89 status = "okay"; 89 status = "okay";
90 clock-rate=<50000000>;
91
92 flash@f8000000 {
93 #address-cells = <1>;
94 #size-cells = <1>;
95 reg = <0xf8000000 0x800000>;
96 st,smi-fast-mode;
97
98 partition@0 {
99 label = "xloader";
100 reg = <0x0 0x10000>;
101 };
102 partition@10000 {
103 label = "u-boot";
104 reg = <0x10000 0x40000>;
105 };
106 partition@50000 {
107 label = "linux";
108 reg = <0x50000 0x2c0000>;
109 };
110 partition@310000 {
111 label = "rootfs";
112 reg = <0x310000 0x4f0000>;
113 };
114 };
90 }; 115 };
91 116
92 spi0: spi@d0100000 { 117 spi0: spi@d0100000 {
diff --git a/arch/arm/boot/dts/spear310-evb.dts b/arch/arm/boot/dts/spear310-evb.dts
index 6d95317100ad..dc5e2d445a93 100644
--- a/arch/arm/boot/dts/spear310-evb.dts
+++ b/arch/arm/boot/dts/spear310-evb.dts
@@ -103,11 +103,27 @@
103 clock-rate=<50000000>; 103 clock-rate=<50000000>;
104 104
105 flash@f8000000 { 105 flash@f8000000 {
106 label = "m25p64";
107 reg = <0xf8000000 0x800000>;
108 #address-cells = <1>; 106 #address-cells = <1>;
109 #size-cells = <1>; 107 #size-cells = <1>;
108 reg = <0xf8000000 0x800000>;
110 st,smi-fast-mode; 109 st,smi-fast-mode;
110
111 partition@0 {
112 label = "xloader";
113 reg = <0x0 0x10000>;
114 };
115 partition@10000 {
116 label = "u-boot";
117 reg = <0x10000 0x40000>;
118 };
119 partition@50000 {
120 label = "linux";
121 reg = <0x50000 0x2c0000>;
122 };
123 partition@310000 {
124 label = "rootfs";
125 reg = <0x310000 0x4f0000>;
126 };
111 }; 127 };
112 }; 128 };
113 129
diff --git a/arch/arm/boot/dts/spear320-evb.dts b/arch/arm/boot/dts/spear320-evb.dts
index 0c6463b71a37..6308fa3bec1e 100644
--- a/arch/arm/boot/dts/spear320-evb.dts
+++ b/arch/arm/boot/dts/spear320-evb.dts
@@ -110,6 +110,31 @@
110 110
111 smi: flash@fc000000 { 111 smi: flash@fc000000 {
112 status = "okay"; 112 status = "okay";
113 clock-rate=<50000000>;
114
115 flash@f8000000 {
116 #address-cells = <1>;
117 #size-cells = <1>;
118 reg = <0xf8000000 0x800000>;
119 st,smi-fast-mode;
120
121 partition@0 {
122 label = "xloader";
123 reg = <0x0 0x10000>;
124 };
125 partition@10000 {
126 label = "u-boot";
127 reg = <0x10000 0x40000>;
128 };
129 partition@50000 {
130 label = "linux";
131 reg = <0x50000 0x2c0000>;
132 };
133 partition@310000 {
134 label = "rootfs";
135 reg = <0x310000 0x4f0000>;
136 };
137 };
113 }; 138 };
114 139
115 spi0: spi@d0100000 { 140 spi0: spi@d0100000 {
diff --git a/arch/arm/boot/dts/spear3xx.dtsi b/arch/arm/boot/dts/spear3xx.dtsi
index 0ae7c8e86311..91072553963f 100644
--- a/arch/arm/boot/dts/spear3xx.dtsi
+++ b/arch/arm/boot/dts/spear3xx.dtsi
@@ -139,6 +139,12 @@
139 interrupts = <12>; 139 interrupts = <12>;
140 status = "disabled"; 140 status = "disabled";
141 }; 141 };
142
143 timer@f0000000 {
144 compatible = "st,spear-timer";
145 reg = <0xf0000000 0x400>;
146 interrupts = <2>;
147 };
142 }; 148 };
143 }; 149 };
144}; 150};
diff --git a/arch/arm/boot/dts/spear600-evb.dts b/arch/arm/boot/dts/spear600-evb.dts
index 790a7a8a5ccd..1119c22c9a82 100644
--- a/arch/arm/boot/dts/spear600-evb.dts
+++ b/arch/arm/boot/dts/spear600-evb.dts
@@ -33,6 +33,35 @@
33 status = "okay"; 33 status = "okay";
34 }; 34 };
35 35
36 smi: flash@fc000000 {
37 status = "okay";
38 clock-rate=<50000000>;
39
40 flash@f8000000 {
41 #address-cells = <1>;
42 #size-cells = <1>;
43 reg = <0xf8000000 0x800000>;
44 st,smi-fast-mode;
45
46 partition@0 {
47 label = "xloader";
48 reg = <0x0 0x10000>;
49 };
50 partition@10000 {
51 label = "u-boot";
52 reg = <0x10000 0x40000>;
53 };
54 partition@50000 {
55 label = "linux";
56 reg = <0x50000 0x2c0000>;
57 };
58 partition@310000 {
59 label = "rootfs";
60 reg = <0x310000 0x4f0000>;
61 };
62 };
63 };
64
36 apb { 65 apb {
37 serial@d0000000 { 66 serial@d0000000 {
38 status = "okay"; 67 status = "okay";
diff --git a/arch/arm/boot/dts/spear600.dtsi b/arch/arm/boot/dts/spear600.dtsi
index d777e3a6f178..089f0a42c50e 100644
--- a/arch/arm/boot/dts/spear600.dtsi
+++ b/arch/arm/boot/dts/spear600.dtsi
@@ -177,6 +177,12 @@
177 interrupts = <28>; 177 interrupts = <28>;
178 status = "disabled"; 178 status = "disabled";
179 }; 179 };
180
181 timer@f0000000 {
182 compatible = "st,spear-timer";
183 reg = <0xf0000000 0x400>;
184 interrupts = <16>;
185 };
180 }; 186 };
181 }; 187 };
182}; 188};
diff --git a/arch/arm/boot/dts/tegra-cardhu.dts b/arch/arm/boot/dts/tegra-cardhu.dts
index 0a9f34a2c3aa..36321bceec46 100644
--- a/arch/arm/boot/dts/tegra-cardhu.dts
+++ b/arch/arm/boot/dts/tegra-cardhu.dts
@@ -7,10 +7,10 @@
7 compatible = "nvidia,cardhu", "nvidia,tegra30"; 7 compatible = "nvidia,cardhu", "nvidia,tegra30";
8 8
9 memory { 9 memory {
10 reg = < 0x80000000 0x40000000 >; 10 reg = <0x80000000 0x40000000>;
11 }; 11 };
12 12
13 pinmux@70000000 { 13 pinmux {
14 pinctrl-names = "default"; 14 pinctrl-names = "default";
15 pinctrl-0 = <&state_default>; 15 pinctrl-0 = <&state_default>;
16 16
@@ -51,64 +51,122 @@
51 nvidia,pull = <2>; 51 nvidia,pull = <2>;
52 nvidia,tristate = <0>; 52 nvidia,tristate = <0>;
53 }; 53 };
54 dap2_fs_pa2 {
55 nvidia,pins = "dap2_fs_pa2",
56 "dap2_sclk_pa3",
57 "dap2_din_pa4",
58 "dap2_dout_pa5";
59 nvidia,function = "i2s1";
60 nvidia,pull = <0>;
61 nvidia,tristate = <0>;
62 };
54 }; 63 };
55 }; 64 };
56 65
57 serial@70006000 { 66 serial@70006000 {
58 clock-frequency = < 408000000 >; 67 status = "okay";
59 }; 68 clock-frequency = <408000000>;
60
61 serial@70006040 {
62 status = "disable";
63 };
64
65 serial@70006200 {
66 status = "disable";
67 };
68
69 serial@70006300 {
70 status = "disable";
71 };
72
73 serial@70006400 {
74 status = "disable";
75 }; 69 };
76 70
77 i2c@7000c000 { 71 i2c@7000c000 {
72 status = "okay";
78 clock-frequency = <100000>; 73 clock-frequency = <100000>;
79 }; 74 };
80 75
81 i2c@7000c400 { 76 i2c@7000c400 {
77 status = "okay";
82 clock-frequency = <100000>; 78 clock-frequency = <100000>;
83 }; 79 };
84 80
85 i2c@7000c500 { 81 i2c@7000c500 {
82 status = "okay";
86 clock-frequency = <100000>; 83 clock-frequency = <100000>;
84
85 /* ALS and Proximity sensor */
86 isl29028@44 {
87 compatible = "isil,isl29028";
88 reg = <0x44>;
89 interrupt-parent = <&gpio>;
90 interrupts = <88 0x04>; /*gpio PL0 */
91 };
87 }; 92 };
88 93
89 i2c@7000c700 { 94 i2c@7000c700 {
95 status = "okay";
90 clock-frequency = <100000>; 96 clock-frequency = <100000>;
91 }; 97 };
92 98
93 i2c@7000d000 { 99 i2c@7000d000 {
100 status = "okay";
94 clock-frequency = <100000>; 101 clock-frequency = <100000>;
102
103 wm8903: wm8903@1a {
104 compatible = "wlf,wm8903";
105 reg = <0x1a>;
106 interrupt-parent = <&gpio>;
107 interrupts = <179 0x04>; /* gpio PW3 */
108
109 gpio-controller;
110 #gpio-cells = <2>;
111
112 micdet-cfg = <0>;
113 micdet-delay = <100>;
114 gpio-cfg = <0xffffffff 0xffffffff 0 0xffffffff 0xffffffff>;
115 };
116
117 tps62361 {
118 compatible = "ti,tps62361";
119 reg = <0x60>;
120
121 regulator-name = "tps62361-vout";
122 regulator-min-microvolt = <500000>;
123 regulator-max-microvolt = <1500000>;
124 regulator-boot-on;
125 regulator-always-on;
126 ti,vsel0-state-high;
127 ti,vsel1-state-high;
128 };
129 };
130
131 ahub {
132 i2s@70080400 {
133 status = "okay";
134 };
95 }; 135 };
96 136
97 sdhci@78000000 { 137 sdhci@78000000 {
138 status = "okay";
98 cd-gpios = <&gpio 69 0>; /* gpio PI5 */ 139 cd-gpios = <&gpio 69 0>; /* gpio PI5 */
99 wp-gpios = <&gpio 155 0>; /* gpio PT3 */ 140 wp-gpios = <&gpio 155 0>; /* gpio PT3 */
100 power-gpios = <&gpio 31 0>; /* gpio PD7 */ 141 power-gpios = <&gpio 31 0>; /* gpio PD7 */
142 bus-width = <4>;
101 }; 143 };
102 144
103 sdhci@78000200 { 145 sdhci@78000600 {
104 status = "disable"; 146 status = "okay";
147 support-8bit;
148 bus-width = <8>;
105 }; 149 };
106 150
107 sdhci@78000400 { 151 sound {
108 status = "disable"; 152 compatible = "nvidia,tegra-audio-wm8903-cardhu",
109 }; 153 "nvidia,tegra-audio-wm8903";
154 nvidia,model = "NVIDIA Tegra Cardhu";
110 155
111 sdhci@78000400 { 156 nvidia,audio-routing =
112 support-8bit; 157 "Headphone Jack", "HPOUTR",
158 "Headphone Jack", "HPOUTL",
159 "Int Spk", "ROP",
160 "Int Spk", "RON",
161 "Int Spk", "LOP",
162 "Int Spk", "LON",
163 "Mic Jack", "MICBIAS",
164 "IN1L", "Mic Jack";
165
166 nvidia,i2s-controller = <&tegra_i2s1>;
167 nvidia,audio-codec = <&wm8903>;
168
169 nvidia,spkr-en-gpios = <&wm8903 2 0>;
170 nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */
113 }; 171 };
114}; 172};
diff --git a/arch/arm/boot/dts/tegra-harmony.dts b/arch/arm/boot/dts/tegra-harmony.dts
index 1a0b1f182944..7de701365fce 100644
--- a/arch/arm/boot/dts/tegra-harmony.dts
+++ b/arch/arm/boot/dts/tegra-harmony.dts
@@ -6,11 +6,11 @@
6 model = "NVIDIA Tegra2 Harmony evaluation board"; 6 model = "NVIDIA Tegra2 Harmony evaluation board";
7 compatible = "nvidia,harmony", "nvidia,tegra20"; 7 compatible = "nvidia,harmony", "nvidia,tegra20";
8 8
9 memory@0 { 9 memory {
10 reg = < 0x00000000 0x40000000 >; 10 reg = <0x00000000 0x40000000>;
11 }; 11 };
12 12
13 pinmux@70000000 { 13 pinmux {
14 pinctrl-names = "default"; 14 pinctrl-names = "default";
15 pinctrl-0 = <&state_default>; 15 pinctrl-0 = <&state_default>;
16 16
@@ -167,28 +167,28 @@
167 }; 167 };
168 conf_ata { 168 conf_ata {
169 nvidia,pins = "ata", "atb", "atc", "atd", "ate", 169 nvidia,pins = "ata", "atb", "atc", "atd", "ate",
170 "cdev1", "dap1", "dtb", "gma", "gmb", 170 "cdev1", "cdev2", "dap1", "dtb", "gma",
171 "gmc", "gmd", "gme", "gpu7", "gpv", 171 "gmb", "gmc", "gmd", "gme", "gpu7",
172 "i2cp", "pta", "rm", "slxa", "slxk", 172 "gpv", "i2cp", "pta", "rm", "slxa",
173 "spia", "spib"; 173 "slxk", "spia", "spib", "uac";
174 nvidia,pull = <0>; 174 nvidia,pull = <0>;
175 nvidia,tristate = <0>; 175 nvidia,tristate = <0>;
176 }; 176 };
177 conf_cdev2 {
178 nvidia,pins = "cdev2", "csus", "spid", "spif";
179 nvidia,pull = <1>;
180 nvidia,tristate = <1>;
181 };
182 conf_ck32 { 177 conf_ck32 {
183 nvidia,pins = "ck32", "ddrc", "pmca", "pmcb", 178 nvidia,pins = "ck32", "ddrc", "pmca", "pmcb",
184 "pmcc", "pmcd", "pmce", "xm2c", "xm2d"; 179 "pmcc", "pmcd", "pmce", "xm2c", "xm2d";
185 nvidia,pull = <0>; 180 nvidia,pull = <0>;
186 }; 181 };
182 conf_csus {
183 nvidia,pins = "csus", "spid", "spif";
184 nvidia,pull = <1>;
185 nvidia,tristate = <1>;
186 };
187 conf_crtp { 187 conf_crtp {
188 nvidia,pins = "crtp", "dap2", "dap3", "dap4", 188 nvidia,pins = "crtp", "dap2", "dap3", "dap4",
189 "dtc", "dte", "dtf", "gpu", "sdio1", 189 "dtc", "dte", "dtf", "gpu", "sdio1",
190 "slxc", "slxd", "spdi", "spdo", "spig", 190 "slxc", "slxd", "spdi", "spdo", "spig",
191 "uac", "uda"; 191 "uda";
192 nvidia,pull = <0>; 192 nvidia,pull = <0>;
193 nvidia,tristate = <1>; 193 nvidia,tristate = <1>;
194 }; 194 };
@@ -234,42 +234,81 @@
234 }; 234 };
235 }; 235 };
236 236
237 pmc@7000f400 { 237 i2s@70002800 {
238 nvidia,invert-interrupt; 238 status = "okay";
239 };
240
241 serial@70006300 {
242 status = "okay";
243 clock-frequency = <216000000>;
239 }; 244 };
240 245
241 i2c@7000c000 { 246 i2c@7000c000 {
247 status = "okay";
242 clock-frequency = <400000>; 248 clock-frequency = <400000>;
243 249
244 wm8903: wm8903@1a { 250 wm8903: wm8903@1a {
245 compatible = "wlf,wm8903"; 251 compatible = "wlf,wm8903";
246 reg = <0x1a>; 252 reg = <0x1a>;
247 interrupt-parent = <&gpio>; 253 interrupt-parent = <&gpio>;
248 interrupts = < 187 0x04 >; 254 interrupts = <187 0x04>;
249 255
250 gpio-controller; 256 gpio-controller;
251 #gpio-cells = <2>; 257 #gpio-cells = <2>;
252 258
253 micdet-cfg = <0>; 259 micdet-cfg = <0>;
254 micdet-delay = <100>; 260 micdet-delay = <100>;
255 gpio-cfg = < 0xffffffff 0xffffffff 0 0xffffffff 0xffffffff >; 261 gpio-cfg = <0xffffffff 0xffffffff 0 0xffffffff 0xffffffff>;
256 }; 262 };
257 }; 263 };
258 264
259 i2c@7000c400 { 265 i2c@7000c400 {
266 status = "okay";
260 clock-frequency = <400000>; 267 clock-frequency = <400000>;
261 }; 268 };
262 269
263 i2c@7000c500 { 270 i2c@7000c500 {
271 status = "okay";
264 clock-frequency = <400000>; 272 clock-frequency = <400000>;
265 }; 273 };
266 274
267 i2c@7000d000 { 275 i2c@7000d000 {
276 status = "okay";
268 clock-frequency = <400000>; 277 clock-frequency = <400000>;
269 }; 278 };
270 279
271 i2s@70002a00 { 280 pmc {
272 status = "disable"; 281 nvidia,invert-interrupt;
282 };
283
284 usb@c5000000 {
285 status = "okay";
286 };
287
288 usb@c5004000 {
289 status = "okay";
290 nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
291 };
292
293 usb@c5008000 {
294 status = "okay";
295 };
296
297 sdhci@c8000200 {
298 status = "okay";
299 cd-gpios = <&gpio 69 0>; /* gpio PI5 */
300 wp-gpios = <&gpio 57 0>; /* gpio PH1 */
301 power-gpios = <&gpio 155 0>; /* gpio PT3 */
302 bus-width = <4>;
303 };
304
305 sdhci@c8000600 {
306 status = "okay";
307 cd-gpios = <&gpio 58 0>; /* gpio PH2 */
308 wp-gpios = <&gpio 59 0>; /* gpio PH3 */
309 power-gpios = <&gpio 70 0>; /* gpio PI6 */
310 support-8bit;
311 bus-width = <8>;
273 }; 312 };
274 313
275 sound { 314 sound {
@@ -295,45 +334,4 @@
295 nvidia,int-mic-en-gpios = <&gpio 184 0>; /*gpio PX0 */ 334 nvidia,int-mic-en-gpios = <&gpio 184 0>; /*gpio PX0 */
296 nvidia,ext-mic-en-gpios = <&gpio 185 0>; /* gpio PX1 */ 335 nvidia,ext-mic-en-gpios = <&gpio 185 0>; /* gpio PX1 */
297 }; 336 };
298
299 serial@70006000 {
300 status = "disable";
301 };
302
303 serial@70006040 {
304 status = "disable";
305 };
306
307 serial@70006200 {
308 status = "disable";
309 };
310
311 serial@70006300 {
312 clock-frequency = < 216000000 >;
313 };
314
315 serial@70006400 {
316 status = "disable";
317 };
318
319 sdhci@c8000000 {
320 status = "disable";
321 };
322
323 sdhci@c8000200 {
324 cd-gpios = <&gpio 69 0>; /* gpio PI5 */
325 wp-gpios = <&gpio 57 0>; /* gpio PH1 */
326 power-gpios = <&gpio 155 0>; /* gpio PT3 */
327 };
328
329 sdhci@c8000400 {
330 status = "disable";
331 };
332
333 sdhci@c8000600 {
334 cd-gpios = <&gpio 58 0>; /* gpio PH2 */
335 wp-gpios = <&gpio 59 0>; /* gpio PH3 */
336 power-gpios = <&gpio 70 0>; /* gpio PI6 */
337 support-8bit;
338 };
339}; 337};
diff --git a/arch/arm/boot/dts/tegra-paz00.dts b/arch/arm/boot/dts/tegra-paz00.dts
index 10943fb2561c..bfeb117d5aea 100644
--- a/arch/arm/boot/dts/tegra-paz00.dts
+++ b/arch/arm/boot/dts/tegra-paz00.dts
@@ -6,11 +6,11 @@
6 model = "Toshiba AC100 / Dynabook AZ"; 6 model = "Toshiba AC100 / Dynabook AZ";
7 compatible = "compal,paz00", "nvidia,tegra20"; 7 compatible = "compal,paz00", "nvidia,tegra20";
8 8
9 memory@0 { 9 memory {
10 reg = <0x00000000 0x20000000>; 10 reg = <0x00000000 0x20000000>;
11 }; 11 };
12 12
13 pinmux@70000000 { 13 pinmux {
14 pinctrl-names = "default"; 14 pinctrl-names = "default";
15 pinctrl-0 = <&state_default>; 15 pinctrl-0 = <&state_default>;
16 16
@@ -159,18 +159,14 @@
159 }; 159 };
160 conf_ata { 160 conf_ata {
161 nvidia,pins = "ata", "atb", "atc", "atd", "ate", 161 nvidia,pins = "ata", "atb", "atc", "atd", "ate",
162 "cdev1", "dap1", "dap2", "dtf", "gma", 162 "cdev1", "cdev2", "dap1", "dap2", "dtf",
163 "gmb", "gmc", "gmd", "gme", "gpu", 163 "gma", "gmb", "gmc", "gmd", "gme",
164 "gpu7", "gpv", "i2cp", "pta", "rm", 164 "gpu", "gpu7", "gpv", "i2cp", "pta",
165 "sdio1", "slxk", "spdo", "uac", "uda"; 165 "rm", "sdio1", "slxk", "spdo", "uac",
166 "uda";
166 nvidia,pull = <0>; 167 nvidia,pull = <0>;
167 nvidia,tristate = <0>; 168 nvidia,tristate = <0>;
168 }; 169 };
169 conf_cdev2 {
170 nvidia,pins = "cdev2";
171 nvidia,pull = <1>;
172 nvidia,tristate = <0>;
173 };
174 conf_ck32 { 170 conf_ck32 {
175 nvidia,pins = "ck32", "ddrc", "pmca", "pmcb", 171 nvidia,pins = "ck32", "ddrc", "pmca", "pmcb",
176 "pmcc", "pmcd", "pmce", "xm2c", "xm2d"; 172 "pmcc", "pmcd", "pmce", "xm2c", "xm2d";
@@ -230,7 +226,22 @@
230 }; 226 };
231 }; 227 };
232 228
229 i2s@70002800 {
230 status = "okay";
231 };
232
233 serial@70006000 {
234 status = "okay";
235 clock-frequency = <216000000>;
236 };
237
238 serial@70006200 {
239 status = "okay";
240 clock-frequency = <216000000>;
241 };
242
233 i2c@7000c000 { 243 i2c@7000c000 {
244 status = "okay";
234 clock-frequency = <400000>; 245 clock-frequency = <400000>;
235 246
236 alc5632: alc5632@1e { 247 alc5632: alc5632@1e {
@@ -242,25 +253,23 @@
242 }; 253 };
243 254
244 i2c@7000c400 { 255 i2c@7000c400 {
256 status = "okay";
245 clock-frequency = <400000>; 257 clock-frequency = <400000>;
246 }; 258 };
247 259
248 i2c@7000c500 { 260 nvec {
249 status = "disable";
250 };
251
252 nvec@7000c500 {
253 #address-cells = <1>;
254 #size-cells = <0>;
255 compatible = "nvidia,nvec"; 261 compatible = "nvidia,nvec";
256 reg = <0x7000C500 0x100>; 262 reg = <0x7000c500 0x100>;
257 interrupts = <0 92 0x04>; 263 interrupts = <0 92 0x04>;
264 #address-cells = <1>;
265 #size-cells = <0>;
258 clock-frequency = <80000>; 266 clock-frequency = <80000>;
259 request-gpios = <&gpio 170 0>; 267 request-gpios = <&gpio 170 0>; /* gpio PV2 */
260 slave-addr = <138>; 268 slave-addr = <138>;
261 }; 269 };
262 270
263 i2c@7000d000 { 271 i2c@7000d000 {
272 status = "okay";
264 clock-frequency = <400000>; 273 clock-frequency = <400000>;
265 274
266 adt7461@4c { 275 adt7461@4c {
@@ -269,66 +278,31 @@
269 }; 278 };
270 }; 279 };
271 280
272 i2s@70002a00 { 281 usb@c5000000 {
273 status = "disable"; 282 status = "okay";
274 };
275
276 sound {
277 compatible = "nvidia,tegra-audio-alc5632-paz00",
278 "nvidia,tegra-audio-alc5632";
279
280 nvidia,model = "Compal PAZ00";
281
282 nvidia,audio-routing =
283 "Int Spk", "SPKOUT",
284 "Int Spk", "SPKOUTN",
285 "Headset Mic", "MICBIAS1",
286 "MIC1", "Headset Mic",
287 "Headset Stereophone", "HPR",
288 "Headset Stereophone", "HPL",
289 "DMICDAT", "Digital Mic";
290
291 nvidia,audio-codec = <&alc5632>;
292 nvidia,i2s-controller = <&tegra_i2s1>;
293 nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */
294 };
295
296 serial@70006000 {
297 clock-frequency = <216000000>;
298 }; 283 };
299 284
300 serial@70006040 { 285 usb@c5004000 {
301 status = "disable"; 286 status = "okay";
287 nvidia,phy-reset-gpio = <&gpio 168 0>; /* gpio PV0 */
302 }; 288 };
303 289
304 serial@70006200 { 290 usb@c5008000 {
305 clock-frequency = <216000000>; 291 status = "okay";
306 };
307
308 serial@70006300 {
309 status = "disable";
310 };
311
312 serial@70006400 {
313 status = "disable";
314 }; 292 };
315 293
316 sdhci@c8000000 { 294 sdhci@c8000000 {
295 status = "okay";
317 cd-gpios = <&gpio 173 0>; /* gpio PV5 */ 296 cd-gpios = <&gpio 173 0>; /* gpio PV5 */
318 wp-gpios = <&gpio 57 0>; /* gpio PH1 */ 297 wp-gpios = <&gpio 57 0>; /* gpio PH1 */
319 power-gpios = <&gpio 169 0>; /* gpio PV1 */ 298 power-gpios = <&gpio 169 0>; /* gpio PV1 */
320 }; 299 bus-width = <4>;
321
322 sdhci@c8000200 {
323 status = "disable";
324 };
325
326 sdhci@c8000400 {
327 status = "disable";
328 }; 300 };
329 301
330 sdhci@c8000600 { 302 sdhci@c8000600 {
303 status = "okay";
331 support-8bit; 304 support-8bit;
305 bus-width = <8>;
332 }; 306 };
333 307
334 gpio-keys { 308 gpio-keys {
@@ -347,8 +321,28 @@
347 321
348 wifi { 322 wifi {
349 label = "wifi-led"; 323 label = "wifi-led";
350 gpios = <&gpio 24 0>; 324 gpios = <&gpio 24 0>; /* gpio PD0 */
351 linux,default-trigger = "rfkill0"; 325 linux,default-trigger = "rfkill0";
352 }; 326 };
353 }; 327 };
328
329 sound {
330 compatible = "nvidia,tegra-audio-alc5632-paz00",
331 "nvidia,tegra-audio-alc5632";
332
333 nvidia,model = "Compal PAZ00";
334
335 nvidia,audio-routing =
336 "Int Spk", "SPKOUT",
337 "Int Spk", "SPKOUTN",
338 "Headset Mic", "MICBIAS1",
339 "MIC1", "Headset Mic",
340 "Headset Stereophone", "HPR",
341 "Headset Stereophone", "HPL",
342 "DMICDAT", "Digital Mic";
343
344 nvidia,audio-codec = <&alc5632>;
345 nvidia,i2s-controller = <&tegra_i2s1>;
346 nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */
347 };
354}; 348};
diff --git a/arch/arm/boot/dts/tegra-seaboard.dts b/arch/arm/boot/dts/tegra-seaboard.dts
index ec33116f5df9..89cb7f2acd92 100644
--- a/arch/arm/boot/dts/tegra-seaboard.dts
+++ b/arch/arm/boot/dts/tegra-seaboard.dts
@@ -7,11 +7,10 @@
7 compatible = "nvidia,seaboard", "nvidia,tegra20"; 7 compatible = "nvidia,seaboard", "nvidia,tegra20";
8 8
9 memory { 9 memory {
10 device_type = "memory"; 10 reg = <0x00000000 0x40000000>;
11 reg = < 0x00000000 0x40000000 >;
12 }; 11 };
13 12
14 pinmux@70000000 { 13 pinmux {
15 pinctrl-names = "default"; 14 pinctrl-names = "default";
16 pinctrl-0 = <&state_default>; 15 pinctrl-0 = <&state_default>;
17 16
@@ -100,7 +99,7 @@
100 }; 99 };
101 hdint { 100 hdint {
102 nvidia,pins = "hdint", "lpw0", "lpw2", "lsc1", 101 nvidia,pins = "hdint", "lpw0", "lpw2", "lsc1",
103 "lsck", "lsda", "pta"; 102 "lsck", "lsda";
104 nvidia,function = "hdmi"; 103 nvidia,function = "hdmi";
105 }; 104 };
106 i2cp { 105 i2cp {
@@ -134,6 +133,10 @@
134 nvidia,pins = "pmc"; 133 nvidia,pins = "pmc";
135 nvidia,function = "pwr_on"; 134 nvidia,function = "pwr_on";
136 }; 135 };
136 pta {
137 nvidia,pins = "pta";
138 nvidia,function = "i2c2";
139 };
137 rm { 140 rm {
138 nvidia,pins = "rm"; 141 nvidia,pins = "rm";
139 nvidia,function = "i2c1"; 142 nvidia,function = "i2c1";
@@ -254,108 +257,148 @@
254 }; 257 };
255 }; 258 };
256 259
260 i2s@70002800 {
261 status = "okay";
262 };
263
264 serial@70006300 {
265 status = "okay";
266 clock-frequency = <216000000>;
267 };
268
257 i2c@7000c000 { 269 i2c@7000c000 {
270 status = "okay";
258 clock-frequency = <400000>; 271 clock-frequency = <400000>;
259 272
260 wm8903: wm8903@1a { 273 wm8903: wm8903@1a {
261 compatible = "wlf,wm8903"; 274 compatible = "wlf,wm8903";
262 reg = <0x1a>; 275 reg = <0x1a>;
263 interrupt-parent = <&gpio>; 276 interrupt-parent = <&gpio>;
264 interrupts = < 187 0x04 >; 277 interrupts = <187 0x04>;
265 278
266 gpio-controller; 279 gpio-controller;
267 #gpio-cells = <2>; 280 #gpio-cells = <2>;
268 281
269 micdet-cfg = <0>; 282 micdet-cfg = <0>;
270 micdet-delay = <100>; 283 micdet-delay = <100>;
271 gpio-cfg = < 0xffffffff 0xffffffff 0 0xffffffff 0xffffffff >; 284 gpio-cfg = <0xffffffff 0xffffffff 0 0xffffffff 0xffffffff>;
285 };
286
287 /* ALS and proximity sensor */
288 isl29018@44 {
289 compatible = "isil,isl29018";
290 reg = <0x44>;
291 interrupt-parent = <&gpio>;
292 interrupts = <202 0x04>; /* GPIO PZ2 */
293 };
294
295 gyrometer@68 {
296 compatible = "invn,mpu3050";
297 reg = <0x68>;
298 interrupt-parent = <&gpio>;
299 interrupts = <204 0x04>; /* gpio PZ4 */
272 }; 300 };
273 }; 301 };
274 302
275 i2c@7000c400 { 303 i2c@7000c400 {
276 clock-frequency = <400000>; 304 status = "okay";
305 clock-frequency = <100000>;
306
307 smart-battery@b {
308 compatible = "ti,bq20z75", "smart-battery-1.1";
309 reg = <0xb>;
310 ti,i2c-retry-count = <2>;
311 ti,poll-retry-count = <10>;
312 };
277 }; 313 };
278 314
279 i2c@7000c500 { 315 i2c@7000c500 {
316 status = "okay";
280 clock-frequency = <400000>; 317 clock-frequency = <400000>;
281 }; 318 };
282 319
283 i2c@7000d000 { 320 i2c@7000d000 {
321 status = "okay";
284 clock-frequency = <400000>; 322 clock-frequency = <400000>;
285 323
286 adt7461@4c { 324 temperature-sensor@4c {
287 compatible = "adt7461"; 325 compatible = "nct1008";
288 reg = <0x4c>; 326 reg = <0x4c>;
289 }; 327 };
290 };
291
292 i2s@70002a00 {
293 status = "disable";
294 };
295
296 sound {
297 compatible = "nvidia,tegra-audio-wm8903-seaboard",
298 "nvidia,tegra-audio-wm8903";
299 nvidia,model = "NVIDIA Tegra Seaboard";
300
301 nvidia,audio-routing =
302 "Headphone Jack", "HPOUTR",
303 "Headphone Jack", "HPOUTL",
304 "Int Spk", "ROP",
305 "Int Spk", "RON",
306 "Int Spk", "LOP",
307 "Int Spk", "LON",
308 "Mic Jack", "MICBIAS",
309 "IN1R", "Mic Jack";
310
311 nvidia,i2s-controller = <&tegra_i2s1>;
312 nvidia,audio-codec = <&wm8903>;
313
314 nvidia,spkr-en-gpios = <&wm8903 2 0>;
315 nvidia,hp-det-gpios = <&gpio 185 0>; /* gpio PX1 */
316 };
317 328
318 serial@70006000 { 329 magnetometer@c {
319 status = "disable"; 330 compatible = "ak8975";
320 }; 331 reg = <0xc>;
321 332 interrupt-parent = <&gpio>;
322 serial@70006040 { 333 interrupts = <109 0x04>; /* gpio PN5 */
323 status = "disable"; 334 };
324 }; 335 };
325 336
326 serial@70006200 { 337 emc {
327 status = "disable"; 338 emc-table@190000 {
328 }; 339 reg = <190000>;
340 compatible = "nvidia,tegra20-emc-table";
341 clock-frequency = <190000>;
342 nvidia,emc-registers = <0x0000000c 0x00000026
343 0x00000009 0x00000003 0x00000004 0x00000004
344 0x00000002 0x0000000c 0x00000003 0x00000003
345 0x00000002 0x00000001 0x00000004 0x00000005
346 0x00000004 0x00000009 0x0000000d 0x0000059f
347 0x00000000 0x00000003 0x00000003 0x00000003
348 0x00000003 0x00000001 0x0000000b 0x000000c8
349 0x00000003 0x00000007 0x00000004 0x0000000f
350 0x00000002 0x00000000 0x00000000 0x00000002
351 0x00000000 0x00000000 0x00000083 0xa06204ae
352 0x007dc010 0x00000000 0x00000000 0x00000000
353 0x00000000 0x00000000 0x00000000 0x00000000>;
354 };
329 355
330 serial@70006300 { 356 emc-table@380000 {
331 clock-frequency = < 216000000 >; 357 reg = <380000>;
358 compatible = "nvidia,tegra20-emc-table";
359 clock-frequency = <380000>;
360 nvidia,emc-registers = <0x00000017 0x0000004b
361 0x00000012 0x00000006 0x00000004 0x00000005
362 0x00000003 0x0000000c 0x00000006 0x00000006
363 0x00000003 0x00000001 0x00000004 0x00000005
364 0x00000004 0x00000009 0x0000000d 0x00000b5f
365 0x00000000 0x00000003 0x00000003 0x00000006
366 0x00000006 0x00000001 0x00000011 0x000000c8
367 0x00000003 0x0000000e 0x00000007 0x0000000f
368 0x00000002 0x00000000 0x00000000 0x00000002
369 0x00000000 0x00000000 0x00000083 0xe044048b
370 0x007d8010 0x00000000 0x00000000 0x00000000
371 0x00000000 0x00000000 0x00000000 0x00000000>;
372 };
332 }; 373 };
333 374
334 serial@70006400 { 375 usb@c5000000 {
335 status = "disable"; 376 status = "okay";
377 nvidia,vbus-gpio = <&gpio 24 0>; /* PD0 */
378 dr_mode = "otg";
336 }; 379 };
337 380
338 sdhci@c8000000 { 381 usb@c5004000 {
339 status = "disable"; 382 status = "okay";
383 nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
340 }; 384 };
341 385
342 sdhci@c8000200 { 386 usb@c5008000 {
343 status = "disable"; 387 status = "okay";
344 }; 388 };
345 389
346 sdhci@c8000400 { 390 sdhci@c8000400 {
391 status = "okay";
347 cd-gpios = <&gpio 69 0>; /* gpio PI5 */ 392 cd-gpios = <&gpio 69 0>; /* gpio PI5 */
348 wp-gpios = <&gpio 57 0>; /* gpio PH1 */ 393 wp-gpios = <&gpio 57 0>; /* gpio PH1 */
349 power-gpios = <&gpio 70 0>; /* gpio PI6 */ 394 power-gpios = <&gpio 70 0>; /* gpio PI6 */
395 bus-width = <4>;
350 }; 396 };
351 397
352 sdhci@c8000600 { 398 sdhci@c8000600 {
399 status = "okay";
353 support-8bit; 400 support-8bit;
354 }; 401 bus-width = <8>;
355
356 usb@c5000000 {
357 nvidia,vbus-gpio = <&gpio 24 0>; /* PD0 */
358 dr_mode = "otg";
359 }; 402 };
360 403
361 gpio-keys { 404 gpio-keys {
@@ -378,41 +421,25 @@
378 }; 421 };
379 }; 422 };
380 423
381 emc@7000f400 { 424 sound {
382 emc-table@190000 { 425 compatible = "nvidia,tegra-audio-wm8903-seaboard",
383 reg = < 190000 >; 426 "nvidia,tegra-audio-wm8903";
384 compatible = "nvidia,tegra20-emc-table"; 427 nvidia,model = "NVIDIA Tegra Seaboard";
385 clock-frequency = < 190000 >;
386 nvidia,emc-registers = < 0x0000000c 0x00000026
387 0x00000009 0x00000003 0x00000004 0x00000004
388 0x00000002 0x0000000c 0x00000003 0x00000003
389 0x00000002 0x00000001 0x00000004 0x00000005
390 0x00000004 0x00000009 0x0000000d 0x0000059f
391 0x00000000 0x00000003 0x00000003 0x00000003
392 0x00000003 0x00000001 0x0000000b 0x000000c8
393 0x00000003 0x00000007 0x00000004 0x0000000f
394 0x00000002 0x00000000 0x00000000 0x00000002
395 0x00000000 0x00000000 0x00000083 0xa06204ae
396 0x007dc010 0x00000000 0x00000000 0x00000000
397 0x00000000 0x00000000 0x00000000 0x00000000 >;
398 };
399 428
400 emc-table@380000 { 429 nvidia,audio-routing =
401 reg = < 380000 >; 430 "Headphone Jack", "HPOUTR",
402 compatible = "nvidia,tegra20-emc-table"; 431 "Headphone Jack", "HPOUTL",
403 clock-frequency = < 380000 >; 432 "Int Spk", "ROP",
404 nvidia,emc-registers = < 0x00000017 0x0000004b 433 "Int Spk", "RON",
405 0x00000012 0x00000006 0x00000004 0x00000005 434 "Int Spk", "LOP",
406 0x00000003 0x0000000c 0x00000006 0x00000006 435 "Int Spk", "LON",
407 0x00000003 0x00000001 0x00000004 0x00000005 436 "Mic Jack", "MICBIAS",
408 0x00000004 0x00000009 0x0000000d 0x00000b5f 437 "IN1R", "Mic Jack";
409 0x00000000 0x00000003 0x00000003 0x00000006 438
410 0x00000006 0x00000001 0x00000011 0x000000c8 439 nvidia,i2s-controller = <&tegra_i2s1>;
411 0x00000003 0x0000000e 0x00000007 0x0000000f 440 nvidia,audio-codec = <&wm8903>;
412 0x00000002 0x00000000 0x00000000 0x00000002 441
413 0x00000000 0x00000000 0x00000083 0xe044048b 442 nvidia,spkr-en-gpios = <&wm8903 2 0>;
414 0x007d8010 0x00000000 0x00000000 0x00000000 443 nvidia,hp-det-gpios = <&gpio 185 0>; /* gpio PX1 */
415 0x00000000 0x00000000 0x00000000 0x00000000 >;
416 };
417 }; 444 };
418}; 445};
diff --git a/arch/arm/boot/dts/tegra-trimslice.dts b/arch/arm/boot/dts/tegra-trimslice.dts
index 98efd5b0d7f9..9de5636023f6 100644
--- a/arch/arm/boot/dts/tegra-trimslice.dts
+++ b/arch/arm/boot/dts/tegra-trimslice.dts
@@ -6,11 +6,11 @@
6 model = "Compulab TrimSlice board"; 6 model = "Compulab TrimSlice board";
7 compatible = "compulab,trimslice", "nvidia,tegra20"; 7 compatible = "compulab,trimslice", "nvidia,tegra20";
8 8
9 memory@0 { 9 memory {
10 reg = < 0x00000000 0x40000000 >; 10 reg = <0x00000000 0x40000000>;
11 }; 11 };
12 12
13 pinmux@70000000 { 13 pinmux {
14 pinctrl-names = "default"; 14 pinctrl-names = "default";
15 pinctrl-0 = <&state_default>; 15 pinctrl-0 = <&state_default>;
16 16
@@ -182,23 +182,23 @@
182 nvidia,tristate = <1>; 182 nvidia,tristate = <1>;
183 }; 183 };
184 conf_atb { 184 conf_atb {
185 nvidia,pins = "atb", "cdev1", "dap1", "gma", 185 nvidia,pins = "atb", "cdev1", "cdev2", "dap1",
186 "gmc", "gmd", "gpu", "gpu7", "gpv", 186 "gma", "gmc", "gmd", "gpu", "gpu7",
187 "sdio1", "slxa", "slxk", "uac"; 187 "gpv", "sdio1", "slxa", "slxk", "uac";
188 nvidia,pull = <0>; 188 nvidia,pull = <0>;
189 nvidia,tristate = <0>; 189 nvidia,tristate = <0>;
190 }; 190 };
191 conf_cdev2 {
192 nvidia,pins = "cdev2", "csus", "spia", "spib",
193 "spid", "spif";
194 nvidia,pull = <1>;
195 nvidia,tristate = <1>;
196 };
197 conf_ck32 { 191 conf_ck32 {
198 nvidia,pins = "ck32", "ddrc", "pmca", "pmcb", 192 nvidia,pins = "ck32", "ddrc", "pmca", "pmcb",
199 "pmcc", "pmcd", "pmce", "xm2c", "xm2d"; 193 "pmcc", "pmcd", "pmce", "xm2c", "xm2d";
200 nvidia,pull = <0>; 194 nvidia,pull = <0>;
201 }; 195 };
196 conf_csus {
197 nvidia,pins = "csus", "spia", "spib",
198 "spid", "spif";
199 nvidia,pull = <1>;
200 nvidia,tristate = <1>;
201 };
202 conf_ddc { 202 conf_ddc {
203 nvidia,pins = "ddc", "dtf", "rm", "sdc", "sdd"; 203 nvidia,pins = "ddc", "dtf", "rm", "sdc", "sdd";
204 nvidia,pull = <2>; 204 nvidia,pull = <2>;
@@ -240,68 +240,67 @@
240 }; 240 };
241 }; 241 };
242 242
243 i2s@70002800 {
244 status = "okay";
245 };
246
247 serial@70006000 {
248 status = "okay";
249 clock-frequency = <216000000>;
250 };
251
243 i2c@7000c000 { 252 i2c@7000c000 {
253 status = "okay";
244 clock-frequency = <400000>; 254 clock-frequency = <400000>;
245 }; 255 };
246 256
247 i2c@7000c400 { 257 i2c@7000c400 {
258 status = "okay";
248 clock-frequency = <400000>; 259 clock-frequency = <400000>;
249 }; 260 };
250 261
251 i2c@7000c500 { 262 i2c@7000c500 {
263 status = "okay";
252 clock-frequency = <400000>; 264 clock-frequency = <400000>;
253 };
254
255 i2c@7000d000 {
256 status = "disable";
257 };
258
259 i2s@70002800 {
260 status = "disable";
261 };
262
263 i2s@70002a00 {
264 status = "disable";
265 };
266
267 das@70000c00 {
268 status = "disable";
269 };
270 265
271 serial@70006000 { 266 codec: codec@1a {
272 clock-frequency = < 216000000 >; 267 compatible = "ti,tlv320aic23";
273 }; 268 reg = <0x1a>;
269 };
274 270
275 serial@70006040 { 271 rtc@56 {
276 status = "disable"; 272 compatible = "emmicro,em3027";
273 reg = <0x56>;
274 };
277 }; 275 };
278 276
279 serial@70006200 { 277 usb@c5000000 {
280 status = "disable"; 278 status = "okay";
281 }; 279 };
282 280
283 serial@70006300 { 281 usb@c5004000 {
284 status = "disable"; 282 nvidia,phy-reset-gpio = <&gpio 168 0>; /* gpio PV0 */
285 }; 283 };
286 284
287 serial@70006400 { 285 usb@c5008000 {
288 status = "disable"; 286 status = "okay";
289 }; 287 };
290 288
291 sdhci@c8000000 { 289 sdhci@c8000000 {
292 status = "disable"; 290 status = "okay";
291 bus-width = <4>;
293 }; 292 };
294 293
295 sdhci@c8000200 { 294 sdhci@c8000600 {
296 status = "disable"; 295 status = "okay";
297 }; 296 cd-gpios = <&gpio 121 0>; /* gpio PP1 */
298 297 wp-gpios = <&gpio 122 0>; /* gpio PP2 */
299 sdhci@c8000400 { 298 bus-width = <4>;
300 status = "disable";
301 }; 299 };
302 300
303 sdhci@c8000600 { 301 sound {
304 cd-gpios = <&gpio 121 0>; 302 compatible = "nvidia,tegra-audio-trimslice";
305 wp-gpios = <&gpio 122 0>; 303 nvidia,i2s-controller = <&tegra_i2s1>;
304 nvidia,audio-codec = <&codec>;
306 }; 305 };
307}; 306};
diff --git a/arch/arm/boot/dts/tegra-ventana.dts b/arch/arm/boot/dts/tegra-ventana.dts
index 71eb2e50a668..445343b0fbdd 100644
--- a/arch/arm/boot/dts/tegra-ventana.dts
+++ b/arch/arm/boot/dts/tegra-ventana.dts
@@ -7,10 +7,10 @@
7 compatible = "nvidia,ventana", "nvidia,tegra20"; 7 compatible = "nvidia,ventana", "nvidia,tegra20";
8 8
9 memory { 9 memory {
10 reg = < 0x00000000 0x40000000 >; 10 reg = <0x00000000 0x40000000>;
11 }; 11 };
12 12
13 pinmux@70000000 { 13 pinmux {
14 pinctrl-names = "default"; 14 pinctrl-names = "default";
15 pinctrl-0 = <&state_default>; 15 pinctrl-0 = <&state_default>;
16 16
@@ -240,38 +240,82 @@
240 }; 240 };
241 }; 241 };
242 242
243 i2s@70002800 {
244 status = "okay";
245 };
246
247 serial@70006300 {
248 status = "okay";
249 clock-frequency = <216000000>;
250 };
251
243 i2c@7000c000 { 252 i2c@7000c000 {
253 status = "okay";
244 clock-frequency = <400000>; 254 clock-frequency = <400000>;
245 255
246 wm8903: wm8903@1a { 256 wm8903: wm8903@1a {
247 compatible = "wlf,wm8903"; 257 compatible = "wlf,wm8903";
248 reg = <0x1a>; 258 reg = <0x1a>;
249 interrupt-parent = <&gpio>; 259 interrupt-parent = <&gpio>;
250 interrupts = < 187 0x04 >; 260 interrupts = <187 0x04>;
251 261
252 gpio-controller; 262 gpio-controller;
253 #gpio-cells = <2>; 263 #gpio-cells = <2>;
254 264
255 micdet-cfg = <0>; 265 micdet-cfg = <0>;
256 micdet-delay = <100>; 266 micdet-delay = <100>;
257 gpio-cfg = < 0xffffffff 0xffffffff 0 0xffffffff 0xffffffff >; 267 gpio-cfg = <0xffffffff 0xffffffff 0 0xffffffff 0xffffffff>;
268 };
269
270 /* ALS and proximity sensor */
271 isl29018@44 {
272 compatible = "isil,isl29018";
273 reg = <0x44>;
274 interrupt-parent = <&gpio>;
275 interrupts = <202 0x04>; /*gpio PZ2 */
258 }; 276 };
259 }; 277 };
260 278
261 i2c@7000c400 { 279 i2c@7000c400 {
280 status = "okay";
262 clock-frequency = <400000>; 281 clock-frequency = <400000>;
263 }; 282 };
264 283
265 i2c@7000c500 { 284 i2c@7000c500 {
285 status = "okay";
266 clock-frequency = <400000>; 286 clock-frequency = <400000>;
267 }; 287 };
268 288
269 i2c@7000d000 { 289 i2c@7000d000 {
290 status = "okay";
270 clock-frequency = <400000>; 291 clock-frequency = <400000>;
271 }; 292 };
272 293
273 i2s@70002a00 { 294 usb@c5000000 {
274 status = "disable"; 295 status = "okay";
296 };
297
298 usb@c5004000 {
299 status = "okay";
300 nvidia,phy-reset-gpio = <&gpio 169 0>; /* gpio PV1 */
301 };
302
303 usb@c5008000 {
304 status = "okay";
305 };
306
307 sdhci@c8000400 {
308 status = "okay";
309 cd-gpios = <&gpio 69 0>; /* gpio PI5 */
310 wp-gpios = <&gpio 57 0>; /* gpio PH1 */
311 power-gpios = <&gpio 70 0>; /* gpio PI6 */
312 bus-width = <4>;
313 };
314
315 sdhci@c8000600 {
316 status = "okay";
317 support-8bit;
318 bus-width = <8>;
275 }; 319 };
276 320
277 sound { 321 sound {
@@ -294,45 +338,7 @@
294 338
295 nvidia,spkr-en-gpios = <&wm8903 2 0>; 339 nvidia,spkr-en-gpios = <&wm8903 2 0>;
296 nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */ 340 nvidia,hp-det-gpios = <&gpio 178 0>; /* gpio PW2 */
297 nvidia,int-mic-en-gpios = <&gpio 184 0>; /*gpio PX0 */ 341 nvidia,int-mic-en-gpios = <&gpio 184 0>; /* gpio PX0 */
298 nvidia,ext-mic-en-gpios = <&gpio 185 0>; /* gpio PX1 */ 342 nvidia,ext-mic-en-gpios = <&gpio 185 0>; /* gpio PX1 */
299 }; 343 };
300
301 serial@70006000 {
302 status = "disable";
303 };
304
305 serial@70006040 {
306 status = "disable";
307 };
308
309 serial@70006200 {
310 status = "disable";
311 };
312
313 serial@70006300 {
314 clock-frequency = < 216000000 >;
315 };
316
317 serial@70006400 {
318 status = "disable";
319 };
320
321 sdhci@c8000000 {
322 status = "disable";
323 };
324
325 sdhci@c8000200 {
326 status = "disable";
327 };
328
329 sdhci@c8000400 {
330 cd-gpios = <&gpio 69 0>; /* gpio PI5 */
331 wp-gpios = <&gpio 57 0>; /* gpio PH1 */
332 power-gpios = <&gpio 70 0>; /* gpio PI6 */
333 };
334
335 sdhci@c8000600 {
336 support-8bit;
337 };
338}; 344};
diff --git a/arch/arm/boot/dts/tegra20.dtsi b/arch/arm/boot/dts/tegra20.dtsi
index 108e894a8926..c417d67e9027 100644
--- a/arch/arm/boot/dts/tegra20.dtsi
+++ b/arch/arm/boot/dts/tegra20.dtsi
@@ -4,207 +4,242 @@
4 compatible = "nvidia,tegra20"; 4 compatible = "nvidia,tegra20";
5 interrupt-parent = <&intc>; 5 interrupt-parent = <&intc>;
6 6
7 pmc@7000f400 { 7 intc: interrupt-controller {
8 compatible = "nvidia,tegra20-pmc";
9 reg = <0x7000e400 0x400>;
10 };
11
12 intc: interrupt-controller@50041000 {
13 compatible = "arm,cortex-a9-gic"; 8 compatible = "arm,cortex-a9-gic";
9 reg = <0x50041000 0x1000
10 0x50040100 0x0100>;
14 interrupt-controller; 11 interrupt-controller;
15 #interrupt-cells = <3>; 12 #interrupt-cells = <3>;
16 reg = < 0x50041000 0x1000 >,
17 < 0x50040100 0x0100 >;
18 }; 13 };
19 14
20 pmu { 15 apbdma: dma {
21 compatible = "arm,cortex-a9-pmu";
22 interrupts = <0 56 0x04
23 0 57 0x04>;
24 };
25
26 apbdma: dma@6000a000 {
27 compatible = "nvidia,tegra20-apbdma"; 16 compatible = "nvidia,tegra20-apbdma";
28 reg = <0x6000a000 0x1200>; 17 reg = <0x6000a000 0x1200>;
29 interrupts = < 0 104 0x04 18 interrupts = <0 104 0x04
30 0 105 0x04 19 0 105 0x04
31 0 106 0x04 20 0 106 0x04
32 0 107 0x04 21 0 107 0x04
33 0 108 0x04 22 0 108 0x04
34 0 109 0x04 23 0 109 0x04
35 0 110 0x04 24 0 110 0x04
36 0 111 0x04 25 0 111 0x04
37 0 112 0x04 26 0 112 0x04
38 0 113 0x04 27 0 113 0x04
39 0 114 0x04 28 0 114 0x04
40 0 115 0x04 29 0 115 0x04
41 0 116 0x04 30 0 116 0x04
42 0 117 0x04 31 0 117 0x04
43 0 118 0x04 32 0 118 0x04
44 0 119 0x04 >; 33 0 119 0x04>;
45 }; 34 };
46 35
47 i2c@7000c000 { 36 ahb {
48 #address-cells = <1>; 37 compatible = "nvidia,tegra20-ahb";
49 #size-cells = <0>; 38 reg = <0x6000c004 0x10c>; /* AHB Arbitration + Gizmo Controller */
50 compatible = "nvidia,tegra20-i2c"; 39 };
51 reg = <0x7000C000 0x100>; 40
52 interrupts = < 0 38 0x04 >; 41 gpio: gpio {
53 }; 42 compatible = "nvidia,tegra20-gpio";
54 43 reg = <0x6000d000 0x1000>;
55 i2c@7000c400 { 44 interrupts = <0 32 0x04
56 #address-cells = <1>; 45 0 33 0x04
57 #size-cells = <0>; 46 0 34 0x04
58 compatible = "nvidia,tegra20-i2c"; 47 0 35 0x04
59 reg = <0x7000C400 0x100>; 48 0 55 0x04
60 interrupts = < 0 84 0x04 >; 49 0 87 0x04
50 0 89 0x04>;
51 #gpio-cells = <2>;
52 gpio-controller;
53 #interrupt-cells = <2>;
54 interrupt-controller;
61 }; 55 };
62 56
63 i2c@7000c500 { 57 pinmux: pinmux {
64 #address-cells = <1>; 58 compatible = "nvidia,tegra20-pinmux";
65 #size-cells = <0>; 59 reg = <0x70000014 0x10 /* Tri-state registers */
66 compatible = "nvidia,tegra20-i2c"; 60 0x70000080 0x20 /* Mux registers */
67 reg = <0x7000C500 0x100>; 61 0x700000a0 0x14 /* Pull-up/down registers */
68 interrupts = < 0 92 0x04 >; 62 0x70000868 0xa8>; /* Pad control registers */
69 }; 63 };
70 64
71 i2c@7000d000 { 65 das {
72 #address-cells = <1>; 66 compatible = "nvidia,tegra20-das";
73 #size-cells = <0>; 67 reg = <0x70000c00 0x80>;
74 compatible = "nvidia,tegra20-i2c-dvc";
75 reg = <0x7000D000 0x200>;
76 interrupts = < 0 53 0x04 >;
77 }; 68 };
78 69
79 tegra_i2s1: i2s@70002800 { 70 tegra_i2s1: i2s@70002800 {
80 compatible = "nvidia,tegra20-i2s"; 71 compatible = "nvidia,tegra20-i2s";
81 reg = <0x70002800 0x200>; 72 reg = <0x70002800 0x200>;
82 interrupts = < 0 13 0x04 >; 73 interrupts = <0 13 0x04>;
83 nvidia,dma-request-selector = < &apbdma 2 >; 74 nvidia,dma-request-selector = <&apbdma 2>;
75 status = "disable";
84 }; 76 };
85 77
86 tegra_i2s2: i2s@70002a00 { 78 tegra_i2s2: i2s@70002a00 {
87 compatible = "nvidia,tegra20-i2s"; 79 compatible = "nvidia,tegra20-i2s";
88 reg = <0x70002a00 0x200>; 80 reg = <0x70002a00 0x200>;
89 interrupts = < 0 3 0x04 >; 81 interrupts = <0 3 0x04>;
90 nvidia,dma-request-selector = < &apbdma 1 >; 82 nvidia,dma-request-selector = <&apbdma 1>;
91 }; 83 status = "disable";
92
93 das@70000c00 {
94 compatible = "nvidia,tegra20-das";
95 reg = <0x70000c00 0x80>;
96 };
97
98 gpio: gpio@6000d000 {
99 compatible = "nvidia,tegra20-gpio";
100 reg = < 0x6000d000 0x1000 >;
101 interrupts = < 0 32 0x04
102 0 33 0x04
103 0 34 0x04
104 0 35 0x04
105 0 55 0x04
106 0 87 0x04
107 0 89 0x04 >;
108 #gpio-cells = <2>;
109 gpio-controller;
110 #interrupt-cells = <2>;
111 interrupt-controller;
112 };
113
114 pinmux: pinmux@70000000 {
115 compatible = "nvidia,tegra20-pinmux";
116 reg = < 0x70000014 0x10 /* Tri-state registers */
117 0x70000080 0x20 /* Mux registers */
118 0x700000a0 0x14 /* Pull-up/down registers */
119 0x70000868 0xa8 >; /* Pad control registers */
120 }; 84 };
121 85
122 serial@70006000 { 86 serial@70006000 {
123 compatible = "nvidia,tegra20-uart"; 87 compatible = "nvidia,tegra20-uart";
124 reg = <0x70006000 0x40>; 88 reg = <0x70006000 0x40>;
125 reg-shift = <2>; 89 reg-shift = <2>;
126 interrupts = < 0 36 0x04 >; 90 interrupts = <0 36 0x04>;
91 status = "disable";
127 }; 92 };
128 93
129 serial@70006040 { 94 serial@70006040 {
130 compatible = "nvidia,tegra20-uart"; 95 compatible = "nvidia,tegra20-uart";
131 reg = <0x70006040 0x40>; 96 reg = <0x70006040 0x40>;
132 reg-shift = <2>; 97 reg-shift = <2>;
133 interrupts = < 0 37 0x04 >; 98 interrupts = <0 37 0x04>;
99 status = "disable";
134 }; 100 };
135 101
136 serial@70006200 { 102 serial@70006200 {
137 compatible = "nvidia,tegra20-uart"; 103 compatible = "nvidia,tegra20-uart";
138 reg = <0x70006200 0x100>; 104 reg = <0x70006200 0x100>;
139 reg-shift = <2>; 105 reg-shift = <2>;
140 interrupts = < 0 46 0x04 >; 106 interrupts = <0 46 0x04>;
107 status = "disable";
141 }; 108 };
142 109
143 serial@70006300 { 110 serial@70006300 {
144 compatible = "nvidia,tegra20-uart"; 111 compatible = "nvidia,tegra20-uart";
145 reg = <0x70006300 0x100>; 112 reg = <0x70006300 0x100>;
146 reg-shift = <2>; 113 reg-shift = <2>;
147 interrupts = < 0 90 0x04 >; 114 interrupts = <0 90 0x04>;
115 status = "disable";
148 }; 116 };
149 117
150 serial@70006400 { 118 serial@70006400 {
151 compatible = "nvidia,tegra20-uart"; 119 compatible = "nvidia,tegra20-uart";
152 reg = <0x70006400 0x100>; 120 reg = <0x70006400 0x100>;
153 reg-shift = <2>; 121 reg-shift = <2>;
154 interrupts = < 0 91 0x04 >; 122 interrupts = <0 91 0x04>;
123 status = "disable";
155 }; 124 };
156 125
157 emc@7000f400 { 126 i2c@7000c000 {
127 compatible = "nvidia,tegra20-i2c";
128 reg = <0x7000c000 0x100>;
129 interrupts = <0 38 0x04>;
158 #address-cells = <1>; 130 #address-cells = <1>;
159 #size-cells = <0>; 131 #size-cells = <0>;
160 compatible = "nvidia,tegra20-emc"; 132 status = "disable";
161 reg = <0x7000f400 0x200>;
162 }; 133 };
163 134
164 sdhci@c8000000 { 135 i2c@7000c400 {
165 compatible = "nvidia,tegra20-sdhci"; 136 compatible = "nvidia,tegra20-i2c";
166 reg = <0xc8000000 0x200>; 137 reg = <0x7000c400 0x100>;
167 interrupts = < 0 14 0x04 >; 138 interrupts = <0 84 0x04>;
139 #address-cells = <1>;
140 #size-cells = <0>;
141 status = "disable";
168 }; 142 };
169 143
170 sdhci@c8000200 { 144 i2c@7000c500 {
171 compatible = "nvidia,tegra20-sdhci"; 145 compatible = "nvidia,tegra20-i2c";
172 reg = <0xc8000200 0x200>; 146 reg = <0x7000c500 0x100>;
173 interrupts = < 0 15 0x04 >; 147 interrupts = <0 92 0x04>;
148 #address-cells = <1>;
149 #size-cells = <0>;
150 status = "disable";
174 }; 151 };
175 152
176 sdhci@c8000400 { 153 i2c@7000d000 {
177 compatible = "nvidia,tegra20-sdhci"; 154 compatible = "nvidia,tegra20-i2c-dvc";
178 reg = <0xc8000400 0x200>; 155 reg = <0x7000d000 0x200>;
179 interrupts = < 0 19 0x04 >; 156 interrupts = <0 53 0x04>;
157 #address-cells = <1>;
158 #size-cells = <0>;
159 status = "disable";
180 }; 160 };
181 161
182 sdhci@c8000600 { 162 pmc {
183 compatible = "nvidia,tegra20-sdhci"; 163 compatible = "nvidia,tegra20-pmc";
184 reg = <0xc8000600 0x200>; 164 reg = <0x7000e400 0x400>;
185 interrupts = < 0 31 0x04 >; 165 };
166
167 mc {
168 compatible = "nvidia,tegra20-mc";
169 reg = <0x7000f000 0x024
170 0x7000f03c 0x3c4>;
171 interrupts = <0 77 0x04>;
172 };
173
174 gart {
175 compatible = "nvidia,tegra20-gart";
176 reg = <0x7000f024 0x00000018 /* controller registers */
177 0x58000000 0x02000000>; /* GART aperture */
178 };
179
180 emc {
181 compatible = "nvidia,tegra20-emc";
182 reg = <0x7000f400 0x200>;
183 #address-cells = <1>;
184 #size-cells = <0>;
186 }; 185 };
187 186
188 usb@c5000000 { 187 usb@c5000000 {
189 compatible = "nvidia,tegra20-ehci", "usb-ehci"; 188 compatible = "nvidia,tegra20-ehci", "usb-ehci";
190 reg = <0xc5000000 0x4000>; 189 reg = <0xc5000000 0x4000>;
191 interrupts = < 0 20 0x04 >; 190 interrupts = <0 20 0x04>;
192 phy_type = "utmi"; 191 phy_type = "utmi";
193 nvidia,has-legacy-mode; 192 nvidia,has-legacy-mode;
193 status = "disable";
194 }; 194 };
195 195
196 usb@c5004000 { 196 usb@c5004000 {
197 compatible = "nvidia,tegra20-ehci", "usb-ehci"; 197 compatible = "nvidia,tegra20-ehci", "usb-ehci";
198 reg = <0xc5004000 0x4000>; 198 reg = <0xc5004000 0x4000>;
199 interrupts = < 0 21 0x04 >; 199 interrupts = <0 21 0x04>;
200 phy_type = "ulpi"; 200 phy_type = "ulpi";
201 status = "disable";
201 }; 202 };
202 203
203 usb@c5008000 { 204 usb@c5008000 {
204 compatible = "nvidia,tegra20-ehci", "usb-ehci"; 205 compatible = "nvidia,tegra20-ehci", "usb-ehci";
205 reg = <0xc5008000 0x4000>; 206 reg = <0xc5008000 0x4000>;
206 interrupts = < 0 97 0x04 >; 207 interrupts = <0 97 0x04>;
207 phy_type = "utmi"; 208 phy_type = "utmi";
209 status = "disable";
210 };
211
212 sdhci@c8000000 {
213 compatible = "nvidia,tegra20-sdhci";
214 reg = <0xc8000000 0x200>;
215 interrupts = <0 14 0x04>;
216 status = "disable";
208 }; 217 };
209};
210 218
219 sdhci@c8000200 {
220 compatible = "nvidia,tegra20-sdhci";
221 reg = <0xc8000200 0x200>;
222 interrupts = <0 15 0x04>;
223 status = "disable";
224 };
225
226 sdhci@c8000400 {
227 compatible = "nvidia,tegra20-sdhci";
228 reg = <0xc8000400 0x200>;
229 interrupts = <0 19 0x04>;
230 status = "disable";
231 };
232
233 sdhci@c8000600 {
234 compatible = "nvidia,tegra20-sdhci";
235 reg = <0xc8000600 0x200>;
236 interrupts = <0 31 0x04>;
237 status = "disable";
238 };
239
240 pmu {
241 compatible = "arm,cortex-a9-pmu";
242 interrupts = <0 56 0x04
243 0 57 0x04>;
244 };
245};
diff --git a/arch/arm/boot/dts/tegra30.dtsi b/arch/arm/boot/dts/tegra30.dtsi
index 62a7b39f1c9a..2dcc09e784b5 100644
--- a/arch/arm/boot/dts/tegra30.dtsi
+++ b/arch/arm/boot/dts/tegra30.dtsi
@@ -4,183 +4,268 @@
4 compatible = "nvidia,tegra30"; 4 compatible = "nvidia,tegra30";
5 interrupt-parent = <&intc>; 5 interrupt-parent = <&intc>;
6 6
7 pmc@7000f400 { 7 intc: interrupt-controller {
8 compatible = "nvidia,tegra20-pmc", "nvidia,tegra30-pmc";
9 reg = <0x7000e400 0x400>;
10 };
11
12 intc: interrupt-controller@50041000 {
13 compatible = "arm,cortex-a9-gic"; 8 compatible = "arm,cortex-a9-gic";
9 reg = <0x50041000 0x1000
10 0x50040100 0x0100>;
14 interrupt-controller; 11 interrupt-controller;
15 #interrupt-cells = <3>; 12 #interrupt-cells = <3>;
16 reg = < 0x50041000 0x1000 >,
17 < 0x50040100 0x0100 >;
18 }; 13 };
19 14
20 pmu { 15 apbdma: dma {
21 compatible = "arm,cortex-a9-pmu";
22 interrupts = <0 144 0x04
23 0 145 0x04
24 0 146 0x04
25 0 147 0x04>;
26 };
27
28 apbdma: dma@6000a000 {
29 compatible = "nvidia,tegra30-apbdma", "nvidia,tegra20-apbdma"; 16 compatible = "nvidia,tegra30-apbdma", "nvidia,tegra20-apbdma";
30 reg = <0x6000a000 0x1400>; 17 reg = <0x6000a000 0x1400>;
31 interrupts = < 0 104 0x04 18 interrupts = <0 104 0x04
32 0 105 0x04 19 0 105 0x04
33 0 106 0x04 20 0 106 0x04
34 0 107 0x04 21 0 107 0x04
35 0 108 0x04 22 0 108 0x04
36 0 109 0x04 23 0 109 0x04
37 0 110 0x04 24 0 110 0x04
38 0 111 0x04 25 0 111 0x04
39 0 112 0x04 26 0 112 0x04
40 0 113 0x04 27 0 113 0x04
41 0 114 0x04 28 0 114 0x04
42 0 115 0x04 29 0 115 0x04
43 0 116 0x04 30 0 116 0x04
44 0 117 0x04 31 0 117 0x04
45 0 118 0x04 32 0 118 0x04
46 0 119 0x04 33 0 119 0x04
47 0 128 0x04 34 0 128 0x04
48 0 129 0x04 35 0 129 0x04
49 0 130 0x04 36 0 130 0x04
50 0 131 0x04 37 0 131 0x04
51 0 132 0x04 38 0 132 0x04
52 0 133 0x04 39 0 133 0x04
53 0 134 0x04 40 0 134 0x04
54 0 135 0x04 41 0 135 0x04
55 0 136 0x04 42 0 136 0x04
56 0 137 0x04 43 0 137 0x04
57 0 138 0x04 44 0 138 0x04
58 0 139 0x04 45 0 139 0x04
59 0 140 0x04 46 0 140 0x04
60 0 141 0x04 47 0 141 0x04
61 0 142 0x04 48 0 142 0x04
62 0 143 0x04 >; 49 0 143 0x04>;
63 };
64
65 i2c@7000c000 {
66 #address-cells = <1>;
67 #size-cells = <0>;
68 compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
69 reg = <0x7000C000 0x100>;
70 interrupts = < 0 38 0x04 >;
71 };
72
73 i2c@7000c400 {
74 #address-cells = <1>;
75 #size-cells = <0>;
76 compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
77 reg = <0x7000C400 0x100>;
78 interrupts = < 0 84 0x04 >;
79 };
80
81 i2c@7000c500 {
82 #address-cells = <1>;
83 #size-cells = <0>;
84 compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
85 reg = <0x7000C500 0x100>;
86 interrupts = < 0 92 0x04 >;
87 }; 50 };
88 51
89 i2c@7000c700 { 52 ahb: ahb {
90 #address-cells = <1>; 53 compatible = "nvidia,tegra30-ahb";
91 #size-cells = <0>; 54 reg = <0x6000c004 0x14c>; /* AHB Arbitration + Gizmo Controller */
92 compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
93 reg = <0x7000c700 0x100>;
94 interrupts = < 0 120 0x04 >;
95 }; 55 };
96 56
97 i2c@7000d000 { 57 gpio: gpio {
98 #address-cells = <1>;
99 #size-cells = <0>;
100 compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
101 reg = <0x7000D000 0x100>;
102 interrupts = < 0 53 0x04 >;
103 };
104
105 gpio: gpio@6000d000 {
106 compatible = "nvidia,tegra30-gpio", "nvidia,tegra20-gpio"; 58 compatible = "nvidia,tegra30-gpio", "nvidia,tegra20-gpio";
107 reg = < 0x6000d000 0x1000 >; 59 reg = <0x6000d000 0x1000>;
108 interrupts = < 0 32 0x04 60 interrupts = <0 32 0x04
109 0 33 0x04 61 0 33 0x04
110 0 34 0x04 62 0 34 0x04
111 0 35 0x04 63 0 35 0x04
112 0 55 0x04 64 0 55 0x04
113 0 87 0x04 65 0 87 0x04
114 0 89 0x04 66 0 89 0x04
115 0 125 0x04 >; 67 0 125 0x04>;
116 #gpio-cells = <2>; 68 #gpio-cells = <2>;
117 gpio-controller; 69 gpio-controller;
118 #interrupt-cells = <2>; 70 #interrupt-cells = <2>;
119 interrupt-controller; 71 interrupt-controller;
120 }; 72 };
121 73
74 pinmux: pinmux {
75 compatible = "nvidia,tegra30-pinmux";
76 reg = <0x70000868 0xd0 /* Pad control registers */
77 0x70003000 0x3e0>; /* Mux registers */
78 };
79
122 serial@70006000 { 80 serial@70006000 {
123 compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart"; 81 compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
124 reg = <0x70006000 0x40>; 82 reg = <0x70006000 0x40>;
125 reg-shift = <2>; 83 reg-shift = <2>;
126 interrupts = < 0 36 0x04 >; 84 interrupts = <0 36 0x04>;
85 status = "disable";
127 }; 86 };
128 87
129 serial@70006040 { 88 serial@70006040 {
130 compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart"; 89 compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
131 reg = <0x70006040 0x40>; 90 reg = <0x70006040 0x40>;
132 reg-shift = <2>; 91 reg-shift = <2>;
133 interrupts = < 0 37 0x04 >; 92 interrupts = <0 37 0x04>;
93 status = "disable";
134 }; 94 };
135 95
136 serial@70006200 { 96 serial@70006200 {
137 compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart"; 97 compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
138 reg = <0x70006200 0x100>; 98 reg = <0x70006200 0x100>;
139 reg-shift = <2>; 99 reg-shift = <2>;
140 interrupts = < 0 46 0x04 >; 100 interrupts = <0 46 0x04>;
101 status = "disable";
141 }; 102 };
142 103
143 serial@70006300 { 104 serial@70006300 {
144 compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart"; 105 compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
145 reg = <0x70006300 0x100>; 106 reg = <0x70006300 0x100>;
146 reg-shift = <2>; 107 reg-shift = <2>;
147 interrupts = < 0 90 0x04 >; 108 interrupts = <0 90 0x04>;
109 status = "disable";
148 }; 110 };
149 111
150 serial@70006400 { 112 serial@70006400 {
151 compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart"; 113 compatible = "nvidia,tegra30-uart", "nvidia,tegra20-uart";
152 reg = <0x70006400 0x100>; 114 reg = <0x70006400 0x100>;
153 reg-shift = <2>; 115 reg-shift = <2>;
154 interrupts = < 0 91 0x04 >; 116 interrupts = <0 91 0x04>;
117 status = "disable";
118 };
119
120 i2c@7000c000 {
121 compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
122 reg = <0x7000c000 0x100>;
123 interrupts = <0 38 0x04>;
124 #address-cells = <1>;
125 #size-cells = <0>;
126 status = "disable";
127 };
128
129 i2c@7000c400 {
130 compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
131 reg = <0x7000c400 0x100>;
132 interrupts = <0 84 0x04>;
133 #address-cells = <1>;
134 #size-cells = <0>;
135 status = "disable";
136 };
137
138 i2c@7000c500 {
139 compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
140 reg = <0x7000c500 0x100>;
141 interrupts = <0 92 0x04>;
142 #address-cells = <1>;
143 #size-cells = <0>;
144 status = "disable";
145 };
146
147 i2c@7000c700 {
148 compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
149 reg = <0x7000c700 0x100>;
150 interrupts = <0 120 0x04>;
151 #address-cells = <1>;
152 #size-cells = <0>;
153 status = "disable";
154 };
155
156 i2c@7000d000 {
157 compatible = "nvidia,tegra30-i2c", "nvidia,tegra20-i2c";
158 reg = <0x7000d000 0x100>;
159 interrupts = <0 53 0x04>;
160 #address-cells = <1>;
161 #size-cells = <0>;
162 status = "disable";
163 };
164
165 pmc {
166 compatible = "nvidia,tegra20-pmc", "nvidia,tegra30-pmc";
167 reg = <0x7000e400 0x400>;
168 };
169
170 mc {
171 compatible = "nvidia,tegra30-mc";
172 reg = <0x7000f000 0x010
173 0x7000f03c 0x1b4
174 0x7000f200 0x028
175 0x7000f284 0x17c>;
176 interrupts = <0 77 0x04>;
177 };
178
179 smmu {
180 compatible = "nvidia,tegra30-smmu";
181 reg = <0x7000f010 0x02c
182 0x7000f1f0 0x010
183 0x7000f228 0x05c>;
184 nvidia,#asids = <4>; /* # of ASIDs */
185 dma-window = <0 0x40000000>; /* IOVA start & length */
186 nvidia,ahb = <&ahb>;
187 };
188
189 ahub {
190 compatible = "nvidia,tegra30-ahub";
191 reg = <0x70080000 0x200
192 0x70080200 0x100>;
193 interrupts = <0 103 0x04>;
194 nvidia,dma-request-selector = <&apbdma 1>;
195
196 ranges;
197 #address-cells = <1>;
198 #size-cells = <1>;
199
200 tegra_i2s0: i2s@70080300 {
201 compatible = "nvidia,tegra30-i2s";
202 reg = <0x70080300 0x100>;
203 nvidia,ahub-cif-ids = <4 4>;
204 status = "disable";
205 };
206
207 tegra_i2s1: i2s@70080400 {
208 compatible = "nvidia,tegra30-i2s";
209 reg = <0x70080400 0x100>;
210 nvidia,ahub-cif-ids = <5 5>;
211 status = "disable";
212 };
213
214 tegra_i2s2: i2s@70080500 {
215 compatible = "nvidia,tegra30-i2s";
216 reg = <0x70080500 0x100>;
217 nvidia,ahub-cif-ids = <6 6>;
218 status = "disable";
219 };
220
221 tegra_i2s3: i2s@70080600 {
222 compatible = "nvidia,tegra30-i2s";
223 reg = <0x70080600 0x100>;
224 nvidia,ahub-cif-ids = <7 7>;
225 status = "disable";
226 };
227
228 tegra_i2s4: i2s@70080700 {
229 compatible = "nvidia,tegra30-i2s";
230 reg = <0x70080700 0x100>;
231 nvidia,ahub-cif-ids = <8 8>;
232 status = "disable";
233 };
155 }; 234 };
156 235
157 sdhci@78000000 { 236 sdhci@78000000 {
158 compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci"; 237 compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
159 reg = <0x78000000 0x200>; 238 reg = <0x78000000 0x200>;
160 interrupts = < 0 14 0x04 >; 239 interrupts = <0 14 0x04>;
240 status = "disable";
161 }; 241 };
162 242
163 sdhci@78000200 { 243 sdhci@78000200 {
164 compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci"; 244 compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
165 reg = <0x78000200 0x200>; 245 reg = <0x78000200 0x200>;
166 interrupts = < 0 15 0x04 >; 246 interrupts = <0 15 0x04>;
247 status = "disable";
167 }; 248 };
168 249
169 sdhci@78000400 { 250 sdhci@78000400 {
170 compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci"; 251 compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
171 reg = <0x78000400 0x200>; 252 reg = <0x78000400 0x200>;
172 interrupts = < 0 19 0x04 >; 253 interrupts = <0 19 0x04>;
254 status = "disable";
173 }; 255 };
174 256
175 sdhci@78000600 { 257 sdhci@78000600 {
176 compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci"; 258 compatible = "nvidia,tegra30-sdhci", "nvidia,tegra20-sdhci";
177 reg = <0x78000600 0x200>; 259 reg = <0x78000600 0x200>;
178 interrupts = < 0 31 0x04 >; 260 interrupts = <0 31 0x04>;
261 status = "disable";
179 }; 262 };
180 263
181 pinmux: pinmux@70000000 { 264 pmu {
182 compatible = "nvidia,tegra30-pinmux"; 265 compatible = "arm,cortex-a9-pmu";
183 reg = < 0x70000868 0xd0 /* Pad control registers */ 266 interrupts = <0 144 0x04
184 0x70003000 0x3e0 >; /* Mux registers */ 267 0 145 0x04
268 0 146 0x04
269 0 147 0x04>;
185 }; 270 };
186}; 271};
diff --git a/arch/arm/common/dmabounce.c b/arch/arm/common/dmabounce.c
index 595ecd290ebf..9d7eb530f95f 100644
--- a/arch/arm/common/dmabounce.c
+++ b/arch/arm/common/dmabounce.c
@@ -173,7 +173,8 @@ find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_
173 read_lock_irqsave(&device_info->lock, flags); 173 read_lock_irqsave(&device_info->lock, flags);
174 174
175 list_for_each_entry(b, &device_info->safe_buffers, node) 175 list_for_each_entry(b, &device_info->safe_buffers, node)
176 if (b->safe_dma_addr == safe_dma_addr) { 176 if (b->safe_dma_addr <= safe_dma_addr &&
177 b->safe_dma_addr + b->size > safe_dma_addr) {
177 rb = b; 178 rb = b;
178 break; 179 break;
179 } 180 }
@@ -254,7 +255,7 @@ static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
254 if (buf == NULL) { 255 if (buf == NULL) {
255 dev_err(dev, "%s: unable to map unsafe buffer %p!\n", 256 dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
256 __func__, ptr); 257 __func__, ptr);
257 return ~0; 258 return DMA_ERROR_CODE;
258 } 259 }
259 260
260 dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", 261 dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -307,8 +308,9 @@ static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
307 * substitute the safe buffer for the unsafe one. 308 * substitute the safe buffer for the unsafe one.
308 * (basically move the buffer from an unsafe area to a safe one) 309 * (basically move the buffer from an unsafe area to a safe one)
309 */ 310 */
310dma_addr_t __dma_map_page(struct device *dev, struct page *page, 311static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
311 unsigned long offset, size_t size, enum dma_data_direction dir) 312 unsigned long offset, size_t size, enum dma_data_direction dir,
313 struct dma_attrs *attrs)
312{ 314{
313 dma_addr_t dma_addr; 315 dma_addr_t dma_addr;
314 int ret; 316 int ret;
@@ -320,21 +322,20 @@ dma_addr_t __dma_map_page(struct device *dev, struct page *page,
320 322
321 ret = needs_bounce(dev, dma_addr, size); 323 ret = needs_bounce(dev, dma_addr, size);
322 if (ret < 0) 324 if (ret < 0)
323 return ~0; 325 return DMA_ERROR_CODE;
324 326
325 if (ret == 0) { 327 if (ret == 0) {
326 __dma_page_cpu_to_dev(page, offset, size, dir); 328 arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
327 return dma_addr; 329 return dma_addr;
328 } 330 }
329 331
330 if (PageHighMem(page)) { 332 if (PageHighMem(page)) {
331 dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n"); 333 dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
332 return ~0; 334 return DMA_ERROR_CODE;
333 } 335 }
334 336
335 return map_single(dev, page_address(page) + offset, size, dir); 337 return map_single(dev, page_address(page) + offset, size, dir);
336} 338}
337EXPORT_SYMBOL(__dma_map_page);
338 339
339/* 340/*
340 * see if a mapped address was really a "safe" buffer and if so, copy 341 * see if a mapped address was really a "safe" buffer and if so, copy
@@ -342,8 +343,8 @@ EXPORT_SYMBOL(__dma_map_page);
342 * the safe buffer. (basically return things back to the way they 343 * the safe buffer. (basically return things back to the way they
343 * should be) 344 * should be)
344 */ 345 */
345void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size, 346static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
346 enum dma_data_direction dir) 347 enum dma_data_direction dir, struct dma_attrs *attrs)
347{ 348{
348 struct safe_buffer *buf; 349 struct safe_buffer *buf;
349 350
@@ -352,19 +353,18 @@ void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
352 353
353 buf = find_safe_buffer_dev(dev, dma_addr, __func__); 354 buf = find_safe_buffer_dev(dev, dma_addr, __func__);
354 if (!buf) { 355 if (!buf) {
355 __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, dma_addr)), 356 arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
356 dma_addr & ~PAGE_MASK, size, dir);
357 return; 357 return;
358 } 358 }
359 359
360 unmap_single(dev, buf, size, dir); 360 unmap_single(dev, buf, size, dir);
361} 361}
362EXPORT_SYMBOL(__dma_unmap_page);
363 362
364int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr, 363static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
365 unsigned long off, size_t sz, enum dma_data_direction dir) 364 size_t sz, enum dma_data_direction dir)
366{ 365{
367 struct safe_buffer *buf; 366 struct safe_buffer *buf;
367 unsigned long off;
368 368
369 dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n", 369 dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
370 __func__, addr, off, sz, dir); 370 __func__, addr, off, sz, dir);
@@ -373,6 +373,8 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
373 if (!buf) 373 if (!buf)
374 return 1; 374 return 1;
375 375
376 off = addr - buf->safe_dma_addr;
377
376 BUG_ON(buf->direction != dir); 378 BUG_ON(buf->direction != dir);
377 379
378 dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", 380 dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -388,12 +390,21 @@ int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
388 } 390 }
389 return 0; 391 return 0;
390} 392}
391EXPORT_SYMBOL(dmabounce_sync_for_cpu);
392 393
393int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr, 394static void dmabounce_sync_for_cpu(struct device *dev,
394 unsigned long off, size_t sz, enum dma_data_direction dir) 395 dma_addr_t handle, size_t size, enum dma_data_direction dir)
396{
397 if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
398 return;
399
400 arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
401}
402
403static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
404 size_t sz, enum dma_data_direction dir)
395{ 405{
396 struct safe_buffer *buf; 406 struct safe_buffer *buf;
407 unsigned long off;
397 408
398 dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n", 409 dev_dbg(dev, "%s(dma=%#x,off=%#lx,sz=%zx,dir=%x)\n",
399 __func__, addr, off, sz, dir); 410 __func__, addr, off, sz, dir);
@@ -402,6 +413,8 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
402 if (!buf) 413 if (!buf)
403 return 1; 414 return 1;
404 415
416 off = addr - buf->safe_dma_addr;
417
405 BUG_ON(buf->direction != dir); 418 BUG_ON(buf->direction != dir);
406 419
407 dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n", 420 dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
@@ -417,7 +430,38 @@ int dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
417 } 430 }
418 return 0; 431 return 0;
419} 432}
420EXPORT_SYMBOL(dmabounce_sync_for_device); 433
434static void dmabounce_sync_for_device(struct device *dev,
435 dma_addr_t handle, size_t size, enum dma_data_direction dir)
436{
437 if (!__dmabounce_sync_for_device(dev, handle, size, dir))
438 return;
439
440 arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
441}
442
443static int dmabounce_set_mask(struct device *dev, u64 dma_mask)
444{
445 if (dev->archdata.dmabounce)
446 return 0;
447
448 return arm_dma_ops.set_dma_mask(dev, dma_mask);
449}
450
451static struct dma_map_ops dmabounce_ops = {
452 .alloc = arm_dma_alloc,
453 .free = arm_dma_free,
454 .mmap = arm_dma_mmap,
455 .map_page = dmabounce_map_page,
456 .unmap_page = dmabounce_unmap_page,
457 .sync_single_for_cpu = dmabounce_sync_for_cpu,
458 .sync_single_for_device = dmabounce_sync_for_device,
459 .map_sg = arm_dma_map_sg,
460 .unmap_sg = arm_dma_unmap_sg,
461 .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
462 .sync_sg_for_device = arm_dma_sync_sg_for_device,
463 .set_dma_mask = dmabounce_set_mask,
464};
421 465
422static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev, 466static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
423 const char *name, unsigned long size) 467 const char *name, unsigned long size)
@@ -479,6 +523,7 @@ int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
479#endif 523#endif
480 524
481 dev->archdata.dmabounce = device_info; 525 dev->archdata.dmabounce = device_info;
526 set_dma_ops(dev, &dmabounce_ops);
482 527
483 dev_info(dev, "dmabounce: registered device\n"); 528 dev_info(dev, "dmabounce: registered device\n");
484 529
@@ -497,6 +542,7 @@ void dmabounce_unregister_dev(struct device *dev)
497 struct dmabounce_device_info *device_info = dev->archdata.dmabounce; 542 struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
498 543
499 dev->archdata.dmabounce = NULL; 544 dev->archdata.dmabounce = NULL;
545 set_dma_ops(dev, NULL);
500 546
501 if (!device_info) { 547 if (!device_info) {
502 dev_warn(dev, 548 dev_warn(dev,
diff --git a/arch/arm/configs/imx_v4_v5_defconfig b/arch/arm/configs/imx_v4_v5_defconfig
index 09a02963cf58..e05a2f1665a7 100644
--- a/arch/arm/configs/imx_v4_v5_defconfig
+++ b/arch/arm/configs/imx_v4_v5_defconfig
@@ -33,6 +33,7 @@ CONFIG_MACH_IMX27LITE=y
33CONFIG_MACH_PCA100=y 33CONFIG_MACH_PCA100=y
34CONFIG_MACH_MXT_TD60=y 34CONFIG_MACH_MXT_TD60=y
35CONFIG_MACH_IMX27IPCAM=y 35CONFIG_MACH_IMX27IPCAM=y
36CONFIG_MACH_IMX27_DT=y
36CONFIG_MXC_IRQ_PRIOR=y 37CONFIG_MXC_IRQ_PRIOR=y
37CONFIG_MXC_PWM=y 38CONFIG_MXC_PWM=y
38CONFIG_NO_HZ=y 39CONFIG_NO_HZ=y
@@ -172,7 +173,7 @@ CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
172CONFIG_RTC_CLASS=y 173CONFIG_RTC_CLASS=y
173CONFIG_RTC_DRV_PCF8563=y 174CONFIG_RTC_DRV_PCF8563=y
174CONFIG_RTC_DRV_IMXDI=y 175CONFIG_RTC_DRV_IMXDI=y
175CONFIG_RTC_MXC=y 176CONFIG_RTC_DRV_MXC=y
176CONFIG_DMADEVICES=y 177CONFIG_DMADEVICES=y
177CONFIG_IMX_SDMA=y 178CONFIG_IMX_SDMA=y
178CONFIG_IMX_DMA=y 179CONFIG_IMX_DMA=y
diff --git a/arch/arm/configs/imx_v6_v7_defconfig b/arch/arm/configs/imx_v6_v7_defconfig
index dc6f6411bbf5..b1d3675df72c 100644
--- a/arch/arm/configs/imx_v6_v7_defconfig
+++ b/arch/arm/configs/imx_v6_v7_defconfig
@@ -64,6 +64,12 @@ CONFIG_IPV6=y
64# CONFIG_WIRELESS is not set 64# CONFIG_WIRELESS is not set
65CONFIG_DEVTMPFS=y 65CONFIG_DEVTMPFS=y
66CONFIG_DEVTMPFS_MOUNT=y 66CONFIG_DEVTMPFS_MOUNT=y
67CONFIG_MTD=y
68CONFIG_MTD_OF_PARTS=y
69CONFIG_MTD_CHAR=y
70CONFIG_MTD_DATAFLASH=y
71CONFIG_MTD_M25P80=y
72CONFIG_MTD_SST25L=y
67# CONFIG_STANDALONE is not set 73# CONFIG_STANDALONE is not set
68CONFIG_CONNECTOR=y 74CONFIG_CONNECTOR=y
69CONFIG_BLK_DEV_LOOP=y 75CONFIG_BLK_DEV_LOOP=y
@@ -172,7 +178,7 @@ CONFIG_NEW_LEDS=y
172CONFIG_LEDS_CLASS=y 178CONFIG_LEDS_CLASS=y
173CONFIG_RTC_CLASS=y 179CONFIG_RTC_CLASS=y
174CONFIG_RTC_INTF_DEV_UIE_EMUL=y 180CONFIG_RTC_INTF_DEV_UIE_EMUL=y
175CONFIG_RTC_MXC=y 181CONFIG_RTC_DRV_MXC=y
176CONFIG_DMADEVICES=y 182CONFIG_DMADEVICES=y
177CONFIG_IMX_SDMA=y 183CONFIG_IMX_SDMA=y
178CONFIG_EXT2_FS=y 184CONFIG_EXT2_FS=y
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index 1ebbf451c48d..5406c23a02e3 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -22,6 +22,7 @@ CONFIG_BLK_DEV_INTEGRITY=y
22# CONFIG_IOSCHED_DEADLINE is not set 22# CONFIG_IOSCHED_DEADLINE is not set
23# CONFIG_IOSCHED_CFQ is not set 23# CONFIG_IOSCHED_CFQ is not set
24CONFIG_ARCH_MXS=y 24CONFIG_ARCH_MXS=y
25CONFIG_MACH_MXS_DT=y
25CONFIG_MACH_MX23EVK=y 26CONFIG_MACH_MX23EVK=y
26CONFIG_MACH_MX28EVK=y 27CONFIG_MACH_MX28EVK=y
27CONFIG_MACH_STMP378X_DEVB=y 28CONFIG_MACH_STMP378X_DEVB=y
diff --git a/arch/arm/configs/prima2_defconfig b/arch/arm/configs/prima2_defconfig
new file mode 100644
index 000000000000..c328ac65479a
--- /dev/null
+++ b/arch/arm/configs/prima2_defconfig
@@ -0,0 +1,69 @@
1CONFIG_EXPERIMENTAL=y
2CONFIG_RELAY=y
3CONFIG_BLK_DEV_INITRD=y
4CONFIG_KALLSYMS_ALL=y
5CONFIG_MODULES=y
6CONFIG_MODULE_UNLOAD=y
7# CONFIG_BLK_DEV_BSG is not set
8CONFIG_PARTITION_ADVANCED=y
9CONFIG_BSD_DISKLABEL=y
10CONFIG_SOLARIS_X86_PARTITION=y
11CONFIG_ARCH_PRIMA2=y
12CONFIG_NO_HZ=y
13CONFIG_HIGH_RES_TIMERS=y
14CONFIG_PREEMPT=y
15CONFIG_AEABI=y
16CONFIG_KEXEC=y
17CONFIG_BINFMT_MISC=y
18CONFIG_PM_RUNTIME=y
19CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
20CONFIG_BLK_DEV_LOOP=y
21CONFIG_BLK_DEV_RAM=y
22CONFIG_BLK_DEV_RAM_SIZE=8192
23CONFIG_SCSI=y
24CONFIG_BLK_DEV_SD=y
25CONFIG_CHR_DEV_SG=y
26CONFIG_INPUT_EVDEV=y
27# CONFIG_INPUT_MOUSE is not set
28CONFIG_INPUT_TOUCHSCREEN=y
29CONFIG_SERIAL_SIRFSOC=y
30CONFIG_SERIAL_SIRFSOC_CONSOLE=y
31CONFIG_HW_RANDOM=y
32CONFIG_I2C=y
33CONFIG_I2C_CHARDEV=y
34CONFIG_I2C_SIRF=y
35CONFIG_SPI=y
36CONFIG_SPI_SIRF=y
37CONFIG_SPI_SPIDEV=y
38# CONFIG_HWMON is not set
39# CONFIG_HID_SUPPORT is not set
40CONFIG_USB_GADGET=y
41CONFIG_USB_FILE_STORAGE=m
42CONFIG_USB_MASS_STORAGE=m
43CONFIG_MMC=y
44CONFIG_MMC_SDHCI=y
45CONFIG_MMC_SDHCI_PLTFM=y
46CONFIG_DMADEVICES=y
47CONFIG_DMADEVICES_DEBUG=y
48CONFIG_DMADEVICES_VDEBUG=y
49CONFIG_SIRF_DMA=y
50# CONFIG_IOMMU_SUPPORT is not set
51CONFIG_EXT2_FS=y
52CONFIG_MSDOS_FS=y
53CONFIG_VFAT_FS=y
54CONFIG_TMPFS=y
55CONFIG_TMPFS_POSIX_ACL=y
56CONFIG_CRAMFS=y
57CONFIG_ROMFS_FS=y
58CONFIG_NLS_CODEPAGE_437=y
59CONFIG_NLS_ASCII=y
60CONFIG_NLS_ISO8859_1=y
61CONFIG_MAGIC_SYSRQ=y
62CONFIG_DEBUG_SECTION_MISMATCH=y
63CONFIG_DEBUG_KERNEL=y
64# CONFIG_DEBUG_PREEMPT is not set
65CONFIG_DEBUG_RT_MUTEXES=y
66CONFIG_DEBUG_SPINLOCK=y
67CONFIG_DEBUG_MUTEXES=y
68CONFIG_DEBUG_INFO=y
69CONFIG_CRC_CCITT=y
diff --git a/arch/arm/configs/spear13xx_defconfig b/arch/arm/configs/spear13xx_defconfig
new file mode 100644
index 000000000000..1fdb82694ca2
--- /dev/null
+++ b/arch/arm/configs/spear13xx_defconfig
@@ -0,0 +1,95 @@
1CONFIG_EXPERIMENTAL=y
2CONFIG_SYSVIPC=y
3CONFIG_BSD_PROCESS_ACCT=y
4CONFIG_BLK_DEV_INITRD=y
5CONFIG_MODULES=y
6CONFIG_MODULE_UNLOAD=y
7CONFIG_MODVERSIONS=y
8CONFIG_PARTITION_ADVANCED=y
9CONFIG_PLAT_SPEAR=y
10CONFIG_ARCH_SPEAR13XX=y
11CONFIG_MACH_SPEAR1310=y
12CONFIG_MACH_SPEAR1340=y
13# CONFIG_SWP_EMULATE is not set
14CONFIG_SMP=y
15# CONFIG_SMP_ON_UP is not set
16# CONFIG_ARM_CPU_TOPOLOGY is not set
17CONFIG_ARM_APPENDED_DTB=y
18CONFIG_ARM_ATAG_DTB_COMPAT=y
19CONFIG_BINFMT_MISC=y
20CONFIG_NET=y
21CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
22CONFIG_MTD=y
23CONFIG_MTD_OF_PARTS=y
24CONFIG_MTD_CHAR=y
25CONFIG_MTD_BLOCK=y
26CONFIG_MTD_NAND=y
27CONFIG_MTD_NAND_FSMC=y
28CONFIG_BLK_DEV_RAM=y
29CONFIG_BLK_DEV_RAM_SIZE=16384
30CONFIG_ATA=y
31# CONFIG_SATA_PMP is not set
32CONFIG_SATA_AHCI_PLATFORM=y
33CONFIG_PATA_ARASAN_CF=y
34CONFIG_NETDEVICES=y
35# CONFIG_NET_VENDOR_BROADCOM is not set
36# CONFIG_NET_VENDOR_CIRRUS is not set
37# CONFIG_NET_VENDOR_FARADAY is not set
38# CONFIG_NET_VENDOR_INTEL is not set
39# CONFIG_NET_VENDOR_MICREL is not set
40# CONFIG_NET_VENDOR_NATSEMI is not set
41# CONFIG_NET_VENDOR_SEEQ is not set
42# CONFIG_NET_VENDOR_SMSC is not set
43CONFIG_STMMAC_ETH=y
44# CONFIG_WLAN is not set
45CONFIG_INPUT_FF_MEMLESS=y
46# CONFIG_INPUT_MOUSEDEV_PSAUX is not set
47# CONFIG_KEYBOARD_ATKBD is not set
48CONFIG_KEYBOARD_SPEAR=y
49# CONFIG_INPUT_MOUSE is not set
50# CONFIG_LEGACY_PTYS is not set
51CONFIG_SERIAL_AMBA_PL011=y
52CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
53# CONFIG_HW_RANDOM is not set
54CONFIG_RAW_DRIVER=y
55CONFIG_MAX_RAW_DEVS=8192
56CONFIG_I2C=y
57CONFIG_I2C_DESIGNWARE_PLATFORM=y
58CONFIG_SPI=y
59CONFIG_SPI_PL022=y
60CONFIG_GPIO_SYSFS=y
61CONFIG_GPIO_PL061=y
62# CONFIG_HWMON is not set
63CONFIG_WATCHDOG=y
64CONFIG_MPCORE_WATCHDOG=y
65# CONFIG_HID_SUPPORT is not set
66CONFIG_USB=y
67# CONFIG_USB_DEVICE_CLASS is not set
68CONFIG_USB_EHCI_HCD=y
69CONFIG_USB_OHCI_HCD=y
70CONFIG_MMC=y
71CONFIG_MMC_SDHCI=y
72CONFIG_MMC_SDHCI_SPEAR=y
73CONFIG_RTC_CLASS=y
74CONFIG_DMADEVICES=y
75CONFIG_DW_DMAC=y
76CONFIG_DMATEST=m
77CONFIG_EXT2_FS=y
78CONFIG_EXT2_FS_XATTR=y
79CONFIG_EXT2_FS_SECURITY=y
80CONFIG_EXT3_FS=y
81CONFIG_EXT3_FS_SECURITY=y
82CONFIG_AUTOFS4_FS=m
83CONFIG_MSDOS_FS=m
84CONFIG_VFAT_FS=m
85CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
86CONFIG_TMPFS=y
87CONFIG_JFFS2_FS=y
88CONFIG_NLS_DEFAULT="utf8"
89CONFIG_NLS_CODEPAGE_437=y
90CONFIG_NLS_ASCII=m
91CONFIG_MAGIC_SYSRQ=y
92CONFIG_DEBUG_FS=y
93CONFIG_DEBUG_KERNEL=y
94CONFIG_DEBUG_SPINLOCK=y
95CONFIG_DEBUG_INFO=y
diff --git a/arch/arm/configs/spear3xx_defconfig b/arch/arm/configs/spear3xx_defconfig
index 7ed42912d69a..865980c5f212 100644
--- a/arch/arm/configs/spear3xx_defconfig
+++ b/arch/arm/configs/spear3xx_defconfig
@@ -14,6 +14,9 @@ CONFIG_BINFMT_MISC=y
14CONFIG_NET=y 14CONFIG_NET=y
15CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 15CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
16CONFIG_MTD=y 16CONFIG_MTD=y
17CONFIG_MTD_OF_PARTS=y
18CONFIG_MTD_CHAR=y
19CONFIG_MTD_BLOCK=y
17CONFIG_MTD_NAND=y 20CONFIG_MTD_NAND=y
18CONFIG_MTD_NAND_FSMC=y 21CONFIG_MTD_NAND_FSMC=y
19CONFIG_BLK_DEV_RAM=y 22CONFIG_BLK_DEV_RAM=y
@@ -73,6 +76,7 @@ CONFIG_MSDOS_FS=m
73CONFIG_VFAT_FS=m 76CONFIG_VFAT_FS=m
74CONFIG_FAT_DEFAULT_IOCHARSET="ascii" 77CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
75CONFIG_TMPFS=y 78CONFIG_TMPFS=y
79CONFIG_JFFS2_FS=y
76CONFIG_NLS_DEFAULT="utf8" 80CONFIG_NLS_DEFAULT="utf8"
77CONFIG_NLS_CODEPAGE_437=y 81CONFIG_NLS_CODEPAGE_437=y
78CONFIG_NLS_ASCII=m 82CONFIG_NLS_ASCII=m
diff --git a/arch/arm/configs/spear6xx_defconfig b/arch/arm/configs/spear6xx_defconfig
index cf94bc73a0e0..a2a1265f86b6 100644
--- a/arch/arm/configs/spear6xx_defconfig
+++ b/arch/arm/configs/spear6xx_defconfig
@@ -8,11 +8,13 @@ CONFIG_MODVERSIONS=y
8CONFIG_PARTITION_ADVANCED=y 8CONFIG_PARTITION_ADVANCED=y
9CONFIG_PLAT_SPEAR=y 9CONFIG_PLAT_SPEAR=y
10CONFIG_ARCH_SPEAR6XX=y 10CONFIG_ARCH_SPEAR6XX=y
11CONFIG_BOARD_SPEAR600_DT=y
12CONFIG_BINFMT_MISC=y 11CONFIG_BINFMT_MISC=y
13CONFIG_NET=y 12CONFIG_NET=y
14CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" 13CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
15CONFIG_MTD=y 14CONFIG_MTD=y
15CONFIG_MTD_OF_PARTS=y
16CONFIG_MTD_CHAR=y
17CONFIG_MTD_BLOCK=y
16CONFIG_MTD_NAND=y 18CONFIG_MTD_NAND=y
17CONFIG_MTD_NAND_FSMC=y 19CONFIG_MTD_NAND_FSMC=y
18CONFIG_BLK_DEV_RAM=y 20CONFIG_BLK_DEV_RAM=y
@@ -64,6 +66,7 @@ CONFIG_MSDOS_FS=m
64CONFIG_VFAT_FS=m 66CONFIG_VFAT_FS=m
65CONFIG_FAT_DEFAULT_IOCHARSET="ascii" 67CONFIG_FAT_DEFAULT_IOCHARSET="ascii"
66CONFIG_TMPFS=y 68CONFIG_TMPFS=y
69CONFIG_JFFS2_FS=y
67CONFIG_NLS_DEFAULT="utf8" 70CONFIG_NLS_DEFAULT="utf8"
68CONFIG_NLS_CODEPAGE_437=y 71CONFIG_NLS_CODEPAGE_437=y
69CONFIG_NLS_ASCII=m 72CONFIG_NLS_ASCII=m
diff --git a/arch/arm/configs/tegra_defconfig b/arch/arm/configs/tegra_defconfig
index 351d6708c3ae..1198dd61c7c4 100644
--- a/arch/arm/configs/tegra_defconfig
+++ b/arch/arm/configs/tegra_defconfig
@@ -45,6 +45,7 @@ CONFIG_CPU_FREQ=y
45CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y 45CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
46CONFIG_CPU_IDLE=y 46CONFIG_CPU_IDLE=y
47CONFIG_VFP=y 47CONFIG_VFP=y
48CONFIG_PM_RUNTIME=y
48CONFIG_NET=y 49CONFIG_NET=y
49CONFIG_PACKET=y 50CONFIG_PACKET=y
50CONFIG_UNIX=y 51CONFIG_UNIX=y
@@ -91,6 +92,8 @@ CONFIG_USB_NET_SMSC75XX=y
91CONFIG_USB_NET_SMSC95XX=y 92CONFIG_USB_NET_SMSC95XX=y
92# CONFIG_WLAN is not set 93# CONFIG_WLAN is not set
93CONFIG_INPUT_EVDEV=y 94CONFIG_INPUT_EVDEV=y
95CONFIG_INPUT_MISC=y
96CONFIG_INPUT_MPU3050=y
94# CONFIG_VT is not set 97# CONFIG_VT is not set
95# CONFIG_LEGACY_PTYS is not set 98# CONFIG_LEGACY_PTYS is not set
96# CONFIG_DEVKMEM is not set 99# CONFIG_DEVKMEM is not set
@@ -103,12 +106,15 @@ CONFIG_I2C=y
103CONFIG_I2C_TEGRA=y 106CONFIG_I2C_TEGRA=y
104CONFIG_SPI=y 107CONFIG_SPI=y
105CONFIG_SPI_TEGRA=y 108CONFIG_SPI_TEGRA=y
109CONFIG_POWER_SUPPLY=y
110CONFIG_BATTERY_SBS=y
106CONFIG_SENSORS_LM90=y 111CONFIG_SENSORS_LM90=y
107CONFIG_MFD_TPS6586X=y 112CONFIG_MFD_TPS6586X=y
108CONFIG_REGULATOR=y 113CONFIG_REGULATOR=y
109CONFIG_REGULATOR_FIXED_VOLTAGE=y 114CONFIG_REGULATOR_FIXED_VOLTAGE=y
110CONFIG_REGULATOR_VIRTUAL_CONSUMER=y 115CONFIG_REGULATOR_VIRTUAL_CONSUMER=y
111CONFIG_REGULATOR_GPIO=y 116CONFIG_REGULATOR_GPIO=y
117CONFIG_REGULATOR_TPS62360=y
112CONFIG_REGULATOR_TPS6586X=y 118CONFIG_REGULATOR_TPS6586X=y
113CONFIG_SOUND=y 119CONFIG_SOUND=y
114CONFIG_SND=y 120CONFIG_SND=y
@@ -133,16 +139,19 @@ CONFIG_MMC_SDHCI=y
133CONFIG_MMC_SDHCI_PLTFM=y 139CONFIG_MMC_SDHCI_PLTFM=y
134CONFIG_MMC_SDHCI_TEGRA=y 140CONFIG_MMC_SDHCI_TEGRA=y
135CONFIG_RTC_CLASS=y 141CONFIG_RTC_CLASS=y
142CONFIG_RTC_DRV_EM3027=y
136CONFIG_RTC_DRV_TEGRA=y 143CONFIG_RTC_DRV_TEGRA=y
137CONFIG_STAGING=y 144CONFIG_STAGING=y
138CONFIG_IIO=y
139CONFIG_SENSORS_ISL29018=y 145CONFIG_SENSORS_ISL29018=y
146CONFIG_SENSORS_ISL29028=y
140CONFIG_SENSORS_AK8975=y 147CONFIG_SENSORS_AK8975=y
141CONFIG_MFD_NVEC=y 148CONFIG_MFD_NVEC=y
142CONFIG_KEYBOARD_NVEC=y 149CONFIG_KEYBOARD_NVEC=y
143CONFIG_SERIO_NVEC_PS2=y 150CONFIG_SERIO_NVEC_PS2=y
144CONFIG_TEGRA_IOMMU_GART=y 151CONFIG_TEGRA_IOMMU_GART=y
145CONFIG_TEGRA_IOMMU_SMMU=y 152CONFIG_TEGRA_IOMMU_SMMU=y
153CONFIG_MEMORY=y
154CONFIG_IIO=y
146CONFIG_EXT2_FS=y 155CONFIG_EXT2_FS=y
147CONFIG_EXT2_FS_XATTR=y 156CONFIG_EXT2_FS_XATTR=y
148CONFIG_EXT2_FS_POSIX_ACL=y 157CONFIG_EXT2_FS_POSIX_ACL=y
diff --git a/arch/arm/include/asm/device.h b/arch/arm/include/asm/device.h
index 7aa368003b05..b69c0d3285f8 100644
--- a/arch/arm/include/asm/device.h
+++ b/arch/arm/include/asm/device.h
@@ -7,12 +7,16 @@
7#define ASMARM_DEVICE_H 7#define ASMARM_DEVICE_H
8 8
9struct dev_archdata { 9struct dev_archdata {
10 struct dma_map_ops *dma_ops;
10#ifdef CONFIG_DMABOUNCE 11#ifdef CONFIG_DMABOUNCE
11 struct dmabounce_device_info *dmabounce; 12 struct dmabounce_device_info *dmabounce;
12#endif 13#endif
13#ifdef CONFIG_IOMMU_API 14#ifdef CONFIG_IOMMU_API
14 void *iommu; /* private IOMMU data */ 15 void *iommu; /* private IOMMU data */
15#endif 16#endif
17#ifdef CONFIG_ARM_DMA_USE_IOMMU
18 struct dma_iommu_mapping *mapping;
19#endif
16}; 20};
17 21
18struct omap_device; 22struct omap_device;
diff --git a/arch/arm/include/asm/dma-contiguous.h b/arch/arm/include/asm/dma-contiguous.h
new file mode 100644
index 000000000000..3ed37b4d93da
--- /dev/null
+++ b/arch/arm/include/asm/dma-contiguous.h
@@ -0,0 +1,15 @@
1#ifndef ASMARM_DMA_CONTIGUOUS_H
2#define ASMARM_DMA_CONTIGUOUS_H
3
4#ifdef __KERNEL__
5#ifdef CONFIG_CMA
6
7#include <linux/types.h>
8#include <asm-generic/dma-contiguous.h>
9
10void dma_contiguous_early_fixup(phys_addr_t base, unsigned long size);
11
12#endif
13#endif
14
15#endif
diff --git a/arch/arm/include/asm/dma-iommu.h b/arch/arm/include/asm/dma-iommu.h
new file mode 100644
index 000000000000..799b09409fad
--- /dev/null
+++ b/arch/arm/include/asm/dma-iommu.h
@@ -0,0 +1,34 @@
1#ifndef ASMARM_DMA_IOMMU_H
2#define ASMARM_DMA_IOMMU_H
3
4#ifdef __KERNEL__
5
6#include <linux/mm_types.h>
7#include <linux/scatterlist.h>
8#include <linux/dma-debug.h>
9#include <linux/kmemcheck.h>
10
11struct dma_iommu_mapping {
12 /* iommu specific data */
13 struct iommu_domain *domain;
14
15 void *bitmap;
16 size_t bits;
17 unsigned int order;
18 dma_addr_t base;
19
20 spinlock_t lock;
21 struct kref kref;
22};
23
24struct dma_iommu_mapping *
25arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
26 int order);
27
28void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
29
30int arm_iommu_attach_device(struct device *dev,
31 struct dma_iommu_mapping *mapping);
32
33#endif /* __KERNEL__ */
34#endif
diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h
index cb3b7c981c4b..bbef15d04890 100644
--- a/arch/arm/include/asm/dma-mapping.h
+++ b/arch/arm/include/asm/dma-mapping.h
@@ -5,11 +5,35 @@
5 5
6#include <linux/mm_types.h> 6#include <linux/mm_types.h>
7#include <linux/scatterlist.h> 7#include <linux/scatterlist.h>
8#include <linux/dma-attrs.h>
8#include <linux/dma-debug.h> 9#include <linux/dma-debug.h>
9 10
10#include <asm-generic/dma-coherent.h> 11#include <asm-generic/dma-coherent.h>
11#include <asm/memory.h> 12#include <asm/memory.h>
12 13
14#define DMA_ERROR_CODE (~0)
15extern struct dma_map_ops arm_dma_ops;
16
17static inline struct dma_map_ops *get_dma_ops(struct device *dev)
18{
19 if (dev && dev->archdata.dma_ops)
20 return dev->archdata.dma_ops;
21 return &arm_dma_ops;
22}
23
24static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
25{
26 BUG_ON(!dev);
27 dev->archdata.dma_ops = ops;
28}
29
30#include <asm-generic/dma-mapping-common.h>
31
32static inline int dma_set_mask(struct device *dev, u64 mask)
33{
34 return get_dma_ops(dev)->set_dma_mask(dev, mask);
35}
36
13#ifdef __arch_page_to_dma 37#ifdef __arch_page_to_dma
14#error Please update to __arch_pfn_to_dma 38#error Please update to __arch_pfn_to_dma
15#endif 39#endif
@@ -62,68 +86,11 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
62#endif 86#endif
63 87
64/* 88/*
65 * The DMA API is built upon the notion of "buffer ownership". A buffer
66 * is either exclusively owned by the CPU (and therefore may be accessed
67 * by it) or exclusively owned by the DMA device. These helper functions
68 * represent the transitions between these two ownership states.
69 *
70 * Note, however, that on later ARMs, this notion does not work due to
71 * speculative prefetches. We model our approach on the assumption that
72 * the CPU does do speculative prefetches, which means we clean caches
73 * before transfers and delay cache invalidation until transfer completion.
74 *
75 * Private support functions: these are not part of the API and are
76 * liable to change. Drivers must not use these.
77 */
78static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
79 enum dma_data_direction dir)
80{
81 extern void ___dma_single_cpu_to_dev(const void *, size_t,
82 enum dma_data_direction);
83
84 if (!arch_is_coherent())
85 ___dma_single_cpu_to_dev(kaddr, size, dir);
86}
87
88static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
89 enum dma_data_direction dir)
90{
91 extern void ___dma_single_dev_to_cpu(const void *, size_t,
92 enum dma_data_direction);
93
94 if (!arch_is_coherent())
95 ___dma_single_dev_to_cpu(kaddr, size, dir);
96}
97
98static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
99 size_t size, enum dma_data_direction dir)
100{
101 extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
102 size_t, enum dma_data_direction);
103
104 if (!arch_is_coherent())
105 ___dma_page_cpu_to_dev(page, off, size, dir);
106}
107
108static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
109 size_t size, enum dma_data_direction dir)
110{
111 extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
112 size_t, enum dma_data_direction);
113
114 if (!arch_is_coherent())
115 ___dma_page_dev_to_cpu(page, off, size, dir);
116}
117
118extern int dma_supported(struct device *, u64);
119extern int dma_set_mask(struct device *, u64);
120
121/*
122 * DMA errors are defined by all-bits-set in the DMA address. 89 * DMA errors are defined by all-bits-set in the DMA address.
123 */ 90 */
124static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) 91static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
125{ 92{
126 return dma_addr == ~0; 93 return dma_addr == DMA_ERROR_CODE;
127} 94}
128 95
129/* 96/*
@@ -141,69 +108,118 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
141{ 108{
142} 109}
143 110
111extern int dma_supported(struct device *dev, u64 mask);
112
144/** 113/**
145 * dma_alloc_coherent - allocate consistent memory for DMA 114 * arm_dma_alloc - allocate consistent memory for DMA
146 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 115 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
147 * @size: required memory size 116 * @size: required memory size
148 * @handle: bus-specific DMA address 117 * @handle: bus-specific DMA address
118 * @attrs: optinal attributes that specific mapping properties
149 * 119 *
150 * Allocate some uncached, unbuffered memory for a device for 120 * Allocate some memory for a device for performing DMA. This function
151 * performing DMA. This function allocates pages, and will 121 * allocates pages, and will return the CPU-viewed address, and sets @handle
152 * return the CPU-viewed address, and sets @handle to be the 122 * to be the device-viewed address.
153 * device-viewed address.
154 */ 123 */
155extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t); 124extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
125 gfp_t gfp, struct dma_attrs *attrs);
126
127#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)
128
129static inline void *dma_alloc_attrs(struct device *dev, size_t size,
130 dma_addr_t *dma_handle, gfp_t flag,
131 struct dma_attrs *attrs)
132{
133 struct dma_map_ops *ops = get_dma_ops(dev);
134 void *cpu_addr;
135 BUG_ON(!ops);
136
137 cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
138 debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
139 return cpu_addr;
140}
156 141
157/** 142/**
158 * dma_free_coherent - free memory allocated by dma_alloc_coherent 143 * arm_dma_free - free memory allocated by arm_dma_alloc
159 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 144 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
160 * @size: size of memory originally requested in dma_alloc_coherent 145 * @size: size of memory originally requested in dma_alloc_coherent
161 * @cpu_addr: CPU-view address returned from dma_alloc_coherent 146 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
162 * @handle: device-view address returned from dma_alloc_coherent 147 * @handle: device-view address returned from dma_alloc_coherent
148 * @attrs: optinal attributes that specific mapping properties
163 * 149 *
164 * Free (and unmap) a DMA buffer previously allocated by 150 * Free (and unmap) a DMA buffer previously allocated by
165 * dma_alloc_coherent(). 151 * arm_dma_alloc().
166 * 152 *
167 * References to memory and mappings associated with cpu_addr/handle 153 * References to memory and mappings associated with cpu_addr/handle
168 * during and after this call executing are illegal. 154 * during and after this call executing are illegal.
169 */ 155 */
170extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t); 156extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
157 dma_addr_t handle, struct dma_attrs *attrs);
158
159#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)
160
161static inline void dma_free_attrs(struct device *dev, size_t size,
162 void *cpu_addr, dma_addr_t dma_handle,
163 struct dma_attrs *attrs)
164{
165 struct dma_map_ops *ops = get_dma_ops(dev);
166 BUG_ON(!ops);
167
168 debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
169 ops->free(dev, size, cpu_addr, dma_handle, attrs);
170}
171 171
172/** 172/**
173 * dma_mmap_coherent - map a coherent DMA allocation into user space 173 * arm_dma_mmap - map a coherent DMA allocation into user space
174 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 174 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
175 * @vma: vm_area_struct describing requested user mapping 175 * @vma: vm_area_struct describing requested user mapping
176 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent 176 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
177 * @handle: device-view address returned from dma_alloc_coherent 177 * @handle: device-view address returned from dma_alloc_coherent
178 * @size: size of memory originally requested in dma_alloc_coherent 178 * @size: size of memory originally requested in dma_alloc_coherent
179 * @attrs: optinal attributes that specific mapping properties
179 * 180 *
180 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent 181 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
181 * into user space. The coherent DMA buffer must not be freed by the 182 * into user space. The coherent DMA buffer must not be freed by the
182 * driver until the user space mapping has been released. 183 * driver until the user space mapping has been released.
183 */ 184 */
184int dma_mmap_coherent(struct device *, struct vm_area_struct *, 185extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
185 void *, dma_addr_t, size_t); 186 void *cpu_addr, dma_addr_t dma_addr, size_t size,
187 struct dma_attrs *attrs);
186 188
189#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
187 190
188/** 191static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
189 * dma_alloc_writecombine - allocate writecombining memory for DMA 192 void *cpu_addr, dma_addr_t dma_addr,
190 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 193 size_t size, struct dma_attrs *attrs)
191 * @size: required memory size 194{
192 * @handle: bus-specific DMA address 195 struct dma_map_ops *ops = get_dma_ops(dev);
193 * 196 BUG_ON(!ops);
194 * Allocate some uncached, buffered memory for a device for 197 return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
195 * performing DMA. This function allocates pages, and will 198}
196 * return the CPU-viewed address, and sets @handle to be the 199
197 * device-viewed address. 200static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
198 */ 201 dma_addr_t *dma_handle, gfp_t flag)
199extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *, 202{
200 gfp_t); 203 DEFINE_DMA_ATTRS(attrs);
204 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
205 return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
206}
201 207
202#define dma_free_writecombine(dev,size,cpu_addr,handle) \ 208static inline void dma_free_writecombine(struct device *dev, size_t size,
203 dma_free_coherent(dev,size,cpu_addr,handle) 209 void *cpu_addr, dma_addr_t dma_handle)
210{
211 DEFINE_DMA_ATTRS(attrs);
212 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
213 return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
214}
204 215
205int dma_mmap_writecombine(struct device *, struct vm_area_struct *, 216static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
206 void *, dma_addr_t, size_t); 217 void *cpu_addr, dma_addr_t dma_addr, size_t size)
218{
219 DEFINE_DMA_ATTRS(attrs);
220 dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
221 return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
222}
207 223
208/* 224/*
209 * This can be called during boot to increase the size of the consistent 225 * This can be called during boot to increase the size of the consistent
@@ -212,8 +228,6 @@ int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
212 */ 228 */
213extern void __init init_consistent_dma_size(unsigned long size); 229extern void __init init_consistent_dma_size(unsigned long size);
214 230
215
216#ifdef CONFIG_DMABOUNCE
217/* 231/*
218 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic" 232 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
219 * and utilize bounce buffers as needed to work around limited DMA windows. 233 * and utilize bounce buffers as needed to work around limited DMA windows.
@@ -253,222 +267,19 @@ extern int dmabounce_register_dev(struct device *, unsigned long,
253 */ 267 */
254extern void dmabounce_unregister_dev(struct device *); 268extern void dmabounce_unregister_dev(struct device *);
255 269
256/*
257 * The DMA API, implemented by dmabounce.c. See below for descriptions.
258 */
259extern dma_addr_t __dma_map_page(struct device *, struct page *,
260 unsigned long, size_t, enum dma_data_direction);
261extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
262 enum dma_data_direction);
263
264/*
265 * Private functions
266 */
267int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
268 size_t, enum dma_data_direction);
269int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
270 size_t, enum dma_data_direction);
271#else
272static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
273 unsigned long offset, size_t size, enum dma_data_direction dir)
274{
275 return 1;
276}
277 270
278static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
279 unsigned long offset, size_t size, enum dma_data_direction dir)
280{
281 return 1;
282}
283
284
285static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
286 unsigned long offset, size_t size, enum dma_data_direction dir)
287{
288 __dma_page_cpu_to_dev(page, offset, size, dir);
289 return pfn_to_dma(dev, page_to_pfn(page)) + offset;
290}
291
292static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
293 size_t size, enum dma_data_direction dir)
294{
295 __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
296 handle & ~PAGE_MASK, size, dir);
297}
298#endif /* CONFIG_DMABOUNCE */
299
300/**
301 * dma_map_single - map a single buffer for streaming DMA
302 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
303 * @cpu_addr: CPU direct mapped address of buffer
304 * @size: size of buffer to map
305 * @dir: DMA transfer direction
306 *
307 * Ensure that any data held in the cache is appropriately discarded
308 * or written back.
309 *
310 * The device owns this memory once this call has completed. The CPU
311 * can regain ownership by calling dma_unmap_single() or
312 * dma_sync_single_for_cpu().
313 */
314static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
315 size_t size, enum dma_data_direction dir)
316{
317 unsigned long offset;
318 struct page *page;
319 dma_addr_t addr;
320
321 BUG_ON(!virt_addr_valid(cpu_addr));
322 BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
323 BUG_ON(!valid_dma_direction(dir));
324
325 page = virt_to_page(cpu_addr);
326 offset = (unsigned long)cpu_addr & ~PAGE_MASK;
327 addr = __dma_map_page(dev, page, offset, size, dir);
328 debug_dma_map_page(dev, page, offset, size, dir, addr, true);
329
330 return addr;
331}
332
333/**
334 * dma_map_page - map a portion of a page for streaming DMA
335 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
336 * @page: page that buffer resides in
337 * @offset: offset into page for start of buffer
338 * @size: size of buffer to map
339 * @dir: DMA transfer direction
340 *
341 * Ensure that any data held in the cache is appropriately discarded
342 * or written back.
343 *
344 * The device owns this memory once this call has completed. The CPU
345 * can regain ownership by calling dma_unmap_page().
346 */
347static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
348 unsigned long offset, size_t size, enum dma_data_direction dir)
349{
350 dma_addr_t addr;
351
352 BUG_ON(!valid_dma_direction(dir));
353
354 addr = __dma_map_page(dev, page, offset, size, dir);
355 debug_dma_map_page(dev, page, offset, size, dir, addr, false);
356
357 return addr;
358}
359
360/**
361 * dma_unmap_single - unmap a single buffer previously mapped
362 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
363 * @handle: DMA address of buffer
364 * @size: size of buffer (same as passed to dma_map_single)
365 * @dir: DMA transfer direction (same as passed to dma_map_single)
366 *
367 * Unmap a single streaming mode DMA translation. The handle and size
368 * must match what was provided in the previous dma_map_single() call.
369 * All other usages are undefined.
370 *
371 * After this call, reads by the CPU to the buffer are guaranteed to see
372 * whatever the device wrote there.
373 */
374static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
375 size_t size, enum dma_data_direction dir)
376{
377 debug_dma_unmap_page(dev, handle, size, dir, true);
378 __dma_unmap_page(dev, handle, size, dir);
379}
380
381/**
382 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
383 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
384 * @handle: DMA address of buffer
385 * @size: size of buffer (same as passed to dma_map_page)
386 * @dir: DMA transfer direction (same as passed to dma_map_page)
387 *
388 * Unmap a page streaming mode DMA translation. The handle and size
389 * must match what was provided in the previous dma_map_page() call.
390 * All other usages are undefined.
391 *
392 * After this call, reads by the CPU to the buffer are guaranteed to see
393 * whatever the device wrote there.
394 */
395static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
396 size_t size, enum dma_data_direction dir)
397{
398 debug_dma_unmap_page(dev, handle, size, dir, false);
399 __dma_unmap_page(dev, handle, size, dir);
400}
401
402/**
403 * dma_sync_single_range_for_cpu
404 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
405 * @handle: DMA address of buffer
406 * @offset: offset of region to start sync
407 * @size: size of region to sync
408 * @dir: DMA transfer direction (same as passed to dma_map_single)
409 *
410 * Make physical memory consistent for a single streaming mode DMA
411 * translation after a transfer.
412 *
413 * If you perform a dma_map_single() but wish to interrogate the
414 * buffer using the cpu, yet do not wish to teardown the PCI dma
415 * mapping, you must call this function before doing so. At the
416 * next point you give the PCI dma address back to the card, you
417 * must first the perform a dma_sync_for_device, and then the
418 * device again owns the buffer.
419 */
420static inline void dma_sync_single_range_for_cpu(struct device *dev,
421 dma_addr_t handle, unsigned long offset, size_t size,
422 enum dma_data_direction dir)
423{
424 BUG_ON(!valid_dma_direction(dir));
425
426 debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
427
428 if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
429 return;
430
431 __dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
432}
433
434static inline void dma_sync_single_range_for_device(struct device *dev,
435 dma_addr_t handle, unsigned long offset, size_t size,
436 enum dma_data_direction dir)
437{
438 BUG_ON(!valid_dma_direction(dir));
439
440 debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
441
442 if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
443 return;
444
445 __dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
446}
447
448static inline void dma_sync_single_for_cpu(struct device *dev,
449 dma_addr_t handle, size_t size, enum dma_data_direction dir)
450{
451 dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
452}
453
454static inline void dma_sync_single_for_device(struct device *dev,
455 dma_addr_t handle, size_t size, enum dma_data_direction dir)
456{
457 dma_sync_single_range_for_device(dev, handle, 0, size, dir);
458}
459 271
460/* 272/*
461 * The scatter list versions of the above methods. 273 * The scatter list versions of the above methods.
462 */ 274 */
463extern int dma_map_sg(struct device *, struct scatterlist *, int, 275extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
464 enum dma_data_direction); 276 enum dma_data_direction, struct dma_attrs *attrs);
465extern void dma_unmap_sg(struct device *, struct scatterlist *, int, 277extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
278 enum dma_data_direction, struct dma_attrs *attrs);
279extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
466 enum dma_data_direction); 280 enum dma_data_direction);
467extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, 281extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
468 enum dma_data_direction); 282 enum dma_data_direction);
469extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
470 enum dma_data_direction);
471
472 283
473#endif /* __KERNEL__ */ 284#endif /* __KERNEL__ */
474#endif 285#endif
diff --git a/arch/arm/include/asm/hardware/pl080.h b/arch/arm/include/asm/hardware/pl080.h
index 33c78d7af2e1..4eea2107214b 100644
--- a/arch/arm/include/asm/hardware/pl080.h
+++ b/arch/arm/include/asm/hardware/pl080.h
@@ -102,6 +102,8 @@
102#define PL080_WIDTH_16BIT (0x1) 102#define PL080_WIDTH_16BIT (0x1)
103#define PL080_WIDTH_32BIT (0x2) 103#define PL080_WIDTH_32BIT (0x2)
104 104
105#define PL080N_CONFIG_ITPROT (1 << 20)
106#define PL080N_CONFIG_SECPROT (1 << 19)
105#define PL080_CONFIG_HALT (1 << 18) 107#define PL080_CONFIG_HALT (1 << 18)
106#define PL080_CONFIG_ACTIVE (1 << 17) /* RO */ 108#define PL080_CONFIG_ACTIVE (1 << 17) /* RO */
107#define PL080_CONFIG_LOCK (1 << 16) 109#define PL080_CONFIG_LOCK (1 << 16)
diff --git a/arch/arm/include/asm/io.h b/arch/arm/include/asm/io.h
index 9af5563dd3eb..815c669fec0a 100644
--- a/arch/arm/include/asm/io.h
+++ b/arch/arm/include/asm/io.h
@@ -47,9 +47,9 @@ extern void __raw_readsb(const void __iomem *addr, void *data, int bytelen);
47extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen); 47extern void __raw_readsw(const void __iomem *addr, void *data, int wordlen);
48extern void __raw_readsl(const void __iomem *addr, void *data, int longlen); 48extern void __raw_readsl(const void __iomem *addr, void *data, int longlen);
49 49
50#define __raw_writeb(v,a) (__chk_io_ptr(a), *(volatile unsigned char __force *)(a) = (v)) 50#define __raw_writeb(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned char __force *)(a) = (v)))
51#define __raw_writew(v,a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)) 51#define __raw_writew(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned short __force *)(a) = (v)))
52#define __raw_writel(v,a) (__chk_io_ptr(a), *(volatile unsigned int __force *)(a) = (v)) 52#define __raw_writel(v,a) ((void)(__chk_io_ptr(a), *(volatile unsigned int __force *)(a) = (v)))
53 53
54#define __raw_readb(a) (__chk_io_ptr(a), *(volatile unsigned char __force *)(a)) 54#define __raw_readb(a) (__chk_io_ptr(a), *(volatile unsigned char __force *)(a))
55#define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a)) 55#define __raw_readw(a) (__chk_io_ptr(a), *(volatile unsigned short __force *)(a))
@@ -229,11 +229,9 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
229#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \ 229#define readl_relaxed(c) ({ u32 __r = le32_to_cpu((__force __le32) \
230 __raw_readl(c)); __r; }) 230 __raw_readl(c)); __r; })
231 231
232#define writeb_relaxed(v,c) ((void)__raw_writeb(v,c)) 232#define writeb_relaxed(v,c) __raw_writeb(v,c)
233#define writew_relaxed(v,c) ((void)__raw_writew((__force u16) \ 233#define writew_relaxed(v,c) __raw_writew((__force u16) cpu_to_le16(v),c)
234 cpu_to_le16(v),c)) 234#define writel_relaxed(v,c) __raw_writel((__force u32) cpu_to_le32(v),c)
235#define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \
236 cpu_to_le32(v),c))
237 235
238#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; }) 236#define readb(c) ({ u8 __v = readb_relaxed(c); __iormb(); __v; })
239#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; }) 237#define readw(c) ({ u16 __v = readw_relaxed(c); __iormb(); __v; })
@@ -281,12 +279,12 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
281#define ioread16be(p) ({ unsigned int __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; }) 279#define ioread16be(p) ({ unsigned int __v = be16_to_cpu((__force __be16)__raw_readw(p)); __iormb(); __v; })
282#define ioread32be(p) ({ unsigned int __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; }) 280#define ioread32be(p) ({ unsigned int __v = be32_to_cpu((__force __be32)__raw_readl(p)); __iormb(); __v; })
283 281
284#define iowrite8(v,p) ({ __iowmb(); (void)__raw_writeb(v, p); }) 282#define iowrite8(v,p) ({ __iowmb(); __raw_writeb(v, p); })
285#define iowrite16(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_le16(v), p); }) 283#define iowrite16(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_le16(v), p); })
286#define iowrite32(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_le32(v), p); }) 284#define iowrite32(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_le32(v), p); })
287 285
288#define iowrite16be(v,p) ({ __iowmb(); (void)__raw_writew((__force __u16)cpu_to_be16(v), p); }) 286#define iowrite16be(v,p) ({ __iowmb(); __raw_writew((__force __u16)cpu_to_be16(v), p); })
289#define iowrite32be(v,p) ({ __iowmb(); (void)__raw_writel((__force __u32)cpu_to_be32(v), p); }) 287#define iowrite32be(v,p) ({ __iowmb(); __raw_writel((__force __u32)cpu_to_be32(v), p); })
290 288
291#define ioread8_rep(p,d,c) __raw_readsb(p,d,c) 289#define ioread8_rep(p,d,c) __raw_readsb(p,d,c)
292#define ioread16_rep(p,d,c) __raw_readsw(p,d,c) 290#define ioread16_rep(p,d,c) __raw_readsw(p,d,c)
diff --git a/arch/arm/include/asm/kvm_para.h b/arch/arm/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/arm/include/asm/kvm_para.h
@@ -0,0 +1 @@
#include <asm-generic/kvm_para.h>
diff --git a/arch/arm/include/asm/mach/arch.h b/arch/arm/include/asm/mach/arch.h
index d7692cafde7f..0b1c94b8c652 100644
--- a/arch/arm/include/asm/mach/arch.h
+++ b/arch/arm/include/asm/mach/arch.h
@@ -43,6 +43,7 @@ struct machine_desc {
43 void (*init_irq)(void); 43 void (*init_irq)(void);
44 struct sys_timer *timer; /* system tick timer */ 44 struct sys_timer *timer; /* system tick timer */
45 void (*init_machine)(void); 45 void (*init_machine)(void);
46 void (*init_late)(void);
46#ifdef CONFIG_MULTI_IRQ_HANDLER 47#ifdef CONFIG_MULTI_IRQ_HANDLER
47 void (*handle_irq)(struct pt_regs *); 48 void (*handle_irq)(struct pt_regs *);
48#endif 49#endif
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index b36f3654bf54..a6efcdd6fd25 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -30,6 +30,7 @@ struct map_desc {
30#define MT_MEMORY_DTCM 12 30#define MT_MEMORY_DTCM 12
31#define MT_MEMORY_ITCM 13 31#define MT_MEMORY_ITCM 13
32#define MT_MEMORY_SO 14 32#define MT_MEMORY_SO 14
33#define MT_MEMORY_DMA_READY 15
33 34
34#ifdef CONFIG_MMU 35#ifdef CONFIG_MMU
35extern void iotable_init(struct map_desc *, int); 36extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
index 68388eb4946b..b79f8e97f775 100644
--- a/arch/arm/include/asm/thread_info.h
+++ b/arch/arm/include/asm/thread_info.h
@@ -148,6 +148,7 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
148#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */ 148#define TIF_NOTIFY_RESUME 2 /* callback before returning to user */
149#define TIF_SYSCALL_TRACE 8 149#define TIF_SYSCALL_TRACE 8
150#define TIF_SYSCALL_AUDIT 9 150#define TIF_SYSCALL_AUDIT 9
151#define TIF_SYSCALL_RESTARTSYS 10
151#define TIF_POLLING_NRFLAG 16 152#define TIF_POLLING_NRFLAG 16
152#define TIF_USING_IWMMXT 17 153#define TIF_USING_IWMMXT 17
153#define TIF_MEMDIE 18 /* is terminating due to OOM killer */ 154#define TIF_MEMDIE 18 /* is terminating due to OOM killer */
@@ -162,16 +163,17 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
162#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) 163#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
163#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG) 164#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
164#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT) 165#define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
165#define _TIF_RESTORE_SIGMASK (1 << TIF_RESTORE_SIGMASK)
166#define _TIF_SECCOMP (1 << TIF_SECCOMP) 166#define _TIF_SECCOMP (1 << TIF_SECCOMP)
167#define _TIF_SYSCALL_RESTARTSYS (1 << TIF_SYSCALL_RESTARTSYS)
167 168
168/* Checks for any syscall work in entry-common.S */ 169/* Checks for any syscall work in entry-common.S */
169#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT) 170#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
171 _TIF_SYSCALL_RESTARTSYS)
170 172
171/* 173/*
172 * Change these and you break ASM code in entry-common.S 174 * Change these and you break ASM code in entry-common.S
173 */ 175 */
174#define _TIF_WORK_MASK 0x000000ff 176#define _TIF_WORK_MASK (_TIF_NEED_RESCHED | _TIF_SIGPENDING | _TIF_NOTIFY_RESUME)
175 177
176#endif /* __KERNEL__ */ 178#endif /* __KERNEL__ */
177#endif /* __ASM_ARM_THREAD_INFO_H */ 179#endif /* __ASM_ARM_THREAD_INFO_H */
diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
index 7bd2d3cb8957..4afed88d250a 100644
--- a/arch/arm/kernel/entry-common.S
+++ b/arch/arm/kernel/entry-common.S
@@ -53,9 +53,13 @@ fast_work_pending:
53work_pending: 53work_pending:
54 tst r1, #_TIF_NEED_RESCHED 54 tst r1, #_TIF_NEED_RESCHED
55 bne work_resched 55 bne work_resched
56 tst r1, #_TIF_SIGPENDING|_TIF_NOTIFY_RESUME 56 /*
57 beq no_work_pending 57 * TIF_SIGPENDING or TIF_NOTIFY_RESUME must've been set if we got here
58 */
59 ldr r2, [sp, #S_PSR]
58 mov r0, sp @ 'regs' 60 mov r0, sp @ 'regs'
61 tst r2, #15 @ are we returning to user mode?
62 bne no_work_pending @ no? just leave, then...
59 mov r2, why @ 'syscall' 63 mov r2, why @ 'syscall'
60 tst r1, #_TIF_SIGPENDING @ delivering a signal? 64 tst r1, #_TIF_SIGPENDING @ delivering a signal?
61 movne why, #0 @ prevent further restarts 65 movne why, #0 @ prevent further restarts
diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
index 14e38261cd31..5700a7ae7f0b 100644
--- a/arch/arm/kernel/ptrace.c
+++ b/arch/arm/kernel/ptrace.c
@@ -25,6 +25,7 @@
25#include <linux/regset.h> 25#include <linux/regset.h>
26#include <linux/audit.h> 26#include <linux/audit.h>
27#include <linux/tracehook.h> 27#include <linux/tracehook.h>
28#include <linux/unistd.h>
28 29
29#include <asm/pgtable.h> 30#include <asm/pgtable.h>
30#include <asm/traps.h> 31#include <asm/traps.h>
@@ -917,6 +918,8 @@ asmlinkage int syscall_trace(int why, struct pt_regs *regs, int scno)
917 audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0, 918 audit_syscall_entry(AUDIT_ARCH_ARM, scno, regs->ARM_r0,
918 regs->ARM_r1, regs->ARM_r2, regs->ARM_r3); 919 regs->ARM_r1, regs->ARM_r2, regs->ARM_r3);
919 920
921 if (why == 0 && test_and_clear_thread_flag(TIF_SYSCALL_RESTARTSYS))
922 scno = __NR_restart_syscall - __NR_SYSCALL_BASE;
920 if (!test_thread_flag(TIF_SYSCALL_TRACE)) 923 if (!test_thread_flag(TIF_SYSCALL_TRACE))
921 return scno; 924 return scno;
922 925
diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
index ebfac782593f..e15d83bb4ea3 100644
--- a/arch/arm/kernel/setup.c
+++ b/arch/arm/kernel/setup.c
@@ -81,6 +81,7 @@ __setup("fpe=", fpe_setup);
81extern void paging_init(struct machine_desc *desc); 81extern void paging_init(struct machine_desc *desc);
82extern void sanity_check_meminfo(void); 82extern void sanity_check_meminfo(void);
83extern void reboot_setup(char *str); 83extern void reboot_setup(char *str);
84extern void setup_dma_zone(struct machine_desc *desc);
84 85
85unsigned int processor_id; 86unsigned int processor_id;
86EXPORT_SYMBOL(processor_id); 87EXPORT_SYMBOL(processor_id);
@@ -800,6 +801,14 @@ static int __init customize_machine(void)
800} 801}
801arch_initcall(customize_machine); 802arch_initcall(customize_machine);
802 803
804static int __init init_machine_late(void)
805{
806 if (machine_desc->init_late)
807 machine_desc->init_late();
808 return 0;
809}
810late_initcall(init_machine_late);
811
803#ifdef CONFIG_KEXEC 812#ifdef CONFIG_KEXEC
804static inline unsigned long long get_total_mem(void) 813static inline unsigned long long get_total_mem(void)
805{ 814{
@@ -939,12 +948,8 @@ void __init setup_arch(char **cmdline_p)
939 machine_desc = mdesc; 948 machine_desc = mdesc;
940 machine_name = mdesc->name; 949 machine_name = mdesc->name;
941 950
942#ifdef CONFIG_ZONE_DMA 951 setup_dma_zone(mdesc);
943 if (mdesc->dma_zone_size) { 952
944 extern unsigned long arm_dma_zone_size;
945 arm_dma_zone_size = mdesc->dma_zone_size;
946 }
947#endif
948 if (mdesc->restart_mode) 953 if (mdesc->restart_mode)
949 reboot_setup(&mdesc->restart_mode); 954 reboot_setup(&mdesc->restart_mode);
950 955
diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
index 4e5fdd9bd9e3..17fc36c41cff 100644
--- a/arch/arm/kernel/signal.c
+++ b/arch/arm/kernel/signal.c
@@ -29,7 +29,6 @@
29 */ 29 */
30#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE)) 30#define SWI_SYS_SIGRETURN (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
31#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE)) 31#define SWI_SYS_RT_SIGRETURN (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))
32#define SWI_SYS_RESTART (0xef000000|__NR_restart_syscall|__NR_OABI_SYSCALL_BASE)
33 32
34/* 33/*
35 * With EABI, the syscall number has to be loaded into r7. 34 * With EABI, the syscall number has to be loaded into r7.
@@ -50,18 +49,6 @@ const unsigned long sigreturn_codes[7] = {
50}; 49};
51 50
52/* 51/*
53 * Either we support OABI only, or we have EABI with the OABI
54 * compat layer enabled. In the later case we don't know if
55 * user space is EABI or not, and if not we must not clobber r7.
56 * Always using the OABI syscall solves that issue and works for
57 * all those cases.
58 */
59const unsigned long syscall_restart_code[2] = {
60 SWI_SYS_RESTART, /* swi __NR_restart_syscall */
61 0xe49df004, /* ldr pc, [sp], #4 */
62};
63
64/*
65 * atomically swap in the new signal mask, and wait for a signal. 52 * atomically swap in the new signal mask, and wait for a signal.
66 */ 53 */
67asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask) 54asmlinkage int sys_sigsuspend(int restart, unsigned long oldmask, old_sigset_t mask)
@@ -82,10 +69,10 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
82 old_sigset_t mask; 69 old_sigset_t mask;
83 if (!access_ok(VERIFY_READ, act, sizeof(*act)) || 70 if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
84 __get_user(new_ka.sa.sa_handler, &act->sa_handler) || 71 __get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
85 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer)) 72 __get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
73 __get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
74 __get_user(mask, &act->sa_mask))
86 return -EFAULT; 75 return -EFAULT;
87 __get_user(new_ka.sa.sa_flags, &act->sa_flags);
88 __get_user(mask, &act->sa_mask);
89 siginitset(&new_ka.sa.sa_mask, mask); 76 siginitset(&new_ka.sa.sa_mask, mask);
90 } 77 }
91 78
@@ -94,10 +81,10 @@ sys_sigaction(int sig, const struct old_sigaction __user *act,
94 if (!ret && oact) { 81 if (!ret && oact) {
95 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) || 82 if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
96 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) || 83 __put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
97 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer)) 84 __put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
85 __put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
86 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
98 return -EFAULT; 87 return -EFAULT;
99 __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
100 __put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask);
101 } 88 }
102 89
103 return ret; 90 return ret;
@@ -602,15 +589,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
602 int signr; 589 int signr;
603 590
604 /* 591 /*
605 * We want the common case to go fast, which
606 * is why we may in certain cases get here from
607 * kernel mode. Just return without doing anything
608 * if so.
609 */
610 if (!user_mode(regs))
611 return;
612
613 /*
614 * If we were from a system call, check for system call restarting... 592 * If we were from a system call, check for system call restarting...
615 */ 593 */
616 if (syscall) { 594 if (syscall) {
@@ -626,18 +604,13 @@ static void do_signal(struct pt_regs *regs, int syscall)
626 case -ERESTARTNOHAND: 604 case -ERESTARTNOHAND:
627 case -ERESTARTSYS: 605 case -ERESTARTSYS:
628 case -ERESTARTNOINTR: 606 case -ERESTARTNOINTR:
607 case -ERESTART_RESTARTBLOCK:
629 regs->ARM_r0 = regs->ARM_ORIG_r0; 608 regs->ARM_r0 = regs->ARM_ORIG_r0;
630 regs->ARM_pc = restart_addr; 609 regs->ARM_pc = restart_addr;
631 break; 610 break;
632 case -ERESTART_RESTARTBLOCK:
633 regs->ARM_r0 = -EINTR;
634 break;
635 } 611 }
636 } 612 }
637 613
638 if (try_to_freeze())
639 goto no_signal;
640
641 /* 614 /*
642 * Get the signal to deliver. When running under ptrace, at this 615 * Get the signal to deliver. When running under ptrace, at this
643 * point the debugger may change all our registers ... 616 * point the debugger may change all our registers ...
@@ -652,12 +625,14 @@ static void do_signal(struct pt_regs *regs, int syscall)
652 * debugger has chosen to restart at a different PC. 625 * debugger has chosen to restart at a different PC.
653 */ 626 */
654 if (regs->ARM_pc == restart_addr) { 627 if (regs->ARM_pc == restart_addr) {
655 if (retval == -ERESTARTNOHAND 628 if (retval == -ERESTARTNOHAND ||
629 retval == -ERESTART_RESTARTBLOCK
656 || (retval == -ERESTARTSYS 630 || (retval == -ERESTARTSYS
657 && !(ka.sa.sa_flags & SA_RESTART))) { 631 && !(ka.sa.sa_flags & SA_RESTART))) {
658 regs->ARM_r0 = -EINTR; 632 regs->ARM_r0 = -EINTR;
659 regs->ARM_pc = continue_addr; 633 regs->ARM_pc = continue_addr;
660 } 634 }
635 clear_thread_flag(TIF_SYSCALL_RESTARTSYS);
661 } 636 }
662 637
663 if (test_thread_flag(TIF_RESTORE_SIGMASK)) 638 if (test_thread_flag(TIF_RESTORE_SIGMASK))
@@ -677,7 +652,6 @@ static void do_signal(struct pt_regs *regs, int syscall)
677 return; 652 return;
678 } 653 }
679 654
680 no_signal:
681 if (syscall) { 655 if (syscall) {
682 /* 656 /*
683 * Handle restarting a different system call. As above, 657 * Handle restarting a different system call. As above,
@@ -685,38 +659,15 @@ static void do_signal(struct pt_regs *regs, int syscall)
685 * ignore the restart. 659 * ignore the restart.
686 */ 660 */
687 if (retval == -ERESTART_RESTARTBLOCK 661 if (retval == -ERESTART_RESTARTBLOCK
688 && regs->ARM_pc == continue_addr) { 662 && regs->ARM_pc == restart_addr)
689 if (thumb_mode(regs)) { 663 set_thread_flag(TIF_SYSCALL_RESTARTSYS);
690 regs->ARM_r7 = __NR_restart_syscall - __NR_SYSCALL_BASE;
691 regs->ARM_pc -= 2;
692 } else {
693#if defined(CONFIG_AEABI) && !defined(CONFIG_OABI_COMPAT)
694 regs->ARM_r7 = __NR_restart_syscall;
695 regs->ARM_pc -= 4;
696#else
697 u32 __user *usp;
698
699 regs->ARM_sp -= 4;
700 usp = (u32 __user *)regs->ARM_sp;
701
702 if (put_user(regs->ARM_pc, usp) == 0) {
703 regs->ARM_pc = KERN_RESTART_CODE;
704 } else {
705 regs->ARM_sp += 4;
706 force_sigsegv(0, current);
707 }
708#endif
709 }
710 }
711
712 /* If there's no signal to deliver, we just put the saved sigmask
713 * back.
714 */
715 if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
716 clear_thread_flag(TIF_RESTORE_SIGMASK);
717 sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
718 }
719 } 664 }
665
666 /* If there's no signal to deliver, we just put the saved sigmask
667 * back.
668 */
669 if (test_and_clear_thread_flag(TIF_RESTORE_SIGMASK))
670 set_current_blocked(&current->saved_sigmask);
720} 671}
721 672
722asmlinkage void 673asmlinkage void
diff --git a/arch/arm/kernel/signal.h b/arch/arm/kernel/signal.h
index 6fcfe8398aa4..5ff067b7c752 100644
--- a/arch/arm/kernel/signal.h
+++ b/arch/arm/kernel/signal.h
@@ -8,7 +8,5 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500) 10#define KERN_SIGRETURN_CODE (CONFIG_VECTORS_BASE + 0x00000500)
11#define KERN_RESTART_CODE (KERN_SIGRETURN_CODE + sizeof(sigreturn_codes))
12 11
13extern const unsigned long sigreturn_codes[7]; 12extern const unsigned long sigreturn_codes[7];
14extern const unsigned long syscall_restart_code[2];
diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
index 3647170e9a16..4928d89758f4 100644
--- a/arch/arm/kernel/traps.c
+++ b/arch/arm/kernel/traps.c
@@ -820,8 +820,6 @@ void __init early_trap_init(void *vectors_base)
820 */ 820 */
821 memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE), 821 memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
822 sigreturn_codes, sizeof(sigreturn_codes)); 822 sigreturn_codes, sizeof(sigreturn_codes));
823 memcpy((void *)(vectors + KERN_RESTART_CODE - CONFIG_VECTORS_BASE),
824 syscall_restart_code, sizeof(syscall_restart_code));
825 823
826 flush_icache_range(vectors, vectors + PAGE_SIZE); 824 flush_icache_range(vectors, vectors + PAGE_SIZE);
827 modify_domain(DOMAIN_USER, DOMAIN_CLIENT); 825 modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
diff --git a/arch/arm/mach-at91/at91sam9g45_devices.c b/arch/arm/mach-at91/at91sam9g45_devices.c
index f6747246d649..933fc9afe7d0 100644
--- a/arch/arm/mach-at91/at91sam9g45_devices.c
+++ b/arch/arm/mach-at91/at91sam9g45_devices.c
@@ -436,7 +436,6 @@ void __init at91_add_device_mci(short mmc_id, struct mci_platform_data *data)
436 atslave->dma_dev = &at_hdmac_device.dev; 436 atslave->dma_dev = &at_hdmac_device.dev;
437 atslave->cfg = ATC_FIFOCFG_HALFFIFO 437 atslave->cfg = ATC_FIFOCFG_HALFFIFO
438 | ATC_SRC_H2SEL_HW | ATC_DST_H2SEL_HW; 438 | ATC_SRC_H2SEL_HW | ATC_DST_H2SEL_HW;
439 atslave->ctrla = ATC_SCSIZE_16 | ATC_DCSIZE_16;
440 if (mmc_id == 0) /* MCI0 */ 439 if (mmc_id == 0) /* MCI0 */
441 atslave->cfg |= ATC_SRC_PER(AT_DMA_ID_MCI0) 440 atslave->cfg |= ATC_SRC_PER(AT_DMA_ID_MCI0)
442 | ATC_DST_PER(AT_DMA_ID_MCI0); 441 | ATC_DST_PER(AT_DMA_ID_MCI0);
diff --git a/arch/arm/mach-at91/include/mach/at_hdmac.h b/arch/arm/mach-at91/include/mach/at_hdmac.h
index fff48d1a0f4e..cab0997be3de 100644
--- a/arch/arm/mach-at91/include/mach/at_hdmac.h
+++ b/arch/arm/mach-at91/include/mach/at_hdmac.h
@@ -26,18 +26,11 @@ struct at_dma_platform_data {
26/** 26/**
27 * struct at_dma_slave - Controller-specific information about a slave 27 * struct at_dma_slave - Controller-specific information about a slave
28 * @dma_dev: required DMA master device 28 * @dma_dev: required DMA master device
29 * @tx_reg: physical address of data register used for
30 * memory-to-peripheral transfers
31 * @rx_reg: physical address of data register used for
32 * peripheral-to-memory transfers
33 * @reg_width: peripheral register width
34 * @cfg: Platform-specific initializer for the CFG register 29 * @cfg: Platform-specific initializer for the CFG register
35 * @ctrla: Platform-specific initializer for the CTRLA register
36 */ 30 */
37struct at_dma_slave { 31struct at_dma_slave {
38 struct device *dma_dev; 32 struct device *dma_dev;
39 u32 cfg; 33 u32 cfg;
40 u32 ctrla;
41}; 34};
42 35
43 36
@@ -64,24 +57,5 @@ struct at_dma_slave {
64#define ATC_FIFOCFG_HALFFIFO (0x1 << 28) 57#define ATC_FIFOCFG_HALFFIFO (0x1 << 28)
65#define ATC_FIFOCFG_ENOUGHSPACE (0x2 << 28) 58#define ATC_FIFOCFG_ENOUGHSPACE (0x2 << 28)
66 59
67/* Platform-configurable bits in CTRLA */
68#define ATC_SCSIZE_MASK (0x7 << 16) /* Source Chunk Transfer Size */
69#define ATC_SCSIZE_1 (0x0 << 16)
70#define ATC_SCSIZE_4 (0x1 << 16)
71#define ATC_SCSIZE_8 (0x2 << 16)
72#define ATC_SCSIZE_16 (0x3 << 16)
73#define ATC_SCSIZE_32 (0x4 << 16)
74#define ATC_SCSIZE_64 (0x5 << 16)
75#define ATC_SCSIZE_128 (0x6 << 16)
76#define ATC_SCSIZE_256 (0x7 << 16)
77#define ATC_DCSIZE_MASK (0x7 << 20) /* Destination Chunk Transfer Size */
78#define ATC_DCSIZE_1 (0x0 << 20)
79#define ATC_DCSIZE_4 (0x1 << 20)
80#define ATC_DCSIZE_8 (0x2 << 20)
81#define ATC_DCSIZE_16 (0x3 << 20)
82#define ATC_DCSIZE_32 (0x4 << 20)
83#define ATC_DCSIZE_64 (0x5 << 20)
84#define ATC_DCSIZE_128 (0x6 << 20)
85#define ATC_DCSIZE_256 (0x7 << 20)
86 60
87#endif /* AT_HDMAC_H */ 61#endif /* AT_HDMAC_H */
diff --git a/arch/arm/mach-davinci/board-da830-evm.c b/arch/arm/mach-davinci/board-da830-evm.c
index dc1afe5be20c..0031864e7f11 100644
--- a/arch/arm/mach-davinci/board-da830-evm.c
+++ b/arch/arm/mach-davinci/board-da830-evm.c
@@ -681,6 +681,7 @@ MACHINE_START(DAVINCI_DA830_EVM, "DaVinci DA830/OMAP-L137/AM17x EVM")
681 .init_irq = cp_intc_init, 681 .init_irq = cp_intc_init,
682 .timer = &davinci_timer, 682 .timer = &davinci_timer,
683 .init_machine = da830_evm_init, 683 .init_machine = da830_evm_init,
684 .init_late = davinci_init_late,
684 .dma_zone_size = SZ_128M, 685 .dma_zone_size = SZ_128M,
685 .restart = da8xx_restart, 686 .restart = da8xx_restart,
686MACHINE_END 687MACHINE_END
diff --git a/arch/arm/mach-davinci/board-da850-evm.c b/arch/arm/mach-davinci/board-da850-evm.c
index 09f61073c8d9..0149fb453be3 100644
--- a/arch/arm/mach-davinci/board-da850-evm.c
+++ b/arch/arm/mach-davinci/board-da850-evm.c
@@ -1411,6 +1411,7 @@ MACHINE_START(DAVINCI_DA850_EVM, "DaVinci DA850/OMAP-L138/AM18x EVM")
1411 .init_irq = cp_intc_init, 1411 .init_irq = cp_intc_init,
1412 .timer = &davinci_timer, 1412 .timer = &davinci_timer,
1413 .init_machine = da850_evm_init, 1413 .init_machine = da850_evm_init,
1414 .init_late = davinci_init_late,
1414 .dma_zone_size = SZ_128M, 1415 .dma_zone_size = SZ_128M,
1415 .restart = da8xx_restart, 1416 .restart = da8xx_restart,
1416MACHINE_END 1417MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm355-evm.c b/arch/arm/mach-davinci/board-dm355-evm.c
index 82ed753fb360..1c7b1f46a8f3 100644
--- a/arch/arm/mach-davinci/board-dm355-evm.c
+++ b/arch/arm/mach-davinci/board-dm355-evm.c
@@ -357,6 +357,7 @@ MACHINE_START(DAVINCI_DM355_EVM, "DaVinci DM355 EVM")
357 .init_irq = davinci_irq_init, 357 .init_irq = davinci_irq_init,
358 .timer = &davinci_timer, 358 .timer = &davinci_timer,
359 .init_machine = dm355_evm_init, 359 .init_machine = dm355_evm_init,
360 .init_late = davinci_init_late,
360 .dma_zone_size = SZ_128M, 361 .dma_zone_size = SZ_128M,
361 .restart = davinci_restart, 362 .restart = davinci_restart,
362MACHINE_END 363MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm355-leopard.c b/arch/arm/mach-davinci/board-dm355-leopard.c
index d74a8b3445fb..8e7703213b08 100644
--- a/arch/arm/mach-davinci/board-dm355-leopard.c
+++ b/arch/arm/mach-davinci/board-dm355-leopard.c
@@ -276,6 +276,7 @@ MACHINE_START(DM355_LEOPARD, "DaVinci DM355 leopard")
276 .init_irq = davinci_irq_init, 276 .init_irq = davinci_irq_init,
277 .timer = &davinci_timer, 277 .timer = &davinci_timer,
278 .init_machine = dm355_leopard_init, 278 .init_machine = dm355_leopard_init,
279 .init_late = davinci_init_late,
279 .dma_zone_size = SZ_128M, 280 .dma_zone_size = SZ_128M,
280 .restart = davinci_restart, 281 .restart = davinci_restart,
281MACHINE_END 282MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm365-evm.c b/arch/arm/mach-davinci/board-dm365-evm.c
index 5bce2b83bb4f..688a9c556dc9 100644
--- a/arch/arm/mach-davinci/board-dm365-evm.c
+++ b/arch/arm/mach-davinci/board-dm365-evm.c
@@ -618,6 +618,7 @@ MACHINE_START(DAVINCI_DM365_EVM, "DaVinci DM365 EVM")
618 .init_irq = davinci_irq_init, 618 .init_irq = davinci_irq_init,
619 .timer = &davinci_timer, 619 .timer = &davinci_timer,
620 .init_machine = dm365_evm_init, 620 .init_machine = dm365_evm_init,
621 .init_late = davinci_init_late,
621 .dma_zone_size = SZ_128M, 622 .dma_zone_size = SZ_128M,
622 .restart = davinci_restart, 623 .restart = davinci_restart,
623MACHINE_END 624MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm644x-evm.c b/arch/arm/mach-davinci/board-dm644x-evm.c
index 3683306e0245..d34ed55912b2 100644
--- a/arch/arm/mach-davinci/board-dm644x-evm.c
+++ b/arch/arm/mach-davinci/board-dm644x-evm.c
@@ -825,6 +825,7 @@ MACHINE_START(DAVINCI_EVM, "DaVinci DM644x EVM")
825 .init_irq = davinci_irq_init, 825 .init_irq = davinci_irq_init,
826 .timer = &davinci_timer, 826 .timer = &davinci_timer,
827 .init_machine = davinci_evm_init, 827 .init_machine = davinci_evm_init,
828 .init_late = davinci_init_late,
828 .dma_zone_size = SZ_128M, 829 .dma_zone_size = SZ_128M,
829 .restart = davinci_restart, 830 .restart = davinci_restart,
830MACHINE_END 831MACHINE_END
diff --git a/arch/arm/mach-davinci/board-dm646x-evm.c b/arch/arm/mach-davinci/board-dm646x-evm.c
index d72ab948d630..958679a20e13 100644
--- a/arch/arm/mach-davinci/board-dm646x-evm.c
+++ b/arch/arm/mach-davinci/board-dm646x-evm.c
@@ -788,6 +788,7 @@ MACHINE_START(DAVINCI_DM6467_EVM, "DaVinci DM646x EVM")
788 .init_irq = davinci_irq_init, 788 .init_irq = davinci_irq_init,
789 .timer = &davinci_timer, 789 .timer = &davinci_timer,
790 .init_machine = evm_init, 790 .init_machine = evm_init,
791 .init_late = davinci_init_late,
791 .dma_zone_size = SZ_128M, 792 .dma_zone_size = SZ_128M,
792 .restart = davinci_restart, 793 .restart = davinci_restart,
793MACHINE_END 794MACHINE_END
@@ -798,6 +799,7 @@ MACHINE_START(DAVINCI_DM6467TEVM, "DaVinci DM6467T EVM")
798 .init_irq = davinci_irq_init, 799 .init_irq = davinci_irq_init,
799 .timer = &davinci_timer, 800 .timer = &davinci_timer,
800 .init_machine = evm_init, 801 .init_machine = evm_init,
802 .init_late = davinci_init_late,
801 .dma_zone_size = SZ_128M, 803 .dma_zone_size = SZ_128M,
802 .restart = davinci_restart, 804 .restart = davinci_restart,
803MACHINE_END 805MACHINE_END
diff --git a/arch/arm/mach-davinci/board-mityomapl138.c b/arch/arm/mach-davinci/board-mityomapl138.c
index 672d820e2aa4..beecde3a1d2f 100644
--- a/arch/arm/mach-davinci/board-mityomapl138.c
+++ b/arch/arm/mach-davinci/board-mityomapl138.c
@@ -572,6 +572,7 @@ MACHINE_START(MITYOMAPL138, "MityDSP-L138/MityARM-1808")
572 .init_irq = cp_intc_init, 572 .init_irq = cp_intc_init,
573 .timer = &davinci_timer, 573 .timer = &davinci_timer,
574 .init_machine = mityomapl138_init, 574 .init_machine = mityomapl138_init,
575 .init_late = davinci_init_late,
575 .dma_zone_size = SZ_128M, 576 .dma_zone_size = SZ_128M,
576 .restart = da8xx_restart, 577 .restart = da8xx_restart,
577MACHINE_END 578MACHINE_END
diff --git a/arch/arm/mach-davinci/board-neuros-osd2.c b/arch/arm/mach-davinci/board-neuros-osd2.c
index a772bb45570a..5de69f2fcca9 100644
--- a/arch/arm/mach-davinci/board-neuros-osd2.c
+++ b/arch/arm/mach-davinci/board-neuros-osd2.c
@@ -278,6 +278,7 @@ MACHINE_START(NEUROS_OSD2, "Neuros OSD2")
278 .init_irq = davinci_irq_init, 278 .init_irq = davinci_irq_init,
279 .timer = &davinci_timer, 279 .timer = &davinci_timer,
280 .init_machine = davinci_ntosd2_init, 280 .init_machine = davinci_ntosd2_init,
281 .init_late = davinci_init_late,
281 .dma_zone_size = SZ_128M, 282 .dma_zone_size = SZ_128M,
282 .restart = davinci_restart, 283 .restart = davinci_restart,
283MACHINE_END 284MACHINE_END
diff --git a/arch/arm/mach-davinci/board-omapl138-hawk.c b/arch/arm/mach-davinci/board-omapl138-hawk.c
index 45e815760a27..dc1208e9e664 100644
--- a/arch/arm/mach-davinci/board-omapl138-hawk.c
+++ b/arch/arm/mach-davinci/board-omapl138-hawk.c
@@ -343,6 +343,7 @@ MACHINE_START(OMAPL138_HAWKBOARD, "AM18x/OMAP-L138 Hawkboard")
343 .init_irq = cp_intc_init, 343 .init_irq = cp_intc_init,
344 .timer = &davinci_timer, 344 .timer = &davinci_timer,
345 .init_machine = omapl138_hawk_init, 345 .init_machine = omapl138_hawk_init,
346 .init_late = davinci_init_late,
346 .dma_zone_size = SZ_128M, 347 .dma_zone_size = SZ_128M,
347 .restart = da8xx_restart, 348 .restart = da8xx_restart,
348MACHINE_END 349MACHINE_END
diff --git a/arch/arm/mach-davinci/board-sffsdr.c b/arch/arm/mach-davinci/board-sffsdr.c
index 76e675096104..9078acf94bac 100644
--- a/arch/arm/mach-davinci/board-sffsdr.c
+++ b/arch/arm/mach-davinci/board-sffsdr.c
@@ -157,6 +157,7 @@ MACHINE_START(SFFSDR, "Lyrtech SFFSDR")
157 .init_irq = davinci_irq_init, 157 .init_irq = davinci_irq_init,
158 .timer = &davinci_timer, 158 .timer = &davinci_timer,
159 .init_machine = davinci_sffsdr_init, 159 .init_machine = davinci_sffsdr_init,
160 .init_late = davinci_init_late,
160 .dma_zone_size = SZ_128M, 161 .dma_zone_size = SZ_128M,
161 .restart = davinci_restart, 162 .restart = davinci_restart,
162MACHINE_END 163MACHINE_END
diff --git a/arch/arm/mach-davinci/board-tnetv107x-evm.c b/arch/arm/mach-davinci/board-tnetv107x-evm.c
index 5f14e30b00d8..ac4e003ad863 100644
--- a/arch/arm/mach-davinci/board-tnetv107x-evm.c
+++ b/arch/arm/mach-davinci/board-tnetv107x-evm.c
@@ -282,6 +282,7 @@ MACHINE_START(TNETV107X, "TNETV107X EVM")
282 .init_irq = cp_intc_init, 282 .init_irq = cp_intc_init,
283 .timer = &davinci_timer, 283 .timer = &davinci_timer,
284 .init_machine = tnetv107x_evm_board_init, 284 .init_machine = tnetv107x_evm_board_init,
285 .init_late = davinci_init_late,
285 .dma_zone_size = SZ_128M, 286 .dma_zone_size = SZ_128M,
286 .restart = tnetv107x_restart, 287 .restart = tnetv107x_restart,
287MACHINE_END 288MACHINE_END
diff --git a/arch/arm/mach-davinci/clock.c b/arch/arm/mach-davinci/clock.c
index 008772e3b843..34668ead53c7 100644
--- a/arch/arm/mach-davinci/clock.c
+++ b/arch/arm/mach-davinci/clock.c
@@ -213,7 +213,7 @@ EXPORT_SYMBOL(clk_unregister);
213/* 213/*
214 * Disable any unused clocks left on by the bootloader 214 * Disable any unused clocks left on by the bootloader
215 */ 215 */
216static int __init clk_disable_unused(void) 216int __init davinci_clk_disable_unused(void)
217{ 217{
218 struct clk *ck; 218 struct clk *ck;
219 219
@@ -237,7 +237,6 @@ static int __init clk_disable_unused(void)
237 237
238 return 0; 238 return 0;
239} 239}
240late_initcall(clk_disable_unused);
241#endif 240#endif
242 241
243static unsigned long clk_sysclk_recalc(struct clk *clk) 242static unsigned long clk_sysclk_recalc(struct clk *clk)
diff --git a/arch/arm/mach-davinci/common.c b/arch/arm/mach-davinci/common.c
index cb9b2e47510c..64b0f65a8639 100644
--- a/arch/arm/mach-davinci/common.c
+++ b/arch/arm/mach-davinci/common.c
@@ -117,3 +117,10 @@ void __init davinci_common_init(struct davinci_soc_info *soc_info)
117err: 117err:
118 panic("davinci_common_init: SoC Initialization failed\n"); 118 panic("davinci_common_init: SoC Initialization failed\n");
119} 119}
120
121void __init davinci_init_late(void)
122{
123 davinci_cpufreq_init();
124 davinci_pm_init();
125 davinci_clk_disable_unused();
126}
diff --git a/arch/arm/mach-davinci/cpufreq.c b/arch/arm/mach-davinci/cpufreq.c
index 031048fec9f5..4729eaab0f40 100644
--- a/arch/arm/mach-davinci/cpufreq.c
+++ b/arch/arm/mach-davinci/cpufreq.c
@@ -240,10 +240,9 @@ static struct platform_driver davinci_cpufreq_driver = {
240 .remove = __exit_p(davinci_cpufreq_remove), 240 .remove = __exit_p(davinci_cpufreq_remove),
241}; 241};
242 242
243static int __init davinci_cpufreq_init(void) 243int __init davinci_cpufreq_init(void)
244{ 244{
245 return platform_driver_probe(&davinci_cpufreq_driver, 245 return platform_driver_probe(&davinci_cpufreq_driver,
246 davinci_cpufreq_probe); 246 davinci_cpufreq_probe);
247} 247}
248late_initcall(davinci_cpufreq_init);
249 248
diff --git a/arch/arm/mach-davinci/dma.c b/arch/arm/mach-davinci/dma.c
index 95ce019c9b98..a685e9706b7b 100644
--- a/arch/arm/mach-davinci/dma.c
+++ b/arch/arm/mach-davinci/dma.c
@@ -353,9 +353,10 @@ static int irq2ctlr(int irq)
353 *****************************************************************************/ 353 *****************************************************************************/
354static irqreturn_t dma_irq_handler(int irq, void *data) 354static irqreturn_t dma_irq_handler(int irq, void *data)
355{ 355{
356 int i;
357 int ctlr; 356 int ctlr;
358 unsigned int cnt = 0; 357 u32 sh_ier;
358 u32 sh_ipr;
359 u32 bank;
359 360
360 ctlr = irq2ctlr(irq); 361 ctlr = irq2ctlr(irq);
361 if (ctlr < 0) 362 if (ctlr < 0)
@@ -363,41 +364,39 @@ static irqreturn_t dma_irq_handler(int irq, void *data)
363 364
364 dev_dbg(data, "dma_irq_handler\n"); 365 dev_dbg(data, "dma_irq_handler\n");
365 366
366 if ((edma_shadow0_read_array(ctlr, SH_IPR, 0) == 0) && 367 sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 0);
367 (edma_shadow0_read_array(ctlr, SH_IPR, 1) == 0)) 368 if (!sh_ipr) {
368 return IRQ_NONE; 369 sh_ipr = edma_shadow0_read_array(ctlr, SH_IPR, 1);
370 if (!sh_ipr)
371 return IRQ_NONE;
372 sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 1);
373 bank = 1;
374 } else {
375 sh_ier = edma_shadow0_read_array(ctlr, SH_IER, 0);
376 bank = 0;
377 }
369 378
370 while (1) { 379 do {
371 int j; 380 u32 slot;
372 if (edma_shadow0_read_array(ctlr, SH_IPR, 0) & 381 u32 channel;
373 edma_shadow0_read_array(ctlr, SH_IER, 0)) 382
374 j = 0; 383 dev_dbg(data, "IPR%d %08x\n", bank, sh_ipr);
375 else if (edma_shadow0_read_array(ctlr, SH_IPR, 1) & 384
376 edma_shadow0_read_array(ctlr, SH_IER, 1)) 385 slot = __ffs(sh_ipr);
377 j = 1; 386 sh_ipr &= ~(BIT(slot));
378 else 387
379 break; 388 if (sh_ier & BIT(slot)) {
380 dev_dbg(data, "IPR%d %08x\n", j, 389 channel = (bank << 5) | slot;
381 edma_shadow0_read_array(ctlr, SH_IPR, j)); 390 /* Clear the corresponding IPR bits */
382 for (i = 0; i < 32; i++) { 391 edma_shadow0_write_array(ctlr, SH_ICR, bank,
383 int k = (j << 5) + i; 392 BIT(slot));
384 if ((edma_shadow0_read_array(ctlr, SH_IPR, j) & BIT(i)) 393 if (edma_cc[ctlr]->intr_data[channel].callback)
385 && (edma_shadow0_read_array(ctlr, 394 edma_cc[ctlr]->intr_data[channel].callback(
386 SH_IER, j) & BIT(i))) { 395 channel, DMA_COMPLETE,
387 /* Clear the corresponding IPR bits */ 396 edma_cc[ctlr]->intr_data[channel].data);
388 edma_shadow0_write_array(ctlr, SH_ICR, j,
389 BIT(i));
390 if (edma_cc[ctlr]->intr_data[k].callback)
391 edma_cc[ctlr]->intr_data[k].callback(
392 k, DMA_COMPLETE,
393 edma_cc[ctlr]->intr_data[k].
394 data);
395 }
396 } 397 }
397 cnt++; 398 } while (sh_ipr);
398 if (cnt > 10) 399
399 break;
400 }
401 edma_shadow0_write(ctlr, SH_IEVAL, 1); 400 edma_shadow0_write(ctlr, SH_IEVAL, 1);
402 return IRQ_HANDLED; 401 return IRQ_HANDLED;
403} 402}
diff --git a/arch/arm/mach-davinci/include/mach/common.h b/arch/arm/mach-davinci/include/mach/common.h
index 5cd39a4e0c96..bdc4aa8e672a 100644
--- a/arch/arm/mach-davinci/include/mach/common.h
+++ b/arch/arm/mach-davinci/include/mach/common.h
@@ -84,6 +84,25 @@ extern struct davinci_soc_info davinci_soc_info;
84extern void davinci_common_init(struct davinci_soc_info *soc_info); 84extern void davinci_common_init(struct davinci_soc_info *soc_info);
85extern void davinci_init_ide(void); 85extern void davinci_init_ide(void);
86void davinci_restart(char mode, const char *cmd); 86void davinci_restart(char mode, const char *cmd);
87void davinci_init_late(void);
88
89#ifdef CONFIG_DAVINCI_RESET_CLOCKS
90int davinci_clk_disable_unused(void);
91#else
92static inline int davinci_clk_disable_unused(void) { return 0; }
93#endif
94
95#ifdef CONFIG_CPU_FREQ
96int davinci_cpufreq_init(void);
97#else
98static inline int davinci_cpufreq_init(void) { return 0; }
99#endif
100
101#ifdef CONFIG_SUSPEND
102int davinci_pm_init(void);
103#else
104static inline int davinci_pm_init(void) { return 0; }
105#endif
87 106
88/* standard place to map on-chip SRAMs; they *may* support DMA */ 107/* standard place to map on-chip SRAMs; they *may* support DMA */
89#define SRAM_VIRT 0xfffe0000 108#define SRAM_VIRT 0xfffe0000
diff --git a/arch/arm/mach-davinci/include/mach/debug-macro.S b/arch/arm/mach-davinci/include/mach/debug-macro.S
index cf94552d5274..34290d14754b 100644
--- a/arch/arm/mach-davinci/include/mach/debug-macro.S
+++ b/arch/arm/mach-davinci/include/mach/debug-macro.S
@@ -22,46 +22,28 @@
22 22
23#define UART_SHIFT 2 23#define UART_SHIFT 2
24 24
25 .pushsection .data 25#if defined(CONFIG_DEBUG_DAVINCI_DMx_UART0)
26davinci_uart_phys: .word 0 26#define UART_BASE DAVINCI_UART0_BASE
27davinci_uart_virt: .word 0 27#elif defined(CONFIG_DEBUG_DAVINCI_DA8XX_UART0)
28 .popsection 28#define UART_BASE DA8XX_UART0_BASE
29 29#elif defined(CONFIG_DEBUG_DAVINCI_DA8XX_UART1)
30 .macro addruart, rp, rv, tmp 30#define UART_BASE DA8XX_UART1_BASE
31 31#elif defined(CONFIG_DEBUG_DAVINCI_DA8XX_UART2)
32 /* Use davinci_uart_phys/virt if already configured */ 32#define UART_BASE DA8XX_UART2_BASE
3310: adr \rp, 99f @ get effective addr of 99f 33#elif defined(CONFIG_DEBUG_DAVINCI_TNETV107X_UART1)
34 ldr \rv, [\rp] @ get absolute addr of 99f 34#define UART_BASE TNETV107X_UART2_BASE
35 sub \rv, \rv, \rp @ offset between the two 35#define UART_VIRTBASE TNETV107X_UART2_VIRT
36 ldr \rp, [\rp, #4] @ abs addr of omap_uart_phys 36#else
37 sub \tmp, \rp, \rv @ make it effective 37#error "Select a specifc port for DEBUG_LL"
38 ldr \rp, [\tmp, #0] @ davinci_uart_phys 38#endif
39 ldr \rv, [\tmp, #4] @ davinci_uart_virt
40 cmp \rp, #0 @ is port configured?
41 cmpne \rv, #0
42 bne 100f @ already configured
43
44 /* Check the debug UART address set in uncompress.h */
45 and \rp, pc, #0xff000000
46 ldr \rv, =DAVINCI_UART_INFO_OFS
47 add \rp, \rp, \rv
48
49 /* Copy uart phys address from decompressor uart info */
50 ldr \rv, [\rp, #0]
51 str \rv, [\tmp, #0]
52
53 /* Copy uart virt address from decompressor uart info */
54 ldr \rv, [\rp, #4]
55 str \rv, [\tmp, #4]
56
57 b 10b
58 39
59 .align 40#ifndef UART_VIRTBASE
6099: .word . 41#define UART_VIRTBASE IO_ADDRESS(UART_BASE)
61 .word davinci_uart_phys 42#endif
62 .ltorg
63 43
64100: 44 .macro addruart, rp, rv, tmp
45 ldr \rp, =UART_BASE
46 ldr \rv, =UART_VIRTBASE
65 .endm 47 .endm
66 48
67 .macro senduart,rd,rx 49 .macro senduart,rd,rx
diff --git a/arch/arm/mach-davinci/include/mach/hardware.h b/arch/arm/mach-davinci/include/mach/hardware.h
index 2184691ebc2f..16bb42291d39 100644
--- a/arch/arm/mach-davinci/include/mach/hardware.h
+++ b/arch/arm/mach-davinci/include/mach/hardware.h
@@ -22,7 +22,7 @@
22/* 22/*
23 * I/O mapping 23 * I/O mapping
24 */ 24 */
25#define IO_PHYS 0x01c00000UL 25#define IO_PHYS UL(0x01c00000)
26#define IO_OFFSET 0xfd000000 /* Virtual IO = 0xfec00000 */ 26#define IO_OFFSET 0xfd000000 /* Virtual IO = 0xfec00000 */
27#define IO_SIZE 0x00400000 27#define IO_SIZE 0x00400000
28#define IO_VIRT (IO_PHYS + IO_OFFSET) 28#define IO_VIRT (IO_PHYS + IO_OFFSET)
diff --git a/arch/arm/mach-davinci/include/mach/serial.h b/arch/arm/mach-davinci/include/mach/serial.h
index e347d88fef91..46b3cd11c3c2 100644
--- a/arch/arm/mach-davinci/include/mach/serial.h
+++ b/arch/arm/mach-davinci/include/mach/serial.h
@@ -15,16 +15,6 @@
15 15
16#include <mach/hardware.h> 16#include <mach/hardware.h>
17 17
18/*
19 * Stolen area that contains debug uart physical and virtual addresses. These
20 * addresses are filled in by the uncompress.h code, and are used by the debug
21 * macros in debug-macro.S.
22 *
23 * This area sits just below the page tables (see arch/arm/kernel/head.S).
24 * We define it as a relative offset from start of usable RAM.
25 */
26#define DAVINCI_UART_INFO_OFS 0x3ff8
27
28#define DAVINCI_UART0_BASE (IO_PHYS + 0x20000) 18#define DAVINCI_UART0_BASE (IO_PHYS + 0x20000)
29#define DAVINCI_UART1_BASE (IO_PHYS + 0x20400) 19#define DAVINCI_UART1_BASE (IO_PHYS + 0x20400)
30#define DAVINCI_UART2_BASE (IO_PHYS + 0x20800) 20#define DAVINCI_UART2_BASE (IO_PHYS + 0x20800)
diff --git a/arch/arm/mach-davinci/include/mach/uncompress.h b/arch/arm/mach-davinci/include/mach/uncompress.h
index da2fb2c2155a..18cfd4977155 100644
--- a/arch/arm/mach-davinci/include/mach/uncompress.h
+++ b/arch/arm/mach-davinci/include/mach/uncompress.h
@@ -43,37 +43,27 @@ static inline void flush(void)
43 barrier(); 43 barrier();
44} 44}
45 45
46static inline void set_uart_info(u32 phys, void * __iomem virt) 46static inline void set_uart_info(u32 phys)
47{ 47{
48 /*
49 * Get address of some.bss variable and round it down
50 * a la CONFIG_AUTO_ZRELADDR.
51 */
52 u32 ram_start = (u32)&uart & 0xf8000000;
53 u32 *uart_info = (u32 *)(ram_start + DAVINCI_UART_INFO_OFS);
54
55 uart = (u32 *)phys; 48 uart = (u32 *)phys;
56 uart_info[0] = phys;
57 uart_info[1] = (u32)virt;
58} 49}
59 50
60#define _DEBUG_LL_ENTRY(machine, phys, virt) \ 51#define _DEBUG_LL_ENTRY(machine, phys) \
61 if (machine_is_##machine()) { \ 52 { \
62 set_uart_info(phys, virt); \ 53 if (machine_is_##machine()) { \
63 break; \ 54 set_uart_info(phys); \
55 break; \
56 } \
64 } 57 }
65 58
66#define DEBUG_LL_DAVINCI(machine, port) \ 59#define DEBUG_LL_DAVINCI(machine, port) \
67 _DEBUG_LL_ENTRY(machine, DAVINCI_UART##port##_BASE, \ 60 _DEBUG_LL_ENTRY(machine, DAVINCI_UART##port##_BASE)
68 IO_ADDRESS(DAVINCI_UART##port##_BASE))
69 61
70#define DEBUG_LL_DA8XX(machine, port) \ 62#define DEBUG_LL_DA8XX(machine, port) \
71 _DEBUG_LL_ENTRY(machine, DA8XX_UART##port##_BASE, \ 63 _DEBUG_LL_ENTRY(machine, DA8XX_UART##port##_BASE)
72 IO_ADDRESS(DA8XX_UART##port##_BASE))
73 64
74#define DEBUG_LL_TNETV107X(machine, port) \ 65#define DEBUG_LL_TNETV107X(machine, port) \
75 _DEBUG_LL_ENTRY(machine, TNETV107X_UART##port##_BASE, \ 66 _DEBUG_LL_ENTRY(machine, TNETV107X_UART##port##_BASE)
76 TNETV107X_UART##port##_VIRT)
77 67
78static inline void __arch_decomp_setup(unsigned long arch_id) 68static inline void __arch_decomp_setup(unsigned long arch_id)
79{ 69{
diff --git a/arch/arm/mach-davinci/pm.c b/arch/arm/mach-davinci/pm.c
index 04c49f7543ef..eb8360b33aa9 100644
--- a/arch/arm/mach-davinci/pm.c
+++ b/arch/arm/mach-davinci/pm.c
@@ -152,8 +152,7 @@ static struct platform_driver davinci_pm_driver = {
152 .remove = __exit_p(davinci_pm_remove), 152 .remove = __exit_p(davinci_pm_remove),
153}; 153};
154 154
155static int __init davinci_pm_init(void) 155int __init davinci_pm_init(void)
156{ 156{
157 return platform_driver_probe(&davinci_pm_driver, davinci_pm_probe); 157 return platform_driver_probe(&davinci_pm_driver, davinci_pm_probe);
158} 158}
159late_initcall(davinci_pm_init);
diff --git a/arch/arm/mach-dove/common.c b/arch/arm/mach-dove/common.c
index 42ab1e7c4ecc..9493076fc594 100644
--- a/arch/arm/mach-dove/common.c
+++ b/arch/arm/mach-dove/common.c
@@ -13,7 +13,7 @@
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/pci.h> 15#include <linux/pci.h>
16#include <linux/clk.h> 16#include <linux/clk-provider.h>
17#include <linux/ata_platform.h> 17#include <linux/ata_platform.h>
18#include <linux/gpio.h> 18#include <linux/gpio.h>
19#include <asm/page.h> 19#include <asm/page.h>
@@ -68,6 +68,19 @@ void __init dove_map_io(void)
68} 68}
69 69
70/***************************************************************************** 70/*****************************************************************************
71 * CLK tree
72 ****************************************************************************/
73static struct clk *tclk;
74
75static void __init clk_init(void)
76{
77 tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
78 get_tclk());
79
80 orion_clkdev_init(tclk);
81}
82
83/*****************************************************************************
71 * EHCI0 84 * EHCI0
72 ****************************************************************************/ 85 ****************************************************************************/
73void __init dove_ehci0_init(void) 86void __init dove_ehci0_init(void)
@@ -89,8 +102,7 @@ void __init dove_ehci1_init(void)
89void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data) 102void __init dove_ge00_init(struct mv643xx_eth_platform_data *eth_data)
90{ 103{
91 orion_ge00_init(eth_data, 104 orion_ge00_init(eth_data,
92 DOVE_GE00_PHYS_BASE, IRQ_DOVE_GE00_SUM, 105 DOVE_GE00_PHYS_BASE, IRQ_DOVE_GE00_SUM, 0);
93 0, get_tclk());
94} 106}
95 107
96/***************************************************************************** 108/*****************************************************************************
@@ -116,7 +128,7 @@ void __init dove_sata_init(struct mv_sata_platform_data *sata_data)
116void __init dove_uart0_init(void) 128void __init dove_uart0_init(void)
117{ 129{
118 orion_uart0_init(DOVE_UART0_VIRT_BASE, DOVE_UART0_PHYS_BASE, 130 orion_uart0_init(DOVE_UART0_VIRT_BASE, DOVE_UART0_PHYS_BASE,
119 IRQ_DOVE_UART_0, get_tclk()); 131 IRQ_DOVE_UART_0, tclk);
120} 132}
121 133
122/***************************************************************************** 134/*****************************************************************************
@@ -125,7 +137,7 @@ void __init dove_uart0_init(void)
125void __init dove_uart1_init(void) 137void __init dove_uart1_init(void)
126{ 138{
127 orion_uart1_init(DOVE_UART1_VIRT_BASE, DOVE_UART1_PHYS_BASE, 139 orion_uart1_init(DOVE_UART1_VIRT_BASE, DOVE_UART1_PHYS_BASE,
128 IRQ_DOVE_UART_1, get_tclk()); 140 IRQ_DOVE_UART_1, tclk);
129} 141}
130 142
131/***************************************************************************** 143/*****************************************************************************
@@ -134,7 +146,7 @@ void __init dove_uart1_init(void)
134void __init dove_uart2_init(void) 146void __init dove_uart2_init(void)
135{ 147{
136 orion_uart2_init(DOVE_UART2_VIRT_BASE, DOVE_UART2_PHYS_BASE, 148 orion_uart2_init(DOVE_UART2_VIRT_BASE, DOVE_UART2_PHYS_BASE,
137 IRQ_DOVE_UART_2, get_tclk()); 149 IRQ_DOVE_UART_2, tclk);
138} 150}
139 151
140/***************************************************************************** 152/*****************************************************************************
@@ -143,7 +155,7 @@ void __init dove_uart2_init(void)
143void __init dove_uart3_init(void) 155void __init dove_uart3_init(void)
144{ 156{
145 orion_uart3_init(DOVE_UART3_VIRT_BASE, DOVE_UART3_PHYS_BASE, 157 orion_uart3_init(DOVE_UART3_VIRT_BASE, DOVE_UART3_PHYS_BASE,
146 IRQ_DOVE_UART_3, get_tclk()); 158 IRQ_DOVE_UART_3, tclk);
147} 159}
148 160
149/***************************************************************************** 161/*****************************************************************************
@@ -151,12 +163,12 @@ void __init dove_uart3_init(void)
151 ****************************************************************************/ 163 ****************************************************************************/
152void __init dove_spi0_init(void) 164void __init dove_spi0_init(void)
153{ 165{
154 orion_spi_init(DOVE_SPI0_PHYS_BASE, get_tclk()); 166 orion_spi_init(DOVE_SPI0_PHYS_BASE);
155} 167}
156 168
157void __init dove_spi1_init(void) 169void __init dove_spi1_init(void)
158{ 170{
159 orion_spi_1_init(DOVE_SPI1_PHYS_BASE, get_tclk()); 171 orion_spi_1_init(DOVE_SPI1_PHYS_BASE);
160} 172}
161 173
162/***************************************************************************** 174/*****************************************************************************
@@ -272,18 +284,17 @@ void __init dove_sdio1_init(void)
272 284
273void __init dove_init(void) 285void __init dove_init(void)
274{ 286{
275 int tclk;
276
277 tclk = get_tclk();
278
279 printk(KERN_INFO "Dove 88AP510 SoC, "); 287 printk(KERN_INFO "Dove 88AP510 SoC, ");
280 printk(KERN_INFO "TCLK = %dMHz\n", (tclk + 499999) / 1000000); 288 printk(KERN_INFO "TCLK = %dMHz\n", (get_tclk() + 499999) / 1000000);
281 289
282#ifdef CONFIG_CACHE_TAUROS2 290#ifdef CONFIG_CACHE_TAUROS2
283 tauros2_init(); 291 tauros2_init();
284#endif 292#endif
285 dove_setup_cpu_mbus(); 293 dove_setup_cpu_mbus();
286 294
295 /* Setup root of clk tree */
296 clk_init();
297
287 /* internal devices that every board has */ 298 /* internal devices that every board has */
288 dove_rtc_init(); 299 dove_rtc_init();
289 dove_xor0_init(); 300 dove_xor0_init();
diff --git a/arch/arm/mach-dove/dove-db-setup.c b/arch/arm/mach-dove/dove-db-setup.c
index ea77ae430b2d..bc2867f11346 100644
--- a/arch/arm/mach-dove/dove-db-setup.c
+++ b/arch/arm/mach-dove/dove-db-setup.c
@@ -20,7 +20,6 @@
20#include <linux/i2c.h> 20#include <linux/i2c.h>
21#include <linux/pci.h> 21#include <linux/pci.h>
22#include <linux/spi/spi.h> 22#include <linux/spi/spi.h>
23#include <linux/spi/orion_spi.h>
24#include <linux/spi/flash.h> 23#include <linux/spi/flash.h>
25#include <linux/gpio.h> 24#include <linux/gpio.h>
26#include <asm/mach-types.h> 25#include <asm/mach-types.h>
diff --git a/arch/arm/mach-ep93xx/adssphere.c b/arch/arm/mach-ep93xx/adssphere.c
index 2d45947a3034..a472777e9eba 100644
--- a/arch/arm/mach-ep93xx/adssphere.c
+++ b/arch/arm/mach-ep93xx/adssphere.c
@@ -41,5 +41,6 @@ MACHINE_START(ADSSPHERE, "ADS Sphere board")
41 .handle_irq = vic_handle_irq, 41 .handle_irq = vic_handle_irq,
42 .timer = &ep93xx_timer, 42 .timer = &ep93xx_timer,
43 .init_machine = adssphere_init_machine, 43 .init_machine = adssphere_init_machine,
44 .init_late = ep93xx_init_late,
44 .restart = ep93xx_restart, 45 .restart = ep93xx_restart,
45MACHINE_END 46MACHINE_END
diff --git a/arch/arm/mach-ep93xx/core.c b/arch/arm/mach-ep93xx/core.c
index 66b1494f23a6..4dd07a0e3604 100644
--- a/arch/arm/mach-ep93xx/core.c
+++ b/arch/arm/mach-ep93xx/core.c
@@ -675,7 +675,7 @@ int ep93xx_keypad_acquire_gpio(struct platform_device *pdev)
675fail_gpio_d: 675fail_gpio_d:
676 gpio_free(EP93XX_GPIO_LINE_C(i)); 676 gpio_free(EP93XX_GPIO_LINE_C(i));
677fail_gpio_c: 677fail_gpio_c:
678 for ( ; i >= 0; --i) { 678 for (--i; i >= 0; --i) {
679 gpio_free(EP93XX_GPIO_LINE_C(i)); 679 gpio_free(EP93XX_GPIO_LINE_C(i));
680 gpio_free(EP93XX_GPIO_LINE_D(i)); 680 gpio_free(EP93XX_GPIO_LINE_D(i));
681 } 681 }
@@ -834,3 +834,8 @@ void ep93xx_restart(char mode, const char *cmd)
834 while (1) 834 while (1)
835 ; 835 ;
836} 836}
837
838void __init ep93xx_init_late(void)
839{
840 crunch_init();
841}
diff --git a/arch/arm/mach-ep93xx/crunch.c b/arch/arm/mach-ep93xx/crunch.c
index 74753e2df603..a4a2ab9648c9 100644
--- a/arch/arm/mach-ep93xx/crunch.c
+++ b/arch/arm/mach-ep93xx/crunch.c
@@ -79,12 +79,10 @@ static struct notifier_block crunch_notifier_block = {
79 .notifier_call = crunch_do, 79 .notifier_call = crunch_do,
80}; 80};
81 81
82static int __init crunch_init(void) 82int __init crunch_init(void)
83{ 83{
84 thread_register_notifier(&crunch_notifier_block); 84 thread_register_notifier(&crunch_notifier_block);
85 elf_hwcap |= HWCAP_CRUNCH; 85 elf_hwcap |= HWCAP_CRUNCH;
86 86
87 return 0; 87 return 0;
88} 88}
89
90late_initcall(crunch_init);
diff --git a/arch/arm/mach-ep93xx/edb93xx.c b/arch/arm/mach-ep93xx/edb93xx.c
index da9047d726f0..d74c5cddb98b 100644
--- a/arch/arm/mach-ep93xx/edb93xx.c
+++ b/arch/arm/mach-ep93xx/edb93xx.c
@@ -255,6 +255,7 @@ MACHINE_START(EDB9301, "Cirrus Logic EDB9301 Evaluation Board")
255 .handle_irq = vic_handle_irq, 255 .handle_irq = vic_handle_irq,
256 .timer = &ep93xx_timer, 256 .timer = &ep93xx_timer,
257 .init_machine = edb93xx_init_machine, 257 .init_machine = edb93xx_init_machine,
258 .init_late = ep93xx_init_late,
258 .restart = ep93xx_restart, 259 .restart = ep93xx_restart,
259MACHINE_END 260MACHINE_END
260#endif 261#endif
@@ -268,6 +269,7 @@ MACHINE_START(EDB9302, "Cirrus Logic EDB9302 Evaluation Board")
268 .handle_irq = vic_handle_irq, 269 .handle_irq = vic_handle_irq,
269 .timer = &ep93xx_timer, 270 .timer = &ep93xx_timer,
270 .init_machine = edb93xx_init_machine, 271 .init_machine = edb93xx_init_machine,
272 .init_late = ep93xx_init_late,
271 .restart = ep93xx_restart, 273 .restart = ep93xx_restart,
272MACHINE_END 274MACHINE_END
273#endif 275#endif
@@ -281,6 +283,7 @@ MACHINE_START(EDB9302A, "Cirrus Logic EDB9302A Evaluation Board")
281 .handle_irq = vic_handle_irq, 283 .handle_irq = vic_handle_irq,
282 .timer = &ep93xx_timer, 284 .timer = &ep93xx_timer,
283 .init_machine = edb93xx_init_machine, 285 .init_machine = edb93xx_init_machine,
286 .init_late = ep93xx_init_late,
284 .restart = ep93xx_restart, 287 .restart = ep93xx_restart,
285MACHINE_END 288MACHINE_END
286#endif 289#endif
@@ -294,6 +297,7 @@ MACHINE_START(EDB9307, "Cirrus Logic EDB9307 Evaluation Board")
294 .handle_irq = vic_handle_irq, 297 .handle_irq = vic_handle_irq,
295 .timer = &ep93xx_timer, 298 .timer = &ep93xx_timer,
296 .init_machine = edb93xx_init_machine, 299 .init_machine = edb93xx_init_machine,
300 .init_late = ep93xx_init_late,
297 .restart = ep93xx_restart, 301 .restart = ep93xx_restart,
298MACHINE_END 302MACHINE_END
299#endif 303#endif
@@ -307,6 +311,7 @@ MACHINE_START(EDB9307A, "Cirrus Logic EDB9307A Evaluation Board")
307 .handle_irq = vic_handle_irq, 311 .handle_irq = vic_handle_irq,
308 .timer = &ep93xx_timer, 312 .timer = &ep93xx_timer,
309 .init_machine = edb93xx_init_machine, 313 .init_machine = edb93xx_init_machine,
314 .init_late = ep93xx_init_late,
310 .restart = ep93xx_restart, 315 .restart = ep93xx_restart,
311MACHINE_END 316MACHINE_END
312#endif 317#endif
@@ -320,6 +325,7 @@ MACHINE_START(EDB9312, "Cirrus Logic EDB9312 Evaluation Board")
320 .handle_irq = vic_handle_irq, 325 .handle_irq = vic_handle_irq,
321 .timer = &ep93xx_timer, 326 .timer = &ep93xx_timer,
322 .init_machine = edb93xx_init_machine, 327 .init_machine = edb93xx_init_machine,
328 .init_late = ep93xx_init_late,
323 .restart = ep93xx_restart, 329 .restart = ep93xx_restart,
324MACHINE_END 330MACHINE_END
325#endif 331#endif
@@ -333,6 +339,7 @@ MACHINE_START(EDB9315, "Cirrus Logic EDB9315 Evaluation Board")
333 .handle_irq = vic_handle_irq, 339 .handle_irq = vic_handle_irq,
334 .timer = &ep93xx_timer, 340 .timer = &ep93xx_timer,
335 .init_machine = edb93xx_init_machine, 341 .init_machine = edb93xx_init_machine,
342 .init_late = ep93xx_init_late,
336 .restart = ep93xx_restart, 343 .restart = ep93xx_restart,
337MACHINE_END 344MACHINE_END
338#endif 345#endif
@@ -346,6 +353,7 @@ MACHINE_START(EDB9315A, "Cirrus Logic EDB9315A Evaluation Board")
346 .handle_irq = vic_handle_irq, 353 .handle_irq = vic_handle_irq,
347 .timer = &ep93xx_timer, 354 .timer = &ep93xx_timer,
348 .init_machine = edb93xx_init_machine, 355 .init_machine = edb93xx_init_machine,
356 .init_late = ep93xx_init_late,
349 .restart = ep93xx_restart, 357 .restart = ep93xx_restart,
350MACHINE_END 358MACHINE_END
351#endif 359#endif
diff --git a/arch/arm/mach-ep93xx/gesbc9312.c b/arch/arm/mach-ep93xx/gesbc9312.c
index fcdffbe49dcc..437c34111155 100644
--- a/arch/arm/mach-ep93xx/gesbc9312.c
+++ b/arch/arm/mach-ep93xx/gesbc9312.c
@@ -41,5 +41,6 @@ MACHINE_START(GESBC9312, "Glomation GESBC-9312-sx")
41 .handle_irq = vic_handle_irq, 41 .handle_irq = vic_handle_irq,
42 .timer = &ep93xx_timer, 42 .timer = &ep93xx_timer,
43 .init_machine = gesbc9312_init_machine, 43 .init_machine = gesbc9312_init_machine,
44 .init_late = ep93xx_init_late,
44 .restart = ep93xx_restart, 45 .restart = ep93xx_restart,
45MACHINE_END 46MACHINE_END
diff --git a/arch/arm/mach-ep93xx/include/mach/platform.h b/arch/arm/mach-ep93xx/include/mach/platform.h
index 602bd87fd0ab..1ecb040d98bf 100644
--- a/arch/arm/mach-ep93xx/include/mach/platform.h
+++ b/arch/arm/mach-ep93xx/include/mach/platform.h
@@ -53,5 +53,12 @@ void ep93xx_init_devices(void);
53extern struct sys_timer ep93xx_timer; 53extern struct sys_timer ep93xx_timer;
54 54
55void ep93xx_restart(char, const char *); 55void ep93xx_restart(char, const char *);
56void ep93xx_init_late(void);
57
58#ifdef CONFIG_CRUNCH
59int crunch_init(void);
60#else
61static inline int crunch_init(void) { return 0; }
62#endif
56 63
57#endif 64#endif
diff --git a/arch/arm/mach-ep93xx/micro9.c b/arch/arm/mach-ep93xx/micro9.c
index dc431c5f04ce..3d7cdab725b2 100644
--- a/arch/arm/mach-ep93xx/micro9.c
+++ b/arch/arm/mach-ep93xx/micro9.c
@@ -85,6 +85,7 @@ MACHINE_START(MICRO9, "Contec Micro9-High")
85 .handle_irq = vic_handle_irq, 85 .handle_irq = vic_handle_irq,
86 .timer = &ep93xx_timer, 86 .timer = &ep93xx_timer,
87 .init_machine = micro9_init_machine, 87 .init_machine = micro9_init_machine,
88 .init_late = ep93xx_init_late,
88 .restart = ep93xx_restart, 89 .restart = ep93xx_restart,
89MACHINE_END 90MACHINE_END
90#endif 91#endif
@@ -98,6 +99,7 @@ MACHINE_START(MICRO9M, "Contec Micro9-Mid")
98 .handle_irq = vic_handle_irq, 99 .handle_irq = vic_handle_irq,
99 .timer = &ep93xx_timer, 100 .timer = &ep93xx_timer,
100 .init_machine = micro9_init_machine, 101 .init_machine = micro9_init_machine,
102 .init_late = ep93xx_init_late,
101 .restart = ep93xx_restart, 103 .restart = ep93xx_restart,
102MACHINE_END 104MACHINE_END
103#endif 105#endif
@@ -111,6 +113,7 @@ MACHINE_START(MICRO9L, "Contec Micro9-Lite")
111 .handle_irq = vic_handle_irq, 113 .handle_irq = vic_handle_irq,
112 .timer = &ep93xx_timer, 114 .timer = &ep93xx_timer,
113 .init_machine = micro9_init_machine, 115 .init_machine = micro9_init_machine,
116 .init_late = ep93xx_init_late,
114 .restart = ep93xx_restart, 117 .restart = ep93xx_restart,
115MACHINE_END 118MACHINE_END
116#endif 119#endif
@@ -124,6 +127,7 @@ MACHINE_START(MICRO9S, "Contec Micro9-Slim")
124 .handle_irq = vic_handle_irq, 127 .handle_irq = vic_handle_irq,
125 .timer = &ep93xx_timer, 128 .timer = &ep93xx_timer,
126 .init_machine = micro9_init_machine, 129 .init_machine = micro9_init_machine,
130 .init_late = ep93xx_init_late,
127 .restart = ep93xx_restart, 131 .restart = ep93xx_restart,
128MACHINE_END 132MACHINE_END
129#endif 133#endif
diff --git a/arch/arm/mach-ep93xx/simone.c b/arch/arm/mach-ep93xx/simone.c
index f40c2987e545..33dc07917417 100644
--- a/arch/arm/mach-ep93xx/simone.c
+++ b/arch/arm/mach-ep93xx/simone.c
@@ -86,5 +86,6 @@ MACHINE_START(SIM_ONE, "Simplemachines Sim.One Board")
86 .handle_irq = vic_handle_irq, 86 .handle_irq = vic_handle_irq,
87 .timer = &ep93xx_timer, 87 .timer = &ep93xx_timer,
88 .init_machine = simone_init_machine, 88 .init_machine = simone_init_machine,
89 .init_late = ep93xx_init_late,
89 .restart = ep93xx_restart, 90 .restart = ep93xx_restart,
90MACHINE_END 91MACHINE_END
diff --git a/arch/arm/mach-ep93xx/snappercl15.c b/arch/arm/mach-ep93xx/snappercl15.c
index 0c00852ef160..eb282378fa78 100644
--- a/arch/arm/mach-ep93xx/snappercl15.c
+++ b/arch/arm/mach-ep93xx/snappercl15.c
@@ -183,5 +183,6 @@ MACHINE_START(SNAPPER_CL15, "Bluewater Systems Snapper CL15")
183 .handle_irq = vic_handle_irq, 183 .handle_irq = vic_handle_irq,
184 .timer = &ep93xx_timer, 184 .timer = &ep93xx_timer,
185 .init_machine = snappercl15_init_machine, 185 .init_machine = snappercl15_init_machine,
186 .init_late = ep93xx_init_late,
186 .restart = ep93xx_restart, 187 .restart = ep93xx_restart,
187MACHINE_END 188MACHINE_END
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index 5ea790942e94..d4ef339d961e 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -252,5 +252,6 @@ MACHINE_START(TS72XX, "Technologic Systems TS-72xx SBC")
252 .handle_irq = vic_handle_irq, 252 .handle_irq = vic_handle_irq,
253 .timer = &ep93xx_timer, 253 .timer = &ep93xx_timer,
254 .init_machine = ts72xx_init_machine, 254 .init_machine = ts72xx_init_machine,
255 .init_late = ep93xx_init_late,
255 .restart = ep93xx_restart, 256 .restart = ep93xx_restart,
256MACHINE_END 257MACHINE_END
diff --git a/arch/arm/mach-ep93xx/vision_ep9307.c b/arch/arm/mach-ep93xx/vision_ep9307.c
index ba156eb225e8..2905a4929bdc 100644
--- a/arch/arm/mach-ep93xx/vision_ep9307.c
+++ b/arch/arm/mach-ep93xx/vision_ep9307.c
@@ -367,5 +367,6 @@ MACHINE_START(VISION_EP9307, "Vision Engraving Systems EP9307")
367 .handle_irq = vic_handle_irq, 367 .handle_irq = vic_handle_irq,
368 .timer = &ep93xx_timer, 368 .timer = &ep93xx_timer,
369 .init_machine = vision_init_machine, 369 .init_machine = vision_init_machine,
370 .init_late = ep93xx_init_late,
370 .restart = ep93xx_restart, 371 .restart = ep93xx_restart,
371MACHINE_END 372MACHINE_END
diff --git a/arch/arm/mach-exynos/Kconfig b/arch/arm/mach-exynos/Kconfig
index 15b05b89cc39..43ebe9094411 100644
--- a/arch/arm/mach-exynos/Kconfig
+++ b/arch/arm/mach-exynos/Kconfig
@@ -61,6 +61,7 @@ config SOC_EXYNOS5250
61 bool "SAMSUNG EXYNOS5250" 61 bool "SAMSUNG EXYNOS5250"
62 default y 62 default y
63 depends on ARCH_EXYNOS5 63 depends on ARCH_EXYNOS5
64 select SAMSUNG_DMADEV
64 help 65 help
65 Enable EXYNOS5250 SoC support 66 Enable EXYNOS5250 SoC support
66 67
@@ -70,7 +71,7 @@ config EXYNOS4_MCT
70 help 71 help
71 Use MCT (Multi Core Timer) as kernel timers 72 Use MCT (Multi Core Timer) as kernel timers
72 73
73config EXYNOS4_DEV_DMA 74config EXYNOS_DEV_DMA
74 bool 75 bool
75 help 76 help
76 Compile in amba device definitions for DMA controller 77 Compile in amba device definitions for DMA controller
@@ -80,15 +81,20 @@ config EXYNOS4_DEV_AHCI
80 help 81 help
81 Compile in platform device definitions for AHCI 82 Compile in platform device definitions for AHCI
82 83
84config EXYNOS_DEV_DRM
85 bool
86 help
87 Compile in platform device definitions for core DRM device
88
83config EXYNOS4_SETUP_FIMD0 89config EXYNOS4_SETUP_FIMD0
84 bool 90 bool
85 help 91 help
86 Common setup code for FIMD0. 92 Common setup code for FIMD0.
87 93
88config EXYNOS4_DEV_SYSMMU 94config EXYNOS_DEV_SYSMMU
89 bool 95 bool
90 help 96 help
91 Common setup code for SYSTEM MMU in EXYNOS4 97 Common setup code for SYSTEM MMU in EXYNOS platforms
92 98
93config EXYNOS4_DEV_DWMCI 99config EXYNOS4_DEV_DWMCI
94 bool 100 bool
@@ -161,7 +167,7 @@ config EXYNOS4_SETUP_USB_PHY
161 help 167 help
162 Common setup code for USB PHY controller 168 Common setup code for USB PHY controller
163 169
164config EXYNOS4_SETUP_SPI 170config EXYNOS_SETUP_SPI
165 bool 171 bool
166 help 172 help
167 Common setup code for SPI GPIO configurations. 173 Common setup code for SPI GPIO configurations.
@@ -201,12 +207,12 @@ config MACH_SMDKV310
201 select S3C_DEV_HSMMC3 207 select S3C_DEV_HSMMC3
202 select SAMSUNG_DEV_BACKLIGHT 208 select SAMSUNG_DEV_BACKLIGHT
203 select EXYNOS_DEV_DRM 209 select EXYNOS_DEV_DRM
210 select EXYNOS_DEV_SYSMMU
204 select EXYNOS4_DEV_AHCI 211 select EXYNOS4_DEV_AHCI
205 select SAMSUNG_DEV_KEYPAD 212 select SAMSUNG_DEV_KEYPAD
206 select EXYNOS4_DEV_DMA 213 select EXYNOS4_DEV_DMA
207 select SAMSUNG_DEV_PWM 214 select SAMSUNG_DEV_PWM
208 select EXYNOS4_DEV_USB_OHCI 215 select EXYNOS4_DEV_USB_OHCI
209 select EXYNOS4_DEV_SYSMMU
210 select EXYNOS4_SETUP_FIMD0 216 select EXYNOS4_SETUP_FIMD0
211 select EXYNOS4_SETUP_I2C1 217 select EXYNOS4_SETUP_I2C1
212 select EXYNOS4_SETUP_KEYPAD 218 select EXYNOS4_SETUP_KEYPAD
@@ -224,8 +230,7 @@ config MACH_ARMLEX4210
224 select S3C_DEV_HSMMC2 230 select S3C_DEV_HSMMC2
225 select S3C_DEV_HSMMC3 231 select S3C_DEV_HSMMC3
226 select EXYNOS4_DEV_AHCI 232 select EXYNOS4_DEV_AHCI
227 select EXYNOS4_DEV_DMA 233 select EXYNOS_DEV_DMA
228 select EXYNOS4_DEV_SYSMMU
229 select EXYNOS4_SETUP_SDHCI 234 select EXYNOS4_SETUP_SDHCI
230 help 235 help
231 Machine support for Samsung ARMLEX4210 based on EXYNOS4210 236 Machine support for Samsung ARMLEX4210 based on EXYNOS4210
@@ -256,6 +261,7 @@ config MACH_UNIVERSAL_C210
256 select S5P_DEV_MFC 261 select S5P_DEV_MFC
257 select S5P_DEV_ONENAND 262 select S5P_DEV_ONENAND
258 select S5P_DEV_TV 263 select S5P_DEV_TV
264 select EXYNOS_DEV_SYSMMU
259 select EXYNOS4_DEV_DMA 265 select EXYNOS4_DEV_DMA
260 select EXYNOS_DEV_DRM 266 select EXYNOS_DEV_DRM
261 select EXYNOS4_SETUP_FIMD0 267 select EXYNOS4_SETUP_FIMD0
@@ -332,6 +338,7 @@ config MACH_ORIGEN
332 select SAMSUNG_DEV_BACKLIGHT 338 select SAMSUNG_DEV_BACKLIGHT
333 select SAMSUNG_DEV_PWM 339 select SAMSUNG_DEV_PWM
334 select EXYNOS_DEV_DRM 340 select EXYNOS_DEV_DRM
341 select EXYNOS_DEV_SYSMMU
335 select EXYNOS4_DEV_DMA 342 select EXYNOS4_DEV_DMA
336 select EXYNOS4_DEV_USB_OHCI 343 select EXYNOS4_DEV_USB_OHCI
337 select EXYNOS4_SETUP_FIMD0 344 select EXYNOS4_SETUP_FIMD0
@@ -360,7 +367,8 @@ config MACH_SMDK4212
360 select SAMSUNG_DEV_BACKLIGHT 367 select SAMSUNG_DEV_BACKLIGHT
361 select SAMSUNG_DEV_KEYPAD 368 select SAMSUNG_DEV_KEYPAD
362 select SAMSUNG_DEV_PWM 369 select SAMSUNG_DEV_PWM
363 select EXYNOS4_DEV_DMA 370 select EXYNOS_DEV_SYSMMU
371 select EXYNOS_DEV_DMA
364 select EXYNOS4_SETUP_I2C1 372 select EXYNOS4_SETUP_I2C1
365 select EXYNOS4_SETUP_I2C3 373 select EXYNOS4_SETUP_I2C3
366 select EXYNOS4_SETUP_I2C7 374 select EXYNOS4_SETUP_I2C7
diff --git a/arch/arm/mach-exynos/Makefile b/arch/arm/mach-exynos/Makefile
index 8631840d1b5e..440a637c76f1 100644
--- a/arch/arm/mach-exynos/Makefile
+++ b/arch/arm/mach-exynos/Makefile
@@ -50,10 +50,11 @@ obj-$(CONFIG_MACH_EXYNOS5_DT) += mach-exynos5-dt.o
50obj-y += dev-uart.o 50obj-y += dev-uart.o
51obj-$(CONFIG_ARCH_EXYNOS4) += dev-audio.o 51obj-$(CONFIG_ARCH_EXYNOS4) += dev-audio.o
52obj-$(CONFIG_EXYNOS4_DEV_AHCI) += dev-ahci.o 52obj-$(CONFIG_EXYNOS4_DEV_AHCI) += dev-ahci.o
53obj-$(CONFIG_EXYNOS4_DEV_SYSMMU) += dev-sysmmu.o
54obj-$(CONFIG_EXYNOS4_DEV_DWMCI) += dev-dwmci.o 53obj-$(CONFIG_EXYNOS4_DEV_DWMCI) += dev-dwmci.o
55obj-$(CONFIG_EXYNOS4_DEV_DMA) += dma.o 54obj-$(CONFIG_EXYNOS_DEV_DMA) += dma.o
56obj-$(CONFIG_EXYNOS4_DEV_USB_OHCI) += dev-ohci.o 55obj-$(CONFIG_EXYNOS4_DEV_USB_OHCI) += dev-ohci.o
56obj-$(CONFIG_EXYNOS_DEV_DRM) += dev-drm.o
57obj-$(CONFIG_EXYNOS_DEV_SYSMMU) += dev-sysmmu.o
57 58
58obj-$(CONFIG_ARCH_EXYNOS) += setup-i2c0.o 59obj-$(CONFIG_ARCH_EXYNOS) += setup-i2c0.o
59obj-$(CONFIG_EXYNOS4_SETUP_FIMC) += setup-fimc.o 60obj-$(CONFIG_EXYNOS4_SETUP_FIMC) += setup-fimc.o
@@ -68,4 +69,4 @@ obj-$(CONFIG_EXYNOS4_SETUP_I2C7) += setup-i2c7.o
68obj-$(CONFIG_EXYNOS4_SETUP_KEYPAD) += setup-keypad.o 69obj-$(CONFIG_EXYNOS4_SETUP_KEYPAD) += setup-keypad.o
69obj-$(CONFIG_EXYNOS4_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o 70obj-$(CONFIG_EXYNOS4_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
70obj-$(CONFIG_EXYNOS4_SETUP_USB_PHY) += setup-usb-phy.o 71obj-$(CONFIG_EXYNOS4_SETUP_USB_PHY) += setup-usb-phy.o
71obj-$(CONFIG_EXYNOS4_SETUP_SPI) += setup-spi.o 72obj-$(CONFIG_EXYNOS_SETUP_SPI) += setup-spi.o
diff --git a/arch/arm/mach-exynos/Makefile.boot b/arch/arm/mach-exynos/Makefile.boot
index b9862e22bf10..31bd181b0514 100644
--- a/arch/arm/mach-exynos/Makefile.boot
+++ b/arch/arm/mach-exynos/Makefile.boot
@@ -1,2 +1,5 @@
1 zreladdr-y += 0x40008000 1 zreladdr-y += 0x40008000
2params_phys-y := 0x40000100 2params_phys-y := 0x40000100
3
4dtb-$(CONFIG_MACH_EXYNOS4_DT) += exynos4210-origen.dtb exynos4210-smdkv310.dtb
5dtb-$(CONFIG_MACH_EXYNOS5_DT) += exynos5250-smdk5250.dtb
diff --git a/arch/arm/mach-exynos/clock-exynos4.c b/arch/arm/mach-exynos/clock-exynos4.c
index 6efd1e5919fd..bcb7db453145 100644
--- a/arch/arm/mach-exynos/clock-exynos4.c
+++ b/arch/arm/mach-exynos/clock-exynos4.c
@@ -168,7 +168,7 @@ static int exynos4_clk_ip_tv_ctrl(struct clk *clk, int enable)
168 return s5p_gatectrl(EXYNOS4_CLKGATE_IP_TV, clk, enable); 168 return s5p_gatectrl(EXYNOS4_CLKGATE_IP_TV, clk, enable);
169} 169}
170 170
171static int exynos4_clk_ip_image_ctrl(struct clk *clk, int enable) 171int exynos4_clk_ip_image_ctrl(struct clk *clk, int enable)
172{ 172{
173 return s5p_gatectrl(EXYNOS4_CLKGATE_IP_IMAGE, clk, enable); 173 return s5p_gatectrl(EXYNOS4_CLKGATE_IP_IMAGE, clk, enable);
174} 174}
@@ -198,6 +198,11 @@ static int exynos4_clk_ip_perir_ctrl(struct clk *clk, int enable)
198 return s5p_gatectrl(EXYNOS4_CLKGATE_IP_PERIR, clk, enable); 198 return s5p_gatectrl(EXYNOS4_CLKGATE_IP_PERIR, clk, enable);
199} 199}
200 200
201int exynos4_clk_ip_dmc_ctrl(struct clk *clk, int enable)
202{
203 return s5p_gatectrl(EXYNOS4_CLKGATE_IP_DMC, clk, enable);
204}
205
201static int exynos4_clk_hdmiphy_ctrl(struct clk *clk, int enable) 206static int exynos4_clk_hdmiphy_ctrl(struct clk *clk, int enable)
202{ 207{
203 return s5p_gatectrl(S5P_HDMI_PHY_CONTROL, clk, enable); 208 return s5p_gatectrl(S5P_HDMI_PHY_CONTROL, clk, enable);
@@ -678,61 +683,55 @@ static struct clk exynos4_init_clocks_off[] = {
678 .enable = exynos4_clk_ip_peril_ctrl, 683 .enable = exynos4_clk_ip_peril_ctrl,
679 .ctrlbit = (1 << 14), 684 .ctrlbit = (1 << 14),
680 }, { 685 }, {
681 .name = "SYSMMU_MDMA", 686 .name = SYSMMU_CLOCK_NAME,
687 .devname = SYSMMU_CLOCK_DEVNAME(mfc_l, 0),
688 .enable = exynos4_clk_ip_mfc_ctrl,
689 .ctrlbit = (1 << 1),
690 }, {
691 .name = SYSMMU_CLOCK_NAME,
692 .devname = SYSMMU_CLOCK_DEVNAME(mfc_r, 1),
693 .enable = exynos4_clk_ip_mfc_ctrl,
694 .ctrlbit = (1 << 2),
695 }, {
696 .name = SYSMMU_CLOCK_NAME,
697 .devname = SYSMMU_CLOCK_DEVNAME(tv, 2),
698 .enable = exynos4_clk_ip_tv_ctrl,
699 .ctrlbit = (1 << 4),
700 }, {
701 .name = SYSMMU_CLOCK_NAME,
702 .devname = SYSMMU_CLOCK_DEVNAME(jpeg, 3),
703 .enable = exynos4_clk_ip_cam_ctrl,
704 .ctrlbit = (1 << 11),
705 }, {
706 .name = SYSMMU_CLOCK_NAME,
707 .devname = SYSMMU_CLOCK_DEVNAME(rot, 4),
682 .enable = exynos4_clk_ip_image_ctrl, 708 .enable = exynos4_clk_ip_image_ctrl,
683 .ctrlbit = (1 << 5), 709 .ctrlbit = (1 << 4),
684 }, { 710 }, {
685 .name = "SYSMMU_FIMC0", 711 .name = SYSMMU_CLOCK_NAME,
712 .devname = SYSMMU_CLOCK_DEVNAME(fimc0, 5),
686 .enable = exynos4_clk_ip_cam_ctrl, 713 .enable = exynos4_clk_ip_cam_ctrl,
687 .ctrlbit = (1 << 7), 714 .ctrlbit = (1 << 7),
688 }, { 715 }, {
689 .name = "SYSMMU_FIMC1", 716 .name = SYSMMU_CLOCK_NAME,
717 .devname = SYSMMU_CLOCK_DEVNAME(fimc1, 6),
690 .enable = exynos4_clk_ip_cam_ctrl, 718 .enable = exynos4_clk_ip_cam_ctrl,
691 .ctrlbit = (1 << 8), 719 .ctrlbit = (1 << 8),
692 }, { 720 }, {
693 .name = "SYSMMU_FIMC2", 721 .name = SYSMMU_CLOCK_NAME,
722 .devname = SYSMMU_CLOCK_DEVNAME(fimc2, 7),
694 .enable = exynos4_clk_ip_cam_ctrl, 723 .enable = exynos4_clk_ip_cam_ctrl,
695 .ctrlbit = (1 << 9), 724 .ctrlbit = (1 << 9),
696 }, { 725 }, {
697 .name = "SYSMMU_FIMC3", 726 .name = SYSMMU_CLOCK_NAME,
727 .devname = SYSMMU_CLOCK_DEVNAME(fimc3, 8),
698 .enable = exynos4_clk_ip_cam_ctrl, 728 .enable = exynos4_clk_ip_cam_ctrl,
699 .ctrlbit = (1 << 10), 729 .ctrlbit = (1 << 10),
700 }, { 730 }, {
701 .name = "SYSMMU_JPEG", 731 .name = SYSMMU_CLOCK_NAME,
702 .enable = exynos4_clk_ip_cam_ctrl, 732 .devname = SYSMMU_CLOCK_DEVNAME(fimd0, 10),
703 .ctrlbit = (1 << 11),
704 }, {
705 .name = "SYSMMU_FIMD0",
706 .enable = exynos4_clk_ip_lcd0_ctrl, 733 .enable = exynos4_clk_ip_lcd0_ctrl,
707 .ctrlbit = (1 << 4), 734 .ctrlbit = (1 << 4),
708 }, {
709 .name = "SYSMMU_FIMD1",
710 .enable = exynos4_clk_ip_lcd1_ctrl,
711 .ctrlbit = (1 << 4),
712 }, {
713 .name = "SYSMMU_PCIe",
714 .enable = exynos4_clk_ip_fsys_ctrl,
715 .ctrlbit = (1 << 18),
716 }, {
717 .name = "SYSMMU_G2D",
718 .enable = exynos4_clk_ip_image_ctrl,
719 .ctrlbit = (1 << 3),
720 }, {
721 .name = "SYSMMU_ROTATOR",
722 .enable = exynos4_clk_ip_image_ctrl,
723 .ctrlbit = (1 << 4),
724 }, {
725 .name = "SYSMMU_TV",
726 .enable = exynos4_clk_ip_tv_ctrl,
727 .ctrlbit = (1 << 4),
728 }, {
729 .name = "SYSMMU_MFC_L",
730 .enable = exynos4_clk_ip_mfc_ctrl,
731 .ctrlbit = (1 << 1),
732 }, {
733 .name = "SYSMMU_MFC_R",
734 .enable = exynos4_clk_ip_mfc_ctrl,
735 .ctrlbit = (1 << 2),
736 } 735 }
737}; 736};
738 737
diff --git a/arch/arm/mach-exynos/clock-exynos4.h b/arch/arm/mach-exynos/clock-exynos4.h
index cb71c29c14d1..28a119701182 100644
--- a/arch/arm/mach-exynos/clock-exynos4.h
+++ b/arch/arm/mach-exynos/clock-exynos4.h
@@ -26,5 +26,7 @@ extern struct clk *exynos4_clkset_group_list[];
26extern int exynos4_clksrc_mask_fsys_ctrl(struct clk *clk, int enable); 26extern int exynos4_clksrc_mask_fsys_ctrl(struct clk *clk, int enable);
27extern int exynos4_clk_ip_fsys_ctrl(struct clk *clk, int enable); 27extern int exynos4_clk_ip_fsys_ctrl(struct clk *clk, int enable);
28extern int exynos4_clk_ip_lcd1_ctrl(struct clk *clk, int enable); 28extern int exynos4_clk_ip_lcd1_ctrl(struct clk *clk, int enable);
29extern int exynos4_clk_ip_image_ctrl(struct clk *clk, int enable);
30extern int exynos4_clk_ip_dmc_ctrl(struct clk *clk, int enable);
29 31
30#endif /* __ASM_ARCH_CLOCK_H */ 32#endif /* __ASM_ARCH_CLOCK_H */
diff --git a/arch/arm/mach-exynos/clock-exynos4210.c b/arch/arm/mach-exynos/clock-exynos4210.c
index 3b131e4b6ef5..b8689ff60baf 100644
--- a/arch/arm/mach-exynos/clock-exynos4210.c
+++ b/arch/arm/mach-exynos/clock-exynos4210.c
@@ -26,6 +26,7 @@
26#include <mach/hardware.h> 26#include <mach/hardware.h>
27#include <mach/map.h> 27#include <mach/map.h>
28#include <mach/regs-clock.h> 28#include <mach/regs-clock.h>
29#include <mach/sysmmu.h>
29 30
30#include "common.h" 31#include "common.h"
31#include "clock-exynos4.h" 32#include "clock-exynos4.h"
@@ -94,6 +95,16 @@ static struct clk init_clocks_off[] = {
94 .devname = "exynos4-fb.1", 95 .devname = "exynos4-fb.1",
95 .enable = exynos4_clk_ip_lcd1_ctrl, 96 .enable = exynos4_clk_ip_lcd1_ctrl,
96 .ctrlbit = (1 << 0), 97 .ctrlbit = (1 << 0),
98 }, {
99 .name = SYSMMU_CLOCK_NAME,
100 .devname = SYSMMU_CLOCK_DEVNAME(2d, 14),
101 .enable = exynos4_clk_ip_image_ctrl,
102 .ctrlbit = (1 << 3),
103 }, {
104 .name = SYSMMU_CLOCK_NAME,
105 .devname = SYSMMU_CLOCK_DEVNAME(fimd1, 11),
106 .enable = exynos4_clk_ip_lcd1_ctrl,
107 .ctrlbit = (1 << 4),
97 }, 108 },
98}; 109};
99 110
diff --git a/arch/arm/mach-exynos/clock-exynos4212.c b/arch/arm/mach-exynos/clock-exynos4212.c
index 3ecc01e06f74..da397d21bbcf 100644
--- a/arch/arm/mach-exynos/clock-exynos4212.c
+++ b/arch/arm/mach-exynos/clock-exynos4212.c
@@ -26,6 +26,7 @@
26#include <mach/hardware.h> 26#include <mach/hardware.h>
27#include <mach/map.h> 27#include <mach/map.h>
28#include <mach/regs-clock.h> 28#include <mach/regs-clock.h>
29#include <mach/sysmmu.h>
29 30
30#include "common.h" 31#include "common.h"
31#include "clock-exynos4.h" 32#include "clock-exynos4.h"
@@ -39,6 +40,16 @@ static struct sleep_save exynos4212_clock_save[] = {
39}; 40};
40#endif 41#endif
41 42
43static int exynos4212_clk_ip_isp0_ctrl(struct clk *clk, int enable)
44{
45 return s5p_gatectrl(EXYNOS4_CLKGATE_IP_ISP0, clk, enable);
46}
47
48static int exynos4212_clk_ip_isp1_ctrl(struct clk *clk, int enable)
49{
50 return s5p_gatectrl(EXYNOS4_CLKGATE_IP_ISP1, clk, enable);
51}
52
42static struct clk *clk_src_mpll_user_list[] = { 53static struct clk *clk_src_mpll_user_list[] = {
43 [0] = &clk_fin_mpll, 54 [0] = &clk_fin_mpll,
44 [1] = &exynos4_clk_mout_mpll.clk, 55 [1] = &exynos4_clk_mout_mpll.clk,
@@ -66,7 +77,32 @@ static struct clksrc_clk clksrcs[] = {
66}; 77};
67 78
68static struct clk init_clocks_off[] = { 79static struct clk init_clocks_off[] = {
69 /* nothing here yet */ 80 {
81 .name = SYSMMU_CLOCK_NAME,
82 .devname = SYSMMU_CLOCK_DEVNAME(2d, 14),
83 .enable = exynos4_clk_ip_dmc_ctrl,
84 .ctrlbit = (1 << 24),
85 }, {
86 .name = SYSMMU_CLOCK_NAME,
87 .devname = SYSMMU_CLOCK_DEVNAME(isp, 9),
88 .enable = exynos4212_clk_ip_isp0_ctrl,
89 .ctrlbit = (7 << 8),
90 }, {
91 .name = SYSMMU_CLOCK_NAME2,
92 .devname = SYSMMU_CLOCK_DEVNAME(isp, 9),
93 .enable = exynos4212_clk_ip_isp1_ctrl,
94 .ctrlbit = (1 << 4),
95 }, {
96 .name = "flite",
97 .devname = "exynos-fimc-lite.0",
98 .enable = exynos4212_clk_ip_isp0_ctrl,
99 .ctrlbit = (1 << 4),
100 }, {
101 .name = "flite",
102 .devname = "exynos-fimc-lite.1",
103 .enable = exynos4212_clk_ip_isp0_ctrl,
104 .ctrlbit = (1 << 3),
105 }
70}; 106};
71 107
72#ifdef CONFIG_PM_SLEEP 108#ifdef CONFIG_PM_SLEEP
diff --git a/arch/arm/mach-exynos/clock-exynos5.c b/arch/arm/mach-exynos/clock-exynos5.c
index 7ac6ff4c46bd..5aa460b01fdf 100644
--- a/arch/arm/mach-exynos/clock-exynos5.c
+++ b/arch/arm/mach-exynos/clock-exynos5.c
@@ -82,6 +82,11 @@ static int exynos5_clksrc_mask_peric0_ctrl(struct clk *clk, int enable)
82 return s5p_gatectrl(EXYNOS5_CLKSRC_MASK_PERIC0, clk, enable); 82 return s5p_gatectrl(EXYNOS5_CLKSRC_MASK_PERIC0, clk, enable);
83} 83}
84 84
85static int exynos5_clk_ip_acp_ctrl(struct clk *clk, int enable)
86{
87 return s5p_gatectrl(EXYNOS5_CLKGATE_IP_ACP, clk, enable);
88}
89
85static int exynos5_clk_ip_core_ctrl(struct clk *clk, int enable) 90static int exynos5_clk_ip_core_ctrl(struct clk *clk, int enable)
86{ 91{
87 return s5p_gatectrl(EXYNOS5_CLKGATE_IP_CORE, clk, enable); 92 return s5p_gatectrl(EXYNOS5_CLKGATE_IP_CORE, clk, enable);
@@ -127,6 +132,21 @@ static int exynos5_clk_ip_peris_ctrl(struct clk *clk, int enable)
127 return s5p_gatectrl(EXYNOS5_CLKGATE_IP_PERIS, clk, enable); 132 return s5p_gatectrl(EXYNOS5_CLKGATE_IP_PERIS, clk, enable);
128} 133}
129 134
135static int exynos5_clk_ip_gscl_ctrl(struct clk *clk, int enable)
136{
137 return s5p_gatectrl(EXYNOS5_CLKGATE_IP_GSCL, clk, enable);
138}
139
140static int exynos5_clk_ip_isp0_ctrl(struct clk *clk, int enable)
141{
142 return s5p_gatectrl(EXYNOS5_CLKGATE_IP_ISP0, clk, enable);
143}
144
145static int exynos5_clk_ip_isp1_ctrl(struct clk *clk, int enable)
146{
147 return s5p_gatectrl(EXYNOS5_CLKGATE_IP_ISP1, clk, enable);
148}
149
130/* Core list of CMU_CPU side */ 150/* Core list of CMU_CPU side */
131 151
132static struct clksrc_clk exynos5_clk_mout_apll = { 152static struct clksrc_clk exynos5_clk_mout_apll = {
@@ -145,11 +165,29 @@ static struct clksrc_clk exynos5_clk_sclk_apll = {
145 .reg_div = { .reg = EXYNOS5_CLKDIV_CPU0, .shift = 24, .size = 3 }, 165 .reg_div = { .reg = EXYNOS5_CLKDIV_CPU0, .shift = 24, .size = 3 },
146}; 166};
147 167
168static struct clksrc_clk exynos5_clk_mout_bpll_fout = {
169 .clk = {
170 .name = "mout_bpll_fout",
171 },
172 .sources = &clk_src_bpll_fout,
173 .reg_src = { .reg = EXYNOS5_PLL_DIV2_SEL, .shift = 0, .size = 1 },
174};
175
176static struct clk *exynos5_clk_src_bpll_list[] = {
177 [0] = &clk_fin_bpll,
178 [1] = &exynos5_clk_mout_bpll_fout.clk,
179};
180
181static struct clksrc_sources exynos5_clk_src_bpll = {
182 .sources = exynos5_clk_src_bpll_list,
183 .nr_sources = ARRAY_SIZE(exynos5_clk_src_bpll_list),
184};
185
148static struct clksrc_clk exynos5_clk_mout_bpll = { 186static struct clksrc_clk exynos5_clk_mout_bpll = {
149 .clk = { 187 .clk = {
150 .name = "mout_bpll", 188 .name = "mout_bpll",
151 }, 189 },
152 .sources = &clk_src_bpll, 190 .sources = &exynos5_clk_src_bpll,
153 .reg_src = { .reg = EXYNOS5_CLKSRC_CDREX, .shift = 0, .size = 1 }, 191 .reg_src = { .reg = EXYNOS5_CLKSRC_CDREX, .shift = 0, .size = 1 },
154}; 192};
155 193
@@ -187,11 +225,29 @@ static struct clksrc_clk exynos5_clk_mout_epll = {
187 .reg_src = { .reg = EXYNOS5_CLKSRC_TOP2, .shift = 12, .size = 1 }, 225 .reg_src = { .reg = EXYNOS5_CLKSRC_TOP2, .shift = 12, .size = 1 },
188}; 226};
189 227
228static struct clksrc_clk exynos5_clk_mout_mpll_fout = {
229 .clk = {
230 .name = "mout_mpll_fout",
231 },
232 .sources = &clk_src_mpll_fout,
233 .reg_src = { .reg = EXYNOS5_PLL_DIV2_SEL, .shift = 4, .size = 1 },
234};
235
236static struct clk *exynos5_clk_src_mpll_list[] = {
237 [0] = &clk_fin_mpll,
238 [1] = &exynos5_clk_mout_mpll_fout.clk,
239};
240
241static struct clksrc_sources exynos5_clk_src_mpll = {
242 .sources = exynos5_clk_src_mpll_list,
243 .nr_sources = ARRAY_SIZE(exynos5_clk_src_mpll_list),
244};
245
190struct clksrc_clk exynos5_clk_mout_mpll = { 246struct clksrc_clk exynos5_clk_mout_mpll = {
191 .clk = { 247 .clk = {
192 .name = "mout_mpll", 248 .name = "mout_mpll",
193 }, 249 },
194 .sources = &clk_src_mpll, 250 .sources = &exynos5_clk_src_mpll,
195 .reg_src = { .reg = EXYNOS5_CLKSRC_CORE1, .shift = 8, .size = 1 }, 251 .reg_src = { .reg = EXYNOS5_CLKSRC_CORE1, .shift = 8, .size = 1 },
196}; 252};
197 253
@@ -454,6 +510,11 @@ static struct clk exynos5_init_clocks_off[] = {
454 .enable = exynos5_clk_ip_peris_ctrl, 510 .enable = exynos5_clk_ip_peris_ctrl,
455 .ctrlbit = (1 << 20), 511 .ctrlbit = (1 << 20),
456 }, { 512 }, {
513 .name = "watchdog",
514 .parent = &exynos5_clk_aclk_66.clk,
515 .enable = exynos5_clk_ip_peris_ctrl,
516 .ctrlbit = (1 << 19),
517 }, {
457 .name = "hsmmc", 518 .name = "hsmmc",
458 .devname = "exynos4-sdhci.0", 519 .devname = "exynos4-sdhci.0",
459 .parent = &exynos5_clk_aclk_200.clk, 520 .parent = &exynos5_clk_aclk_200.clk,
@@ -630,6 +691,76 @@ static struct clk exynos5_init_clocks_off[] = {
630 .parent = &exynos5_clk_aclk_66.clk, 691 .parent = &exynos5_clk_aclk_66.clk,
631 .enable = exynos5_clk_ip_peric_ctrl, 692 .enable = exynos5_clk_ip_peric_ctrl,
632 .ctrlbit = (1 << 14), 693 .ctrlbit = (1 << 14),
694 }, {
695 .name = SYSMMU_CLOCK_NAME,
696 .devname = SYSMMU_CLOCK_DEVNAME(mfc_l, 0),
697 .enable = &exynos5_clk_ip_mfc_ctrl,
698 .ctrlbit = (1 << 1),
699 }, {
700 .name = SYSMMU_CLOCK_NAME,
701 .devname = SYSMMU_CLOCK_DEVNAME(mfc_r, 1),
702 .enable = &exynos5_clk_ip_mfc_ctrl,
703 .ctrlbit = (1 << 2),
704 }, {
705 .name = SYSMMU_CLOCK_NAME,
706 .devname = SYSMMU_CLOCK_DEVNAME(tv, 2),
707 .enable = &exynos5_clk_ip_disp1_ctrl,
708 .ctrlbit = (1 << 9)
709 }, {
710 .name = SYSMMU_CLOCK_NAME,
711 .devname = SYSMMU_CLOCK_DEVNAME(jpeg, 3),
712 .enable = &exynos5_clk_ip_gen_ctrl,
713 .ctrlbit = (1 << 7),
714 }, {
715 .name = SYSMMU_CLOCK_NAME,
716 .devname = SYSMMU_CLOCK_DEVNAME(rot, 4),
717 .enable = &exynos5_clk_ip_gen_ctrl,
718 .ctrlbit = (1 << 6)
719 }, {
720 .name = SYSMMU_CLOCK_NAME,
721 .devname = SYSMMU_CLOCK_DEVNAME(gsc0, 5),
722 .enable = &exynos5_clk_ip_gscl_ctrl,
723 .ctrlbit = (1 << 7),
724 }, {
725 .name = SYSMMU_CLOCK_NAME,
726 .devname = SYSMMU_CLOCK_DEVNAME(gsc1, 6),
727 .enable = &exynos5_clk_ip_gscl_ctrl,
728 .ctrlbit = (1 << 8),
729 }, {
730 .name = SYSMMU_CLOCK_NAME,
731 .devname = SYSMMU_CLOCK_DEVNAME(gsc2, 7),
732 .enable = &exynos5_clk_ip_gscl_ctrl,
733 .ctrlbit = (1 << 9),
734 }, {
735 .name = SYSMMU_CLOCK_NAME,
736 .devname = SYSMMU_CLOCK_DEVNAME(gsc3, 8),
737 .enable = &exynos5_clk_ip_gscl_ctrl,
738 .ctrlbit = (1 << 10),
739 }, {
740 .name = SYSMMU_CLOCK_NAME,
741 .devname = SYSMMU_CLOCK_DEVNAME(isp, 9),
742 .enable = &exynos5_clk_ip_isp0_ctrl,
743 .ctrlbit = (0x3F << 8),
744 }, {
745 .name = SYSMMU_CLOCK_NAME2,
746 .devname = SYSMMU_CLOCK_DEVNAME(isp, 9),
747 .enable = &exynos5_clk_ip_isp1_ctrl,
748 .ctrlbit = (0xF << 4),
749 }, {
750 .name = SYSMMU_CLOCK_NAME,
751 .devname = SYSMMU_CLOCK_DEVNAME(camif0, 12),
752 .enable = &exynos5_clk_ip_gscl_ctrl,
753 .ctrlbit = (1 << 11),
754 }, {
755 .name = SYSMMU_CLOCK_NAME,
756 .devname = SYSMMU_CLOCK_DEVNAME(camif1, 13),
757 .enable = &exynos5_clk_ip_gscl_ctrl,
758 .ctrlbit = (1 << 12),
759 }, {
760 .name = SYSMMU_CLOCK_NAME,
761 .devname = SYSMMU_CLOCK_DEVNAME(2d, 14),
762 .enable = &exynos5_clk_ip_acp_ctrl,
763 .ctrlbit = (1 << 7)
633 } 764 }
634}; 765};
635 766
@@ -941,10 +1072,12 @@ static struct clksrc_clk *exynos5_sysclks[] = {
941 &exynos5_clk_mout_apll, 1072 &exynos5_clk_mout_apll,
942 &exynos5_clk_sclk_apll, 1073 &exynos5_clk_sclk_apll,
943 &exynos5_clk_mout_bpll, 1074 &exynos5_clk_mout_bpll,
1075 &exynos5_clk_mout_bpll_fout,
944 &exynos5_clk_mout_bpll_user, 1076 &exynos5_clk_mout_bpll_user,
945 &exynos5_clk_mout_cpll, 1077 &exynos5_clk_mout_cpll,
946 &exynos5_clk_mout_epll, 1078 &exynos5_clk_mout_epll,
947 &exynos5_clk_mout_mpll, 1079 &exynos5_clk_mout_mpll,
1080 &exynos5_clk_mout_mpll_fout,
948 &exynos5_clk_mout_mpll_user, 1081 &exynos5_clk_mout_mpll_user,
949 &exynos5_clk_vpllsrc, 1082 &exynos5_clk_vpllsrc,
950 &exynos5_clk_sclk_vpll, 1083 &exynos5_clk_sclk_vpll,
@@ -1008,7 +1141,9 @@ static struct clk *exynos5_clks[] __initdata = {
1008 &exynos5_clk_sclk_hdmi27m, 1141 &exynos5_clk_sclk_hdmi27m,
1009 &exynos5_clk_sclk_hdmiphy, 1142 &exynos5_clk_sclk_hdmiphy,
1010 &clk_fout_bpll, 1143 &clk_fout_bpll,
1144 &clk_fout_bpll_div2,
1011 &clk_fout_cpll, 1145 &clk_fout_cpll,
1146 &clk_fout_mpll_div2,
1012 &exynos5_clk_armclk, 1147 &exynos5_clk_armclk,
1013}; 1148};
1014 1149
@@ -1173,8 +1308,10 @@ void __init_or_cpufreq exynos5_setup_clocks(void)
1173 1308
1174 clk_fout_apll.ops = &exynos5_fout_apll_ops; 1309 clk_fout_apll.ops = &exynos5_fout_apll_ops;
1175 clk_fout_bpll.rate = bpll; 1310 clk_fout_bpll.rate = bpll;
1311 clk_fout_bpll_div2.rate = bpll >> 1;
1176 clk_fout_cpll.rate = cpll; 1312 clk_fout_cpll.rate = cpll;
1177 clk_fout_mpll.rate = mpll; 1313 clk_fout_mpll.rate = mpll;
1314 clk_fout_mpll_div2.rate = mpll >> 1;
1178 clk_fout_epll.rate = epll; 1315 clk_fout_epll.rate = epll;
1179 clk_fout_vpll.rate = vpll; 1316 clk_fout_vpll.rate = vpll;
1180 1317
diff --git a/arch/arm/mach-exynos/common.c b/arch/arm/mach-exynos/common.c
index 5ccd6e80a607..742edd3bbec3 100644
--- a/arch/arm/mach-exynos/common.c
+++ b/arch/arm/mach-exynos/common.c
@@ -19,6 +19,9 @@
19#include <linux/serial_core.h> 19#include <linux/serial_core.h>
20#include <linux/of.h> 20#include <linux/of.h>
21#include <linux/of_irq.h> 21#include <linux/of_irq.h>
22#include <linux/export.h>
23#include <linux/irqdomain.h>
24#include <linux/of_address.h>
22 25
23#include <asm/proc-fns.h> 26#include <asm/proc-fns.h>
24#include <asm/exception.h> 27#include <asm/exception.h>
@@ -265,12 +268,12 @@ static struct map_desc exynos5_iodesc[] __initdata = {
265 }, { 268 }, {
266 .virtual = (unsigned long)S5P_VA_GIC_CPU, 269 .virtual = (unsigned long)S5P_VA_GIC_CPU,
267 .pfn = __phys_to_pfn(EXYNOS5_PA_GIC_CPU), 270 .pfn = __phys_to_pfn(EXYNOS5_PA_GIC_CPU),
268 .length = SZ_64K, 271 .length = SZ_8K,
269 .type = MT_DEVICE, 272 .type = MT_DEVICE,
270 }, { 273 }, {
271 .virtual = (unsigned long)S5P_VA_GIC_DIST, 274 .virtual = (unsigned long)S5P_VA_GIC_DIST,
272 .pfn = __phys_to_pfn(EXYNOS5_PA_GIC_DIST), 275 .pfn = __phys_to_pfn(EXYNOS5_PA_GIC_DIST),
273 .length = SZ_64K, 276 .length = SZ_4K,
274 .type = MT_DEVICE, 277 .type = MT_DEVICE,
275 }, 278 },
276}; 279};
@@ -285,6 +288,11 @@ void exynos5_restart(char mode, const char *cmd)
285 __raw_writel(0x1, EXYNOS_SWRESET); 288 __raw_writel(0x1, EXYNOS_SWRESET);
286} 289}
287 290
291void __init exynos_init_late(void)
292{
293 exynos_pm_late_initcall();
294}
295
288/* 296/*
289 * exynos_map_io 297 * exynos_map_io
290 * 298 *
@@ -399,6 +407,7 @@ struct combiner_chip_data {
399 void __iomem *base; 407 void __iomem *base;
400}; 408};
401 409
410static struct irq_domain *combiner_irq_domain;
402static struct combiner_chip_data combiner_data[MAX_COMBINER_NR]; 411static struct combiner_chip_data combiner_data[MAX_COMBINER_NR];
403 412
404static inline void __iomem *combiner_base(struct irq_data *data) 413static inline void __iomem *combiner_base(struct irq_data *data)
@@ -411,14 +420,14 @@ static inline void __iomem *combiner_base(struct irq_data *data)
411 420
412static void combiner_mask_irq(struct irq_data *data) 421static void combiner_mask_irq(struct irq_data *data)
413{ 422{
414 u32 mask = 1 << (data->irq % 32); 423 u32 mask = 1 << (data->hwirq % 32);
415 424
416 __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR); 425 __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_CLEAR);
417} 426}
418 427
419static void combiner_unmask_irq(struct irq_data *data) 428static void combiner_unmask_irq(struct irq_data *data)
420{ 429{
421 u32 mask = 1 << (data->irq % 32); 430 u32 mask = 1 << (data->hwirq % 32);
422 431
423 __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET); 432 __raw_writel(mask, combiner_base(data) + COMBINER_ENABLE_SET);
424} 433}
@@ -474,49 +483,131 @@ static void __init combiner_cascade_irq(unsigned int combiner_nr, unsigned int i
474 irq_set_chained_handler(irq, combiner_handle_cascade_irq); 483 irq_set_chained_handler(irq, combiner_handle_cascade_irq);
475} 484}
476 485
477static void __init combiner_init(unsigned int combiner_nr, void __iomem *base, 486static void __init combiner_init_one(unsigned int combiner_nr,
478 unsigned int irq_start) 487 void __iomem *base)
479{ 488{
480 unsigned int i;
481 unsigned int max_nr;
482
483 if (soc_is_exynos5250())
484 max_nr = EXYNOS5_MAX_COMBINER_NR;
485 else
486 max_nr = EXYNOS4_MAX_COMBINER_NR;
487
488 if (combiner_nr >= max_nr)
489 BUG();
490
491 combiner_data[combiner_nr].base = base; 489 combiner_data[combiner_nr].base = base;
492 combiner_data[combiner_nr].irq_offset = irq_start; 490 combiner_data[combiner_nr].irq_offset = irq_find_mapping(
491 combiner_irq_domain, combiner_nr * MAX_IRQ_IN_COMBINER);
493 combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3); 492 combiner_data[combiner_nr].irq_mask = 0xff << ((combiner_nr % 4) << 3);
494 493
495 /* Disable all interrupts */ 494 /* Disable all interrupts */
496
497 __raw_writel(combiner_data[combiner_nr].irq_mask, 495 __raw_writel(combiner_data[combiner_nr].irq_mask,
498 base + COMBINER_ENABLE_CLEAR); 496 base + COMBINER_ENABLE_CLEAR);
497}
498
499#ifdef CONFIG_OF
500static int combiner_irq_domain_xlate(struct irq_domain *d,
501 struct device_node *controller,
502 const u32 *intspec, unsigned int intsize,
503 unsigned long *out_hwirq,
504 unsigned int *out_type)
505{
506 if (d->of_node != controller)
507 return -EINVAL;
508
509 if (intsize < 2)
510 return -EINVAL;
511
512 *out_hwirq = intspec[0] * MAX_IRQ_IN_COMBINER + intspec[1];
513 *out_type = 0;
514
515 return 0;
516}
517#else
518static int combiner_irq_domain_xlate(struct irq_domain *d,
519 struct device_node *controller,
520 const u32 *intspec, unsigned int intsize,
521 unsigned long *out_hwirq,
522 unsigned int *out_type)
523{
524 return -EINVAL;
525}
526#endif
527
528static int combiner_irq_domain_map(struct irq_domain *d, unsigned int irq,
529 irq_hw_number_t hw)
530{
531 irq_set_chip_and_handler(irq, &combiner_chip, handle_level_irq);
532 irq_set_chip_data(irq, &combiner_data[hw >> 3]);
533 set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
534
535 return 0;
536}
537
538static struct irq_domain_ops combiner_irq_domain_ops = {
539 .xlate = combiner_irq_domain_xlate,
540 .map = combiner_irq_domain_map,
541};
542
543void __init combiner_init(void __iomem *combiner_base, struct device_node *np)
544{
545 int i, irq, irq_base;
546 unsigned int max_nr, nr_irq;
547
548 if (np) {
549 if (of_property_read_u32(np, "samsung,combiner-nr", &max_nr)) {
550 pr_warning("%s: number of combiners not specified, "
551 "setting default as %d.\n",
552 __func__, EXYNOS4_MAX_COMBINER_NR);
553 max_nr = EXYNOS4_MAX_COMBINER_NR;
554 }
555 } else {
556 max_nr = soc_is_exynos5250() ? EXYNOS5_MAX_COMBINER_NR :
557 EXYNOS4_MAX_COMBINER_NR;
558 }
559 nr_irq = max_nr * MAX_IRQ_IN_COMBINER;
499 560
500 /* Setup the Linux IRQ subsystem */ 561 irq_base = irq_alloc_descs(COMBINER_IRQ(0, 0), 1, nr_irq, 0);
562 if (IS_ERR_VALUE(irq_base)) {
563 irq_base = COMBINER_IRQ(0, 0);
564 pr_warning("%s: irq desc alloc failed. Continuing with %d as linux irq base\n", __func__, irq_base);
565 }
501 566
502 for (i = irq_start; i < combiner_data[combiner_nr].irq_offset 567 combiner_irq_domain = irq_domain_add_legacy(np, nr_irq, irq_base, 0,
503 + MAX_IRQ_IN_COMBINER; i++) { 568 &combiner_irq_domain_ops, &combiner_data);
504 irq_set_chip_and_handler(i, &combiner_chip, handle_level_irq); 569 if (WARN_ON(!combiner_irq_domain)) {
505 irq_set_chip_data(i, &combiner_data[combiner_nr]); 570 pr_warning("%s: irq domain init failed\n", __func__);
506 set_irq_flags(i, IRQF_VALID | IRQF_PROBE); 571 return;
572 }
573
574 for (i = 0; i < max_nr; i++) {
575 combiner_init_one(i, combiner_base + (i >> 2) * 0x10);
576 irq = IRQ_SPI(i);
577#ifdef CONFIG_OF
578 if (np)
579 irq = irq_of_parse_and_map(np, i);
580#endif
581 combiner_cascade_irq(i, irq);
507 } 582 }
508} 583}
509 584
510#ifdef CONFIG_OF 585#ifdef CONFIG_OF
586int __init combiner_of_init(struct device_node *np, struct device_node *parent)
587{
588 void __iomem *combiner_base;
589
590 combiner_base = of_iomap(np, 0);
591 if (!combiner_base) {
592 pr_err("%s: failed to map combiner registers\n", __func__);
593 return -ENXIO;
594 }
595
596 combiner_init(combiner_base, np);
597
598 return 0;
599}
600
511static const struct of_device_id exynos4_dt_irq_match[] = { 601static const struct of_device_id exynos4_dt_irq_match[] = {
512 { .compatible = "arm,cortex-a9-gic", .data = gic_of_init, }, 602 { .compatible = "arm,cortex-a9-gic", .data = gic_of_init, },
603 { .compatible = "samsung,exynos4210-combiner",
604 .data = combiner_of_init, },
513 {}, 605 {},
514}; 606};
515#endif 607#endif
516 608
517void __init exynos4_init_irq(void) 609void __init exynos4_init_irq(void)
518{ 610{
519 int irq;
520 unsigned int gic_bank_offset; 611 unsigned int gic_bank_offset;
521 612
522 gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000; 613 gic_bank_offset = soc_is_exynos4412() ? 0x4000 : 0x8000;
@@ -528,12 +619,8 @@ void __init exynos4_init_irq(void)
528 of_irq_init(exynos4_dt_irq_match); 619 of_irq_init(exynos4_dt_irq_match);
529#endif 620#endif
530 621
531 for (irq = 0; irq < EXYNOS4_MAX_COMBINER_NR; irq++) { 622 if (!of_have_populated_dt())
532 623 combiner_init(S5P_VA_COMBINER_BASE, NULL);
533 combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
534 COMBINER_IRQ(irq, 0));
535 combiner_cascade_irq(irq, IRQ_SPI(irq));
536 }
537 624
538 /* 625 /*
539 * The parameters of s5p_init_irq() are for VIC init. 626 * The parameters of s5p_init_irq() are for VIC init.
@@ -545,18 +632,9 @@ void __init exynos4_init_irq(void)
545 632
546void __init exynos5_init_irq(void) 633void __init exynos5_init_irq(void)
547{ 634{
548 int irq;
549
550#ifdef CONFIG_OF 635#ifdef CONFIG_OF
551 of_irq_init(exynos4_dt_irq_match); 636 of_irq_init(exynos4_dt_irq_match);
552#endif 637#endif
553
554 for (irq = 0; irq < EXYNOS5_MAX_COMBINER_NR; irq++) {
555 combiner_init(irq, (void __iomem *)S5P_VA_COMBINER(irq),
556 COMBINER_IRQ(irq, 0));
557 combiner_cascade_irq(irq, IRQ_SPI(irq));
558 }
559
560 /* 638 /*
561 * The parameters of s5p_init_irq() are for VIC init. 639 * The parameters of s5p_init_irq() are for VIC init.
562 * Theses parameters should be NULL and 0 because EXYNOS4 640 * Theses parameters should be NULL and 0 because EXYNOS4
@@ -565,30 +643,18 @@ void __init exynos5_init_irq(void)
565 s5p_init_irq(NULL, 0); 643 s5p_init_irq(NULL, 0);
566} 644}
567 645
568struct bus_type exynos4_subsys = { 646struct bus_type exynos_subsys = {
569 .name = "exynos4-core", 647 .name = "exynos-core",
570 .dev_name = "exynos4-core", 648 .dev_name = "exynos-core",
571};
572
573struct bus_type exynos5_subsys = {
574 .name = "exynos5-core",
575 .dev_name = "exynos5-core",
576}; 649};
577 650
578static struct device exynos4_dev = { 651static struct device exynos4_dev = {
579 .bus = &exynos4_subsys, 652 .bus = &exynos_subsys,
580};
581
582static struct device exynos5_dev = {
583 .bus = &exynos5_subsys,
584}; 653};
585 654
586static int __init exynos_core_init(void) 655static int __init exynos_core_init(void)
587{ 656{
588 if (soc_is_exynos5250()) 657 return subsys_system_register(&exynos_subsys, NULL);
589 return subsys_system_register(&exynos5_subsys, NULL);
590 else
591 return subsys_system_register(&exynos4_subsys, NULL);
592} 658}
593core_initcall(exynos_core_init); 659core_initcall(exynos_core_init);
594 660
@@ -675,10 +741,7 @@ static int __init exynos_init(void)
675{ 741{
676 printk(KERN_INFO "EXYNOS: Initializing architecture\n"); 742 printk(KERN_INFO "EXYNOS: Initializing architecture\n");
677 743
678 if (soc_is_exynos5250()) 744 return device_register(&exynos4_dev);
679 return device_register(&exynos5_dev);
680 else
681 return device_register(&exynos4_dev);
682} 745}
683 746
684/* uart registration process */ 747/* uart registration process */
diff --git a/arch/arm/mach-exynos/common.h b/arch/arm/mach-exynos/common.h
index 677b5467df18..aed2eeb06517 100644
--- a/arch/arm/mach-exynos/common.h
+++ b/arch/arm/mach-exynos/common.h
@@ -19,6 +19,13 @@ void exynos4_init_irq(void);
19void exynos5_init_irq(void); 19void exynos5_init_irq(void);
20void exynos4_restart(char mode, const char *cmd); 20void exynos4_restart(char mode, const char *cmd);
21void exynos5_restart(char mode, const char *cmd); 21void exynos5_restart(char mode, const char *cmd);
22void exynos_init_late(void);
23
24#ifdef CONFIG_PM_GENERIC_DOMAINS
25int exynos_pm_late_initcall(void);
26#else
27static int exynos_pm_late_initcall(void) { return 0; }
28#endif
22 29
23#ifdef CONFIG_ARCH_EXYNOS4 30#ifdef CONFIG_ARCH_EXYNOS4
24void exynos4_register_clocks(void); 31void exynos4_register_clocks(void);
diff --git a/arch/arm/mach-exynos/dev-drm.c b/arch/arm/mach-exynos/dev-drm.c
new file mode 100644
index 000000000000..17c9c6ecc2e0
--- /dev/null
+++ b/arch/arm/mach-exynos/dev-drm.c
@@ -0,0 +1,29 @@
1/*
2 * linux/arch/arm/mach-exynos/dev-drm.c
3 *
4 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
5 * http://www.samsung.com
6 *
7 * EXYNOS - core DRM device
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 */
14
15#include <linux/kernel.h>
16#include <linux/dma-mapping.h>
17#include <linux/platform_device.h>
18
19#include <plat/devs.h>
20
21static u64 exynos_drm_dma_mask = DMA_BIT_MASK(32);
22
23struct platform_device exynos_device_drm = {
24 .name = "exynos-drm",
25 .dev = {
26 .dma_mask = &exynos_drm_dma_mask,
27 .coherent_dma_mask = DMA_BIT_MASK(32),
28 }
29};
diff --git a/arch/arm/mach-exynos/dev-sysmmu.c b/arch/arm/mach-exynos/dev-sysmmu.c
index 781563fcb156..c5b1ea301df0 100644
--- a/arch/arm/mach-exynos/dev-sysmmu.c
+++ b/arch/arm/mach-exynos/dev-sysmmu.c
@@ -1,9 +1,9 @@
1/* linux/arch/arm/mach-exynos4/dev-sysmmu.c 1/* linux/arch/arm/mach-exynos/dev-sysmmu.c
2 * 2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd. 3 * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com 4 * http://www.samsung.com
5 * 5 *
6 * EXYNOS4 - System MMU support 6 * EXYNOS - System MMU support
7 * 7 *
8 * This program is free software; you can redistribute it and/or modify 8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 9 * it under the terms of the GNU General Public License version 2 as
@@ -12,222 +12,263 @@
12 12
13#include <linux/platform_device.h> 13#include <linux/platform_device.h>
14#include <linux/dma-mapping.h> 14#include <linux/dma-mapping.h>
15#include <linux/export.h> 15
16#include <plat/cpu.h>
16 17
17#include <mach/map.h> 18#include <mach/map.h>
18#include <mach/irqs.h> 19#include <mach/irqs.h>
19#include <mach/sysmmu.h> 20#include <mach/sysmmu.h>
20#include <plat/s5p-clock.h>
21
22/* These names must be equal to the clock names in mach-exynos4/clock.c */
23const char *sysmmu_ips_name[EXYNOS4_SYSMMU_TOTAL_IPNUM] = {
24 "SYSMMU_MDMA" ,
25 "SYSMMU_SSS" ,
26 "SYSMMU_FIMC0" ,
27 "SYSMMU_FIMC1" ,
28 "SYSMMU_FIMC2" ,
29 "SYSMMU_FIMC3" ,
30 "SYSMMU_JPEG" ,
31 "SYSMMU_FIMD0" ,
32 "SYSMMU_FIMD1" ,
33 "SYSMMU_PCIe" ,
34 "SYSMMU_G2D" ,
35 "SYSMMU_ROTATOR",
36 "SYSMMU_MDMA2" ,
37 "SYSMMU_TV" ,
38 "SYSMMU_MFC_L" ,
39 "SYSMMU_MFC_R" ,
40};
41 21
42static struct resource exynos4_sysmmu_resource[] = { 22static u64 exynos_sysmmu_dma_mask = DMA_BIT_MASK(32);
43 [0] = { 23
44 .start = EXYNOS4_PA_SYSMMU_MDMA, 24#define SYSMMU_PLATFORM_DEVICE(ipname, devid) \
45 .end = EXYNOS4_PA_SYSMMU_MDMA + SZ_64K - 1, 25static struct sysmmu_platform_data platdata_##ipname = { \
46 .flags = IORESOURCE_MEM, 26 .dbgname = #ipname, \
47 }, 27}; \
48 [1] = { 28struct platform_device SYSMMU_PLATDEV(ipname) = \
49 .start = IRQ_SYSMMU_MDMA0_0, 29{ \
50 .end = IRQ_SYSMMU_MDMA0_0, 30 .name = SYSMMU_DEVNAME_BASE, \
51 .flags = IORESOURCE_IRQ, 31 .id = devid, \
52 }, 32 .dev = { \
53 [2] = { 33 .dma_mask = &exynos_sysmmu_dma_mask, \
54 .start = EXYNOS4_PA_SYSMMU_SSS, 34 .coherent_dma_mask = DMA_BIT_MASK(32), \
55 .end = EXYNOS4_PA_SYSMMU_SSS + SZ_64K - 1, 35 .platform_data = &platdata_##ipname, \
56 .flags = IORESOURCE_MEM, 36 }, \
57 }, 37}
58 [3] = { 38
59 .start = IRQ_SYSMMU_SSS_0, 39SYSMMU_PLATFORM_DEVICE(mfc_l, 0);
60 .end = IRQ_SYSMMU_SSS_0, 40SYSMMU_PLATFORM_DEVICE(mfc_r, 1);
61 .flags = IORESOURCE_IRQ, 41SYSMMU_PLATFORM_DEVICE(tv, 2);
62 }, 42SYSMMU_PLATFORM_DEVICE(jpeg, 3);
63 [4] = { 43SYSMMU_PLATFORM_DEVICE(rot, 4);
64 .start = EXYNOS4_PA_SYSMMU_FIMC0, 44SYSMMU_PLATFORM_DEVICE(fimc0, 5); /* fimc* and gsc* exist exclusively */
65 .end = EXYNOS4_PA_SYSMMU_FIMC0 + SZ_64K - 1, 45SYSMMU_PLATFORM_DEVICE(fimc1, 6);
66 .flags = IORESOURCE_MEM, 46SYSMMU_PLATFORM_DEVICE(fimc2, 7);
67 }, 47SYSMMU_PLATFORM_DEVICE(fimc3, 8);
68 [5] = { 48SYSMMU_PLATFORM_DEVICE(gsc0, 5);
69 .start = IRQ_SYSMMU_FIMC0_0, 49SYSMMU_PLATFORM_DEVICE(gsc1, 6);
70 .end = IRQ_SYSMMU_FIMC0_0, 50SYSMMU_PLATFORM_DEVICE(gsc2, 7);
71 .flags = IORESOURCE_IRQ, 51SYSMMU_PLATFORM_DEVICE(gsc3, 8);
72 }, 52SYSMMU_PLATFORM_DEVICE(isp, 9);
73 [6] = { 53SYSMMU_PLATFORM_DEVICE(fimd0, 10);
74 .start = EXYNOS4_PA_SYSMMU_FIMC1, 54SYSMMU_PLATFORM_DEVICE(fimd1, 11);
75 .end = EXYNOS4_PA_SYSMMU_FIMC1 + SZ_64K - 1, 55SYSMMU_PLATFORM_DEVICE(camif0, 12);
76 .flags = IORESOURCE_MEM, 56SYSMMU_PLATFORM_DEVICE(camif1, 13);
77 }, 57SYSMMU_PLATFORM_DEVICE(2d, 14);
78 [7] = { 58
79 .start = IRQ_SYSMMU_FIMC1_0, 59#define SYSMMU_RESOURCE_NAME(core, ipname) sysmmures_##core##_##ipname
80 .end = IRQ_SYSMMU_FIMC1_0, 60
81 .flags = IORESOURCE_IRQ, 61#define SYSMMU_RESOURCE(core, ipname) \
82 }, 62 static struct resource SYSMMU_RESOURCE_NAME(core, ipname)[] __initdata =
83 [8] = { 63
84 .start = EXYNOS4_PA_SYSMMU_FIMC2, 64#define DEFINE_SYSMMU_RESOURCE(core, mem, irq) \
85 .end = EXYNOS4_PA_SYSMMU_FIMC2 + SZ_64K - 1, 65 DEFINE_RES_MEM_NAMED(core##_PA_SYSMMU_##mem, SZ_4K, #mem), \
86 .flags = IORESOURCE_MEM, 66 DEFINE_RES_IRQ_NAMED(core##_IRQ_SYSMMU_##irq##_0, #mem)
87 }, 67
88 [9] = { 68#define SYSMMU_RESOURCE_DEFINE(core, ipname, mem, irq) \
89 .start = IRQ_SYSMMU_FIMC2_0, 69 SYSMMU_RESOURCE(core, ipname) { \
90 .end = IRQ_SYSMMU_FIMC2_0, 70 DEFINE_SYSMMU_RESOURCE(core, mem, irq) \
91 .flags = IORESOURCE_IRQ, 71 }
92 },
93 [10] = {
94 .start = EXYNOS4_PA_SYSMMU_FIMC3,
95 .end = EXYNOS4_PA_SYSMMU_FIMC3 + SZ_64K - 1,
96 .flags = IORESOURCE_MEM,
97 },
98 [11] = {
99 .start = IRQ_SYSMMU_FIMC3_0,
100 .end = IRQ_SYSMMU_FIMC3_0,
101 .flags = IORESOURCE_IRQ,
102 },
103 [12] = {
104 .start = EXYNOS4_PA_SYSMMU_JPEG,
105 .end = EXYNOS4_PA_SYSMMU_JPEG + SZ_64K - 1,
106 .flags = IORESOURCE_MEM,
107 },
108 [13] = {
109 .start = IRQ_SYSMMU_JPEG_0,
110 .end = IRQ_SYSMMU_JPEG_0,
111 .flags = IORESOURCE_IRQ,
112 },
113 [14] = {
114 .start = EXYNOS4_PA_SYSMMU_FIMD0,
115 .end = EXYNOS4_PA_SYSMMU_FIMD0 + SZ_64K - 1,
116 .flags = IORESOURCE_MEM,
117 },
118 [15] = {
119 .start = IRQ_SYSMMU_LCD0_M0_0,
120 .end = IRQ_SYSMMU_LCD0_M0_0,
121 .flags = IORESOURCE_IRQ,
122 },
123 [16] = {
124 .start = EXYNOS4_PA_SYSMMU_FIMD1,
125 .end = EXYNOS4_PA_SYSMMU_FIMD1 + SZ_64K - 1,
126 .flags = IORESOURCE_MEM,
127 },
128 [17] = {
129 .start = IRQ_SYSMMU_LCD1_M1_0,
130 .end = IRQ_SYSMMU_LCD1_M1_0,
131 .flags = IORESOURCE_IRQ,
132 },
133 [18] = {
134 .start = EXYNOS4_PA_SYSMMU_PCIe,
135 .end = EXYNOS4_PA_SYSMMU_PCIe + SZ_64K - 1,
136 .flags = IORESOURCE_MEM,
137 },
138 [19] = {
139 .start = IRQ_SYSMMU_PCIE_0,
140 .end = IRQ_SYSMMU_PCIE_0,
141 .flags = IORESOURCE_IRQ,
142 },
143 [20] = {
144 .start = EXYNOS4_PA_SYSMMU_G2D,
145 .end = EXYNOS4_PA_SYSMMU_G2D + SZ_64K - 1,
146 .flags = IORESOURCE_MEM,
147 },
148 [21] = {
149 .start = IRQ_SYSMMU_2D_0,
150 .end = IRQ_SYSMMU_2D_0,
151 .flags = IORESOURCE_IRQ,
152 },
153 [22] = {
154 .start = EXYNOS4_PA_SYSMMU_ROTATOR,
155 .end = EXYNOS4_PA_SYSMMU_ROTATOR + SZ_64K - 1,
156 .flags = IORESOURCE_MEM,
157 },
158 [23] = {
159 .start = IRQ_SYSMMU_ROTATOR_0,
160 .end = IRQ_SYSMMU_ROTATOR_0,
161 .flags = IORESOURCE_IRQ,
162 },
163 [24] = {
164 .start = EXYNOS4_PA_SYSMMU_MDMA2,
165 .end = EXYNOS4_PA_SYSMMU_MDMA2 + SZ_64K - 1,
166 .flags = IORESOURCE_MEM,
167 },
168 [25] = {
169 .start = IRQ_SYSMMU_MDMA1_0,
170 .end = IRQ_SYSMMU_MDMA1_0,
171 .flags = IORESOURCE_IRQ,
172 },
173 [26] = {
174 .start = EXYNOS4_PA_SYSMMU_TV,
175 .end = EXYNOS4_PA_SYSMMU_TV + SZ_64K - 1,
176 .flags = IORESOURCE_MEM,
177 },
178 [27] = {
179 .start = IRQ_SYSMMU_TV_M0_0,
180 .end = IRQ_SYSMMU_TV_M0_0,
181 .flags = IORESOURCE_IRQ,
182 },
183 [28] = {
184 .start = EXYNOS4_PA_SYSMMU_MFC_L,
185 .end = EXYNOS4_PA_SYSMMU_MFC_L + SZ_64K - 1,
186 .flags = IORESOURCE_MEM,
187 },
188 [29] = {
189 .start = IRQ_SYSMMU_MFC_M0_0,
190 .end = IRQ_SYSMMU_MFC_M0_0,
191 .flags = IORESOURCE_IRQ,
192 },
193 [30] = {
194 .start = EXYNOS4_PA_SYSMMU_MFC_R,
195 .end = EXYNOS4_PA_SYSMMU_MFC_R + SZ_64K - 1,
196 .flags = IORESOURCE_MEM,
197 },
198 [31] = {
199 .start = IRQ_SYSMMU_MFC_M1_0,
200 .end = IRQ_SYSMMU_MFC_M1_0,
201 .flags = IORESOURCE_IRQ,
202 },
203};
204 72
205struct platform_device exynos4_device_sysmmu = { 73struct sysmmu_resource_map {
206 .name = "s5p-sysmmu", 74 struct platform_device *pdev;
207 .id = 32, 75 struct resource *res;
208 .num_resources = ARRAY_SIZE(exynos4_sysmmu_resource), 76 u32 rnum;
209 .resource = exynos4_sysmmu_resource, 77 struct device *pdd;
78 char *clocknames;
210}; 79};
211EXPORT_SYMBOL(exynos4_device_sysmmu);
212 80
213static struct clk *sysmmu_clk[S5P_SYSMMU_TOTAL_IPNUM]; 81#define SYSMMU_RESOURCE_MAPPING(core, ipname, resname) { \
214void sysmmu_clk_init(struct device *dev, sysmmu_ips ips) 82 .pdev = &SYSMMU_PLATDEV(ipname), \
215{ 83 .res = SYSMMU_RESOURCE_NAME(EXYNOS##core, resname), \
216 sysmmu_clk[ips] = clk_get(dev, sysmmu_ips_name[ips]); 84 .rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
217 if (IS_ERR(sysmmu_clk[ips])) 85 .clocknames = SYSMMU_CLOCK_NAME, \
218 sysmmu_clk[ips] = NULL;
219 else
220 clk_put(sysmmu_clk[ips]);
221} 86}
222 87
223void sysmmu_clk_enable(sysmmu_ips ips) 88#define SYSMMU_RESOURCE_MAPPING_MC(core, ipname, resname, pdata) { \
224{ 89 .pdev = &SYSMMU_PLATDEV(ipname), \
225 if (sysmmu_clk[ips]) 90 .res = SYSMMU_RESOURCE_NAME(EXYNOS##core, resname), \
226 clk_enable(sysmmu_clk[ips]); 91 .rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
92 .clocknames = SYSMMU_CLOCK_NAME "," SYSMMU_CLOCK_NAME2, \
93}
94
95#ifdef CONFIG_EXYNOS_DEV_PD
96#define SYSMMU_RESOURCE_MAPPING_PD(core, ipname, resname, pd) { \
97 .pdev = &SYSMMU_PLATDEV(ipname), \
98 .res = &SYSMMU_RESOURCE_NAME(EXYNOS##core, resname), \
99 .rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
100 .clocknames = SYSMMU_CLOCK_NAME, \
101 .pdd = &exynos##core##_device_pd[pd].dev, \
102}
103
104#define SYSMMU_RESOURCE_MAPPING_MCPD(core, ipname, resname, pd, pdata) {\
105 .pdev = &SYSMMU_PLATDEV(ipname), \
106 .res = &SYSMMU_RESOURCE_NAME(EXYNOS##core, resname), \
107 .rnum = ARRAY_SIZE(SYSMMU_RESOURCE_NAME(EXYNOS##core, resname)),\
108 .clocknames = SYSMMU_CLOCK_NAME "," SYSMMU_CLOCK_NAME2, \
109 .pdd = &exynos##core##_device_pd[pd].dev, \
227} 110}
111#else
112#define SYSMMU_RESOURCE_MAPPING_PD(core, ipname, resname, pd) \
113 SYSMMU_RESOURCE_MAPPING(core, ipname, resname)
114#define SYSMMU_RESOURCE_MAPPING_MCPD(core, ipname, resname, pd, pdata) \
115 SYSMMU_RESOURCE_MAPPING_MC(core, ipname, resname, pdata)
116
117#endif /* CONFIG_EXYNOS_DEV_PD */
118
119#ifdef CONFIG_ARCH_EXYNOS4
120SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc0, FIMC0, FIMC0);
121SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc1, FIMC1, FIMC1);
122SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc2, FIMC2, FIMC2);
123SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimc3, FIMC3, FIMC3);
124SYSMMU_RESOURCE_DEFINE(EXYNOS4, jpeg, JPEG, JPEG);
125SYSMMU_RESOURCE_DEFINE(EXYNOS4, 2d, G2D, 2D);
126SYSMMU_RESOURCE_DEFINE(EXYNOS4, tv, TV, TV_M0);
127SYSMMU_RESOURCE_DEFINE(EXYNOS4, 2d_acp, 2D_ACP, 2D);
128SYSMMU_RESOURCE_DEFINE(EXYNOS4, rot, ROTATOR, ROTATOR);
129SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimd0, FIMD0, LCD0_M0);
130SYSMMU_RESOURCE_DEFINE(EXYNOS4, fimd1, FIMD1, LCD1_M1);
131SYSMMU_RESOURCE_DEFINE(EXYNOS4, flite0, FIMC_LITE0, FIMC_LITE0);
132SYSMMU_RESOURCE_DEFINE(EXYNOS4, flite1, FIMC_LITE1, FIMC_LITE1);
133SYSMMU_RESOURCE_DEFINE(EXYNOS4, mfc_r, MFC_R, MFC_M0);
134SYSMMU_RESOURCE_DEFINE(EXYNOS4, mfc_l, MFC_L, MFC_M1);
135SYSMMU_RESOURCE(EXYNOS4, isp) {
136 DEFINE_SYSMMU_RESOURCE(EXYNOS4, FIMC_ISP, FIMC_ISP),
137 DEFINE_SYSMMU_RESOURCE(EXYNOS4, FIMC_DRC, FIMC_DRC),
138 DEFINE_SYSMMU_RESOURCE(EXYNOS4, FIMC_FD, FIMC_FD),
139 DEFINE_SYSMMU_RESOURCE(EXYNOS4, ISPCPU, FIMC_CX),
140};
141
142static struct sysmmu_resource_map sysmmu_resmap4[] __initdata = {
143 SYSMMU_RESOURCE_MAPPING_PD(4, fimc0, fimc0, PD_CAM),
144 SYSMMU_RESOURCE_MAPPING_PD(4, fimc1, fimc1, PD_CAM),
145 SYSMMU_RESOURCE_MAPPING_PD(4, fimc2, fimc2, PD_CAM),
146 SYSMMU_RESOURCE_MAPPING_PD(4, fimc3, fimc3, PD_CAM),
147 SYSMMU_RESOURCE_MAPPING_PD(4, tv, tv, PD_TV),
148 SYSMMU_RESOURCE_MAPPING_PD(4, mfc_r, mfc_r, PD_MFC),
149 SYSMMU_RESOURCE_MAPPING_PD(4, mfc_l, mfc_l, PD_MFC),
150 SYSMMU_RESOURCE_MAPPING_PD(4, rot, rot, PD_LCD0),
151 SYSMMU_RESOURCE_MAPPING_PD(4, jpeg, jpeg, PD_CAM),
152 SYSMMU_RESOURCE_MAPPING_PD(4, fimd0, fimd0, PD_LCD0),
153};
154
155static struct sysmmu_resource_map sysmmu_resmap4210[] __initdata = {
156 SYSMMU_RESOURCE_MAPPING_PD(4, 2d, 2d, PD_LCD0),
157 SYSMMU_RESOURCE_MAPPING_PD(4, fimd1, fimd1, PD_LCD1),
158};
159
160static struct sysmmu_resource_map sysmmu_resmap4212[] __initdata = {
161 SYSMMU_RESOURCE_MAPPING(4, 2d, 2d_acp),
162 SYSMMU_RESOURCE_MAPPING_PD(4, camif0, flite0, PD_ISP),
163 SYSMMU_RESOURCE_MAPPING_PD(4, camif1, flite1, PD_ISP),
164 SYSMMU_RESOURCE_MAPPING_PD(4, isp, isp, PD_ISP),
165};
166#endif /* CONFIG_ARCH_EXYNOS4 */
228 167
229void sysmmu_clk_disable(sysmmu_ips ips) 168#ifdef CONFIG_ARCH_EXYNOS5
169SYSMMU_RESOURCE_DEFINE(EXYNOS5, jpeg, JPEG, JPEG);
170SYSMMU_RESOURCE_DEFINE(EXYNOS5, fimd1, FIMD1, FIMD1);
171SYSMMU_RESOURCE_DEFINE(EXYNOS5, 2d, 2D, 2D);
172SYSMMU_RESOURCE_DEFINE(EXYNOS5, rot, ROTATOR, ROTATOR);
173SYSMMU_RESOURCE_DEFINE(EXYNOS5, tv, TV, TV);
174SYSMMU_RESOURCE_DEFINE(EXYNOS5, flite0, LITE0, LITE0);
175SYSMMU_RESOURCE_DEFINE(EXYNOS5, flite1, LITE1, LITE1);
176SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc0, GSC0, GSC0);
177SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc1, GSC1, GSC1);
178SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc2, GSC2, GSC2);
179SYSMMU_RESOURCE_DEFINE(EXYNOS5, gsc3, GSC3, GSC3);
180SYSMMU_RESOURCE_DEFINE(EXYNOS5, mfc_r, MFC_R, MFC_R);
181SYSMMU_RESOURCE_DEFINE(EXYNOS5, mfc_l, MFC_L, MFC_L);
182SYSMMU_RESOURCE(EXYNOS5, isp) {
183 DEFINE_SYSMMU_RESOURCE(EXYNOS5, ISP, ISP),
184 DEFINE_SYSMMU_RESOURCE(EXYNOS5, DRC, DRC),
185 DEFINE_SYSMMU_RESOURCE(EXYNOS5, FD, FD),
186 DEFINE_SYSMMU_RESOURCE(EXYNOS5, ISPCPU, MCUISP),
187 DEFINE_SYSMMU_RESOURCE(EXYNOS5, SCALERC, SCALERCISP),
188 DEFINE_SYSMMU_RESOURCE(EXYNOS5, SCALERP, SCALERPISP),
189 DEFINE_SYSMMU_RESOURCE(EXYNOS5, ODC, ODC),
190 DEFINE_SYSMMU_RESOURCE(EXYNOS5, DIS0, DIS0),
191 DEFINE_SYSMMU_RESOURCE(EXYNOS5, DIS1, DIS1),
192 DEFINE_SYSMMU_RESOURCE(EXYNOS5, 3DNR, 3DNR),
193};
194
195static struct sysmmu_resource_map sysmmu_resmap5[] __initdata = {
196 SYSMMU_RESOURCE_MAPPING(5, jpeg, jpeg),
197 SYSMMU_RESOURCE_MAPPING(5, fimd1, fimd1),
198 SYSMMU_RESOURCE_MAPPING(5, 2d, 2d),
199 SYSMMU_RESOURCE_MAPPING(5, rot, rot),
200 SYSMMU_RESOURCE_MAPPING_PD(5, tv, tv, PD_DISP1),
201 SYSMMU_RESOURCE_MAPPING_PD(5, camif0, flite0, PD_GSCL),
202 SYSMMU_RESOURCE_MAPPING_PD(5, camif1, flite1, PD_GSCL),
203 SYSMMU_RESOURCE_MAPPING_PD(5, gsc0, gsc0, PD_GSCL),
204 SYSMMU_RESOURCE_MAPPING_PD(5, gsc1, gsc1, PD_GSCL),
205 SYSMMU_RESOURCE_MAPPING_PD(5, gsc2, gsc2, PD_GSCL),
206 SYSMMU_RESOURCE_MAPPING_PD(5, gsc3, gsc3, PD_GSCL),
207 SYSMMU_RESOURCE_MAPPING_PD(5, mfc_r, mfc_r, PD_MFC),
208 SYSMMU_RESOURCE_MAPPING_PD(5, mfc_l, mfc_l, PD_MFC),
209 SYSMMU_RESOURCE_MAPPING_MCPD(5, isp, isp, PD_ISP, mc_platdata),
210};
211#endif /* CONFIG_ARCH_EXYNOS5 */
212
213static int __init init_sysmmu_platform_device(void)
230{ 214{
231 if (sysmmu_clk[ips]) 215 int i, j;
232 clk_disable(sysmmu_clk[ips]); 216 struct sysmmu_resource_map *resmap[2] = {NULL, NULL};
217 int nmap[2] = {0, 0};
218
219#ifdef CONFIG_ARCH_EXYNOS5
220 if (soc_is_exynos5250()) {
221 resmap[0] = sysmmu_resmap5;
222 nmap[0] = ARRAY_SIZE(sysmmu_resmap5);
223 nmap[1] = 0;
224 }
225#endif
226
227#ifdef CONFIG_ARCH_EXYNOS4
228 if (resmap[0] == NULL) {
229 resmap[0] = sysmmu_resmap4;
230 nmap[0] = ARRAY_SIZE(sysmmu_resmap4);
231 }
232
233 if (soc_is_exynos4210()) {
234 resmap[1] = sysmmu_resmap4210;
235 nmap[1] = ARRAY_SIZE(sysmmu_resmap4210);
236 }
237
238 if (soc_is_exynos4412() || soc_is_exynos4212()) {
239 resmap[1] = sysmmu_resmap4212;
240 nmap[1] = ARRAY_SIZE(sysmmu_resmap4212);
241 }
242#endif
243
244 for (j = 0; j < 2; j++) {
245 for (i = 0; i < nmap[j]; i++) {
246 struct sysmmu_resource_map *map;
247 struct sysmmu_platform_data *platdata;
248
249 map = &resmap[j][i];
250
251 map->pdev->dev.parent = map->pdd;
252
253 platdata = map->pdev->dev.platform_data;
254 platdata->clockname = map->clocknames;
255
256 if (platform_device_add_resources(map->pdev, map->res,
257 map->rnum)) {
258 pr_err("%s: Failed to add device resources for "
259 "%s.%d\n", __func__,
260 map->pdev->name, map->pdev->id);
261 continue;
262 }
263
264 if (platform_device_register(map->pdev)) {
265 pr_err("%s: Failed to register %s.%d\n",
266 __func__, map->pdev->name,
267 map->pdev->id);
268 }
269 }
270 }
271
272 return 0;
233} 273}
274arch_initcall(init_sysmmu_platform_device);
diff --git a/arch/arm/mach-exynos/dma.c b/arch/arm/mach-exynos/dma.c
index 69aaa4503205..f60b66dbcf84 100644
--- a/arch/arm/mach-exynos/dma.c
+++ b/arch/arm/mach-exynos/dma.c
@@ -103,10 +103,45 @@ static u8 exynos4212_pdma0_peri[] = {
103 DMACH_MIPI_HSI5, 103 DMACH_MIPI_HSI5,
104}; 104};
105 105
106struct dma_pl330_platdata exynos4_pdma0_pdata; 106static u8 exynos5250_pdma0_peri[] = {
107 DMACH_PCM0_RX,
108 DMACH_PCM0_TX,
109 DMACH_PCM2_RX,
110 DMACH_PCM2_TX,
111 DMACH_SPI0_RX,
112 DMACH_SPI0_TX,
113 DMACH_SPI2_RX,
114 DMACH_SPI2_TX,
115 DMACH_I2S0S_TX,
116 DMACH_I2S0_RX,
117 DMACH_I2S0_TX,
118 DMACH_I2S2_RX,
119 DMACH_I2S2_TX,
120 DMACH_UART0_RX,
121 DMACH_UART0_TX,
122 DMACH_UART2_RX,
123 DMACH_UART2_TX,
124 DMACH_UART4_RX,
125 DMACH_UART4_TX,
126 DMACH_SLIMBUS0_RX,
127 DMACH_SLIMBUS0_TX,
128 DMACH_SLIMBUS2_RX,
129 DMACH_SLIMBUS2_TX,
130 DMACH_SLIMBUS4_RX,
131 DMACH_SLIMBUS4_TX,
132 DMACH_AC97_MICIN,
133 DMACH_AC97_PCMIN,
134 DMACH_AC97_PCMOUT,
135 DMACH_MIPI_HSI0,
136 DMACH_MIPI_HSI2,
137 DMACH_MIPI_HSI4,
138 DMACH_MIPI_HSI6,
139};
140
141static struct dma_pl330_platdata exynos_pdma0_pdata;
107 142
108static AMBA_AHB_DEVICE(exynos4_pdma0, "dma-pl330.0", 0x00041330, 143static AMBA_AHB_DEVICE(exynos_pdma0, "dma-pl330.0", 0x00041330,
109 EXYNOS4_PA_PDMA0, {EXYNOS4_IRQ_PDMA0}, &exynos4_pdma0_pdata); 144 EXYNOS4_PA_PDMA0, {EXYNOS4_IRQ_PDMA0}, &exynos_pdma0_pdata);
110 145
111static u8 exynos4210_pdma1_peri[] = { 146static u8 exynos4210_pdma1_peri[] = {
112 DMACH_PCM0_RX, 147 DMACH_PCM0_RX,
@@ -169,10 +204,45 @@ static u8 exynos4212_pdma1_peri[] = {
169 DMACH_MIPI_HSI7, 204 DMACH_MIPI_HSI7,
170}; 205};
171 206
172static struct dma_pl330_platdata exynos4_pdma1_pdata; 207static u8 exynos5250_pdma1_peri[] = {
208 DMACH_PCM0_RX,
209 DMACH_PCM0_TX,
210 DMACH_PCM1_RX,
211 DMACH_PCM1_TX,
212 DMACH_SPI1_RX,
213 DMACH_SPI1_TX,
214 DMACH_PWM,
215 DMACH_SPDIF,
216 DMACH_I2S0S_TX,
217 DMACH_I2S0_RX,
218 DMACH_I2S0_TX,
219 DMACH_I2S1_RX,
220 DMACH_I2S1_TX,
221 DMACH_UART0_RX,
222 DMACH_UART0_TX,
223 DMACH_UART1_RX,
224 DMACH_UART1_TX,
225 DMACH_UART3_RX,
226 DMACH_UART3_TX,
227 DMACH_SLIMBUS1_RX,
228 DMACH_SLIMBUS1_TX,
229 DMACH_SLIMBUS3_RX,
230 DMACH_SLIMBUS3_TX,
231 DMACH_SLIMBUS5_RX,
232 DMACH_SLIMBUS5_TX,
233 DMACH_SLIMBUS0AUX_RX,
234 DMACH_SLIMBUS0AUX_TX,
235 DMACH_DISP1,
236 DMACH_MIPI_HSI1,
237 DMACH_MIPI_HSI3,
238 DMACH_MIPI_HSI5,
239 DMACH_MIPI_HSI7,
240};
173 241
174static AMBA_AHB_DEVICE(exynos4_pdma1, "dma-pl330.1", 0x00041330, 242static struct dma_pl330_platdata exynos_pdma1_pdata;
175 EXYNOS4_PA_PDMA1, {EXYNOS4_IRQ_PDMA1}, &exynos4_pdma1_pdata); 243
244static AMBA_AHB_DEVICE(exynos_pdma1, "dma-pl330.1", 0x00041330,
245 EXYNOS4_PA_PDMA1, {EXYNOS4_IRQ_PDMA1}, &exynos_pdma1_pdata);
176 246
177static u8 mdma_peri[] = { 247static u8 mdma_peri[] = {
178 DMACH_MTOM_0, 248 DMACH_MTOM_0,
@@ -185,46 +255,63 @@ static u8 mdma_peri[] = {
185 DMACH_MTOM_7, 255 DMACH_MTOM_7,
186}; 256};
187 257
188static struct dma_pl330_platdata exynos4_mdma1_pdata = { 258static struct dma_pl330_platdata exynos_mdma1_pdata = {
189 .nr_valid_peri = ARRAY_SIZE(mdma_peri), 259 .nr_valid_peri = ARRAY_SIZE(mdma_peri),
190 .peri_id = mdma_peri, 260 .peri_id = mdma_peri,
191}; 261};
192 262
193static AMBA_AHB_DEVICE(exynos4_mdma1, "dma-pl330.2", 0x00041330, 263static AMBA_AHB_DEVICE(exynos_mdma1, "dma-pl330.2", 0x00041330,
194 EXYNOS4_PA_MDMA1, {EXYNOS4_IRQ_MDMA1}, &exynos4_mdma1_pdata); 264 EXYNOS4_PA_MDMA1, {EXYNOS4_IRQ_MDMA1}, &exynos_mdma1_pdata);
195 265
196static int __init exynos4_dma_init(void) 266static int __init exynos_dma_init(void)
197{ 267{
198 if (of_have_populated_dt()) 268 if (of_have_populated_dt())
199 return 0; 269 return 0;
200 270
201 if (soc_is_exynos4210()) { 271 if (soc_is_exynos4210()) {
202 exynos4_pdma0_pdata.nr_valid_peri = 272 exynos_pdma0_pdata.nr_valid_peri =
203 ARRAY_SIZE(exynos4210_pdma0_peri); 273 ARRAY_SIZE(exynos4210_pdma0_peri);
204 exynos4_pdma0_pdata.peri_id = exynos4210_pdma0_peri; 274 exynos_pdma0_pdata.peri_id = exynos4210_pdma0_peri;
205 exynos4_pdma1_pdata.nr_valid_peri = 275 exynos_pdma1_pdata.nr_valid_peri =
206 ARRAY_SIZE(exynos4210_pdma1_peri); 276 ARRAY_SIZE(exynos4210_pdma1_peri);
207 exynos4_pdma1_pdata.peri_id = exynos4210_pdma1_peri; 277 exynos_pdma1_pdata.peri_id = exynos4210_pdma1_peri;
208 } else if (soc_is_exynos4212() || soc_is_exynos4412()) { 278 } else if (soc_is_exynos4212() || soc_is_exynos4412()) {
209 exynos4_pdma0_pdata.nr_valid_peri = 279 exynos_pdma0_pdata.nr_valid_peri =
210 ARRAY_SIZE(exynos4212_pdma0_peri); 280 ARRAY_SIZE(exynos4212_pdma0_peri);
211 exynos4_pdma0_pdata.peri_id = exynos4212_pdma0_peri; 281 exynos_pdma0_pdata.peri_id = exynos4212_pdma0_peri;
212 exynos4_pdma1_pdata.nr_valid_peri = 282 exynos_pdma1_pdata.nr_valid_peri =
213 ARRAY_SIZE(exynos4212_pdma1_peri); 283 ARRAY_SIZE(exynos4212_pdma1_peri);
214 exynos4_pdma1_pdata.peri_id = exynos4212_pdma1_peri; 284 exynos_pdma1_pdata.peri_id = exynos4212_pdma1_peri;
285 } else if (soc_is_exynos5250()) {
286 exynos_pdma0_pdata.nr_valid_peri =
287 ARRAY_SIZE(exynos5250_pdma0_peri);
288 exynos_pdma0_pdata.peri_id = exynos5250_pdma0_peri;
289 exynos_pdma1_pdata.nr_valid_peri =
290 ARRAY_SIZE(exynos5250_pdma1_peri);
291 exynos_pdma1_pdata.peri_id = exynos5250_pdma1_peri;
292
293 exynos_pdma0_device.res.start = EXYNOS5_PA_PDMA0;
294 exynos_pdma0_device.res.end = EXYNOS5_PA_PDMA0 + SZ_4K;
295 exynos_pdma0_device.irq[0] = EXYNOS5_IRQ_PDMA0;
296 exynos_pdma1_device.res.start = EXYNOS5_PA_PDMA1;
297 exynos_pdma1_device.res.end = EXYNOS5_PA_PDMA1 + SZ_4K;
298 exynos_pdma0_device.irq[0] = EXYNOS5_IRQ_PDMA1;
299 exynos_mdma1_device.res.start = EXYNOS5_PA_MDMA1;
300 exynos_mdma1_device.res.end = EXYNOS5_PA_MDMA1 + SZ_4K;
301 exynos_pdma0_device.irq[0] = EXYNOS5_IRQ_MDMA1;
215 } 302 }
216 303
217 dma_cap_set(DMA_SLAVE, exynos4_pdma0_pdata.cap_mask); 304 dma_cap_set(DMA_SLAVE, exynos_pdma0_pdata.cap_mask);
218 dma_cap_set(DMA_CYCLIC, exynos4_pdma0_pdata.cap_mask); 305 dma_cap_set(DMA_CYCLIC, exynos_pdma0_pdata.cap_mask);
219 amba_device_register(&exynos4_pdma0_device, &iomem_resource); 306 amba_device_register(&exynos_pdma0_device, &iomem_resource);
220 307
221 dma_cap_set(DMA_SLAVE, exynos4_pdma1_pdata.cap_mask); 308 dma_cap_set(DMA_SLAVE, exynos_pdma1_pdata.cap_mask);
222 dma_cap_set(DMA_CYCLIC, exynos4_pdma1_pdata.cap_mask); 309 dma_cap_set(DMA_CYCLIC, exynos_pdma1_pdata.cap_mask);
223 amba_device_register(&exynos4_pdma1_device, &iomem_resource); 310 amba_device_register(&exynos_pdma1_device, &iomem_resource);
224 311
225 dma_cap_set(DMA_MEMCPY, exynos4_mdma1_pdata.cap_mask); 312 dma_cap_set(DMA_MEMCPY, exynos_mdma1_pdata.cap_mask);
226 amba_device_register(&exynos4_mdma1_device, &iomem_resource); 313 amba_device_register(&exynos_mdma1_device, &iomem_resource);
227 314
228 return 0; 315 return 0;
229} 316}
230arch_initcall(exynos4_dma_init); 317arch_initcall(exynos_dma_init);
diff --git a/arch/arm/mach-exynos/include/mach/gpio.h b/arch/arm/mach-exynos/include/mach/gpio.h
index d7498afe036a..eb24f1eb8e3b 100644
--- a/arch/arm/mach-exynos/include/mach/gpio.h
+++ b/arch/arm/mach-exynos/include/mach/gpio.h
@@ -153,10 +153,11 @@ enum exynos4_gpio_number {
153#define EXYNOS5_GPIO_B2_NR (4) 153#define EXYNOS5_GPIO_B2_NR (4)
154#define EXYNOS5_GPIO_B3_NR (4) 154#define EXYNOS5_GPIO_B3_NR (4)
155#define EXYNOS5_GPIO_C0_NR (7) 155#define EXYNOS5_GPIO_C0_NR (7)
156#define EXYNOS5_GPIO_C1_NR (7) 156#define EXYNOS5_GPIO_C1_NR (4)
157#define EXYNOS5_GPIO_C2_NR (7) 157#define EXYNOS5_GPIO_C2_NR (7)
158#define EXYNOS5_GPIO_C3_NR (7) 158#define EXYNOS5_GPIO_C3_NR (7)
159#define EXYNOS5_GPIO_D0_NR (8) 159#define EXYNOS5_GPIO_C4_NR (7)
160#define EXYNOS5_GPIO_D0_NR (4)
160#define EXYNOS5_GPIO_D1_NR (8) 161#define EXYNOS5_GPIO_D1_NR (8)
161#define EXYNOS5_GPIO_Y0_NR (6) 162#define EXYNOS5_GPIO_Y0_NR (6)
162#define EXYNOS5_GPIO_Y1_NR (4) 163#define EXYNOS5_GPIO_Y1_NR (4)
@@ -199,7 +200,8 @@ enum exynos5_gpio_number {
199 EXYNOS5_GPIO_C1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C0), 200 EXYNOS5_GPIO_C1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C0),
200 EXYNOS5_GPIO_C2_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C1), 201 EXYNOS5_GPIO_C2_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C1),
201 EXYNOS5_GPIO_C3_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C2), 202 EXYNOS5_GPIO_C3_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C2),
202 EXYNOS5_GPIO_D0_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C3), 203 EXYNOS5_GPIO_C4_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C3),
204 EXYNOS5_GPIO_D0_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_C4),
203 EXYNOS5_GPIO_D1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_D0), 205 EXYNOS5_GPIO_D1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_D0),
204 EXYNOS5_GPIO_Y0_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_D1), 206 EXYNOS5_GPIO_Y0_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_D1),
205 EXYNOS5_GPIO_Y1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_Y0), 207 EXYNOS5_GPIO_Y1_START = EXYNOS_GPIO_NEXT(EXYNOS5_GPIO_Y0),
@@ -242,6 +244,7 @@ enum exynos5_gpio_number {
242#define EXYNOS5_GPC1(_nr) (EXYNOS5_GPIO_C1_START + (_nr)) 244#define EXYNOS5_GPC1(_nr) (EXYNOS5_GPIO_C1_START + (_nr))
243#define EXYNOS5_GPC2(_nr) (EXYNOS5_GPIO_C2_START + (_nr)) 245#define EXYNOS5_GPC2(_nr) (EXYNOS5_GPIO_C2_START + (_nr))
244#define EXYNOS5_GPC3(_nr) (EXYNOS5_GPIO_C3_START + (_nr)) 246#define EXYNOS5_GPC3(_nr) (EXYNOS5_GPIO_C3_START + (_nr))
247#define EXYNOS5_GPC4(_nr) (EXYNOS5_GPIO_C4_START + (_nr))
245#define EXYNOS5_GPD0(_nr) (EXYNOS5_GPIO_D0_START + (_nr)) 248#define EXYNOS5_GPD0(_nr) (EXYNOS5_GPIO_D0_START + (_nr))
246#define EXYNOS5_GPD1(_nr) (EXYNOS5_GPIO_D1_START + (_nr)) 249#define EXYNOS5_GPD1(_nr) (EXYNOS5_GPIO_D1_START + (_nr))
247#define EXYNOS5_GPY0(_nr) (EXYNOS5_GPIO_Y0_START + (_nr)) 250#define EXYNOS5_GPY0(_nr) (EXYNOS5_GPIO_Y0_START + (_nr))
diff --git a/arch/arm/mach-exynos/include/mach/irqs.h b/arch/arm/mach-exynos/include/mach/irqs.h
index c02dae7bf4a3..7a4b4789eb72 100644
--- a/arch/arm/mach-exynos/include/mach/irqs.h
+++ b/arch/arm/mach-exynos/include/mach/irqs.h
@@ -154,6 +154,13 @@
154#define EXYNOS4_IRQ_SYSMMU_MFC_M1_0 COMBINER_IRQ(5, 6) 154#define EXYNOS4_IRQ_SYSMMU_MFC_M1_0 COMBINER_IRQ(5, 6)
155#define EXYNOS4_IRQ_SYSMMU_PCIE_0 COMBINER_IRQ(5, 7) 155#define EXYNOS4_IRQ_SYSMMU_PCIE_0 COMBINER_IRQ(5, 7)
156 156
157#define EXYNOS4_IRQ_SYSMMU_FIMC_LITE0_0 COMBINER_IRQ(16, 0)
158#define EXYNOS4_IRQ_SYSMMU_FIMC_LITE1_0 COMBINER_IRQ(16, 1)
159#define EXYNOS4_IRQ_SYSMMU_FIMC_ISP_0 COMBINER_IRQ(16, 2)
160#define EXYNOS4_IRQ_SYSMMU_FIMC_DRC_0 COMBINER_IRQ(16, 3)
161#define EXYNOS4_IRQ_SYSMMU_FIMC_FD_0 COMBINER_IRQ(16, 4)
162#define EXYNOS4_IRQ_SYSMMU_FIMC_CX_0 COMBINER_IRQ(16, 5)
163
157#define EXYNOS4_IRQ_FIMD0_FIFO COMBINER_IRQ(11, 0) 164#define EXYNOS4_IRQ_FIMD0_FIFO COMBINER_IRQ(11, 0)
158#define EXYNOS4_IRQ_FIMD0_VSYNC COMBINER_IRQ(11, 1) 165#define EXYNOS4_IRQ_FIMD0_VSYNC COMBINER_IRQ(11, 1)
159#define EXYNOS4_IRQ_FIMD0_SYSTEM COMBINER_IRQ(11, 2) 166#define EXYNOS4_IRQ_FIMD0_SYSTEM COMBINER_IRQ(11, 2)
@@ -221,24 +228,6 @@
221#define IRQ_KEYPAD EXYNOS4_IRQ_KEYPAD 228#define IRQ_KEYPAD EXYNOS4_IRQ_KEYPAD
222#define IRQ_PMU EXYNOS4_IRQ_PMU 229#define IRQ_PMU EXYNOS4_IRQ_PMU
223 230
224#define IRQ_SYSMMU_MDMA0_0 EXYNOS4_IRQ_SYSMMU_MDMA0_0
225#define IRQ_SYSMMU_SSS_0 EXYNOS4_IRQ_SYSMMU_SSS_0
226#define IRQ_SYSMMU_FIMC0_0 EXYNOS4_IRQ_SYSMMU_FIMC0_0
227#define IRQ_SYSMMU_FIMC1_0 EXYNOS4_IRQ_SYSMMU_FIMC1_0
228#define IRQ_SYSMMU_FIMC2_0 EXYNOS4_IRQ_SYSMMU_FIMC2_0
229#define IRQ_SYSMMU_FIMC3_0 EXYNOS4_IRQ_SYSMMU_FIMC3_0
230#define IRQ_SYSMMU_JPEG_0 EXYNOS4_IRQ_SYSMMU_JPEG_0
231#define IRQ_SYSMMU_2D_0 EXYNOS4_IRQ_SYSMMU_2D_0
232
233#define IRQ_SYSMMU_ROTATOR_0 EXYNOS4_IRQ_SYSMMU_ROTATOR_0
234#define IRQ_SYSMMU_MDMA1_0 EXYNOS4_IRQ_SYSMMU_MDMA1_0
235#define IRQ_SYSMMU_LCD0_M0_0 EXYNOS4_IRQ_SYSMMU_LCD0_M0_0
236#define IRQ_SYSMMU_LCD1_M1_0 EXYNOS4_IRQ_SYSMMU_LCD1_M1_0
237#define IRQ_SYSMMU_TV_M0_0 EXYNOS4_IRQ_SYSMMU_TV_M0_0
238#define IRQ_SYSMMU_MFC_M0_0 EXYNOS4_IRQ_SYSMMU_MFC_M0_0
239#define IRQ_SYSMMU_MFC_M1_0 EXYNOS4_IRQ_SYSMMU_MFC_M1_0
240#define IRQ_SYSMMU_PCIE_0 EXYNOS4_IRQ_SYSMMU_PCIE_0
241
242#define IRQ_FIMD0_FIFO EXYNOS4_IRQ_FIMD0_FIFO 231#define IRQ_FIMD0_FIFO EXYNOS4_IRQ_FIMD0_FIFO
243#define IRQ_FIMD0_VSYNC EXYNOS4_IRQ_FIMD0_VSYNC 232#define IRQ_FIMD0_VSYNC EXYNOS4_IRQ_FIMD0_VSYNC
244#define IRQ_FIMD0_SYSTEM EXYNOS4_IRQ_FIMD0_SYSTEM 233#define IRQ_FIMD0_SYSTEM EXYNOS4_IRQ_FIMD0_SYSTEM
@@ -298,6 +287,7 @@
298#define EXYNOS5_IRQ_MIPICSI1 IRQ_SPI(80) 287#define EXYNOS5_IRQ_MIPICSI1 IRQ_SPI(80)
299#define EXYNOS5_IRQ_EFNFCON_DMA_ABORT IRQ_SPI(81) 288#define EXYNOS5_IRQ_EFNFCON_DMA_ABORT IRQ_SPI(81)
300#define EXYNOS5_IRQ_MIPIDSI0 IRQ_SPI(82) 289#define EXYNOS5_IRQ_MIPIDSI0 IRQ_SPI(82)
290#define EXYNOS5_IRQ_WDT_IOP IRQ_SPI(83)
301#define EXYNOS5_IRQ_ROTATOR IRQ_SPI(84) 291#define EXYNOS5_IRQ_ROTATOR IRQ_SPI(84)
302#define EXYNOS5_IRQ_GSC0 IRQ_SPI(85) 292#define EXYNOS5_IRQ_GSC0 IRQ_SPI(85)
303#define EXYNOS5_IRQ_GSC1 IRQ_SPI(86) 293#define EXYNOS5_IRQ_GSC1 IRQ_SPI(86)
@@ -306,8 +296,8 @@
306#define EXYNOS5_IRQ_JPEG IRQ_SPI(89) 296#define EXYNOS5_IRQ_JPEG IRQ_SPI(89)
307#define EXYNOS5_IRQ_EFNFCON_DMA IRQ_SPI(90) 297#define EXYNOS5_IRQ_EFNFCON_DMA IRQ_SPI(90)
308#define EXYNOS5_IRQ_2D IRQ_SPI(91) 298#define EXYNOS5_IRQ_2D IRQ_SPI(91)
309#define EXYNOS5_IRQ_SFMC0 IRQ_SPI(92) 299#define EXYNOS5_IRQ_EFNFCON_0 IRQ_SPI(92)
310#define EXYNOS5_IRQ_SFMC1 IRQ_SPI(93) 300#define EXYNOS5_IRQ_EFNFCON_1 IRQ_SPI(93)
311#define EXYNOS5_IRQ_MIXER IRQ_SPI(94) 301#define EXYNOS5_IRQ_MIXER IRQ_SPI(94)
312#define EXYNOS5_IRQ_HDMI IRQ_SPI(95) 302#define EXYNOS5_IRQ_HDMI IRQ_SPI(95)
313#define EXYNOS5_IRQ_MFC IRQ_SPI(96) 303#define EXYNOS5_IRQ_MFC IRQ_SPI(96)
@@ -321,7 +311,7 @@
321#define EXYNOS5_IRQ_PCM2 IRQ_SPI(104) 311#define EXYNOS5_IRQ_PCM2 IRQ_SPI(104)
322#define EXYNOS5_IRQ_SPDIF IRQ_SPI(105) 312#define EXYNOS5_IRQ_SPDIF IRQ_SPI(105)
323#define EXYNOS5_IRQ_ADC0 IRQ_SPI(106) 313#define EXYNOS5_IRQ_ADC0 IRQ_SPI(106)
324 314#define EXYNOS5_IRQ_ADC1 IRQ_SPI(107)
325#define EXYNOS5_IRQ_SATA_PHY IRQ_SPI(108) 315#define EXYNOS5_IRQ_SATA_PHY IRQ_SPI(108)
326#define EXYNOS5_IRQ_SATA_PMEMREQ IRQ_SPI(109) 316#define EXYNOS5_IRQ_SATA_PMEMREQ IRQ_SPI(109)
327#define EXYNOS5_IRQ_CAM_C IRQ_SPI(110) 317#define EXYNOS5_IRQ_CAM_C IRQ_SPI(110)
@@ -330,8 +320,9 @@
330#define EXYNOS5_IRQ_DP1_INTP1 IRQ_SPI(113) 320#define EXYNOS5_IRQ_DP1_INTP1 IRQ_SPI(113)
331#define EXYNOS5_IRQ_CEC IRQ_SPI(114) 321#define EXYNOS5_IRQ_CEC IRQ_SPI(114)
332#define EXYNOS5_IRQ_SATA IRQ_SPI(115) 322#define EXYNOS5_IRQ_SATA IRQ_SPI(115)
333#define EXYNOS5_IRQ_NFCON IRQ_SPI(116)
334 323
324#define EXYNOS5_IRQ_MCT_L0 IRQ_SPI(120)
325#define EXYNOS5_IRQ_MCT_L1 IRQ_SPI(121)
335#define EXYNOS5_IRQ_MMC44 IRQ_SPI(123) 326#define EXYNOS5_IRQ_MMC44 IRQ_SPI(123)
336#define EXYNOS5_IRQ_MDMA1 IRQ_SPI(124) 327#define EXYNOS5_IRQ_MDMA1 IRQ_SPI(124)
337#define EXYNOS5_IRQ_FIMC_LITE0 IRQ_SPI(125) 328#define EXYNOS5_IRQ_FIMC_LITE0 IRQ_SPI(125)
@@ -339,7 +330,6 @@
339#define EXYNOS5_IRQ_RP_TIMER IRQ_SPI(127) 330#define EXYNOS5_IRQ_RP_TIMER IRQ_SPI(127)
340 331
341#define EXYNOS5_IRQ_PMU COMBINER_IRQ(1, 2) 332#define EXYNOS5_IRQ_PMU COMBINER_IRQ(1, 2)
342#define EXYNOS5_IRQ_PMU_CPU1 COMBINER_IRQ(1, 6)
343 333
344#define EXYNOS5_IRQ_SYSMMU_GSC0_0 COMBINER_IRQ(2, 0) 334#define EXYNOS5_IRQ_SYSMMU_GSC0_0 COMBINER_IRQ(2, 0)
345#define EXYNOS5_IRQ_SYSMMU_GSC0_1 COMBINER_IRQ(2, 1) 335#define EXYNOS5_IRQ_SYSMMU_GSC0_1 COMBINER_IRQ(2, 1)
@@ -350,6 +340,8 @@
350#define EXYNOS5_IRQ_SYSMMU_GSC3_0 COMBINER_IRQ(2, 6) 340#define EXYNOS5_IRQ_SYSMMU_GSC3_0 COMBINER_IRQ(2, 6)
351#define EXYNOS5_IRQ_SYSMMU_GSC3_1 COMBINER_IRQ(2, 7) 341#define EXYNOS5_IRQ_SYSMMU_GSC3_1 COMBINER_IRQ(2, 7)
352 342
343#define EXYNOS5_IRQ_SYSMMU_LITE2_0 COMBINER_IRQ(3, 0)
344#define EXYNOS5_IRQ_SYSMMU_LITE2_1 COMBINER_IRQ(3, 1)
353#define EXYNOS5_IRQ_SYSMMU_FIMD1_0 COMBINER_IRQ(3, 2) 345#define EXYNOS5_IRQ_SYSMMU_FIMD1_0 COMBINER_IRQ(3, 2)
354#define EXYNOS5_IRQ_SYSMMU_FIMD1_1 COMBINER_IRQ(3, 3) 346#define EXYNOS5_IRQ_SYSMMU_FIMD1_1 COMBINER_IRQ(3, 3)
355#define EXYNOS5_IRQ_SYSMMU_LITE0_0 COMBINER_IRQ(3, 4) 347#define EXYNOS5_IRQ_SYSMMU_LITE0_0 COMBINER_IRQ(3, 4)
@@ -373,8 +365,8 @@
373 365
374#define EXYNOS5_IRQ_SYSMMU_ARM_0 COMBINER_IRQ(6, 0) 366#define EXYNOS5_IRQ_SYSMMU_ARM_0 COMBINER_IRQ(6, 0)
375#define EXYNOS5_IRQ_SYSMMU_ARM_1 COMBINER_IRQ(6, 1) 367#define EXYNOS5_IRQ_SYSMMU_ARM_1 COMBINER_IRQ(6, 1)
376#define EXYNOS5_IRQ_SYSMMU_MFC_L_0 COMBINER_IRQ(6, 2) 368#define EXYNOS5_IRQ_SYSMMU_MFC_R_0 COMBINER_IRQ(6, 2)
377#define EXYNOS5_IRQ_SYSMMU_MFC_L_1 COMBINER_IRQ(6, 3) 369#define EXYNOS5_IRQ_SYSMMU_MFC_R_1 COMBINER_IRQ(6, 3)
378#define EXYNOS5_IRQ_SYSMMU_RTIC_0 COMBINER_IRQ(6, 4) 370#define EXYNOS5_IRQ_SYSMMU_RTIC_0 COMBINER_IRQ(6, 4)
379#define EXYNOS5_IRQ_SYSMMU_RTIC_1 COMBINER_IRQ(6, 5) 371#define EXYNOS5_IRQ_SYSMMU_RTIC_1 COMBINER_IRQ(6, 5)
380#define EXYNOS5_IRQ_SYSMMU_SSS_0 COMBINER_IRQ(6, 6) 372#define EXYNOS5_IRQ_SYSMMU_SSS_0 COMBINER_IRQ(6, 6)
@@ -386,11 +378,9 @@
386#define EXYNOS5_IRQ_SYSMMU_MDMA1_1 COMBINER_IRQ(7, 3) 378#define EXYNOS5_IRQ_SYSMMU_MDMA1_1 COMBINER_IRQ(7, 3)
387#define EXYNOS5_IRQ_SYSMMU_TV_0 COMBINER_IRQ(7, 4) 379#define EXYNOS5_IRQ_SYSMMU_TV_0 COMBINER_IRQ(7, 4)
388#define EXYNOS5_IRQ_SYSMMU_TV_1 COMBINER_IRQ(7, 5) 380#define EXYNOS5_IRQ_SYSMMU_TV_1 COMBINER_IRQ(7, 5)
389#define EXYNOS5_IRQ_SYSMMU_GPSX_0 COMBINER_IRQ(7, 6)
390#define EXYNOS5_IRQ_SYSMMU_GPSX_1 COMBINER_IRQ(7, 7)
391 381
392#define EXYNOS5_IRQ_SYSMMU_MFC_R_0 COMBINER_IRQ(8, 5) 382#define EXYNOS5_IRQ_SYSMMU_MFC_L_0 COMBINER_IRQ(8, 5)
393#define EXYNOS5_IRQ_SYSMMU_MFC_R_1 COMBINER_IRQ(8, 6) 383#define EXYNOS5_IRQ_SYSMMU_MFC_L_1 COMBINER_IRQ(8, 6)
394 384
395#define EXYNOS5_IRQ_SYSMMU_DIS1_0 COMBINER_IRQ(9, 4) 385#define EXYNOS5_IRQ_SYSMMU_DIS1_0 COMBINER_IRQ(9, 4)
396#define EXYNOS5_IRQ_SYSMMU_DIS1_1 COMBINER_IRQ(9, 5) 386#define EXYNOS5_IRQ_SYSMMU_DIS1_1 COMBINER_IRQ(9, 5)
@@ -406,17 +396,24 @@
406#define EXYNOS5_IRQ_SYSMMU_DRC_0 COMBINER_IRQ(11, 6) 396#define EXYNOS5_IRQ_SYSMMU_DRC_0 COMBINER_IRQ(11, 6)
407#define EXYNOS5_IRQ_SYSMMU_DRC_1 COMBINER_IRQ(11, 7) 397#define EXYNOS5_IRQ_SYSMMU_DRC_1 COMBINER_IRQ(11, 7)
408 398
399#define EXYNOS5_IRQ_MDMA1_ABORT COMBINER_IRQ(13, 1)
400
401#define EXYNOS5_IRQ_MDMA0_ABORT COMBINER_IRQ(15, 3)
402
409#define EXYNOS5_IRQ_FIMD1_FIFO COMBINER_IRQ(18, 4) 403#define EXYNOS5_IRQ_FIMD1_FIFO COMBINER_IRQ(18, 4)
410#define EXYNOS5_IRQ_FIMD1_VSYNC COMBINER_IRQ(18, 5) 404#define EXYNOS5_IRQ_FIMD1_VSYNC COMBINER_IRQ(18, 5)
411#define EXYNOS5_IRQ_FIMD1_SYSTEM COMBINER_IRQ(18, 6) 405#define EXYNOS5_IRQ_FIMD1_SYSTEM COMBINER_IRQ(18, 6)
412 406
407#define EXYNOS5_IRQ_ARMIOP_GIC COMBINER_IRQ(19, 0)
408#define EXYNOS5_IRQ_ARMISP_GIC COMBINER_IRQ(19, 1)
409#define EXYNOS5_IRQ_IOP_GIC COMBINER_IRQ(19, 3)
410#define EXYNOS5_IRQ_ISP_GIC COMBINER_IRQ(19, 4)
411
412#define EXYNOS5_IRQ_PMU_CPU1 COMBINER_IRQ(22, 4)
413
413#define EXYNOS5_IRQ_EINT0 COMBINER_IRQ(23, 0) 414#define EXYNOS5_IRQ_EINT0 COMBINER_IRQ(23, 0)
414#define EXYNOS5_IRQ_MCT_L0 COMBINER_IRQ(23, 1)
415#define EXYNOS5_IRQ_MCT_L1 COMBINER_IRQ(23, 2)
416#define EXYNOS5_IRQ_MCT_G0 COMBINER_IRQ(23, 3) 415#define EXYNOS5_IRQ_MCT_G0 COMBINER_IRQ(23, 3)
417#define EXYNOS5_IRQ_MCT_G1 COMBINER_IRQ(23, 4) 416#define EXYNOS5_IRQ_MCT_G1 COMBINER_IRQ(23, 4)
418#define EXYNOS5_IRQ_MCT_G2 COMBINER_IRQ(23, 5)
419#define EXYNOS5_IRQ_MCT_G3 COMBINER_IRQ(23, 6)
420 417
421#define EXYNOS5_IRQ_EINT1 COMBINER_IRQ(24, 0) 418#define EXYNOS5_IRQ_EINT1 COMBINER_IRQ(24, 0)
422#define EXYNOS5_IRQ_SYSMMU_LITE1_0 COMBINER_IRQ(24, 1) 419#define EXYNOS5_IRQ_SYSMMU_LITE1_0 COMBINER_IRQ(24, 1)
@@ -447,7 +444,7 @@
447 444
448#define EXYNOS5_MAX_COMBINER_NR 32 445#define EXYNOS5_MAX_COMBINER_NR 32
449 446
450#define EXYNOS5_IRQ_GPIO1_NR_GROUPS 13 447#define EXYNOS5_IRQ_GPIO1_NR_GROUPS 14
451#define EXYNOS5_IRQ_GPIO2_NR_GROUPS 9 448#define EXYNOS5_IRQ_GPIO2_NR_GROUPS 9
452#define EXYNOS5_IRQ_GPIO3_NR_GROUPS 5 449#define EXYNOS5_IRQ_GPIO3_NR_GROUPS 5
453#define EXYNOS5_IRQ_GPIO4_NR_GROUPS 1 450#define EXYNOS5_IRQ_GPIO4_NR_GROUPS 1
diff --git a/arch/arm/mach-exynos/include/mach/map.h b/arch/arm/mach-exynos/include/mach/map.h
index e009a66477f4..ca4aa89aa46b 100644
--- a/arch/arm/mach-exynos/include/mach/map.h
+++ b/arch/arm/mach-exynos/include/mach/map.h
@@ -34,6 +34,9 @@
34 34
35#define EXYNOS4_PA_JPEG 0x11840000 35#define EXYNOS4_PA_JPEG 0x11840000
36 36
37/* x = 0...1 */
38#define EXYNOS4_PA_FIMC_LITE(x) (0x12390000 + ((x) * 0x10000))
39
37#define EXYNOS4_PA_G2D 0x12800000 40#define EXYNOS4_PA_G2D 0x12800000
38 41
39#define EXYNOS4_PA_I2S0 0x03830000 42#define EXYNOS4_PA_I2S0 0x03830000
@@ -78,8 +81,8 @@
78 81
79#define EXYNOS4_PA_GIC_CPU 0x10480000 82#define EXYNOS4_PA_GIC_CPU 0x10480000
80#define EXYNOS4_PA_GIC_DIST 0x10490000 83#define EXYNOS4_PA_GIC_DIST 0x10490000
81#define EXYNOS5_PA_GIC_CPU 0x10480000 84#define EXYNOS5_PA_GIC_CPU 0x10482000
82#define EXYNOS5_PA_GIC_DIST 0x10490000 85#define EXYNOS5_PA_GIC_DIST 0x10481000
83 86
84#define EXYNOS4_PA_COREPERI 0x10500000 87#define EXYNOS4_PA_COREPERI 0x10500000
85#define EXYNOS4_PA_TWD 0x10500600 88#define EXYNOS4_PA_TWD 0x10500600
@@ -95,6 +98,7 @@
95#define EXYNOS5_PA_PDMA1 0x121B0000 98#define EXYNOS5_PA_PDMA1 0x121B0000
96 99
97#define EXYNOS4_PA_SYSMMU_MDMA 0x10A40000 100#define EXYNOS4_PA_SYSMMU_MDMA 0x10A40000
101#define EXYNOS4_PA_SYSMMU_2D_ACP 0x10A40000
98#define EXYNOS4_PA_SYSMMU_SSS 0x10A50000 102#define EXYNOS4_PA_SYSMMU_SSS 0x10A50000
99#define EXYNOS4_PA_SYSMMU_FIMC0 0x11A20000 103#define EXYNOS4_PA_SYSMMU_FIMC0 0x11A20000
100#define EXYNOS4_PA_SYSMMU_FIMC1 0x11A30000 104#define EXYNOS4_PA_SYSMMU_FIMC1 0x11A30000
@@ -103,6 +107,12 @@
103#define EXYNOS4_PA_SYSMMU_JPEG 0x11A60000 107#define EXYNOS4_PA_SYSMMU_JPEG 0x11A60000
104#define EXYNOS4_PA_SYSMMU_FIMD0 0x11E20000 108#define EXYNOS4_PA_SYSMMU_FIMD0 0x11E20000
105#define EXYNOS4_PA_SYSMMU_FIMD1 0x12220000 109#define EXYNOS4_PA_SYSMMU_FIMD1 0x12220000
110#define EXYNOS4_PA_SYSMMU_FIMC_ISP 0x12260000
111#define EXYNOS4_PA_SYSMMU_FIMC_DRC 0x12270000
112#define EXYNOS4_PA_SYSMMU_FIMC_FD 0x122A0000
113#define EXYNOS4_PA_SYSMMU_ISPCPU 0x122B0000
114#define EXYNOS4_PA_SYSMMU_FIMC_LITE0 0x123B0000
115#define EXYNOS4_PA_SYSMMU_FIMC_LITE1 0x123C0000
106#define EXYNOS4_PA_SYSMMU_PCIe 0x12620000 116#define EXYNOS4_PA_SYSMMU_PCIe 0x12620000
107#define EXYNOS4_PA_SYSMMU_G2D 0x12A20000 117#define EXYNOS4_PA_SYSMMU_G2D 0x12A20000
108#define EXYNOS4_PA_SYSMMU_ROTATOR 0x12A30000 118#define EXYNOS4_PA_SYSMMU_ROTATOR 0x12A30000
@@ -110,6 +120,37 @@
110#define EXYNOS4_PA_SYSMMU_TV 0x12E20000 120#define EXYNOS4_PA_SYSMMU_TV 0x12E20000
111#define EXYNOS4_PA_SYSMMU_MFC_L 0x13620000 121#define EXYNOS4_PA_SYSMMU_MFC_L 0x13620000
112#define EXYNOS4_PA_SYSMMU_MFC_R 0x13630000 122#define EXYNOS4_PA_SYSMMU_MFC_R 0x13630000
123
124#define EXYNOS5_PA_SYSMMU_MDMA1 0x10A40000
125#define EXYNOS5_PA_SYSMMU_SSS 0x10A50000
126#define EXYNOS5_PA_SYSMMU_2D 0x10A60000
127#define EXYNOS5_PA_SYSMMU_MFC_L 0x11200000
128#define EXYNOS5_PA_SYSMMU_MFC_R 0x11210000
129#define EXYNOS5_PA_SYSMMU_ROTATOR 0x11D40000
130#define EXYNOS5_PA_SYSMMU_MDMA2 0x11D50000
131#define EXYNOS5_PA_SYSMMU_JPEG 0x11F20000
132#define EXYNOS5_PA_SYSMMU_IOP 0x12360000
133#define EXYNOS5_PA_SYSMMU_RTIC 0x12370000
134#define EXYNOS5_PA_SYSMMU_GPS 0x12630000
135#define EXYNOS5_PA_SYSMMU_ISP 0x13260000
136#define EXYNOS5_PA_SYSMMU_DRC 0x12370000
137#define EXYNOS5_PA_SYSMMU_SCALERC 0x13280000
138#define EXYNOS5_PA_SYSMMU_SCALERP 0x13290000
139#define EXYNOS5_PA_SYSMMU_FD 0x132A0000
140#define EXYNOS5_PA_SYSMMU_ISPCPU 0x132B0000
141#define EXYNOS5_PA_SYSMMU_ODC 0x132C0000
142#define EXYNOS5_PA_SYSMMU_DIS0 0x132D0000
143#define EXYNOS5_PA_SYSMMU_DIS1 0x132E0000
144#define EXYNOS5_PA_SYSMMU_3DNR 0x132F0000
145#define EXYNOS5_PA_SYSMMU_LITE0 0x13C40000
146#define EXYNOS5_PA_SYSMMU_LITE1 0x13C50000
147#define EXYNOS5_PA_SYSMMU_GSC0 0x13E80000
148#define EXYNOS5_PA_SYSMMU_GSC1 0x13E90000
149#define EXYNOS5_PA_SYSMMU_GSC2 0x13EA0000
150#define EXYNOS5_PA_SYSMMU_GSC3 0x13EB0000
151#define EXYNOS5_PA_SYSMMU_FIMD1 0x14640000
152#define EXYNOS5_PA_SYSMMU_TV 0x14650000
153
113#define EXYNOS4_PA_SPI0 0x13920000 154#define EXYNOS4_PA_SPI0 0x13920000
114#define EXYNOS4_PA_SPI1 0x13930000 155#define EXYNOS4_PA_SPI1 0x13930000
115#define EXYNOS4_PA_SPI2 0x13940000 156#define EXYNOS4_PA_SPI2 0x13940000
diff --git a/arch/arm/mach-exynos/include/mach/regs-clock.h b/arch/arm/mach-exynos/include/mach/regs-clock.h
index d9578a58ae7f..b78b5f3ad9c0 100644
--- a/arch/arm/mach-exynos/include/mach/regs-clock.h
+++ b/arch/arm/mach-exynos/include/mach/regs-clock.h
@@ -135,6 +135,9 @@
135#define EXYNOS4_CLKGATE_SCLKCPU EXYNOS_CLKREG(0x14800) 135#define EXYNOS4_CLKGATE_SCLKCPU EXYNOS_CLKREG(0x14800)
136#define EXYNOS4_CLKGATE_IP_CPU EXYNOS_CLKREG(0x14900) 136#define EXYNOS4_CLKGATE_IP_CPU EXYNOS_CLKREG(0x14900)
137 137
138#define EXYNOS4_CLKGATE_IP_ISP0 EXYNOS_CLKREG(0x18800)
139#define EXYNOS4_CLKGATE_IP_ISP1 EXYNOS_CLKREG(0x18804)
140
138#define EXYNOS4_APLL_LOCKTIME (0x1C20) /* 300us */ 141#define EXYNOS4_APLL_LOCKTIME (0x1C20) /* 300us */
139 142
140#define EXYNOS4_APLLCON0_ENABLE_SHIFT (31) 143#define EXYNOS4_APLLCON0_ENABLE_SHIFT (31)
@@ -303,6 +306,8 @@
303#define EXYNOS5_CLKDIV_PERIC0 EXYNOS_CLKREG(0x10558) 306#define EXYNOS5_CLKDIV_PERIC0 EXYNOS_CLKREG(0x10558)
304 307
305#define EXYNOS5_CLKGATE_IP_ACP EXYNOS_CLKREG(0x08800) 308#define EXYNOS5_CLKGATE_IP_ACP EXYNOS_CLKREG(0x08800)
309#define EXYNOS5_CLKGATE_IP_ISP0 EXYNOS_CLKREG(0x0C800)
310#define EXYNOS5_CLKGATE_IP_ISP1 EXYNOS_CLKREG(0x0C804)
306#define EXYNOS5_CLKGATE_IP_GSCL EXYNOS_CLKREG(0x10920) 311#define EXYNOS5_CLKGATE_IP_GSCL EXYNOS_CLKREG(0x10920)
307#define EXYNOS5_CLKGATE_IP_DISP1 EXYNOS_CLKREG(0x10928) 312#define EXYNOS5_CLKGATE_IP_DISP1 EXYNOS_CLKREG(0x10928)
308#define EXYNOS5_CLKGATE_IP_MFC EXYNOS_CLKREG(0x1092C) 313#define EXYNOS5_CLKGATE_IP_MFC EXYNOS_CLKREG(0x1092C)
@@ -317,6 +322,8 @@
317#define EXYNOS5_CLKSRC_CDREX EXYNOS_CLKREG(0x20200) 322#define EXYNOS5_CLKSRC_CDREX EXYNOS_CLKREG(0x20200)
318#define EXYNOS5_CLKDIV_CDREX EXYNOS_CLKREG(0x20500) 323#define EXYNOS5_CLKDIV_CDREX EXYNOS_CLKREG(0x20500)
319 324
325#define EXYNOS5_PLL_DIV2_SEL EXYNOS_CLKREG(0x20A24)
326
320#define EXYNOS5_EPLL_LOCK EXYNOS_CLKREG(0x10030) 327#define EXYNOS5_EPLL_LOCK EXYNOS_CLKREG(0x10030)
321 328
322#define EXYNOS5_EPLLCON0_LOCKED_SHIFT (29) 329#define EXYNOS5_EPLLCON0_LOCKED_SHIFT (29)
diff --git a/arch/arm/mach-exynos/include/mach/regs-pmu.h b/arch/arm/mach-exynos/include/mach/regs-pmu.h
index d457d052a420..4dbb8629b200 100644
--- a/arch/arm/mach-exynos/include/mach/regs-pmu.h
+++ b/arch/arm/mach-exynos/include/mach/regs-pmu.h
@@ -180,7 +180,7 @@
180 180
181#define S5P_PMU_LCD1_CONF S5P_PMUREG(0x3CA0) 181#define S5P_PMU_LCD1_CONF S5P_PMUREG(0x3CA0)
182 182
183/* Only for EXYNOS4212 */ 183/* Only for EXYNOS4x12 */
184#define S5P_ISP_ARM_LOWPWR S5P_PMUREG(0x1050) 184#define S5P_ISP_ARM_LOWPWR S5P_PMUREG(0x1050)
185#define S5P_DIS_IRQ_ISP_ARM_LOCAL_LOWPWR S5P_PMUREG(0x1054) 185#define S5P_DIS_IRQ_ISP_ARM_LOCAL_LOWPWR S5P_PMUREG(0x1054)
186#define S5P_DIS_IRQ_ISP_ARM_CENTRAL_LOWPWR S5P_PMUREG(0x1058) 186#define S5P_DIS_IRQ_ISP_ARM_CENTRAL_LOWPWR S5P_PMUREG(0x1058)
@@ -221,4 +221,12 @@
221#define S5P_SECSS_MEM_OPTION S5P_PMUREG(0x2EC8) 221#define S5P_SECSS_MEM_OPTION S5P_PMUREG(0x2EC8)
222#define S5P_ROTATOR_MEM_OPTION S5P_PMUREG(0x2F48) 222#define S5P_ROTATOR_MEM_OPTION S5P_PMUREG(0x2F48)
223 223
224/* Only for EXYNOS4412 */
225#define S5P_ARM_CORE2_LOWPWR S5P_PMUREG(0x1020)
226#define S5P_DIS_IRQ_CORE2 S5P_PMUREG(0x1024)
227#define S5P_DIS_IRQ_CENTRAL2 S5P_PMUREG(0x1028)
228#define S5P_ARM_CORE3_LOWPWR S5P_PMUREG(0x1030)
229#define S5P_DIS_IRQ_CORE3 S5P_PMUREG(0x1034)
230#define S5P_DIS_IRQ_CENTRAL3 S5P_PMUREG(0x1038)
231
224#endif /* __ASM_ARCH_REGS_PMU_H */ 232#endif /* __ASM_ARCH_REGS_PMU_H */
diff --git a/arch/arm/mach-exynos/include/mach/regs-sysmmu.h b/arch/arm/mach-exynos/include/mach/regs-sysmmu.h
deleted file mode 100644
index 68ff6ad08a2b..000000000000
--- a/arch/arm/mach-exynos/include/mach/regs-sysmmu.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/* linux/arch/arm/mach-exynos4/include/mach/regs-sysmmu.h
2 *
3 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * EXYNOS4 - System MMU register
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef __ASM_ARCH_REGS_SYSMMU_H
14#define __ASM_ARCH_REGS_SYSMMU_H __FILE__
15
16#define S5P_MMU_CTRL 0x000
17#define S5P_MMU_CFG 0x004
18#define S5P_MMU_STATUS 0x008
19#define S5P_MMU_FLUSH 0x00C
20#define S5P_PT_BASE_ADDR 0x014
21#define S5P_INT_STATUS 0x018
22#define S5P_INT_CLEAR 0x01C
23#define S5P_PAGE_FAULT_ADDR 0x024
24#define S5P_AW_FAULT_ADDR 0x028
25#define S5P_AR_FAULT_ADDR 0x02C
26#define S5P_DEFAULT_SLAVE_ADDR 0x030
27
28#endif /* __ASM_ARCH_REGS_SYSMMU_H */
diff --git a/arch/arm/mach-exynos/include/mach/spi-clocks.h b/arch/arm/mach-exynos/include/mach/spi-clocks.h
index 576efdf6d091..c71a5fba6a84 100644
--- a/arch/arm/mach-exynos/include/mach/spi-clocks.h
+++ b/arch/arm/mach-exynos/include/mach/spi-clocks.h
@@ -11,6 +11,6 @@
11#define __ASM_ARCH_SPI_CLKS_H __FILE__ 11#define __ASM_ARCH_SPI_CLKS_H __FILE__
12 12
13/* Must source from SCLK_SPI */ 13/* Must source from SCLK_SPI */
14#define EXYNOS4_SPI_SRCCLK_SCLK 0 14#define EXYNOS_SPI_SRCCLK_SCLK 0
15 15
16#endif /* __ASM_ARCH_SPI_CLKS_H */ 16#endif /* __ASM_ARCH_SPI_CLKS_H */
diff --git a/arch/arm/mach-exynos/include/mach/sysmmu.h b/arch/arm/mach-exynos/include/mach/sysmmu.h
index 6a5fbb534e82..998daf2add92 100644
--- a/arch/arm/mach-exynos/include/mach/sysmmu.h
+++ b/arch/arm/mach-exynos/include/mach/sysmmu.h
@@ -1,46 +1,66 @@
1/* linux/arch/arm/mach-exynos4/include/mach/sysmmu.h 1/*
2 * 2 * Copyright (c) 2011-2012 Samsung Electronics Co., Ltd.
3 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com 3 * http://www.samsung.com
5 * 4 *
6 * Samsung sysmmu driver for EXYNOS4 5 * EXYNOS - System MMU support
7 * 6 *
8 * This program is free software; you can redistribute it and/or modify 7 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as 8 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation. 9 * published by the Free Software Foundation.
11*/ 10 */
12 11
13#ifndef __ASM_ARM_ARCH_SYSMMU_H 12#ifndef _ARM_MACH_EXYNOS_SYSMMU_H_
14#define __ASM_ARM_ARCH_SYSMMU_H __FILE__ 13#define _ARM_MACH_EXYNOS_SYSMMU_H_
15 14
16enum exynos4_sysmmu_ips { 15struct sysmmu_platform_data {
17 SYSMMU_MDMA, 16 char *dbgname;
18 SYSMMU_SSS, 17 /* comma(,) separated list of clock names for clock gating */
19 SYSMMU_FIMC0, 18 char *clockname;
20 SYSMMU_FIMC1,
21 SYSMMU_FIMC2,
22 SYSMMU_FIMC3,
23 SYSMMU_JPEG,
24 SYSMMU_FIMD0,
25 SYSMMU_FIMD1,
26 SYSMMU_PCIe,
27 SYSMMU_G2D,
28 SYSMMU_ROTATOR,
29 SYSMMU_MDMA2,
30 SYSMMU_TV,
31 SYSMMU_MFC_L,
32 SYSMMU_MFC_R,
33 EXYNOS4_SYSMMU_TOTAL_IPNUM,
34}; 19};
35 20
36#define S5P_SYSMMU_TOTAL_IPNUM EXYNOS4_SYSMMU_TOTAL_IPNUM 21#define SYSMMU_DEVNAME_BASE "exynos-sysmmu"
22
23#define SYSMMU_CLOCK_NAME "sysmmu"
24#define SYSMMU_CLOCK_NAME2 "sysmmu_mc"
25
26#ifdef CONFIG_EXYNOS_DEV_SYSMMU
27#include <linux/device.h>
28struct platform_device;
29
30#define SYSMMU_PLATDEV(ipname) exynos_device_sysmmu_##ipname
31
32extern struct platform_device SYSMMU_PLATDEV(mfc_l);
33extern struct platform_device SYSMMU_PLATDEV(mfc_r);
34extern struct platform_device SYSMMU_PLATDEV(tv);
35extern struct platform_device SYSMMU_PLATDEV(jpeg);
36extern struct platform_device SYSMMU_PLATDEV(rot);
37extern struct platform_device SYSMMU_PLATDEV(fimc0);
38extern struct platform_device SYSMMU_PLATDEV(fimc1);
39extern struct platform_device SYSMMU_PLATDEV(fimc2);
40extern struct platform_device SYSMMU_PLATDEV(fimc3);
41extern struct platform_device SYSMMU_PLATDEV(gsc0);
42extern struct platform_device SYSMMU_PLATDEV(gsc1);
43extern struct platform_device SYSMMU_PLATDEV(gsc2);
44extern struct platform_device SYSMMU_PLATDEV(gsc3);
45extern struct platform_device SYSMMU_PLATDEV(isp);
46extern struct platform_device SYSMMU_PLATDEV(fimd0);
47extern struct platform_device SYSMMU_PLATDEV(fimd1);
48extern struct platform_device SYSMMU_PLATDEV(camif0);
49extern struct platform_device SYSMMU_PLATDEV(camif1);
50extern struct platform_device SYSMMU_PLATDEV(2d);
37 51
38extern const char *sysmmu_ips_name[EXYNOS4_SYSMMU_TOTAL_IPNUM]; 52#ifdef CONFIG_IOMMU_API
53static inline void platform_set_sysmmu(
54 struct device *sysmmu, struct device *dev)
55{
56 dev->archdata.iommu = sysmmu;
57}
58#endif
39 59
40typedef enum exynos4_sysmmu_ips sysmmu_ips; 60#else /* !CONFIG_EXYNOS_DEV_SYSMMU */
61#define platform_set_sysmmu(dev, sysmmu) do { } while (0)
62#endif
41 63
42void sysmmu_clk_init(struct device *dev, sysmmu_ips ips); 64#define SYSMMU_CLOCK_DEVNAME(ipname, id) (SYSMMU_DEVNAME_BASE "." #id)
43void sysmmu_clk_enable(sysmmu_ips ips);
44void sysmmu_clk_disable(sysmmu_ips ips);
45 65
46#endif /* __ASM_ARM_ARCH_SYSMMU_H */ 66#endif /* _ARM_MACH_EXYNOS_SYSMMU_H_ */
diff --git a/arch/arm/mach-exynos/mach-armlex4210.c b/arch/arm/mach-exynos/mach-armlex4210.c
index fed7116418eb..5a3daa0168d8 100644
--- a/arch/arm/mach-exynos/mach-armlex4210.c
+++ b/arch/arm/mach-exynos/mach-armlex4210.c
@@ -147,7 +147,6 @@ static struct platform_device *armlex4210_devices[] __initdata = {
147 &s3c_device_hsmmc3, 147 &s3c_device_hsmmc3,
148 &s3c_device_rtc, 148 &s3c_device_rtc,
149 &s3c_device_wdt, 149 &s3c_device_wdt,
150 &exynos4_device_sysmmu,
151 &samsung_asoc_dma, 150 &samsung_asoc_dma,
152 &armlex4210_smsc911x, 151 &armlex4210_smsc911x,
153 &exynos4_device_ahci, 152 &exynos4_device_ahci,
@@ -204,6 +203,7 @@ MACHINE_START(ARMLEX4210, "ARMLEX4210")
204 .map_io = armlex4210_map_io, 203 .map_io = armlex4210_map_io,
205 .handle_irq = gic_handle_irq, 204 .handle_irq = gic_handle_irq,
206 .init_machine = armlex4210_machine_init, 205 .init_machine = armlex4210_machine_init,
206 .init_late = exynos_init_late,
207 .timer = &exynos4_timer, 207 .timer = &exynos4_timer,
208 .restart = exynos4_restart, 208 .restart = exynos4_restart,
209MACHINE_END 209MACHINE_END
diff --git a/arch/arm/mach-exynos/mach-exynos4-dt.c b/arch/arm/mach-exynos/mach-exynos4-dt.c
index 8245f1c761d9..e7e9743543ac 100644
--- a/arch/arm/mach-exynos/mach-exynos4-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos4-dt.c
@@ -83,6 +83,7 @@ DT_MACHINE_START(EXYNOS4210_DT, "Samsung Exynos4 (Flattened Device Tree)")
83 .map_io = exynos4210_dt_map_io, 83 .map_io = exynos4210_dt_map_io,
84 .handle_irq = gic_handle_irq, 84 .handle_irq = gic_handle_irq,
85 .init_machine = exynos4210_dt_machine_init, 85 .init_machine = exynos4210_dt_machine_init,
86 .init_late = exynos_init_late,
86 .timer = &exynos4_timer, 87 .timer = &exynos4_timer,
87 .dt_compat = exynos4210_dt_compat, 88 .dt_compat = exynos4210_dt_compat,
88 .restart = exynos4_restart, 89 .restart = exynos4_restart,
diff --git a/arch/arm/mach-exynos/mach-exynos5-dt.c b/arch/arm/mach-exynos/mach-exynos5-dt.c
index 4711c8920e37..7b1e11a228cc 100644
--- a/arch/arm/mach-exynos/mach-exynos5-dt.c
+++ b/arch/arm/mach-exynos/mach-exynos5-dt.c
@@ -43,6 +43,10 @@ static const struct of_dev_auxdata exynos5250_auxdata_lookup[] __initconst = {
43 "exynos4210-uart.2", NULL), 43 "exynos4210-uart.2", NULL),
44 OF_DEV_AUXDATA("samsung,exynos4210-uart", EXYNOS5_PA_UART3, 44 OF_DEV_AUXDATA("samsung,exynos4210-uart", EXYNOS5_PA_UART3,
45 "exynos4210-uart.3", NULL), 45 "exynos4210-uart.3", NULL),
46 OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS5_PA_IIC(0),
47 "s3c2440-i2c.0", NULL),
48 OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS5_PA_IIC(1),
49 "s3c2440-i2c.1", NULL),
46 OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA0, "dma-pl330.0", NULL), 50 OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA0, "dma-pl330.0", NULL),
47 OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.1", NULL), 51 OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_PDMA1, "dma-pl330.1", NULL),
48 OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_MDMA1, "dma-pl330.2", NULL), 52 OF_DEV_AUXDATA("arm,pl330", EXYNOS5_PA_MDMA1, "dma-pl330.2", NULL),
@@ -72,6 +76,7 @@ DT_MACHINE_START(EXYNOS5_DT, "SAMSUNG EXYNOS5 (Flattened Device Tree)")
72 .map_io = exynos5250_dt_map_io, 76 .map_io = exynos5250_dt_map_io,
73 .handle_irq = gic_handle_irq, 77 .handle_irq = gic_handle_irq,
74 .init_machine = exynos5250_dt_machine_init, 78 .init_machine = exynos5250_dt_machine_init,
79 .init_late = exynos_init_late,
75 .timer = &exynos4_timer, 80 .timer = &exynos4_timer,
76 .dt_compat = exynos5250_dt_compat, 81 .dt_compat = exynos5250_dt_compat,
77 .restart = exynos5_restart, 82 .restart = exynos5_restart,
diff --git a/arch/arm/mach-exynos/mach-nuri.c b/arch/arm/mach-exynos/mach-nuri.c
index 6c31f2ad765d..972983e392bc 100644
--- a/arch/arm/mach-exynos/mach-nuri.c
+++ b/arch/arm/mach-exynos/mach-nuri.c
@@ -1389,6 +1389,7 @@ MACHINE_START(NURI, "NURI")
1389 .map_io = nuri_map_io, 1389 .map_io = nuri_map_io,
1390 .handle_irq = gic_handle_irq, 1390 .handle_irq = gic_handle_irq,
1391 .init_machine = nuri_machine_init, 1391 .init_machine = nuri_machine_init,
1392 .init_late = exynos_init_late,
1392 .timer = &exynos4_timer, 1393 .timer = &exynos4_timer,
1393 .reserve = &nuri_reserve, 1394 .reserve = &nuri_reserve,
1394 .restart = exynos4_restart, 1395 .restart = exynos4_restart,
diff --git a/arch/arm/mach-exynos/mach-origen.c b/arch/arm/mach-exynos/mach-origen.c
index 26124a38bcbd..a7f7fd567dde 100644
--- a/arch/arm/mach-exynos/mach-origen.c
+++ b/arch/arm/mach-exynos/mach-origen.c
@@ -766,6 +766,7 @@ MACHINE_START(ORIGEN, "ORIGEN")
766 .map_io = origen_map_io, 766 .map_io = origen_map_io,
767 .handle_irq = gic_handle_irq, 767 .handle_irq = gic_handle_irq,
768 .init_machine = origen_machine_init, 768 .init_machine = origen_machine_init,
769 .init_late = exynos_init_late,
769 .timer = &exynos4_timer, 770 .timer = &exynos4_timer,
770 .reserve = &origen_reserve, 771 .reserve = &origen_reserve,
771 .restart = exynos4_restart, 772 .restart = exynos4_restart,
diff --git a/arch/arm/mach-exynos/mach-smdk4x12.c b/arch/arm/mach-exynos/mach-smdk4x12.c
index fe772d893cc9..fb09c70e195a 100644
--- a/arch/arm/mach-exynos/mach-smdk4x12.c
+++ b/arch/arm/mach-exynos/mach-smdk4x12.c
@@ -316,6 +316,7 @@ MACHINE_START(SMDK4412, "SMDK4412")
316 .map_io = smdk4x12_map_io, 316 .map_io = smdk4x12_map_io,
317 .handle_irq = gic_handle_irq, 317 .handle_irq = gic_handle_irq,
318 .init_machine = smdk4x12_machine_init, 318 .init_machine = smdk4x12_machine_init,
319 .init_late = exynos_init_late,
319 .timer = &exynos4_timer, 320 .timer = &exynos4_timer,
320 .restart = exynos4_restart, 321 .restart = exynos4_restart,
321 .reserve = &smdk4x12_reserve, 322 .reserve = &smdk4x12_reserve,
diff --git a/arch/arm/mach-exynos/mach-smdkv310.c b/arch/arm/mach-exynos/mach-smdkv310.c
index 5af96064ca51..70df1a0c2118 100644
--- a/arch/arm/mach-exynos/mach-smdkv310.c
+++ b/arch/arm/mach-exynos/mach-smdkv310.c
@@ -295,7 +295,6 @@ static struct platform_device *smdkv310_devices[] __initdata = {
295 &s5p_device_mfc_l, 295 &s5p_device_mfc_l,
296 &s5p_device_mfc_r, 296 &s5p_device_mfc_r,
297 &exynos4_device_spdif, 297 &exynos4_device_spdif,
298 &exynos4_device_sysmmu,
299 &samsung_asoc_dma, 298 &samsung_asoc_dma,
300 &samsung_asoc_idma, 299 &samsung_asoc_idma,
301 &s5p_device_fimd0, 300 &s5p_device_fimd0,
@@ -412,6 +411,7 @@ MACHINE_START(SMDKC210, "SMDKC210")
412 .map_io = smdkv310_map_io, 411 .map_io = smdkv310_map_io,
413 .handle_irq = gic_handle_irq, 412 .handle_irq = gic_handle_irq,
414 .init_machine = smdkv310_machine_init, 413 .init_machine = smdkv310_machine_init,
414 .init_late = exynos_init_late,
415 .timer = &exynos4_timer, 415 .timer = &exynos4_timer,
416 .restart = exynos4_restart, 416 .restart = exynos4_restart,
417MACHINE_END 417MACHINE_END
diff --git a/arch/arm/mach-exynos/mach-universal_c210.c b/arch/arm/mach-exynos/mach-universal_c210.c
index 6b731b863275..083b44de9c10 100644
--- a/arch/arm/mach-exynos/mach-universal_c210.c
+++ b/arch/arm/mach-exynos/mach-universal_c210.c
@@ -1157,6 +1157,7 @@ MACHINE_START(UNIVERSAL_C210, "UNIVERSAL_C210")
1157 .map_io = universal_map_io, 1157 .map_io = universal_map_io,
1158 .handle_irq = gic_handle_irq, 1158 .handle_irq = gic_handle_irq,
1159 .init_machine = universal_machine_init, 1159 .init_machine = universal_machine_init,
1160 .init_late = exynos_init_late,
1160 .timer = &s5p_timer, 1161 .timer = &s5p_timer,
1161 .reserve = &universal_reserve, 1162 .reserve = &universal_reserve,
1162 .restart = exynos4_restart, 1163 .restart = exynos4_restart,
diff --git a/arch/arm/mach-exynos/mct.c b/arch/arm/mach-exynos/mct.c
index 897d9a9cf226..b601fb8a408b 100644
--- a/arch/arm/mach-exynos/mct.c
+++ b/arch/arm/mach-exynos/mct.c
@@ -388,6 +388,7 @@ static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
388{ 388{
389 struct mct_clock_event_device *mevt; 389 struct mct_clock_event_device *mevt;
390 unsigned int cpu = smp_processor_id(); 390 unsigned int cpu = smp_processor_id();
391 int mct_lx_irq;
391 392
392 mevt = this_cpu_ptr(&percpu_mct_tick); 393 mevt = this_cpu_ptr(&percpu_mct_tick);
393 mevt->evt = evt; 394 mevt->evt = evt;
@@ -414,14 +415,18 @@ static int __cpuinit exynos4_local_timer_setup(struct clock_event_device *evt)
414 415
415 if (mct_int_type == MCT_INT_SPI) { 416 if (mct_int_type == MCT_INT_SPI) {
416 if (cpu == 0) { 417 if (cpu == 0) {
418 mct_lx_irq = soc_is_exynos4210() ? EXYNOS4_IRQ_MCT_L0 :
419 EXYNOS5_IRQ_MCT_L0;
417 mct_tick0_event_irq.dev_id = mevt; 420 mct_tick0_event_irq.dev_id = mevt;
418 evt->irq = EXYNOS4_IRQ_MCT_L0; 421 evt->irq = mct_lx_irq;
419 setup_irq(EXYNOS4_IRQ_MCT_L0, &mct_tick0_event_irq); 422 setup_irq(mct_lx_irq, &mct_tick0_event_irq);
420 } else { 423 } else {
424 mct_lx_irq = soc_is_exynos4210() ? EXYNOS4_IRQ_MCT_L1 :
425 EXYNOS5_IRQ_MCT_L1;
421 mct_tick1_event_irq.dev_id = mevt; 426 mct_tick1_event_irq.dev_id = mevt;
422 evt->irq = EXYNOS4_IRQ_MCT_L1; 427 evt->irq = mct_lx_irq;
423 setup_irq(EXYNOS4_IRQ_MCT_L1, &mct_tick1_event_irq); 428 setup_irq(mct_lx_irq, &mct_tick1_event_irq);
424 irq_set_affinity(EXYNOS4_IRQ_MCT_L1, cpumask_of(1)); 429 irq_set_affinity(mct_lx_irq, cpumask_of(1));
425 } 430 }
426 } else { 431 } else {
427 enable_percpu_irq(EXYNOS_IRQ_MCT_LOCALTIMER, 0); 432 enable_percpu_irq(EXYNOS_IRQ_MCT_LOCALTIMER, 0);
@@ -473,7 +478,7 @@ static void __init exynos4_timer_resources(void)
473 478
474static void __init exynos4_timer_init(void) 479static void __init exynos4_timer_init(void)
475{ 480{
476 if (soc_is_exynos4210()) 481 if ((soc_is_exynos4210()) || (soc_is_exynos5250()))
477 mct_int_type = MCT_INT_SPI; 482 mct_int_type = MCT_INT_SPI;
478 else 483 else
479 mct_int_type = MCT_INT_PPI; 484 mct_int_type = MCT_INT_PPI;
diff --git a/arch/arm/mach-exynos/pm.c b/arch/arm/mach-exynos/pm.c
index 428cfeb57724..563dea9a6dbb 100644
--- a/arch/arm/mach-exynos/pm.c
+++ b/arch/arm/mach-exynos/pm.c
@@ -275,7 +275,7 @@ static void exynos4_restore_pll(void)
275 275
276static struct subsys_interface exynos4_pm_interface = { 276static struct subsys_interface exynos4_pm_interface = {
277 .name = "exynos4_pm", 277 .name = "exynos4_pm",
278 .subsys = &exynos4_subsys, 278 .subsys = &exynos_subsys,
279 .add_dev = exynos4_pm_add, 279 .add_dev = exynos4_pm_add,
280}; 280};
281 281
@@ -313,7 +313,7 @@ static int exynos4_pm_suspend(void)
313 tmp &= ~S5P_CENTRAL_LOWPWR_CFG; 313 tmp &= ~S5P_CENTRAL_LOWPWR_CFG;
314 __raw_writel(tmp, S5P_CENTRAL_SEQ_CONFIGURATION); 314 __raw_writel(tmp, S5P_CENTRAL_SEQ_CONFIGURATION);
315 315
316 if (soc_is_exynos4212()) { 316 if (soc_is_exynos4212() || soc_is_exynos4412()) {
317 tmp = __raw_readl(S5P_CENTRAL_SEQ_OPTION); 317 tmp = __raw_readl(S5P_CENTRAL_SEQ_OPTION);
318 tmp &= ~(S5P_USE_STANDBYWFI_ISP_ARM | 318 tmp &= ~(S5P_USE_STANDBYWFI_ISP_ARM |
319 S5P_USE_STANDBYWFE_ISP_ARM); 319 S5P_USE_STANDBYWFE_ISP_ARM);
diff --git a/arch/arm/mach-exynos/pm_domains.c b/arch/arm/mach-exynos/pm_domains.c
index 13b306808b42..e9fafcf163de 100644
--- a/arch/arm/mach-exynos/pm_domains.c
+++ b/arch/arm/mach-exynos/pm_domains.c
@@ -193,9 +193,8 @@ static __init int exynos4_pm_init_power_domain(void)
193} 193}
194arch_initcall(exynos4_pm_init_power_domain); 194arch_initcall(exynos4_pm_init_power_domain);
195 195
196static __init int exynos_pm_late_initcall(void) 196int __init exynos_pm_late_initcall(void)
197{ 197{
198 pm_genpd_poweroff_unused(); 198 pm_genpd_poweroff_unused();
199 return 0; 199 return 0;
200} 200}
201late_initcall(exynos_pm_late_initcall);
diff --git a/arch/arm/mach-exynos/pmu.c b/arch/arm/mach-exynos/pmu.c
index bba48f5c3e8f..77c6815eebee 100644
--- a/arch/arm/mach-exynos/pmu.c
+++ b/arch/arm/mach-exynos/pmu.c
@@ -94,7 +94,7 @@ static struct exynos4_pmu_conf exynos4210_pmu_config[] = {
94 { PMU_TABLE_END,}, 94 { PMU_TABLE_END,},
95}; 95};
96 96
97static struct exynos4_pmu_conf exynos4212_pmu_config[] = { 97static struct exynos4_pmu_conf exynos4x12_pmu_config[] = {
98 { S5P_ARM_CORE0_LOWPWR, { 0x0, 0x0, 0x2 } }, 98 { S5P_ARM_CORE0_LOWPWR, { 0x0, 0x0, 0x2 } },
99 { S5P_DIS_IRQ_CORE0, { 0x0, 0x0, 0x0 } }, 99 { S5P_DIS_IRQ_CORE0, { 0x0, 0x0, 0x0 } },
100 { S5P_DIS_IRQ_CENTRAL0, { 0x0, 0x0, 0x0 } }, 100 { S5P_DIS_IRQ_CENTRAL0, { 0x0, 0x0, 0x0 } },
@@ -202,6 +202,16 @@ static struct exynos4_pmu_conf exynos4212_pmu_config[] = {
202 { PMU_TABLE_END,}, 202 { PMU_TABLE_END,},
203}; 203};
204 204
205static struct exynos4_pmu_conf exynos4412_pmu_config[] = {
206 { S5P_ARM_CORE2_LOWPWR, { 0x0, 0x0, 0x2 } },
207 { S5P_DIS_IRQ_CORE2, { 0x0, 0x0, 0x0 } },
208 { S5P_DIS_IRQ_CENTRAL2, { 0x0, 0x0, 0x0 } },
209 { S5P_ARM_CORE3_LOWPWR, { 0x0, 0x0, 0x2 } },
210 { S5P_DIS_IRQ_CORE3, { 0x0, 0x0, 0x0 } },
211 { S5P_DIS_IRQ_CENTRAL3, { 0x0, 0x0, 0x0 } },
212 { PMU_TABLE_END,},
213};
214
205void exynos4_sys_powerdown_conf(enum sys_powerdown mode) 215void exynos4_sys_powerdown_conf(enum sys_powerdown mode)
206{ 216{
207 unsigned int i; 217 unsigned int i;
@@ -209,6 +219,12 @@ void exynos4_sys_powerdown_conf(enum sys_powerdown mode)
209 for (i = 0; (exynos4_pmu_config[i].reg != PMU_TABLE_END) ; i++) 219 for (i = 0; (exynos4_pmu_config[i].reg != PMU_TABLE_END) ; i++)
210 __raw_writel(exynos4_pmu_config[i].val[mode], 220 __raw_writel(exynos4_pmu_config[i].val[mode],
211 exynos4_pmu_config[i].reg); 221 exynos4_pmu_config[i].reg);
222
223 if (soc_is_exynos4412()) {
224 for (i = 0; exynos4412_pmu_config[i].reg != PMU_TABLE_END ; i++)
225 __raw_writel(exynos4412_pmu_config[i].val[mode],
226 exynos4412_pmu_config[i].reg);
227 }
212} 228}
213 229
214static int __init exynos4_pmu_init(void) 230static int __init exynos4_pmu_init(void)
@@ -218,9 +234,9 @@ static int __init exynos4_pmu_init(void)
218 if (soc_is_exynos4210()) { 234 if (soc_is_exynos4210()) {
219 exynos4_pmu_config = exynos4210_pmu_config; 235 exynos4_pmu_config = exynos4210_pmu_config;
220 pr_info("EXYNOS4210 PMU Initialize\n"); 236 pr_info("EXYNOS4210 PMU Initialize\n");
221 } else if (soc_is_exynos4212()) { 237 } else if (soc_is_exynos4212() || soc_is_exynos4412()) {
222 exynos4_pmu_config = exynos4212_pmu_config; 238 exynos4_pmu_config = exynos4x12_pmu_config;
223 pr_info("EXYNOS4212 PMU Initialize\n"); 239 pr_info("EXYNOS4x12 PMU Initialize\n");
224 } else { 240 } else {
225 pr_info("EXYNOS4: PMU not supported\n"); 241 pr_info("EXYNOS4: PMU not supported\n");
226 } 242 }
diff --git a/arch/arm/mach-imx/Kconfig b/arch/arm/mach-imx/Kconfig
index cca8c0c74794..0021f726b153 100644
--- a/arch/arm/mach-imx/Kconfig
+++ b/arch/arm/mach-imx/Kconfig
@@ -34,6 +34,7 @@ config ARCH_MX53
34config SOC_IMX1 34config SOC_IMX1
35 bool 35 bool
36 select ARCH_MX1 36 select ARCH_MX1
37 select COMMON_CLK
37 select CPU_ARM920T 38 select CPU_ARM920T
38 select IMX_HAVE_IOMUX_V1 39 select IMX_HAVE_IOMUX_V1
39 select MXC_AVIC 40 select MXC_AVIC
@@ -42,12 +43,14 @@ config SOC_IMX21
42 bool 43 bool
43 select MACH_MX21 44 select MACH_MX21
44 select CPU_ARM926T 45 select CPU_ARM926T
46 select COMMON_CLK
45 select IMX_HAVE_IOMUX_V1 47 select IMX_HAVE_IOMUX_V1
46 select MXC_AVIC 48 select MXC_AVIC
47 49
48config SOC_IMX25 50config SOC_IMX25
49 bool 51 bool
50 select ARCH_MX25 52 select ARCH_MX25
53 select COMMON_CLK
51 select CPU_ARM926T 54 select CPU_ARM926T
52 select ARCH_MXC_IOMUX_V3 55 select ARCH_MXC_IOMUX_V3
53 select MXC_AVIC 56 select MXC_AVIC
@@ -56,6 +59,7 @@ config SOC_IMX27
56 bool 59 bool
57 select MACH_MX27 60 select MACH_MX27
58 select CPU_ARM926T 61 select CPU_ARM926T
62 select COMMON_CLK
59 select IMX_HAVE_IOMUX_V1 63 select IMX_HAVE_IOMUX_V1
60 select MXC_AVIC 64 select MXC_AVIC
61 65
@@ -64,12 +68,14 @@ config SOC_IMX31
64 select CPU_V6 68 select CPU_V6
65 select IMX_HAVE_PLATFORM_MXC_RNGA 69 select IMX_HAVE_PLATFORM_MXC_RNGA
66 select MXC_AVIC 70 select MXC_AVIC
71 select COMMON_CLK
67 select SMP_ON_UP if SMP 72 select SMP_ON_UP if SMP
68 73
69config SOC_IMX35 74config SOC_IMX35
70 bool 75 bool
71 select CPU_V6 76 select CPU_V6
72 select ARCH_MXC_IOMUX_V3 77 select ARCH_MXC_IOMUX_V3
78 select COMMON_CLK
73 select HAVE_EPIT 79 select HAVE_EPIT
74 select MXC_AVIC 80 select MXC_AVIC
75 select SMP_ON_UP if SMP 81 select SMP_ON_UP if SMP
@@ -77,6 +83,7 @@ config SOC_IMX35
77config SOC_IMX5 83config SOC_IMX5
78 select CPU_V7 84 select CPU_V7
79 select MXC_TZIC 85 select MXC_TZIC
86 select COMMON_CLK
80 select ARCH_MXC_IOMUX_V3 87 select ARCH_MXC_IOMUX_V3
81 select ARCH_HAS_CPUFREQ 88 select ARCH_HAS_CPUFREQ
82 select ARCH_MX5 89 select ARCH_MX5
@@ -815,6 +822,7 @@ config SOC_IMX6Q
815 bool "i.MX6 Quad support" 822 bool "i.MX6 Quad support"
816 select ARM_CPU_SUSPEND if PM 823 select ARM_CPU_SUSPEND if PM
817 select ARM_GIC 824 select ARM_GIC
825 select COMMON_CLK
818 select CPU_V7 826 select CPU_V7
819 select HAVE_ARM_SCU 827 select HAVE_ARM_SCU
820 select HAVE_IMX_GPC 828 select HAVE_IMX_GPC
diff --git a/arch/arm/mach-imx/Makefile b/arch/arm/mach-imx/Makefile
index 4937c070a57e..ff29421414f2 100644
--- a/arch/arm/mach-imx/Makefile
+++ b/arch/arm/mach-imx/Makefile
@@ -1,15 +1,18 @@
1obj-$(CONFIG_SOC_IMX1) += clock-imx1.o mm-imx1.o 1obj-$(CONFIG_SOC_IMX1) += clk-imx1.o mm-imx1.o
2obj-$(CONFIG_SOC_IMX21) += clock-imx21.o mm-imx21.o 2obj-$(CONFIG_SOC_IMX21) += clk-imx21.o mm-imx21.o
3 3
4obj-$(CONFIG_SOC_IMX25) += clock-imx25.o mm-imx25.o ehci-imx25.o cpu-imx25.o 4obj-$(CONFIG_SOC_IMX25) += clk-imx25.o mm-imx25.o ehci-imx25.o cpu-imx25.o
5 5
6obj-$(CONFIG_SOC_IMX27) += cpu-imx27.o pm-imx27.o 6obj-$(CONFIG_SOC_IMX27) += cpu-imx27.o pm-imx27.o
7obj-$(CONFIG_SOC_IMX27) += clock-imx27.o mm-imx27.o ehci-imx27.o 7obj-$(CONFIG_SOC_IMX27) += clk-imx27.o mm-imx27.o ehci-imx27.o
8 8
9obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clock-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o 9obj-$(CONFIG_SOC_IMX31) += mm-imx3.o cpu-imx31.o clk-imx31.o iomux-imx31.o ehci-imx31.o pm-imx3.o
10obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clock-imx35.o ehci-imx35.o pm-imx3.o 10obj-$(CONFIG_SOC_IMX35) += mm-imx3.o cpu-imx35.o clk-imx35.o ehci-imx35.o pm-imx3.o
11 11
12obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clock-mx51-mx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o 12obj-$(CONFIG_SOC_IMX5) += cpu-imx5.o mm-imx5.o clk-imx51-imx53.o ehci-imx5.o pm-imx5.o cpu_op-mx51.o
13
14obj-$(CONFIG_COMMON_CLK) += clk-pllv1.o clk-pllv2.o clk-pllv3.o clk-gate2.o \
15 clk-pfd.o clk-busy.o
13 16
14# Support for CMOS sensor interface 17# Support for CMOS sensor interface
15obj-$(CONFIG_MX1_VIDEO) += mx1-camera-fiq.o mx1-camera-fiq-ksym.o 18obj-$(CONFIG_MX1_VIDEO) += mx1-camera-fiq.o mx1-camera-fiq-ksym.o
@@ -70,7 +73,7 @@ obj-$(CONFIG_CPU_V7) += head-v7.o
70AFLAGS_head-v7.o :=-Wa,-march=armv7-a 73AFLAGS_head-v7.o :=-Wa,-march=armv7-a
71obj-$(CONFIG_SMP) += platsmp.o 74obj-$(CONFIG_SMP) += platsmp.o
72obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o 75obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
73obj-$(CONFIG_SOC_IMX6Q) += clock-imx6q.o mach-imx6q.o 76obj-$(CONFIG_SOC_IMX6Q) += clk-imx6q.o mach-imx6q.o
74 77
75ifeq ($(CONFIG_PM),y) 78ifeq ($(CONFIG_PM),y)
76obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o 79obj-$(CONFIG_SOC_IMX6Q) += pm-imx6q.o
diff --git a/arch/arm/mach-imx/Makefile.boot b/arch/arm/mach-imx/Makefile.boot
index 3851d8a27875..05541cf4a878 100644
--- a/arch/arm/mach-imx/Makefile.boot
+++ b/arch/arm/mach-imx/Makefile.boot
@@ -42,4 +42,5 @@ dtb-$(CONFIG_MACH_IMX51_DT) += imx51-babbage.dtb
42dtb-$(CONFIG_MACH_IMX53_DT) += imx53-ard.dtb imx53-evk.dtb \ 42dtb-$(CONFIG_MACH_IMX53_DT) += imx53-ard.dtb imx53-evk.dtb \
43 imx53-qsb.dtb imx53-smd.dtb 43 imx53-qsb.dtb imx53-smd.dtb
44dtb-$(CONFIG_SOC_IMX6Q) += imx6q-arm2.dtb \ 44dtb-$(CONFIG_SOC_IMX6Q) += imx6q-arm2.dtb \
45 imx6q-sabrelite.dtb 45 imx6q-sabrelite.dtb \
46 imx6q-sabresd.dtb \
diff --git a/arch/arm/mach-imx/clk-busy.c b/arch/arm/mach-imx/clk-busy.c
new file mode 100644
index 000000000000..1a7a8dd045a1
--- /dev/null
+++ b/arch/arm/mach-imx/clk-busy.c
@@ -0,0 +1,189 @@
1/*
2 * Copyright 2012 Freescale Semiconductor, Inc.
3 * Copyright 2012 Linaro Ltd.
4 *
5 * The code contained herein is licensed under the GNU General Public
6 * License. You may obtain a copy of the GNU General Public License
7 * Version 2 or later at the following locations:
8 *
9 * http://www.opensource.org/licenses/gpl-license.html
10 * http://www.gnu.org/copyleft/gpl.html
11 */
12
13#include <linux/clk.h>
14#include <linux/clk-provider.h>
15#include <linux/io.h>
16#include <linux/slab.h>
17#include <linux/jiffies.h>
18#include <linux/err.h>
19#include "clk.h"
20
21static int clk_busy_wait(void __iomem *reg, u8 shift)
22{
23 unsigned long timeout = jiffies + msecs_to_jiffies(10);
24
25 while (readl_relaxed(reg) & (1 << shift))
26 if (time_after(jiffies, timeout))
27 return -ETIMEDOUT;
28
29 return 0;
30}
31
32struct clk_busy_divider {
33 struct clk_divider div;
34 const struct clk_ops *div_ops;
35 void __iomem *reg;
36 u8 shift;
37};
38
39static inline struct clk_busy_divider *to_clk_busy_divider(struct clk_hw *hw)
40{
41 struct clk_divider *div = container_of(hw, struct clk_divider, hw);
42
43 return container_of(div, struct clk_busy_divider, div);
44}
45
46static unsigned long clk_busy_divider_recalc_rate(struct clk_hw *hw,
47 unsigned long parent_rate)
48{
49 struct clk_busy_divider *busy = to_clk_busy_divider(hw);
50
51 return busy->div_ops->recalc_rate(&busy->div.hw, parent_rate);
52}
53
54static long clk_busy_divider_round_rate(struct clk_hw *hw, unsigned long rate,
55 unsigned long *prate)
56{
57 struct clk_busy_divider *busy = to_clk_busy_divider(hw);
58
59 return busy->div_ops->round_rate(&busy->div.hw, rate, prate);
60}
61
62static int clk_busy_divider_set_rate(struct clk_hw *hw, unsigned long rate,
63 unsigned long parent_rate)
64{
65 struct clk_busy_divider *busy = to_clk_busy_divider(hw);
66 int ret;
67
68 ret = busy->div_ops->set_rate(&busy->div.hw, rate, parent_rate);
69 if (!ret)
70 ret = clk_busy_wait(busy->reg, busy->shift);
71
72 return ret;
73}
74
75static struct clk_ops clk_busy_divider_ops = {
76 .recalc_rate = clk_busy_divider_recalc_rate,
77 .round_rate = clk_busy_divider_round_rate,
78 .set_rate = clk_busy_divider_set_rate,
79};
80
81struct clk *imx_clk_busy_divider(const char *name, const char *parent_name,
82 void __iomem *reg, u8 shift, u8 width,
83 void __iomem *busy_reg, u8 busy_shift)
84{
85 struct clk_busy_divider *busy;
86 struct clk *clk;
87 struct clk_init_data init;
88
89 busy = kzalloc(sizeof(*busy), GFP_KERNEL);
90 if (!busy)
91 return ERR_PTR(-ENOMEM);
92
93 busy->reg = busy_reg;
94 busy->shift = busy_shift;
95
96 busy->div.reg = reg;
97 busy->div.shift = shift;
98 busy->div.width = width;
99 busy->div.lock = &imx_ccm_lock;
100 busy->div_ops = &clk_divider_ops;
101
102 init.name = name;
103 init.ops = &clk_busy_divider_ops;
104 init.flags = CLK_SET_RATE_PARENT;
105 init.parent_names = &parent_name;
106 init.num_parents = 1;
107
108 busy->div.hw.init = &init;
109
110 clk = clk_register(NULL, &busy->div.hw);
111 if (!clk)
112 kfree(busy);
113
114 return clk;
115}
116
117struct clk_busy_mux {
118 struct clk_mux mux;
119 const struct clk_ops *mux_ops;
120 void __iomem *reg;
121 u8 shift;
122};
123
124static inline struct clk_busy_mux *to_clk_busy_mux(struct clk_hw *hw)
125{
126 struct clk_mux *mux = container_of(hw, struct clk_mux, hw);
127
128 return container_of(mux, struct clk_busy_mux, mux);
129}
130
131static u8 clk_busy_mux_get_parent(struct clk_hw *hw)
132{
133 struct clk_busy_mux *busy = to_clk_busy_mux(hw);
134
135 return busy->mux_ops->get_parent(&busy->mux.hw);
136}
137
138static int clk_busy_mux_set_parent(struct clk_hw *hw, u8 index)
139{
140 struct clk_busy_mux *busy = to_clk_busy_mux(hw);
141 int ret;
142
143 ret = busy->mux_ops->set_parent(&busy->mux.hw, index);
144 if (!ret)
145 ret = clk_busy_wait(busy->reg, busy->shift);
146
147 return ret;
148}
149
150struct clk_ops clk_busy_mux_ops = {
151 .get_parent = clk_busy_mux_get_parent,
152 .set_parent = clk_busy_mux_set_parent,
153};
154
155struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
156 u8 width, void __iomem *busy_reg, u8 busy_shift,
157 const char **parent_names, int num_parents)
158{
159 struct clk_busy_mux *busy;
160 struct clk *clk;
161 struct clk_init_data init;
162
163 busy = kzalloc(sizeof(*busy), GFP_KERNEL);
164 if (!busy)
165 return ERR_PTR(-ENOMEM);
166
167 busy->reg = busy_reg;
168 busy->shift = busy_shift;
169
170 busy->mux.reg = reg;
171 busy->mux.shift = shift;
172 busy->mux.width = width;
173 busy->mux.lock = &imx_ccm_lock;
174 busy->mux_ops = &clk_mux_ops;
175
176 init.name = name;
177 init.ops = &clk_busy_mux_ops;
178 init.flags = 0;
179 init.parent_names = parent_names;
180 init.num_parents = num_parents;
181
182 busy->mux.hw.init = &init;
183
184 clk = clk_register(NULL, &busy->mux.hw);
185 if (IS_ERR(clk))
186 kfree(busy);
187
188 return clk;
189}
diff --git a/arch/arm/mach-imx/clk-gate2.c b/arch/arm/mach-imx/clk-gate2.c
new file mode 100644
index 000000000000..3c1b8ff9a0a6
--- /dev/null
+++ b/arch/arm/mach-imx/clk-gate2.c
@@ -0,0 +1,118 @@
1/*
2 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
3 * Copyright (C) 2011-2012 Mike Turquette, Linaro Ltd <mturquette@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Gated clock implementation
10 */
11
12#include <linux/clk-provider.h>
13#include <linux/module.h>
14#include <linux/slab.h>
15#include <linux/io.h>
16#include <linux/err.h>
17#include <linux/string.h>
18
19/**
20 * DOC: basic gatable clock which can gate and ungate it's ouput
21 *
22 * Traits of this clock:
23 * prepare - clk_(un)prepare only ensures parent is (un)prepared
24 * enable - clk_enable and clk_disable are functional & control gating
25 * rate - inherits rate from parent. No clk_set_rate support
26 * parent - fixed parent. No clk_set_parent support
27 */
28
29#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
30
31static int clk_gate2_enable(struct clk_hw *hw)
32{
33 struct clk_gate *gate = to_clk_gate(hw);
34 u32 reg;
35 unsigned long flags = 0;
36
37 if (gate->lock)
38 spin_lock_irqsave(gate->lock, flags);
39
40 reg = readl(gate->reg);
41 reg |= 3 << gate->bit_idx;
42 writel(reg, gate->reg);
43
44 if (gate->lock)
45 spin_unlock_irqrestore(gate->lock, flags);
46
47 return 0;
48}
49
50static void clk_gate2_disable(struct clk_hw *hw)
51{
52 struct clk_gate *gate = to_clk_gate(hw);
53 u32 reg;
54 unsigned long flags = 0;
55
56 if (gate->lock)
57 spin_lock_irqsave(gate->lock, flags);
58
59 reg = readl(gate->reg);
60 reg &= ~(3 << gate->bit_idx);
61 writel(reg, gate->reg);
62
63 if (gate->lock)
64 spin_unlock_irqrestore(gate->lock, flags);
65}
66
67static int clk_gate2_is_enabled(struct clk_hw *hw)
68{
69 u32 reg;
70 struct clk_gate *gate = to_clk_gate(hw);
71
72 reg = readl(gate->reg);
73
74 if (((reg >> gate->bit_idx) & 3) == 3)
75 return 1;
76
77 return 0;
78}
79
80static struct clk_ops clk_gate2_ops = {
81 .enable = clk_gate2_enable,
82 .disable = clk_gate2_disable,
83 .is_enabled = clk_gate2_is_enabled,
84};
85
86struct clk *clk_register_gate2(struct device *dev, const char *name,
87 const char *parent_name, unsigned long flags,
88 void __iomem *reg, u8 bit_idx,
89 u8 clk_gate2_flags, spinlock_t *lock)
90{
91 struct clk_gate *gate;
92 struct clk *clk;
93 struct clk_init_data init;
94
95 gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
96 if (!gate)
97 return ERR_PTR(-ENOMEM);
98
99 /* struct clk_gate assignments */
100 gate->reg = reg;
101 gate->bit_idx = bit_idx;
102 gate->flags = clk_gate2_flags;
103 gate->lock = lock;
104
105 init.name = name;
106 init.ops = &clk_gate2_ops;
107 init.flags = flags;
108 init.parent_names = parent_name ? &parent_name : NULL;
109 init.num_parents = parent_name ? 1 : 0;
110
111 gate->hw.init = &init;
112
113 clk = clk_register(dev, &gate->hw);
114 if (IS_ERR(clk))
115 kfree(clk);
116
117 return clk;
118}
diff --git a/arch/arm/mach-imx/clk-imx1.c b/arch/arm/mach-imx/clk-imx1.c
new file mode 100644
index 000000000000..0f0beb580b73
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx1.c
@@ -0,0 +1,115 @@
1/*
2 * Copyright (C) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
16 */
17
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/clk.h>
21#include <linux/io.h>
22#include <linux/clkdev.h>
23#include <linux/err.h>
24
25#include <mach/hardware.h>
26#include <mach/common.h>
27#include "clk.h"
28
29/* CCM register addresses */
30#define IO_ADDR_CCM(off) (MX1_IO_ADDRESS(MX1_CCM_BASE_ADDR + (off)))
31
32#define CCM_CSCR IO_ADDR_CCM(0x0)
33#define CCM_MPCTL0 IO_ADDR_CCM(0x4)
34#define CCM_SPCTL0 IO_ADDR_CCM(0xc)
35#define CCM_PCDR IO_ADDR_CCM(0x20)
36
37/* SCM register addresses */
38#define IO_ADDR_SCM(off) (MX1_IO_ADDRESS(MX1_SCM_BASE_ADDR + (off)))
39
40#define SCM_GCCR IO_ADDR_SCM(0xc)
41
42static const char *prem_sel_clks[] = { "clk32_premult", "clk16m", };
43static const char *clko_sel_clks[] = { "per1", "hclk", "clk48m", "clk16m", "prem",
44 "fclk", };
45enum imx1_clks {
46 dummy, clk32, clk16m_ext, clk16m, clk32_premult, prem, mpll, spll, mcu,
47 fclk, hclk, clk48m, per1, per2, per3, clko, dma_gate, csi_gate,
48 mma_gate, usbd_gate, clk_max
49};
50
51static struct clk *clk[clk_max];
52
53int __init mx1_clocks_init(unsigned long fref)
54{
55 int i;
56
57 clk[dummy] = imx_clk_fixed("dummy", 0);
58 clk[clk32] = imx_clk_fixed("clk32", fref);
59 clk[clk16m_ext] = imx_clk_fixed("clk16m_ext", 16000000);
60 clk[clk16m] = imx_clk_gate("clk16m", "clk16m_ext", CCM_CSCR, 17);
61 clk[clk32_premult] = imx_clk_fixed_factor("clk32_premult", "clk32", 512, 1);
62 clk[prem] = imx_clk_mux("prem", CCM_CSCR, 16, 1, prem_sel_clks,
63 ARRAY_SIZE(prem_sel_clks));
64 clk[mpll] = imx_clk_pllv1("mpll", "clk32_premult", CCM_MPCTL0);
65 clk[spll] = imx_clk_pllv1("spll", "prem", CCM_SPCTL0);
66 clk[mcu] = imx_clk_divider("mcu", "clk32_premult", CCM_CSCR, 15, 1);
67 clk[fclk] = imx_clk_divider("fclk", "mpll", CCM_CSCR, 15, 1);
68 clk[hclk] = imx_clk_divider("hclk", "spll", CCM_CSCR, 10, 4);
69 clk[clk48m] = imx_clk_divider("clk48m", "spll", CCM_CSCR, 26, 3);
70 clk[per1] = imx_clk_divider("per1", "spll", CCM_PCDR, 0, 4);
71 clk[per2] = imx_clk_divider("per2", "spll", CCM_PCDR, 4, 4);
72 clk[per3] = imx_clk_divider("per3", "spll", CCM_PCDR, 16, 7);
73 clk[clko] = imx_clk_mux("clko", CCM_CSCR, 29, 3, clko_sel_clks,
74 ARRAY_SIZE(clko_sel_clks));
75 clk[dma_gate] = imx_clk_gate("dma_gate", "hclk", SCM_GCCR, 4);
76 clk[csi_gate] = imx_clk_gate("csi_gate", "hclk", SCM_GCCR, 2);
77 clk[mma_gate] = imx_clk_gate("mma_gate", "hclk", SCM_GCCR, 1);
78 clk[usbd_gate] = imx_clk_gate("usbd_gate", "clk48m", SCM_GCCR, 0);
79
80 for (i = 0; i < ARRAY_SIZE(clk); i++)
81 if (IS_ERR(clk[i]))
82 pr_err("imx1 clk %d: register failed with %ld\n",
83 i, PTR_ERR(clk[i]));
84
85 clk_register_clkdev(clk[dma_gate], "ahb", "imx-dma");
86 clk_register_clkdev(clk[csi_gate], NULL, "mx1-camera.0");
87 clk_register_clkdev(clk[mma_gate], "mma", NULL);
88 clk_register_clkdev(clk[usbd_gate], NULL, "imx_udc.0");
89 clk_register_clkdev(clk[per1], "per", "imx-gpt.0");
90 clk_register_clkdev(clk[hclk], "ipg", "imx-gpt.0");
91 clk_register_clkdev(clk[per1], "per", "imx1-uart.0");
92 clk_register_clkdev(clk[hclk], "ipg", "imx1-uart.0");
93 clk_register_clkdev(clk[per1], "per", "imx1-uart.1");
94 clk_register_clkdev(clk[hclk], "ipg", "imx1-uart.1");
95 clk_register_clkdev(clk[per1], "per", "imx1-uart.2");
96 clk_register_clkdev(clk[hclk], "ipg", "imx1-uart.2");
97 clk_register_clkdev(clk[hclk], NULL, "imx-i2c.0");
98 clk_register_clkdev(clk[per2], "per", "imx1-cspi.0");
99 clk_register_clkdev(clk[dummy], "ipg", "imx1-cspi.0");
100 clk_register_clkdev(clk[per2], "per", "imx1-cspi.1");
101 clk_register_clkdev(clk[dummy], "ipg", "imx1-cspi.1");
102 clk_register_clkdev(clk[per2], NULL, "imx-mmc.0");
103 clk_register_clkdev(clk[per2], "per", "imx-fb.0");
104 clk_register_clkdev(clk[dummy], "ipg", "imx-fb.0");
105 clk_register_clkdev(clk[dummy], "ahb", "imx-fb.0");
106 clk_register_clkdev(clk[hclk], "mshc", NULL);
107 clk_register_clkdev(clk[per3], "ssi", NULL);
108 clk_register_clkdev(clk[clk32], NULL, "mxc_rtc.0");
109 clk_register_clkdev(clk[clko], "clko", NULL);
110
111 mxc_timer_init(NULL, MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR),
112 MX1_TIM1_INT);
113
114 return 0;
115}
diff --git a/arch/arm/mach-imx/clk-imx21.c b/arch/arm/mach-imx/clk-imx21.c
new file mode 100644
index 000000000000..4e4f384ee8dd
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx21.c
@@ -0,0 +1,186 @@
1/*
2 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
3 * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
4 * Copyright 2008 Martin Fuzzey, mfuzzey@gmail.com
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
18 * MA 02110-1301, USA.
19 */
20
21#include <linux/clk.h>
22#include <linux/clkdev.h>
23#include <linux/clk-provider.h>
24#include <linux/io.h>
25#include <linux/module.h>
26#include <linux/clkdev.h>
27#include <linux/err.h>
28
29#include <mach/hardware.h>
30#include <mach/common.h>
31#include "clk.h"
32
33#define IO_ADDR_CCM(off) (MX21_IO_ADDRESS(MX21_CCM_BASE_ADDR + (off)))
34
35/* Register offsets */
36#define CCM_CSCR IO_ADDR_CCM(0x0)
37#define CCM_MPCTL0 IO_ADDR_CCM(0x4)
38#define CCM_MPCTL1 IO_ADDR_CCM(0x8)
39#define CCM_SPCTL0 IO_ADDR_CCM(0xc)
40#define CCM_SPCTL1 IO_ADDR_CCM(0x10)
41#define CCM_OSC26MCTL IO_ADDR_CCM(0x14)
42#define CCM_PCDR0 IO_ADDR_CCM(0x18)
43#define CCM_PCDR1 IO_ADDR_CCM(0x1c)
44#define CCM_PCCR0 IO_ADDR_CCM(0x20)
45#define CCM_PCCR1 IO_ADDR_CCM(0x24)
46#define CCM_CCSR IO_ADDR_CCM(0x28)
47#define CCM_PMCTL IO_ADDR_CCM(0x2c)
48#define CCM_PMCOUNT IO_ADDR_CCM(0x30)
49#define CCM_WKGDCTL IO_ADDR_CCM(0x34)
50
/*
 * Parent options for the MPLL/SPLL reference muxes (CCM_CSCR bits 16/17):
 * index 0 selects the 512*ckil "fpm" clock, index 1 the ckih crystal.
 */
51static const char *mpll_sel_clks[] = { "fpm", "ckih", };
52static const char *spll_sel_clks[] = { "fpm", "ckih", };
53
/*
 * Indices into the clk[] array below.  The order is arbitrary but must
 * stay in sync with the registrations in mx21_clocks_init(); clk_max is
 * the entry count and sizes the array.
 */
54enum imx21_clks {
55 ckil, ckih, fpm, mpll_sel, spll_sel, mpll, spll, fclk, hclk, ipg, per1,
56 per2, per3, per4, uart1_ipg_gate, uart2_ipg_gate, uart3_ipg_gate,
57 uart4_ipg_gate, gpt1_ipg_gate, gpt2_ipg_gate, gpt3_ipg_gate,
58 pwm_ipg_gate, sdhc1_ipg_gate, sdhc2_ipg_gate, lcdc_ipg_gate,
59 lcdc_hclk_gate, cspi3_ipg_gate, cspi2_ipg_gate, cspi1_ipg_gate,
60 per4_gate, csi_hclk_gate, usb_div, usb_gate, usb_hclk_gate, ssi1_gate,
61 ssi2_gate, nfc_div, nfc_gate, dma_gate, dma_hclk_gate, brom_gate,
62 emma_gate, emma_hclk_gate, slcdc_gate, slcdc_hclk_gate, wdog_gate,
63 gpio_gate, i2c_gate, kpp_gate, owire_gate, rtc_gate, clk_max
64};
65
/* Every clock registered by mx21_clocks_init(), indexed by enum imx21_clks. */
66static struct clk *clk[clk_max];
67
68/*
69 * must be called very early to get information about the
70 * available clock rate when the timer framework starts
71 */
/*
 * mx21_clocks_init - register the i.MX21 clock tree and clkdev lookups
 * @lref: rate of the low-frequency reference oscillator (ckil), in Hz
 * @href: rate of the high-frequency reference crystal (ckih), in Hz
 *
 * Builds the oscillator/PLL/divider hierarchy and the PCCR0/PCCR1 clock
 * gates, then registers clkdev entries so platform drivers can clk_get()
 * their clocks.  Finally starts the GPT1 system timer.  Returns 0.
 */
72int __init mx21_clocks_init(unsigned long lref, unsigned long href)
73{
74 int i;
75
	/* Root oscillators, PLL reference muxes, PLLs and bus dividers. */
76 clk[ckil] = imx_clk_fixed("ckil", lref);
77 clk[ckih] = imx_clk_fixed("ckih", href);
78 clk[fpm] = imx_clk_fixed_factor("fpm", "ckil", 512, 1);
79 clk[mpll_sel] = imx_clk_mux("mpll_sel", CCM_CSCR, 16, 1, mpll_sel_clks,
80 ARRAY_SIZE(mpll_sel_clks));
81 clk[spll_sel] = imx_clk_mux("spll_sel", CCM_CSCR, 17, 1, spll_sel_clks,
82 ARRAY_SIZE(spll_sel_clks));
83 clk[mpll] = imx_clk_pllv1("mpll", "mpll_sel", CCM_MPCTL0);
84 clk[spll] = imx_clk_pllv1("spll", "spll_sel", CCM_SPCTL0);
85 clk[fclk] = imx_clk_divider("fclk", "mpll", CCM_CSCR, 29, 3);
86 clk[hclk] = imx_clk_divider("hclk", "fclk", CCM_CSCR, 10, 4);
87 clk[ipg] = imx_clk_divider("ipg", "hclk", CCM_CSCR, 9, 1);
88 clk[per1] = imx_clk_divider("per1", "mpll", CCM_PCDR1, 0, 6);
89 clk[per2] = imx_clk_divider("per2", "mpll", CCM_PCDR1, 8, 6);
90 clk[per3] = imx_clk_divider("per3", "mpll", CCM_PCDR1, 16, 6);
91 clk[per4] = imx_clk_divider("per4", "mpll", CCM_PCDR1, 24, 6);
	/* Per-peripheral clock gates in PCCR0/PCCR1 (third arg = bit number). */
92 clk[uart1_ipg_gate] = imx_clk_gate("uart1_ipg_gate", "ipg", CCM_PCCR0, 0);
93 clk[uart2_ipg_gate] = imx_clk_gate("uart2_ipg_gate", "ipg", CCM_PCCR0, 1);
94 clk[uart3_ipg_gate] = imx_clk_gate("uart3_ipg_gate", "ipg", CCM_PCCR0, 2);
95 clk[uart4_ipg_gate] = imx_clk_gate("uart4_ipg_gate", "ipg", CCM_PCCR0, 3);
96 clk[gpt1_ipg_gate] = imx_clk_gate("gpt1_ipg_gate", "ipg", CCM_PCCR1, 25);
97 clk[gpt2_ipg_gate] = imx_clk_gate("gpt2_ipg_gate", "ipg", CCM_PCCR1, 26);
98 clk[gpt3_ipg_gate] = imx_clk_gate("gpt3_ipg_gate", "ipg", CCM_PCCR1, 27);
99 clk[pwm_ipg_gate] = imx_clk_gate("pwm_ipg_gate", "ipg", CCM_PCCR1, 28);
100 clk[sdhc1_ipg_gate] = imx_clk_gate("sdhc1_ipg_gate", "ipg", CCM_PCCR0, 9);
101 clk[sdhc2_ipg_gate] = imx_clk_gate("sdhc2_ipg_gate", "ipg", CCM_PCCR0, 10);
102 clk[lcdc_ipg_gate] = imx_clk_gate("lcdc_ipg_gate", "ipg", CCM_PCCR0, 18);
103 clk[lcdc_hclk_gate] = imx_clk_gate("lcdc_hclk_gate", "hclk", CCM_PCCR0, 26);
104 clk[cspi3_ipg_gate] = imx_clk_gate("cspi3_ipg_gate", "ipg", CCM_PCCR1, 23);
105 clk[cspi2_ipg_gate] = imx_clk_gate("cspi2_ipg_gate", "ipg", CCM_PCCR0, 5);
106 clk[cspi1_ipg_gate] = imx_clk_gate("cspi1_ipg_gate", "ipg", CCM_PCCR0, 4);
107 clk[per4_gate] = imx_clk_gate("per4_gate", "per4", CCM_PCCR0, 22);
108 clk[csi_hclk_gate] = imx_clk_gate("csi_hclk_gate", "hclk", CCM_PCCR0, 31);
109 clk[usb_div] = imx_clk_divider("usb_div", "spll", CCM_CSCR, 26, 3);
110 clk[usb_gate] = imx_clk_gate("usb_gate", "usb_div", CCM_PCCR0, 14);
111 clk[usb_hclk_gate] = imx_clk_gate("usb_hclk_gate", "hclk", CCM_PCCR0, 24);
112 clk[ssi1_gate] = imx_clk_gate("ssi1_gate", "ipg", CCM_PCCR0, 6);
113 clk[ssi2_gate] = imx_clk_gate("ssi2_gate", "ipg", CCM_PCCR0, 7);
114 clk[nfc_div] = imx_clk_divider("nfc_div", "ipg", CCM_PCDR0, 12, 4);
115 clk[nfc_gate] = imx_clk_gate("nfc_gate", "nfc_div", CCM_PCCR0, 19);
116 clk[dma_gate] = imx_clk_gate("dma_gate", "ipg", CCM_PCCR0, 13);
117 clk[dma_hclk_gate] = imx_clk_gate("dma_hclk_gate", "hclk", CCM_PCCR0, 30);
118 clk[brom_gate] = imx_clk_gate("brom_gate", "hclk", CCM_PCCR0, 28);
119 clk[emma_gate] = imx_clk_gate("emma_gate", "ipg", CCM_PCCR0, 15);
120 clk[emma_hclk_gate] = imx_clk_gate("emma_hclk_gate", "hclk", CCM_PCCR0, 27);
121 clk[slcdc_gate] = imx_clk_gate("slcdc_gate", "ipg", CCM_PCCR0, 25);
122 clk[slcdc_hclk_gate] = imx_clk_gate("slcdc_hclk_gate", "hclk", CCM_PCCR0, 21);
123 clk[wdog_gate] = imx_clk_gate("wdog_gate", "ipg", CCM_PCCR1, 24);
124 clk[gpio_gate] = imx_clk_gate("gpio_gate", "ipg", CCM_PCCR0, 11);
125 clk[i2c_gate] = imx_clk_gate("i2c_gate", "ipg", CCM_PCCR0, 12);
126 clk[kpp_gate] = imx_clk_gate("kpp_gate", "ipg", CCM_PCCR1, 30);
127 clk[owire_gate] = imx_clk_gate("owire_gate", "ipg", CCM_PCCR1, 31);
128 clk[rtc_gate] = imx_clk_gate("rtc_gate", "ipg", CCM_PCCR1, 29);
129
	/* imx_clk_* helpers return ERR_PTR() on failure; report but carry on. */
130 for (i = 0; i < ARRAY_SIZE(clk); i++)
131 if (IS_ERR(clk[i]))
132 pr_err("i.MX21 clk %d: register failed with %ld\n",
133 i, PTR_ERR(clk[i]));
134
	/* clkdev lookups consumed by platform device drivers via clk_get(). */
135 clk_register_clkdev(clk[per1], "per1", NULL);
136 clk_register_clkdev(clk[per2], "per2", NULL);
137 clk_register_clkdev(clk[per3], "per3", NULL);
138 clk_register_clkdev(clk[per4], "per4", NULL);
139 clk_register_clkdev(clk[per1], "per", "imx21-uart.0");
140 clk_register_clkdev(clk[uart1_ipg_gate], "ipg", "imx21-uart.0");
141 clk_register_clkdev(clk[per1], "per", "imx21-uart.1");
142 clk_register_clkdev(clk[uart2_ipg_gate], "ipg", "imx21-uart.1");
143 clk_register_clkdev(clk[per1], "per", "imx21-uart.2");
144 clk_register_clkdev(clk[uart3_ipg_gate], "ipg", "imx21-uart.2");
145 clk_register_clkdev(clk[per1], "per", "imx21-uart.3");
146 clk_register_clkdev(clk[uart4_ipg_gate], "ipg", "imx21-uart.3");
147 clk_register_clkdev(clk[gpt1_ipg_gate], "ipg", "imx-gpt.0");
148 clk_register_clkdev(clk[per1], "per", "imx-gpt.0");
149 clk_register_clkdev(clk[gpt2_ipg_gate], "ipg", "imx-gpt.1");
150 clk_register_clkdev(clk[per1], "per", "imx-gpt.1");
151 clk_register_clkdev(clk[gpt3_ipg_gate], "ipg", "imx-gpt.2");
152 clk_register_clkdev(clk[per1], "per", "imx-gpt.2");
153 clk_register_clkdev(clk[pwm_ipg_gate], "pwm", "mxc_pwm.0");
154 clk_register_clkdev(clk[per2], "per", "imx21-cspi.0");
155 clk_register_clkdev(clk[cspi1_ipg_gate], "ipg", "imx21-cspi.0");
156 clk_register_clkdev(clk[per2], "per", "imx21-cspi.1");
157 clk_register_clkdev(clk[cspi2_ipg_gate], "ipg", "imx21-cspi.1");
158 clk_register_clkdev(clk[per2], "per", "imx21-cspi.2");
159 clk_register_clkdev(clk[cspi3_ipg_gate], "ipg", "imx21-cspi.2");
160 clk_register_clkdev(clk[per3], "per", "imx-fb.0");
161 clk_register_clkdev(clk[lcdc_ipg_gate], "ipg", "imx-fb.0");
162 clk_register_clkdev(clk[lcdc_hclk_gate], "ahb", "imx-fb.0");
163 clk_register_clkdev(clk[usb_gate], "per", "imx21-hcd.0");
164 clk_register_clkdev(clk[usb_hclk_gate], "ahb", "imx21-hcd.0");
165 clk_register_clkdev(clk[nfc_gate], NULL, "mxc_nand.0");
166 clk_register_clkdev(clk[dma_hclk_gate], "ahb", "imx-dma");
167 clk_register_clkdev(clk[dma_gate], "ipg", "imx-dma");
168 clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
169 clk_register_clkdev(clk[i2c_gate], NULL, "imx-i2c.0");
170 clk_register_clkdev(clk[kpp_gate], NULL, "mxc-keypad");
171 clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1.0");
172 clk_register_clkdev(clk[brom_gate], "brom", NULL);
173 clk_register_clkdev(clk[emma_gate], "emma", NULL);
174 clk_register_clkdev(clk[slcdc_gate], "slcdc", NULL);
175 clk_register_clkdev(clk[gpio_gate], "gpio", NULL);
176 clk_register_clkdev(clk[rtc_gate], "rtc", NULL);
177 clk_register_clkdev(clk[csi_hclk_gate], "csi", NULL);
178 clk_register_clkdev(clk[ssi1_gate], "ssi1", NULL);
179 clk_register_clkdev(clk[ssi2_gate], "ssi2", NULL);
180 clk_register_clkdev(clk[sdhc1_ipg_gate], "sdhc1", NULL);
181 clk_register_clkdev(clk[sdhc2_ipg_gate], "sdhc2", NULL);
182
	/* Start the GPT1-based system timer now that its clocks exist. */
183 mxc_timer_init(NULL, MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR),
184 MX21_INT_GPT1);
185 return 0;
186}
diff --git a/arch/arm/mach-imx/clk-imx25.c b/arch/arm/mach-imx/clk-imx25.c
new file mode 100644
index 000000000000..d9833bb5fd61
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx25.c
@@ -0,0 +1,248 @@
1/*
2 * Copyright (C) 2009 by Sascha Hauer, Pengutronix
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
16 * MA 02110-1301, USA.
17 */
18
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/list.h>
22#include <linux/clk.h>
23#include <linux/io.h>
24#include <linux/clkdev.h>
25#include <linux/err.h>
26
27#include <mach/hardware.h>
28#include <mach/common.h>
29#include <mach/mx25.h>
30#include "clk.h"
31
32#define CRM_BASE MX25_IO_ADDRESS(MX25_CRM_BASE_ADDR)
33
34#define CCM_MPCTL 0x00
35#define CCM_UPCTL 0x04
36#define CCM_CCTL 0x08
37#define CCM_CGCR0 0x0C
38#define CCM_CGCR1 0x10
39#define CCM_CGCR2 0x14
40#define CCM_PCDR0 0x18
41#define CCM_PCDR1 0x1C
42#define CCM_PCDR2 0x20
43#define CCM_PCDR3 0x24
44#define CCM_RCSR 0x28
45#define CCM_CRDR 0x2C
46#define CCM_DCVR0 0x30
47#define CCM_DCVR1 0x34
48#define CCM_DCVR2 0x38
49#define CCM_DCVR3 0x3c
50#define CCM_LTR0 0x40
51#define CCM_LTR1 0x44
52#define CCM_LTR2 0x48
53#define CCM_LTR3 0x4c
54#define CCM_MCR 0x64
55
/* Translate a CCM register offset into its virtual address. */
56#define ccm(x) (CRM_BASE + (x))
57
/*
 * Parent options for the CPU mux (CCM_CCTL bit 14) and for each of the
 * sixteen per-clock muxes in CCM_MCR (one select bit per per clock).
 */
58static const char *cpu_sel_clks[] = { "mpll", "mpll_cpu_3_4", };
59static const char *per_sel_clks[] = { "ahb", "upll", };
60
/*
 * Indices into the clk[] array below.  Must stay in sync with the
 * registrations in mx25_clocks_init(); clk_max sizes the array.
 */
61enum mx25_clks {
62 dummy, osc, mpll, upll, mpll_cpu_3_4, cpu_sel, cpu, ahb, usb_div, ipg,
63 per0_sel, per1_sel, per2_sel, per3_sel, per4_sel, per5_sel, per6_sel,
64 per7_sel, per8_sel, per9_sel, per10_sel, per11_sel, per12_sel,
65 per13_sel, per14_sel, per15_sel, per0, per1, per2, per3, per4, per5,
66 per6, per7, per8, per9, per10, per11, per12, per13, per14, per15,
67 csi_ipg_per, esdhc1_ipg_per, esdhc2_ipg_per, gpt_ipg_per, i2c_ipg_per,
68 lcdc_ipg_per, nfc_ipg_per, ssi1_ipg_per, ssi2_ipg_per, uart_ipg_per,
69 csi_ahb, esdhc1_ahb, esdhc2_ahb, fec_ahb, lcdc_ahb, sdma_ahb,
70 usbotg_ahb, can1_ipg, can2_ipg, csi_ipg, cspi1_ipg, cspi2_ipg,
71 cspi3_ipg, dryice_ipg, esdhc1_ipg, esdhc2_ipg, fec_ipg, iim_ipg,
72 kpp_ipg, lcdc_ipg, pwm1_ipg, pwm2_ipg, pwm3_ipg, pwm4_ipg, sdma_ipg,
73 ssi1_ipg, ssi2_ipg, tsc_ipg, uart1_ipg, uart2_ipg, uart3_ipg,
74 uart4_ipg, uart5_ipg, wdt_ipg, clk_max
75};
76
/* Every clock registered by mx25_clocks_init(), indexed by enum mx25_clks. */
77static struct clk *clk[clk_max];
78
79int __init mx25_clocks_init(void)
80{
81 int i;
82
83 clk[dummy] = imx_clk_fixed("dummy", 0);
84 clk[osc] = imx_clk_fixed("osc", 24000000);
85 clk[mpll] = imx_clk_pllv1("mpll", "osc", ccm(CCM_MPCTL));
86 clk[upll] = imx_clk_pllv1("upll", "osc", ccm(CCM_UPCTL));
87 clk[mpll_cpu_3_4] = imx_clk_fixed_factor("mpll_cpu_3_4", "mpll", 3, 4);
88 clk[cpu_sel] = imx_clk_mux("cpu_sel", ccm(CCM_CCTL), 14, 1, cpu_sel_clks, ARRAY_SIZE(cpu_sel_clks));
89 clk[cpu] = imx_clk_divider("cpu", "cpu_sel", ccm(CCM_CCTL), 30, 2);
90 clk[ahb] = imx_clk_divider("ahb", "cpu", ccm(CCM_CCTL), 28, 2);
91 clk[usb_div] = imx_clk_divider("usb_div", "upll", ccm(CCM_CCTL), 16, 6);
92 clk[ipg] = imx_clk_fixed_factor("ipg", "ahb", 1, 2);
93 clk[per0_sel] = imx_clk_mux("per0_sel", ccm(CCM_MCR), 0, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
94 clk[per1_sel] = imx_clk_mux("per1_sel", ccm(CCM_MCR), 1, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
95 clk[per2_sel] = imx_clk_mux("per2_sel", ccm(CCM_MCR), 2, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
96 clk[per3_sel] = imx_clk_mux("per3_sel", ccm(CCM_MCR), 3, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
97 clk[per4_sel] = imx_clk_mux("per4_sel", ccm(CCM_MCR), 4, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
98 clk[per5_sel] = imx_clk_mux("per5_sel", ccm(CCM_MCR), 5, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
99 clk[per6_sel] = imx_clk_mux("per6_sel", ccm(CCM_MCR), 6, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
100 clk[per7_sel] = imx_clk_mux("per7_sel", ccm(CCM_MCR), 7, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
101 clk[per8_sel] = imx_clk_mux("per8_sel", ccm(CCM_MCR), 8, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
102 clk[per9_sel] = imx_clk_mux("per9_sel", ccm(CCM_MCR), 9, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
103 clk[per10_sel] = imx_clk_mux("per10_sel", ccm(CCM_MCR), 10, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
104 clk[per11_sel] = imx_clk_mux("per11_sel", ccm(CCM_MCR), 11, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
105 clk[per12_sel] = imx_clk_mux("per12_sel", ccm(CCM_MCR), 12, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
106 clk[per13_sel] = imx_clk_mux("per13_sel", ccm(CCM_MCR), 13, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
107 clk[per14_sel] = imx_clk_mux("per14_sel", ccm(CCM_MCR), 14, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
108 clk[per15_sel] = imx_clk_mux("per15_sel", ccm(CCM_MCR), 15, 1, per_sel_clks, ARRAY_SIZE(per_sel_clks));
109 clk[per0] = imx_clk_divider("per0", "per0_sel", ccm(CCM_PCDR0), 0, 6);
110 clk[per1] = imx_clk_divider("per1", "per1_sel", ccm(CCM_PCDR0), 8, 6);
111 clk[per2] = imx_clk_divider("per2", "per2_sel", ccm(CCM_PCDR0), 16, 6);
112 clk[per3] = imx_clk_divider("per3", "per3_sel", ccm(CCM_PCDR0), 24, 6);
113 clk[per4] = imx_clk_divider("per4", "per4_sel", ccm(CCM_PCDR1), 0, 6);
114 clk[per5] = imx_clk_divider("per5", "per5_sel", ccm(CCM_PCDR1), 8, 6);
115 clk[per6] = imx_clk_divider("per6", "per6_sel", ccm(CCM_PCDR1), 16, 6);
116 clk[per7] = imx_clk_divider("per7", "per7_sel", ccm(CCM_PCDR1), 24, 6);
117 clk[per8] = imx_clk_divider("per8", "per8_sel", ccm(CCM_PCDR2), 0, 6);
118 clk[per9] = imx_clk_divider("per9", "per9_sel", ccm(CCM_PCDR2), 8, 6);
119 clk[per10] = imx_clk_divider("per10", "per10_sel", ccm(CCM_PCDR2), 16, 6);
120 clk[per11] = imx_clk_divider("per11", "per11_sel", ccm(CCM_PCDR2), 24, 6);
121 clk[per12] = imx_clk_divider("per12", "per12_sel", ccm(CCM_PCDR3), 0, 6);
122 clk[per13] = imx_clk_divider("per13", "per13_sel", ccm(CCM_PCDR3), 8, 6);
123 clk[per14] = imx_clk_divider("per14", "per14_sel", ccm(CCM_PCDR3), 16, 6);
124 clk[per15] = imx_clk_divider("per15", "per15_sel", ccm(CCM_PCDR3), 24, 6);
125 clk[csi_ipg_per] = imx_clk_gate("csi_ipg_per", "per0", ccm(CCM_CGCR0), 0);
126 clk[esdhc1_ipg_per] = imx_clk_gate("esdhc1_ipg_per", "per3", ccm(CCM_CGCR0), 3);
127 clk[esdhc2_ipg_per] = imx_clk_gate("esdhc2_ipg_per", "per4", ccm(CCM_CGCR0), 4);
128 clk[gpt_ipg_per] = imx_clk_gate("gpt_ipg_per", "per5", ccm(CCM_CGCR0), 5);
129 clk[i2c_ipg_per] = imx_clk_gate("i2c_ipg_per", "per6", ccm(CCM_CGCR0), 6);
130 clk[lcdc_ipg_per] = imx_clk_gate("lcdc_ipg_per", "per8", ccm(CCM_CGCR0), 7);
131 clk[nfc_ipg_per] = imx_clk_gate("nfc_ipg_per", "ipg_per", ccm(CCM_CGCR0), 8);
132 clk[ssi1_ipg_per] = imx_clk_gate("ssi1_ipg_per", "per13", ccm(CCM_CGCR0), 13);
133 clk[ssi2_ipg_per] = imx_clk_gate("ssi2_ipg_per", "per14", ccm(CCM_CGCR0), 14);
134 clk[uart_ipg_per] = imx_clk_gate("uart_ipg_per", "per15", ccm(CCM_CGCR0), 15);
135 clk[csi_ahb] = imx_clk_gate("csi_ahb", "ahb", ccm(CCM_CGCR0), 18);
136 clk[esdhc1_ahb] = imx_clk_gate("esdhc1_ahb", "ahb", ccm(CCM_CGCR0), 21);
137 clk[esdhc2_ahb] = imx_clk_gate("esdhc2_ahb", "ahb", ccm(CCM_CGCR0), 22);
138 clk[fec_ahb] = imx_clk_gate("fec_ahb", "ahb", ccm(CCM_CGCR0), 23);
139 clk[lcdc_ahb] = imx_clk_gate("lcdc_ahb", "ahb", ccm(CCM_CGCR0), 24);
140 clk[sdma_ahb] = imx_clk_gate("sdma_ahb", "ahb", ccm(CCM_CGCR0), 26);
141 clk[usbotg_ahb] = imx_clk_gate("usbotg_ahb", "ahb", ccm(CCM_CGCR0), 28);
142 clk[can1_ipg] = imx_clk_gate("can1_ipg", "ipg", ccm(CCM_CGCR1), 2);
143 clk[can2_ipg] = imx_clk_gate("can2_ipg", "ipg", ccm(CCM_CGCR1), 3);
144 clk[csi_ipg] = imx_clk_gate("csi_ipg", "ipg", ccm(CCM_CGCR1), 4);
145 clk[cspi1_ipg] = imx_clk_gate("cspi1_ipg", "ipg", ccm(CCM_CGCR1), 5);
146 clk[cspi2_ipg] = imx_clk_gate("cspi2_ipg", "ipg", ccm(CCM_CGCR1), 6);
147 clk[cspi3_ipg] = imx_clk_gate("cspi3_ipg", "ipg", ccm(CCM_CGCR1), 7);
148 clk[dryice_ipg] = imx_clk_gate("dryice_ipg", "ipg", ccm(CCM_CGCR1), 8);
149 clk[esdhc1_ipg] = imx_clk_gate("esdhc1_ipg", "ipg", ccm(CCM_CGCR1), 13);
150 clk[esdhc2_ipg] = imx_clk_gate("esdhc2_ipg", "ipg", ccm(CCM_CGCR1), 14);
151 clk[fec_ipg] = imx_clk_gate("fec_ipg", "ipg", ccm(CCM_CGCR1), 15);
152 clk[iim_ipg] = imx_clk_gate("iim_ipg", "ipg", ccm(CCM_CGCR1), 26);
153 clk[kpp_ipg] = imx_clk_gate("kpp_ipg", "ipg", ccm(CCM_CGCR1), 28);
154 clk[lcdc_ipg] = imx_clk_gate("lcdc_ipg", "ipg", ccm(CCM_CGCR1), 29);
155 clk[pwm1_ipg] = imx_clk_gate("pwm1_ipg", "ipg", ccm(CCM_CGCR1), 31);
156 clk[pwm2_ipg] = imx_clk_gate("pwm2_ipg", "ipg", ccm(CCM_CGCR2), 0);
157 clk[pwm3_ipg] = imx_clk_gate("pwm3_ipg", "ipg", ccm(CCM_CGCR2), 1);
158 clk[pwm4_ipg] = imx_clk_gate("pwm4_ipg", "ipg", ccm(CCM_CGCR2), 2);
159 clk[sdma_ipg] = imx_clk_gate("sdma_ipg", "ipg", ccm(CCM_CGCR2), 6);
160 clk[ssi1_ipg] = imx_clk_gate("ssi1_ipg", "ipg", ccm(CCM_CGCR2), 11);
161 clk[ssi2_ipg] = imx_clk_gate("ssi2_ipg", "ipg", ccm(CCM_CGCR2), 12);
162 clk[tsc_ipg] = imx_clk_gate("tsc_ipg", "ipg", ccm(CCM_CGCR2), 13);
163 clk[uart1_ipg] = imx_clk_gate("uart1_ipg", "ipg", ccm(CCM_CGCR2), 14);
164 clk[uart2_ipg] = imx_clk_gate("uart2_ipg", "ipg", ccm(CCM_CGCR2), 15);
165 clk[uart3_ipg] = imx_clk_gate("uart3_ipg", "ipg", ccm(CCM_CGCR2), 16);
166 clk[uart4_ipg] = imx_clk_gate("uart4_ipg", "ipg", ccm(CCM_CGCR2), 17);
167 clk[uart5_ipg] = imx_clk_gate("uart5_ipg", "ipg", ccm(CCM_CGCR2), 18);
168 clk[wdt_ipg] = imx_clk_gate("wdt_ipg", "ipg", ccm(CCM_CGCR2), 19);
169
170 for (i = 0; i < ARRAY_SIZE(clk); i++)
171 if (IS_ERR(clk[i]))
172 pr_err("i.MX25 clk %d: register failed with %ld\n",
173 i, PTR_ERR(clk[i]));
174
175 /* i.mx25 has the i.mx21 type uart */
176 clk_register_clkdev(clk[uart1_ipg], "ipg", "imx21-uart.0");
177 clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.0");
178 clk_register_clkdev(clk[uart2_ipg], "ipg", "imx21-uart.1");
179 clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.1");
180 clk_register_clkdev(clk[uart3_ipg], "ipg", "imx21-uart.2");
181 clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.2");
182 clk_register_clkdev(clk[uart4_ipg], "ipg", "imx21-uart.3");
183 clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.3");
184 clk_register_clkdev(clk[uart5_ipg], "ipg", "imx21-uart.4");
185 clk_register_clkdev(clk[uart_ipg_per], "per", "imx21-uart.4");
186 clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
187 clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
188 clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
189 clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.0");
190 clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
191 clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.1");
192 clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.1");
193 clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.1");
194 clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
195 clk_register_clkdev(clk[usbotg_ahb], "ahb", "mxc-ehci.2");
196 clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
197 clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
198 clk_register_clkdev(clk[usbotg_ahb], "ahb", "fsl-usb2-udc");
199 clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
200 clk_register_clkdev(clk[nfc_ipg_per], NULL, "mxc_nand.0");
201 /* i.mx25 has the i.mx35 type cspi */
202 clk_register_clkdev(clk[cspi1_ipg], NULL, "imx35-cspi.0");
203 clk_register_clkdev(clk[cspi2_ipg], NULL, "imx35-cspi.1");
204 clk_register_clkdev(clk[cspi3_ipg], NULL, "imx35-cspi.2");
205 clk_register_clkdev(clk[pwm1_ipg], "ipg", "mxc_pwm.0");
206 clk_register_clkdev(clk[per10], "per", "mxc_pwm.0");
207 clk_register_clkdev(clk[pwm1_ipg], "ipg", "mxc_pwm.1");
208 clk_register_clkdev(clk[per10], "per", "mxc_pwm.1");
209 clk_register_clkdev(clk[pwm1_ipg], "ipg", "mxc_pwm.2");
210 clk_register_clkdev(clk[per10], "per", "mxc_pwm.2");
211 clk_register_clkdev(clk[pwm1_ipg], "ipg", "mxc_pwm.3");
212 clk_register_clkdev(clk[per10], "per", "mxc_pwm.3");
213 clk_register_clkdev(clk[kpp_ipg], NULL, "imx-keypad");
214 clk_register_clkdev(clk[tsc_ipg], NULL, "mx25-adc");
215 clk_register_clkdev(clk[i2c_ipg_per], NULL, "imx-i2c.0");
216 clk_register_clkdev(clk[i2c_ipg_per], NULL, "imx-i2c.1");
217 clk_register_clkdev(clk[i2c_ipg_per], NULL, "imx-i2c.2");
218 clk_register_clkdev(clk[fec_ipg], "ipg", "imx25-fec.0");
219 clk_register_clkdev(clk[fec_ahb], "ahb", "imx25-fec.0");
220 clk_register_clkdev(clk[dryice_ipg], NULL, "imxdi_rtc.0");
221 clk_register_clkdev(clk[lcdc_ipg_per], "per", "imx-fb.0");
222 clk_register_clkdev(clk[lcdc_ipg], "ipg", "imx-fb.0");
223 clk_register_clkdev(clk[lcdc_ahb], "ahb", "imx-fb.0");
224 clk_register_clkdev(clk[wdt_ipg], NULL, "imx2-wdt.0");
225 clk_register_clkdev(clk[ssi1_ipg_per], "per", "imx-ssi.0");
226 clk_register_clkdev(clk[ssi1_ipg], "ipg", "imx-ssi.0");
227 clk_register_clkdev(clk[ssi2_ipg_per], "per", "imx-ssi.1");
228 clk_register_clkdev(clk[ssi2_ipg], "ipg", "imx-ssi.1");
229 clk_register_clkdev(clk[esdhc1_ipg_per], "per", "sdhci-esdhc-imx25.0");
230 clk_register_clkdev(clk[esdhc1_ipg], "ipg", "sdhci-esdhc-imx25.0");
231 clk_register_clkdev(clk[esdhc1_ahb], "ahb", "sdhci-esdhc-imx25.0");
232 clk_register_clkdev(clk[esdhc2_ipg_per], "per", "sdhci-esdhc-imx25.1");
233 clk_register_clkdev(clk[esdhc2_ipg], "ipg", "sdhci-esdhc-imx25.1");
234 clk_register_clkdev(clk[esdhc2_ahb], "ahb", "sdhci-esdhc-imx25.1");
235 clk_register_clkdev(clk[csi_ipg_per], "per", "mx2-camera.0");
236 clk_register_clkdev(clk[csi_ipg], "ipg", "mx2-camera.0");
237 clk_register_clkdev(clk[csi_ahb], "ahb", "mx2-camera.0");
238 clk_register_clkdev(clk[dummy], "audmux", NULL);
239 clk_register_clkdev(clk[can1_ipg], NULL, "flexcan.0");
240 clk_register_clkdev(clk[can2_ipg], NULL, "flexcan.1");
241 /* i.mx25 has the i.mx35 type sdma */
242 clk_register_clkdev(clk[sdma_ipg], "ipg", "imx35-sdma");
243 clk_register_clkdev(clk[sdma_ahb], "ahb", "imx35-sdma");
244 clk_register_clkdev(clk[iim_ipg], "iim", NULL);
245
246 mxc_timer_init(NULL, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);
247 return 0;
248}
diff --git a/arch/arm/mach-imx/clk-imx27.c b/arch/arm/mach-imx/clk-imx27.c
new file mode 100644
index 000000000000..50a7ebd8d1b2
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx27.c
@@ -0,0 +1,290 @@
1#include <linux/clk.h>
2#include <linux/io.h>
3#include <linux/module.h>
4#include <linux/clkdev.h>
5#include <linux/err.h>
6#include <linux/clk-provider.h>
7#include <linux/of.h>
8
9#include <mach/common.h>
10#include <mach/hardware.h>
11#include "clk.h"
12
13#define IO_ADDR_CCM(off) (MX27_IO_ADDRESS(MX27_CCM_BASE_ADDR + (off)))
14
15/* Register offsets */
16#define CCM_CSCR IO_ADDR_CCM(0x0)
17#define CCM_MPCTL0 IO_ADDR_CCM(0x4)
18#define CCM_MPCTL1 IO_ADDR_CCM(0x8)
19#define CCM_SPCTL0 IO_ADDR_CCM(0xc)
20#define CCM_SPCTL1 IO_ADDR_CCM(0x10)
21#define CCM_OSC26MCTL IO_ADDR_CCM(0x14)
22#define CCM_PCDR0 IO_ADDR_CCM(0x18)
23#define CCM_PCDR1 IO_ADDR_CCM(0x1c)
24#define CCM_PCCR0 IO_ADDR_CCM(0x20)
25#define CCM_PCCR1 IO_ADDR_CCM(0x24)
26#define CCM_CCSR IO_ADDR_CCM(0x28)
27#define CCM_PMCTL IO_ADDR_CCM(0x2c)
28#define CCM_PMCOUNT IO_ADDR_CCM(0x30)
29#define CCM_WKGDCTL IO_ADDR_CCM(0x34)
30
31#define CCM_CSCR_UPDATE_DIS (1 << 31)
32#define CCM_CSCR_SSI2 (1 << 23)
33#define CCM_CSCR_SSI1 (1 << 22)
34#define CCM_CSCR_VPU (1 << 21)
35#define CCM_CSCR_MSHC (1 << 20)
36#define CCM_CSCR_SPLLRES (1 << 19)
37#define CCM_CSCR_MPLLRES (1 << 18)
38#define CCM_CSCR_SP (1 << 17)
39#define CCM_CSCR_MCU (1 << 16)
40#define CCM_CSCR_OSC26MDIV (1 << 4)
41#define CCM_CSCR_OSC26M (1 << 3)
42#define CCM_CSCR_FPM (1 << 2)
43#define CCM_CSCR_SPEN (1 << 1)
44#define CCM_CSCR_MPEN (1 << 0)
45
46/* i.MX27 TO 2+ */
47#define CCM_CSCR_ARM_SRC (1 << 15)
48
49#define CCM_SPCTL1_LF (1 << 15)
50#define CCM_SPCTL1_BRMO (1 << 6)
51
/*
 * Parent options for the VPU and CPU source muxes, and for the 5-bit
 * CLKO output mux in CCM_CCSR (one entry per selector value).
 */
52static const char *vpu_sel_clks[] = { "spll", "mpll_main2", };
53static const char *cpu_sel_clks[] = { "mpll_main2", "mpll", };
54static const char *clko_sel_clks[] = {
55 "ckil", "prem", "ckih", "ckih",
56 "ckih", "mpll", "spll", "cpu_div",
57 "ahb", "ipg", "per1_div", "per2_div",
58 "per3_div", "per4_div", "ssi1_div", "ssi2_div",
59 "nfc_div", "mshc_div", "vpu_div", "60m",
60 "32k", "usb_div", "dptc",
61};
62
/* Parent options for the SSI1/SSI2 source muxes (CCM_CSCR bits 22/23). */
63static const char *ssi_sel_clks[] = { "spll", "mpll", };
64
/*
 * Indices into the clk[] array below.  Must stay in sync with the
 * registrations in mx27_clocks_init(); clk_max sizes the array.
 */
65enum mx27_clks {
66 dummy, ckih, ckil, mpll, spll, mpll_main2, ahb, ipg, nfc_div, per1_div,
67 per2_div, per3_div, per4_div, vpu_sel, vpu_div, usb_div, cpu_sel,
68 clko_sel, cpu_div, clko_div, ssi1_sel, ssi2_sel, ssi1_div, ssi2_div,
69 clko_en, ssi2_ipg_gate, ssi1_ipg_gate, slcdc_ipg_gate, sdhc3_ipg_gate,
70 sdhc2_ipg_gate, sdhc1_ipg_gate, scc_ipg_gate, sahara_ipg_gate,
71 rtc_ipg_gate, pwm_ipg_gate, owire_ipg_gate, lcdc_ipg_gate,
72 kpp_ipg_gate, iim_ipg_gate, i2c2_ipg_gate, i2c1_ipg_gate,
73 gpt6_ipg_gate, gpt5_ipg_gate, gpt4_ipg_gate, gpt3_ipg_gate,
74 gpt2_ipg_gate, gpt1_ipg_gate, gpio_ipg_gate, fec_ipg_gate,
75 emma_ipg_gate, dma_ipg_gate, cspi3_ipg_gate, cspi2_ipg_gate,
76 cspi1_ipg_gate, nfc_baud_gate, ssi2_baud_gate, ssi1_baud_gate,
77 vpu_baud_gate, per4_gate, per3_gate, per2_gate, per1_gate,
78 usb_ahb_gate, slcdc_ahb_gate, sahara_ahb_gate, lcdc_ahb_gate,
79 vpu_ahb_gate, fec_ahb_gate, emma_ahb_gate, emi_ahb_gate, dma_ahb_gate,
80 csi_ahb_gate, brom_ahb_gate, ata_ahb_gate, wdog_ipg_gate, usb_ipg_gate,
81 uart6_ipg_gate, uart5_ipg_gate, uart4_ipg_gate, uart3_ipg_gate,
82 uart2_ipg_gate, uart1_ipg_gate, clk_max
83};
84
/* Every clock registered by mx27_clocks_init(), indexed by enum mx27_clks. */
85static struct clk *clk[clk_max];
86
87int __init mx27_clocks_init(unsigned long fref)
88{
89 int i;
90
91 clk[dummy] = imx_clk_fixed("dummy", 0);
92 clk[ckih] = imx_clk_fixed("ckih", fref);
93 clk[ckil] = imx_clk_fixed("ckil", 32768);
94 clk[mpll] = imx_clk_pllv1("mpll", "ckih", CCM_MPCTL0);
95 clk[spll] = imx_clk_pllv1("spll", "ckih", CCM_SPCTL0);
96 clk[mpll_main2] = imx_clk_fixed_factor("mpll_main2", "mpll", 2, 3);
97
98 if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
99 clk[ahb] = imx_clk_divider("ahb", "mpll_main2", CCM_CSCR, 8, 2);
100 clk[ipg] = imx_clk_fixed_factor("ipg", "ahb", 1, 2);
101 } else {
102 clk[ahb] = imx_clk_divider("ahb", "mpll_main2", CCM_CSCR, 9, 4);
103 clk[ipg] = imx_clk_divider("ipg", "ahb", CCM_CSCR, 8, 1);
104 }
105
106 clk[nfc_div] = imx_clk_divider("nfc_div", "ahb", CCM_PCDR0, 6, 4);
107 clk[per1_div] = imx_clk_divider("per1_div", "mpll_main2", CCM_PCDR1, 0, 6);
108 clk[per2_div] = imx_clk_divider("per2_div", "mpll_main2", CCM_PCDR1, 8, 6);
109 clk[per3_div] = imx_clk_divider("per3_div", "mpll_main2", CCM_PCDR1, 16, 6);
110 clk[per4_div] = imx_clk_divider("per4_div", "mpll_main2", CCM_PCDR1, 24, 6);
111 clk[vpu_sel] = imx_clk_mux("vpu_sel", CCM_CSCR, 21, 1, vpu_sel_clks, ARRAY_SIZE(vpu_sel_clks));
112 clk[vpu_div] = imx_clk_divider("vpu_div", "vpu_sel", CCM_PCDR0, 10, 3);
113 clk[usb_div] = imx_clk_divider("usb_div", "spll", CCM_CSCR, 28, 3);
114 clk[cpu_sel] = imx_clk_mux("cpu_sel", CCM_CSCR, 15, 1, cpu_sel_clks, ARRAY_SIZE(cpu_sel_clks));
115 clk[clko_sel] = imx_clk_mux("clko_sel", CCM_CCSR, 0, 5, clko_sel_clks, ARRAY_SIZE(clko_sel_clks));
116 if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
117 clk[cpu_div] = imx_clk_divider("cpu_div", "cpu_sel", CCM_CSCR, 12, 2);
118 else
119 clk[cpu_div] = imx_clk_divider("cpu_div", "cpu_sel", CCM_CSCR, 13, 3);
120 clk[clko_div] = imx_clk_divider("clko_div", "clko_sel", CCM_PCDR0, 22, 3);
121 clk[ssi1_sel] = imx_clk_mux("ssi1_sel", CCM_CSCR, 22, 1, ssi_sel_clks, ARRAY_SIZE(ssi_sel_clks));
122 clk[ssi2_sel] = imx_clk_mux("ssi2_sel", CCM_CSCR, 23, 1, ssi_sel_clks, ARRAY_SIZE(ssi_sel_clks));
123 clk[ssi1_div] = imx_clk_divider("ssi1_div", "ssi1_sel", CCM_PCDR0, 16, 6);
124 clk[ssi2_div] = imx_clk_divider("ssi2_div", "ssi2_sel", CCM_PCDR0, 26, 3);
125 clk[clko_en] = imx_clk_gate("clko_en", "clko_div", CCM_PCCR0, 0);
126 clk[ssi2_ipg_gate] = imx_clk_gate("ssi2_ipg_gate", "ipg", CCM_PCCR0, 0);
127 clk[ssi1_ipg_gate] = imx_clk_gate("ssi1_ipg_gate", "ipg", CCM_PCCR0, 1);
128 clk[slcdc_ipg_gate] = imx_clk_gate("slcdc_ipg_gate", "ipg", CCM_PCCR0, 2);
129 clk[sdhc3_ipg_gate] = imx_clk_gate("sdhc3_ipg_gate", "ipg", CCM_PCCR0, 3);
130 clk[sdhc2_ipg_gate] = imx_clk_gate("sdhc2_ipg_gate", "ipg", CCM_PCCR0, 4);
131 clk[sdhc1_ipg_gate] = imx_clk_gate("sdhc1_ipg_gate", "ipg", CCM_PCCR0, 5);
132 clk[scc_ipg_gate] = imx_clk_gate("scc_ipg_gate", "ipg", CCM_PCCR0, 6);
133 clk[sahara_ipg_gate] = imx_clk_gate("sahara_ipg_gate", "ipg", CCM_PCCR0, 7);
134 clk[rtc_ipg_gate] = imx_clk_gate("rtc_ipg_gate", "ipg", CCM_PCCR0, 9);
135 clk[pwm_ipg_gate] = imx_clk_gate("pwm_ipg_gate", "ipg", CCM_PCCR0, 11);
136 clk[owire_ipg_gate] = imx_clk_gate("owire_ipg_gate", "ipg", CCM_PCCR0, 12);
137 clk[lcdc_ipg_gate] = imx_clk_gate("lcdc_ipg_gate", "ipg", CCM_PCCR0, 14);
138 clk[kpp_ipg_gate] = imx_clk_gate("kpp_ipg_gate", "ipg", CCM_PCCR0, 15);
139 clk[iim_ipg_gate] = imx_clk_gate("iim_ipg_gate", "ipg", CCM_PCCR0, 16);
140 clk[i2c2_ipg_gate] = imx_clk_gate("i2c2_ipg_gate", "ipg", CCM_PCCR0, 17);
141 clk[i2c1_ipg_gate] = imx_clk_gate("i2c1_ipg_gate", "ipg", CCM_PCCR0, 18);
142 clk[gpt6_ipg_gate] = imx_clk_gate("gpt6_ipg_gate", "ipg", CCM_PCCR0, 19);
143 clk[gpt5_ipg_gate] = imx_clk_gate("gpt5_ipg_gate", "ipg", CCM_PCCR0, 20);
144 clk[gpt4_ipg_gate] = imx_clk_gate("gpt4_ipg_gate", "ipg", CCM_PCCR0, 21);
145 clk[gpt3_ipg_gate] = imx_clk_gate("gpt3_ipg_gate", "ipg", CCM_PCCR0, 22);
146 clk[gpt2_ipg_gate] = imx_clk_gate("gpt2_ipg_gate", "ipg", CCM_PCCR0, 23);
147 clk[gpt1_ipg_gate] = imx_clk_gate("gpt1_ipg_gate", "ipg", CCM_PCCR0, 24);
148 clk[gpio_ipg_gate] = imx_clk_gate("gpio_ipg_gate", "ipg", CCM_PCCR0, 25);
149 clk[fec_ipg_gate] = imx_clk_gate("fec_ipg_gate", "ipg", CCM_PCCR0, 26);
150 clk[emma_ipg_gate] = imx_clk_gate("emma_ipg_gate", "ipg", CCM_PCCR0, 27);
151 clk[dma_ipg_gate] = imx_clk_gate("dma_ipg_gate", "ipg", CCM_PCCR0, 28);
152 clk[cspi3_ipg_gate] = imx_clk_gate("cspi3_ipg_gate", "ipg", CCM_PCCR0, 29);
153 clk[cspi2_ipg_gate] = imx_clk_gate("cspi2_ipg_gate", "ipg", CCM_PCCR0, 30);
154 clk[cspi1_ipg_gate] = imx_clk_gate("cspi1_ipg_gate", "ipg", CCM_PCCR0, 31);
155 clk[nfc_baud_gate] = imx_clk_gate("nfc_baud_gate", "nfc_div", CCM_PCCR1, 3);
156 clk[ssi2_baud_gate] = imx_clk_gate("ssi2_baud_gate", "ssi2_div", CCM_PCCR1, 4);
157 clk[ssi1_baud_gate] = imx_clk_gate("ssi1_baud_gate", "ssi1_div", CCM_PCCR1, 5);
158 clk[vpu_baud_gate] = imx_clk_gate("vpu_baud_gate", "vpu_div", CCM_PCCR1, 6);
159 clk[per4_gate] = imx_clk_gate("per4_gate", "per4_div", CCM_PCCR1, 7);
160 clk[per3_gate] = imx_clk_gate("per3_gate", "per3_div", CCM_PCCR1, 8);
161 clk[per2_gate] = imx_clk_gate("per2_gate", "per2_div", CCM_PCCR1, 9);
162 clk[per1_gate] = imx_clk_gate("per1_gate", "per1_div", CCM_PCCR1, 10);
163 clk[usb_ahb_gate] = imx_clk_gate("usb_ahb_gate", "ahb", CCM_PCCR1, 11);
164 clk[slcdc_ahb_gate] = imx_clk_gate("slcdc_ahb_gate", "ahb", CCM_PCCR1, 12);
165 clk[sahara_ahb_gate] = imx_clk_gate("sahara_ahb_gate", "ahb", CCM_PCCR1, 13);
166 clk[lcdc_ahb_gate] = imx_clk_gate("lcdc_ahb_gate", "ahb", CCM_PCCR1, 15);
167 clk[vpu_ahb_gate] = imx_clk_gate("vpu_ahb_gate", "ahb", CCM_PCCR1, 16);
168 clk[fec_ahb_gate] = imx_clk_gate("fec_ahb_gate", "ahb", CCM_PCCR1, 17);
169 clk[emma_ahb_gate] = imx_clk_gate("emma_ahb_gate", "ahb", CCM_PCCR1, 18);
170 clk[emi_ahb_gate] = imx_clk_gate("emi_ahb_gate", "ahb", CCM_PCCR1, 19);
171 clk[dma_ahb_gate] = imx_clk_gate("dma_ahb_gate", "ahb", CCM_PCCR1, 20);
172 clk[csi_ahb_gate] = imx_clk_gate("csi_ahb_gate", "ahb", CCM_PCCR1, 21);
173 clk[brom_ahb_gate] = imx_clk_gate("brom_ahb_gate", "ahb", CCM_PCCR1, 22);
174 clk[ata_ahb_gate] = imx_clk_gate("ata_ahb_gate", "ahb", CCM_PCCR1, 23);
175 clk[wdog_ipg_gate] = imx_clk_gate("wdog_ipg_gate", "ipg", CCM_PCCR1, 24);
176 clk[usb_ipg_gate] = imx_clk_gate("usb_ipg_gate", "ipg", CCM_PCCR1, 25);
177 clk[uart6_ipg_gate] = imx_clk_gate("uart6_ipg_gate", "ipg", CCM_PCCR1, 26);
178 clk[uart5_ipg_gate] = imx_clk_gate("uart5_ipg_gate", "ipg", CCM_PCCR1, 27);
179 clk[uart4_ipg_gate] = imx_clk_gate("uart4_ipg_gate", "ipg", CCM_PCCR1, 28);
180 clk[uart3_ipg_gate] = imx_clk_gate("uart3_ipg_gate", "ipg", CCM_PCCR1, 29);
181 clk[uart2_ipg_gate] = imx_clk_gate("uart2_ipg_gate", "ipg", CCM_PCCR1, 30);
182 clk[uart1_ipg_gate] = imx_clk_gate("uart1_ipg_gate", "ipg", CCM_PCCR1, 31);
183
184 for (i = 0; i < ARRAY_SIZE(clk); i++)
185 if (IS_ERR(clk[i]))
186 pr_err("i.MX27 clk %d: register failed with %ld\n",
187 i, PTR_ERR(clk[i]));
188
189 clk_register_clkdev(clk[uart1_ipg_gate], "ipg", "imx21-uart.0");
190 clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.0");
191 clk_register_clkdev(clk[uart2_ipg_gate], "ipg", "imx21-uart.1");
192 clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.1");
193 clk_register_clkdev(clk[uart3_ipg_gate], "ipg", "imx21-uart.2");
194 clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.2");
195 clk_register_clkdev(clk[uart4_ipg_gate], "ipg", "imx21-uart.3");
196 clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.3");
197 clk_register_clkdev(clk[uart5_ipg_gate], "ipg", "imx21-uart.4");
198 clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.4");
199 clk_register_clkdev(clk[uart6_ipg_gate], "ipg", "imx21-uart.5");
200 clk_register_clkdev(clk[per1_gate], "per", "imx21-uart.5");
201 clk_register_clkdev(clk[gpt1_ipg_gate], "ipg", "imx-gpt.0");
202 clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.0");
203 clk_register_clkdev(clk[gpt2_ipg_gate], "ipg", "imx-gpt.1");
204 clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.1");
205 clk_register_clkdev(clk[gpt3_ipg_gate], "ipg", "imx-gpt.2");
206 clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.2");
207 clk_register_clkdev(clk[gpt4_ipg_gate], "ipg", "imx-gpt.3");
208 clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.3");
209 clk_register_clkdev(clk[gpt5_ipg_gate], "ipg", "imx-gpt.4");
210 clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.4");
211 clk_register_clkdev(clk[gpt6_ipg_gate], "ipg", "imx-gpt.5");
212 clk_register_clkdev(clk[per1_gate], "per", "imx-gpt.5");
213 clk_register_clkdev(clk[pwm_ipg_gate], NULL, "mxc_pwm.0");
214 clk_register_clkdev(clk[per2_gate], "per", "mxc-mmc.0");
215 clk_register_clkdev(clk[sdhc1_ipg_gate], "ipg", "mxc-mmc.0");
216 clk_register_clkdev(clk[per2_gate], "per", "mxc-mmc.1");
217 clk_register_clkdev(clk[sdhc2_ipg_gate], "ipg", "mxc-mmc.1");
218 clk_register_clkdev(clk[per2_gate], "per", "mxc-mmc.2");
219 clk_register_clkdev(clk[sdhc2_ipg_gate], "ipg", "mxc-mmc.2");
220 clk_register_clkdev(clk[cspi1_ipg_gate], NULL, "imx27-cspi.0");
221 clk_register_clkdev(clk[cspi2_ipg_gate], NULL, "imx27-cspi.1");
222 clk_register_clkdev(clk[cspi3_ipg_gate], NULL, "imx27-cspi.2");
223 clk_register_clkdev(clk[per3_gate], "per", "imx-fb.0");
224 clk_register_clkdev(clk[lcdc_ipg_gate], "ipg", "imx-fb.0");
225 clk_register_clkdev(clk[lcdc_ahb_gate], "ahb", "imx-fb.0");
226 clk_register_clkdev(clk[csi_ahb_gate], NULL, "mx2-camera.0");
227 clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
228 clk_register_clkdev(clk[usb_ipg_gate], "ipg", "fsl-usb2-udc");
229 clk_register_clkdev(clk[usb_ahb_gate], "ahb", "fsl-usb2-udc");
230 clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
231 clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.0");
232 clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.0");
233 clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.1");
234 clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.1");
235 clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.1");
236 clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
237 clk_register_clkdev(clk[usb_ipg_gate], "ipg", "mxc-ehci.2");
238 clk_register_clkdev(clk[usb_ahb_gate], "ahb", "mxc-ehci.2");
239 clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "imx-ssi.0");
240 clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1");
241 clk_register_clkdev(clk[nfc_baud_gate], NULL, "mxc_nand.0");
242 clk_register_clkdev(clk[vpu_baud_gate], "per", "imx-vpu");
243 clk_register_clkdev(clk[vpu_ahb_gate], "ahb", "imx-vpu");
244 clk_register_clkdev(clk[dma_ahb_gate], "ahb", "imx-dma");
245 clk_register_clkdev(clk[dma_ipg_gate], "ipg", "imx-dma");
246 clk_register_clkdev(clk[fec_ipg_gate], "ipg", "imx27-fec.0");
247 clk_register_clkdev(clk[fec_ahb_gate], "ahb", "imx27-fec.0");
248 clk_register_clkdev(clk[wdog_ipg_gate], NULL, "imx2-wdt.0");
249 clk_register_clkdev(clk[i2c1_ipg_gate], NULL, "imx-i2c.0");
250 clk_register_clkdev(clk[i2c2_ipg_gate], NULL, "imx-i2c.1");
251 clk_register_clkdev(clk[owire_ipg_gate], NULL, "mxc_w1.0");
252 clk_register_clkdev(clk[kpp_ipg_gate], NULL, "imx-keypad");
253 clk_register_clkdev(clk[emma_ahb_gate], "ahb", "imx-emma");
254 clk_register_clkdev(clk[emma_ipg_gate], "ipg", "imx-emma");
255 clk_register_clkdev(clk[iim_ipg_gate], "iim", NULL);
256 clk_register_clkdev(clk[gpio_ipg_gate], "gpio", NULL);
257 clk_register_clkdev(clk[brom_ahb_gate], "brom", NULL);
258 clk_register_clkdev(clk[ata_ahb_gate], "ata", NULL);
259 clk_register_clkdev(clk[rtc_ipg_gate], "rtc", NULL);
260 clk_register_clkdev(clk[scc_ipg_gate], "scc", NULL);
261 clk_register_clkdev(clk[cpu_div], "cpu", NULL);
262 clk_register_clkdev(clk[emi_ahb_gate], "emi_ahb" , NULL);
263 clk_register_clkdev(clk[ssi1_baud_gate], "bitrate" , "imx-ssi.0");
264 clk_register_clkdev(clk[ssi2_baud_gate], "bitrate" , "imx-ssi.1");
265
266 mxc_timer_init(NULL, MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR),
267 MX27_INT_GPT1);
268
269 clk_prepare_enable(clk[emi_ahb_gate]);
270
271 return 0;
272}
273
#ifdef CONFIG_OF
/*
 * mx27_clocks_init_dt - device tree entry point for i.MX27 clock setup.
 *
 * Scans the device tree for a "fixed-clock" node that is also compatible
 * with "fsl,imx-osc26m".  If such a node carries a "clock-frequency"
 * property, that value is used as the oscillator reference; otherwise
 * the 26 MHz default is kept.  The actual registration is delegated to
 * mx27_clocks_init().
 */
int __init mx27_clocks_init_dt(void)
{
	struct device_node *of_node;
	u32 fref = 26000000; /* fallback when the DT provides no frequency */

	for_each_compatible_node(of_node, NULL, "fixed-clock") {
		/* stop at the first osc26m node with a usable frequency */
		if (of_device_is_compatible(of_node, "fsl,imx-osc26m") &&
		    !of_property_read_u32(of_node, "clock-frequency", &fref))
			break;
	}

	return mx27_clocks_init(fref);
}
#endif
diff --git a/arch/arm/mach-imx/clk-imx31.c b/arch/arm/mach-imx/clk-imx31.c
new file mode 100644
index 000000000000..a854b9cae5ea
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx31.c
@@ -0,0 +1,182 @@
1/*
2 * Copyright (C) 2012 Sascha Hauer <kernel@pengutronix.de>
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation.
16 */
17
18#include <linux/module.h>
19#include <linux/clk.h>
20#include <linux/clkdev.h>
21#include <linux/io.h>
22#include <linux/err.h>
23
24#include <mach/hardware.h>
25#include <mach/mx31.h>
26#include <mach/common.h>
27
28#include "clk.h"
29#include "crmregs-imx3.h"
30
/* Parent options for the clock muxes inside the i.MX31 CCM. */
static const char *mcu_main_sel[] = { "spll", "mpll", };
static const char *per_sel[] = { "per_div", "ipg", };
static const char *csi_sel[] = { "upll", "spll", };
static const char *fir_sel[] = { "mcu_main", "upll", "spll" };

/*
 * Indices into the clk[] array below, one per clock registered by
 * mx31_clocks_init().  The order is significant only as array index.
 */
enum mx31_clks {
	ckih, ckil, mpll, spll, upll, mcu_main, hsp, ahb, nfc, ipg, per_div,
	per, csi, fir, csi_div, usb_div_pre, usb_div_post, fir_div_pre,
	fir_div_post, sdhc1_gate, sdhc2_gate, gpt_gate, epit1_gate, epit2_gate,
	iim_gate, ata_gate, sdma_gate, cspi3_gate, rng_gate, uart1_gate,
	uart2_gate, ssi1_gate, i2c1_gate, i2c2_gate, i2c3_gate, hantro_gate,
	mstick1_gate, mstick2_gate, csi_gate, rtc_gate, wdog_gate, pwm_gate,
	sim_gate, ect_gate, usb_gate, kpp_gate, ipu_gate, uart3_gate,
	uart4_gate, uart5_gate, owire_gate, ssi2_gate, cspi1_gate, cspi2_gate,
	gacc_gate, emi_gate, rtic_gate, firi_gate, clk_max
};

/* All clocks registered by mx31_clocks_init(), indexed by enum mx31_clks. */
static struct clk *clk[clk_max];
49
50int __init mx31_clocks_init(unsigned long fref)
51{
52 void __iomem *base = MX31_IO_ADDRESS(MX31_CCM_BASE_ADDR);
53 int i;
54
55 clk[ckih] = imx_clk_fixed("ckih", fref);
56 clk[ckil] = imx_clk_fixed("ckil", 32768);
57 clk[mpll] = imx_clk_pllv1("mpll", "ckih", base + MXC_CCM_MPCTL);
58 clk[spll] = imx_clk_pllv1("spll", "ckih", base + MXC_CCM_SRPCTL);
59 clk[upll] = imx_clk_pllv1("upll", "ckih", base + MXC_CCM_UPCTL);
60 clk[mcu_main] = imx_clk_mux("mcu_main", base + MXC_CCM_PMCR0, 31, 1, mcu_main_sel, ARRAY_SIZE(mcu_main_sel));
61 clk[hsp] = imx_clk_divider("hsp", "mcu_main", base + MXC_CCM_PDR0, 11, 3);
62 clk[ahb] = imx_clk_divider("ahb", "mcu_main", base + MXC_CCM_PDR0, 3, 3);
63 clk[nfc] = imx_clk_divider("nfc", "ahb", base + MXC_CCM_PDR0, 8, 3);
64 clk[ipg] = imx_clk_divider("ipg", "ahb", base + MXC_CCM_PDR0, 6, 2);
65 clk[per_div] = imx_clk_divider("per_div", "upll", base + MXC_CCM_PDR0, 16, 5);
66 clk[per] = imx_clk_mux("per", base + MXC_CCM_CCMR, 24, 1, per_sel, ARRAY_SIZE(per_sel));
67 clk[csi] = imx_clk_mux("csi_sel", base + MXC_CCM_CCMR, 25, 1, csi_sel, ARRAY_SIZE(csi_sel));
68 clk[fir] = imx_clk_mux("fir_sel", base + MXC_CCM_CCMR, 11, 2, fir_sel, ARRAY_SIZE(fir_sel));
69 clk[csi_div] = imx_clk_divider("csi_div", "csi_sel", base + MXC_CCM_PDR0, 23, 9);
70 clk[usb_div_pre] = imx_clk_divider("usb_div_pre", "upll", base + MXC_CCM_PDR1, 30, 2);
71 clk[usb_div_post] = imx_clk_divider("usb_div_post", "usb_div_pre", base + MXC_CCM_PDR1, 27, 3);
72 clk[fir_div_pre] = imx_clk_divider("fir_div_pre", "fir_sel", base + MXC_CCM_PDR1, 24, 3);
73 clk[fir_div_post] = imx_clk_divider("fir_div_post", "fir_div_pre", base + MXC_CCM_PDR1, 23, 6);
74 clk[sdhc1_gate] = imx_clk_gate2("sdhc1_gate", "per", base + MXC_CCM_CGR0, 0);
75 clk[sdhc2_gate] = imx_clk_gate2("sdhc2_gate", "per", base + MXC_CCM_CGR0, 2);
76 clk[gpt_gate] = imx_clk_gate2("gpt_gate", "per", base + MXC_CCM_CGR0, 4);
77 clk[epit1_gate] = imx_clk_gate2("epit1_gate", "per", base + MXC_CCM_CGR0, 6);
78 clk[epit2_gate] = imx_clk_gate2("epit2_gate", "per", base + MXC_CCM_CGR0, 8);
79 clk[iim_gate] = imx_clk_gate2("iim_gate", "ipg", base + MXC_CCM_CGR0, 10);
80 clk[ata_gate] = imx_clk_gate2("ata_gate", "ipg", base + MXC_CCM_CGR0, 12);
81 clk[sdma_gate] = imx_clk_gate2("sdma_gate", "ahb", base + MXC_CCM_CGR0, 14);
82 clk[cspi3_gate] = imx_clk_gate2("cspi3_gate", "ipg", base + MXC_CCM_CGR0, 16);
83 clk[rng_gate] = imx_clk_gate2("rng_gate", "ipg", base + MXC_CCM_CGR0, 18);
84 clk[uart1_gate] = imx_clk_gate2("uart1_gate", "per", base + MXC_CCM_CGR0, 20);
85 clk[uart2_gate] = imx_clk_gate2("uart2_gate", "per", base + MXC_CCM_CGR0, 22);
86 clk[ssi1_gate] = imx_clk_gate2("ssi1_gate", "spll", base + MXC_CCM_CGR0, 24);
87 clk[i2c1_gate] = imx_clk_gate2("i2c1_gate", "per", base + MXC_CCM_CGR0, 26);
88 clk[i2c2_gate] = imx_clk_gate2("i2c2_gate", "per", base + MXC_CCM_CGR0, 28);
89 clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "per", base + MXC_CCM_CGR0, 30);
90 clk[hantro_gate] = imx_clk_gate2("hantro_gate", "per", base + MXC_CCM_CGR1, 0);
91 clk[mstick1_gate] = imx_clk_gate2("mstick1_gate", "per", base + MXC_CCM_CGR1, 2);
92 clk[mstick2_gate] = imx_clk_gate2("mstick2_gate", "per", base + MXC_CCM_CGR1, 4);
93 clk[csi_gate] = imx_clk_gate2("csi_gate", "csi_div", base + MXC_CCM_CGR1, 6);
94 clk[rtc_gate] = imx_clk_gate2("rtc_gate", "ipg", base + MXC_CCM_CGR1, 8);
95 clk[wdog_gate] = imx_clk_gate2("wdog_gate", "ipg", base + MXC_CCM_CGR1, 10);
96 clk[pwm_gate] = imx_clk_gate2("pwm_gate", "per", base + MXC_CCM_CGR1, 12);
97 clk[sim_gate] = imx_clk_gate2("sim_gate", "per", base + MXC_CCM_CGR1, 14);
98 clk[ect_gate] = imx_clk_gate2("ect_gate", "per", base + MXC_CCM_CGR1, 16);
99 clk[usb_gate] = imx_clk_gate2("usb_gate", "ahb", base + MXC_CCM_CGR1, 18);
100 clk[kpp_gate] = imx_clk_gate2("kpp_gate", "ipg", base + MXC_CCM_CGR1, 20);
101 clk[ipu_gate] = imx_clk_gate2("ipu_gate", "hsp", base + MXC_CCM_CGR1, 22);
102 clk[uart3_gate] = imx_clk_gate2("uart3_gate", "per", base + MXC_CCM_CGR1, 24);
103 clk[uart4_gate] = imx_clk_gate2("uart4_gate", "per", base + MXC_CCM_CGR1, 26);
104 clk[uart5_gate] = imx_clk_gate2("uart5_gate", "per", base + MXC_CCM_CGR1, 28);
105 clk[owire_gate] = imx_clk_gate2("owire_gate", "per", base + MXC_CCM_CGR1, 30);
106 clk[ssi2_gate] = imx_clk_gate2("ssi2_gate", "spll", base + MXC_CCM_CGR2, 0);
107 clk[cspi1_gate] = imx_clk_gate2("cspi1_gate", "ipg", base + MXC_CCM_CGR2, 2);
108 clk[cspi2_gate] = imx_clk_gate2("cspi2_gate", "ipg", base + MXC_CCM_CGR2, 4);
109 clk[gacc_gate] = imx_clk_gate2("gacc_gate", "per", base + MXC_CCM_CGR2, 6);
110 clk[emi_gate] = imx_clk_gate2("emi_gate", "ahb", base + MXC_CCM_CGR2, 8);
111 clk[rtic_gate] = imx_clk_gate2("rtic_gate", "ahb", base + MXC_CCM_CGR2, 10);
112 clk[firi_gate] = imx_clk_gate2("firi_gate", "upll", base+MXC_CCM_CGR2, 12);
113
114 for (i = 0; i < ARRAY_SIZE(clk); i++)
115 if (IS_ERR(clk[i]))
116 pr_err("imx31 clk %d: register failed with %ld\n",
117 i, PTR_ERR(clk[i]));
118
119 clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
120 clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
121 clk_register_clkdev(clk[cspi1_gate], NULL, "imx31-cspi.0");
122 clk_register_clkdev(clk[cspi2_gate], NULL, "imx31-cspi.1");
123 clk_register_clkdev(clk[cspi3_gate], NULL, "imx31-cspi.2");
124 clk_register_clkdev(clk[pwm_gate], "pwm", NULL);
125 clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
126 clk_register_clkdev(clk[rtc_gate], "rtc", NULL);
127 clk_register_clkdev(clk[epit1_gate], "epit", NULL);
128 clk_register_clkdev(clk[epit2_gate], "epit", NULL);
129 clk_register_clkdev(clk[nfc], NULL, "mxc_nand.0");
130 clk_register_clkdev(clk[ipu_gate], NULL, "ipu-core");
131 clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
132 clk_register_clkdev(clk[kpp_gate], "kpp", NULL);
133 clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.0");
134 clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.0");
135 clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
136 clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.1");
137 clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.1");
138 clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.1");
139 clk_register_clkdev(clk[usb_div_post], "per", "mxc-ehci.2");
140 clk_register_clkdev(clk[usb_gate], "ahb", "mxc-ehci.2");
141 clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
142 clk_register_clkdev(clk[usb_div_post], "per", "fsl-usb2-udc");
143 clk_register_clkdev(clk[usb_gate], "ahb", "fsl-usb2-udc");
144 clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
145 clk_register_clkdev(clk[csi_gate], NULL, "mx3-camera.0");
146 /* i.mx31 has the i.mx21 type uart */
147 clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
148 clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
149 clk_register_clkdev(clk[uart2_gate], "per", "imx21-uart.1");
150 clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.1");
151 clk_register_clkdev(clk[uart3_gate], "per", "imx21-uart.2");
152 clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.2");
153 clk_register_clkdev(clk[uart4_gate], "per", "imx21-uart.3");
154 clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.3");
155 clk_register_clkdev(clk[uart5_gate], "per", "imx21-uart.4");
156 clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.4");
157 clk_register_clkdev(clk[i2c1_gate], NULL, "imx-i2c.0");
158 clk_register_clkdev(clk[i2c2_gate], NULL, "imx-i2c.1");
159 clk_register_clkdev(clk[i2c3_gate], NULL, "imx-i2c.2");
160 clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1.0");
161 clk_register_clkdev(clk[sdhc1_gate], NULL, "mxc-mmc.0");
162 clk_register_clkdev(clk[sdhc2_gate], NULL, "mxc-mmc.1");
163 clk_register_clkdev(clk[ssi1_gate], NULL, "imx-ssi.0");
164 clk_register_clkdev(clk[ssi2_gate], NULL, "imx-ssi.1");
165 clk_register_clkdev(clk[firi_gate], "firi", NULL);
166 clk_register_clkdev(clk[ata_gate], NULL, "pata_imx");
167 clk_register_clkdev(clk[rtic_gate], "rtic", NULL);
168 clk_register_clkdev(clk[rng_gate], "rng", NULL);
169 clk_register_clkdev(clk[sdma_gate], NULL, "imx31-sdma");
170 clk_register_clkdev(clk[iim_gate], "iim", NULL);
171
172 clk_set_parent(clk[csi], clk[upll]);
173 clk_prepare_enable(clk[emi_gate]);
174 clk_prepare_enable(clk[iim_gate]);
175 mx31_revision();
176 clk_disable_unprepare(clk[iim_gate]);
177
178 mxc_timer_init(NULL, MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR),
179 MX31_INT_GPT);
180
181 return 0;
182}
diff --git a/arch/arm/mach-imx/clk-imx35.c b/arch/arm/mach-imx/clk-imx35.c
new file mode 100644
index 000000000000..a9e60bf7dd75
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx35.c
@@ -0,0 +1,278 @@
1/*
2 * Copyright (C) 2012 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 */
9#include <linux/mm.h>
10#include <linux/delay.h>
11#include <linux/clk.h>
12#include <linux/io.h>
13#include <linux/clkdev.h>
14#include <linux/of.h>
15#include <linux/err.h>
16
17#include <mach/hardware.h>
18#include <mach/common.h>
19
20#include "crmregs-imx3.h"
21#include "clk.h"
22
/*
 * One ARM/AHB divider pair per encoding of the consumer clock select
 * field in CCM PDR0 (bits 19..16).  .sel flags the encodings that run
 * the ARM clock from the 3/4 MPLL tap; an .arm of zero marks a
 * reserved encoding.
 */
struct arm_ahb_div {
	unsigned char arm, ahb, sel;
};

static struct arm_ahb_div clk_consumer[] = {
	{ .arm = 1, .ahb = 4, .sel = 0},
	{ .arm = 1, .ahb = 3, .sel = 1},
	{ .arm = 2, .ahb = 2, .sel = 0},
	{ .arm = 0, .ahb = 0, .sel = 0},
	{ .arm = 0, .ahb = 0, .sel = 0},
	{ .arm = 0, .ahb = 0, .sel = 0},
	{ .arm = 4, .ahb = 1, .sel = 0},
	{ .arm = 1, .ahb = 5, .sel = 0},
	{ .arm = 1, .ahb = 8, .sel = 0},
	{ .arm = 1, .ahb = 6, .sel = 1},
	{ .arm = 2, .ahb = 4, .sel = 0},
	{ .arm = 0, .ahb = 0, .sel = 0},
	{ .arm = 0, .ahb = 0, .sel = 0},
	{ .arm = 0, .ahb = 0, .sel = 0},
	{ .arm = 4, .ahb = 2, .sel = 0},
	{ .arm = 0, .ahb = 0, .sel = 0},
};

/*
 * HSP divider tables, indexed by PDR0 bits 21..20; the _532 table is
 * used when the ARM clock runs above 400 MHz, _400 otherwise.  A zero
 * entry marks an illegal selection.
 */
static char hsp_div_532[] = { 4, 8, 3, 0 };
static char hsp_div_400[] = { 3, 6, 3, 0 };

/* common parent pairs for the peripheral clock muxes */
static const char *std_sel[] = {"ppll", "arm"};
static const char *ipg_per_sel[] = {"ahb_per_div", "arm_per_div"};

/*
 * Indices into the clk[] array below, one per clock registered by
 * mx35_clocks_init().
 */
enum mx35_clks {
	ckih, mpll, ppll, mpll_075, arm, hsp, hsp_div, hsp_sel, ahb, ipg,
	arm_per_div, ahb_per_div, ipg_per, uart_sel, uart_div, esdhc_sel,
	esdhc1_div, esdhc2_div, esdhc3_div, spdif_sel, spdif_div_pre,
	spdif_div_post, ssi_sel, ssi1_div_pre, ssi1_div_post, ssi2_div_pre,
	ssi2_div_post, usb_sel, usb_div, nfc_div, asrc_gate, pata_gate,
	audmux_gate, can1_gate, can2_gate, cspi1_gate, cspi2_gate, ect_gate,
	edio_gate, emi_gate, epit1_gate, epit2_gate, esai_gate, esdhc1_gate,
	esdhc2_gate, esdhc3_gate, fec_gate, gpio1_gate, gpio2_gate, gpio3_gate,
	gpt_gate, i2c1_gate, i2c2_gate, i2c3_gate, iomuxc_gate, ipu_gate,
	kpp_gate, mlb_gate, mshc_gate, owire_gate, pwm_gate, rngc_gate,
	rtc_gate, rtic_gate, scc_gate, sdma_gate, spba_gate, spdif_gate,
	ssi1_gate, ssi2_gate, uart1_gate, uart2_gate, uart3_gate, usbotg_gate,
	wdog_gate, max_gate, admux_gate, csi_gate, iim_gate, gpu2d_gate,
	clk_max
};

/* All clocks registered by mx35_clocks_init(), indexed by enum mx35_clks. */
static struct clk *clk[clk_max];
70
71int __init mx35_clocks_init()
72{
73 void __iomem *base = MX35_IO_ADDRESS(MX35_CCM_BASE_ADDR);
74 u32 pdr0, consumer_sel, hsp_sel;
75 struct arm_ahb_div *aad;
76 unsigned char *hsp_div;
77 int i;
78
79 pdr0 = __raw_readl(base + MXC_CCM_PDR0);
80 consumer_sel = (pdr0 >> 16) & 0xf;
81 aad = &clk_consumer[consumer_sel];
82 if (!aad->arm) {
83 pr_err("i.MX35 clk: illegal consumer mux selection 0x%x\n", consumer_sel);
84 /*
85 * We are basically stuck. Continue with a default entry and hope we
86 * get far enough to actually show the above message
87 */
88 aad = &clk_consumer[0];
89 }
90
91 clk[ckih] = imx_clk_fixed("ckih", 24000000);
92 clk[mpll] = imx_clk_pllv1("mpll", "ckih", base + MX35_CCM_MPCTL);
93 clk[ppll] = imx_clk_pllv1("ppll", "ckih", base + MX35_CCM_PPCTL);
94
95 clk[mpll] = imx_clk_fixed_factor("mpll_075", "mpll", 3, 4);
96
97 if (aad->sel)
98 clk[arm] = imx_clk_fixed_factor("arm", "mpll_075", 1, aad->arm);
99 else
100 clk[arm] = imx_clk_fixed_factor("arm", "mpll", 1, aad->arm);
101
102 if (clk_get_rate(clk[arm]) > 400000000)
103 hsp_div = hsp_div_532;
104 else
105 hsp_div = hsp_div_400;
106
107 hsp_sel = (pdr0 >> 20) & 0x3;
108 if (!hsp_div[hsp_sel]) {
109 pr_err("i.MX35 clk: illegal hsp clk selection 0x%x\n", hsp_sel);
110 hsp_sel = 0;
111 }
112
113 clk[hsp] = imx_clk_fixed_factor("hsp", "arm", 1, hsp_div[hsp_sel]);
114
115 clk[ahb] = imx_clk_fixed_factor("ahb", "arm", 1, aad->ahb);
116 clk[ipg] = imx_clk_fixed_factor("ipg", "ahb", 1, 2);
117
118 clk[arm_per_div] = imx_clk_divider("arm_per_div", "arm", base + MX35_CCM_PDR4, 16, 6);
119 clk[ahb_per_div] = imx_clk_divider("ahb_per_div", "ahb", base + MXC_CCM_PDR0, 12, 3);
120 clk[ipg_per] = imx_clk_mux("ipg_per", base + MXC_CCM_PDR0, 26, 1, ipg_per_sel, ARRAY_SIZE(ipg_per_sel));
121
122 clk[uart_sel] = imx_clk_mux("uart_sel", base + MX35_CCM_PDR3, 14, 1, std_sel, ARRAY_SIZE(std_sel));
123 clk[uart_div] = imx_clk_divider("uart_div", "uart_sel", base + MX35_CCM_PDR4, 10, 6);
124
125 clk[esdhc_sel] = imx_clk_mux("esdhc_sel", base + MX35_CCM_PDR4, 9, 1, std_sel, ARRAY_SIZE(std_sel));
126 clk[esdhc1_div] = imx_clk_divider("esdhc1_div", "esdhc_sel", base + MX35_CCM_PDR3, 0, 6);
127 clk[esdhc2_div] = imx_clk_divider("esdhc2_div", "esdhc_sel", base + MX35_CCM_PDR3, 8, 6);
128 clk[esdhc3_div] = imx_clk_divider("esdhc3_div", "esdhc_sel", base + MX35_CCM_PDR3, 16, 6);
129
130 clk[spdif_sel] = imx_clk_mux("spdif_sel", base + MX35_CCM_PDR3, 22, 1, std_sel, ARRAY_SIZE(std_sel));
131 clk[spdif_div_pre] = imx_clk_divider("spdif_div_pre", "spdif_sel", base + MX35_CCM_PDR3, 29, 3); /* divide by 1 not allowed */
132 clk[spdif_div_post] = imx_clk_divider("spdif_div_post", "spdif_div_pre", base + MX35_CCM_PDR3, 23, 6);
133
134 clk[ssi_sel] = imx_clk_mux("ssi_sel", base + MX35_CCM_PDR2, 6, 1, std_sel, ARRAY_SIZE(std_sel));
135 clk[ssi1_div_pre] = imx_clk_divider("ssi1_div_pre", "ssi_sel", base + MX35_CCM_PDR2, 24, 3);
136 clk[ssi1_div_post] = imx_clk_divider("ssi1_div_post", "ssi1_div_pre", base + MX35_CCM_PDR2, 0, 6);
137 clk[ssi2_div_pre] = imx_clk_divider("ssi2_div_pre", "ssi_sel", base + MX35_CCM_PDR2, 27, 3);
138 clk[ssi2_div_post] = imx_clk_divider("ssi2_div_post", "ssi2_div_pre", base + MX35_CCM_PDR2, 8, 6);
139
140 clk[usb_sel] = imx_clk_mux("usb_sel", base + MX35_CCM_PDR4, 9, 1, std_sel, ARRAY_SIZE(std_sel));
141 clk[usb_div] = imx_clk_divider("usb_div", "usb_sel", base + MX35_CCM_PDR4, 22, 6);
142
143 clk[nfc_div] = imx_clk_divider("nfc_div", "ahb", base + MX35_CCM_PDR4, 28, 4);
144
145 clk[asrc_gate] = imx_clk_gate2("asrc_gate", "ipg", base + MX35_CCM_CGR0, 0);
146 clk[pata_gate] = imx_clk_gate2("pata_gate", "ipg", base + MX35_CCM_CGR0, 2);
147 clk[audmux_gate] = imx_clk_gate2("audmux_gate", "ipg", base + MX35_CCM_CGR0, 4);
148 clk[can1_gate] = imx_clk_gate2("can1_gate", "ipg", base + MX35_CCM_CGR0, 6);
149 clk[can2_gate] = imx_clk_gate2("can2_gate", "ipg", base + MX35_CCM_CGR0, 8);
150 clk[cspi1_gate] = imx_clk_gate2("cspi1_gate", "ipg", base + MX35_CCM_CGR0, 10);
151 clk[cspi2_gate] = imx_clk_gate2("cspi2_gate", "ipg", base + MX35_CCM_CGR0, 12);
152 clk[ect_gate] = imx_clk_gate2("ect_gate", "ipg", base + MX35_CCM_CGR0, 14);
153 clk[edio_gate] = imx_clk_gate2("edio_gate", "ipg", base + MX35_CCM_CGR0, 16);
154 clk[emi_gate] = imx_clk_gate2("emi_gate", "ipg", base + MX35_CCM_CGR0, 18);
155 clk[epit1_gate] = imx_clk_gate2("epit1_gate", "ipg", base + MX35_CCM_CGR0, 20);
156 clk[epit2_gate] = imx_clk_gate2("epit2_gate", "ipg", base + MX35_CCM_CGR0, 22);
157 clk[esai_gate] = imx_clk_gate2("esai_gate", "ipg", base + MX35_CCM_CGR0, 24);
158 clk[esdhc1_gate] = imx_clk_gate2("esdhc1_gate", "esdhc1_div", base + MX35_CCM_CGR0, 26);
159 clk[esdhc2_gate] = imx_clk_gate2("esdhc2_gate", "esdhc2_div", base + MX35_CCM_CGR0, 28);
160 clk[esdhc3_gate] = imx_clk_gate2("esdhc3_gate", "esdhc3_div", base + MX35_CCM_CGR0, 30);
161
162 clk[fec_gate] = imx_clk_gate2("fec_gate", "ipg", base + MX35_CCM_CGR1, 0);
163 clk[gpio1_gate] = imx_clk_gate2("gpio1_gate", "ipg", base + MX35_CCM_CGR1, 2);
164 clk[gpio2_gate] = imx_clk_gate2("gpio2_gate", "ipg", base + MX35_CCM_CGR1, 4);
165 clk[gpio3_gate] = imx_clk_gate2("gpio3_gate", "ipg", base + MX35_CCM_CGR1, 6);
166 clk[gpt_gate] = imx_clk_gate2("gpt_gate", "ipg", base + MX35_CCM_CGR1, 8);
167 clk[i2c1_gate] = imx_clk_gate2("i2c1_gate", "ipg_per", base + MX35_CCM_CGR1, 10);
168 clk[i2c2_gate] = imx_clk_gate2("i2c2_gate", "ipg_per", base + MX35_CCM_CGR1, 12);
169 clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "ipg_per", base + MX35_CCM_CGR1, 14);
170 clk[iomuxc_gate] = imx_clk_gate2("iomuxc_gate", "ipg", base + MX35_CCM_CGR1, 16);
171 clk[ipu_gate] = imx_clk_gate2("ipu_gate", "hsp", base + MX35_CCM_CGR1, 18);
172 clk[kpp_gate] = imx_clk_gate2("kpp_gate", "ipg", base + MX35_CCM_CGR1, 20);
173 clk[mlb_gate] = imx_clk_gate2("mlb_gate", "ahb", base + MX35_CCM_CGR1, 22);
174 clk[mshc_gate] = imx_clk_gate2("mshc_gate", "dummy", base + MX35_CCM_CGR1, 24);
175 clk[owire_gate] = imx_clk_gate2("owire_gate", "ipg_per", base + MX35_CCM_CGR1, 26);
176 clk[pwm_gate] = imx_clk_gate2("pwm_gate", "ipg_per", base + MX35_CCM_CGR1, 28);
177 clk[rngc_gate] = imx_clk_gate2("rngc_gate", "ipg", base + MX35_CCM_CGR1, 30);
178
179 clk[rtc_gate] = imx_clk_gate2("rtc_gate", "ipg", base + MX35_CCM_CGR2, 0);
180 clk[rtic_gate] = imx_clk_gate2("rtic_gate", "ahb", base + MX35_CCM_CGR2, 2);
181 clk[scc_gate] = imx_clk_gate2("scc_gate", "ipg", base + MX35_CCM_CGR2, 4);
182 clk[sdma_gate] = imx_clk_gate2("sdma_gate", "ahb", base + MX35_CCM_CGR2, 6);
183 clk[spba_gate] = imx_clk_gate2("spba_gate", "ipg", base + MX35_CCM_CGR2, 8);
184 clk[spdif_gate] = imx_clk_gate2("spdif_gate", "spdif_div_post", base + MX35_CCM_CGR2, 10);
185 clk[ssi1_gate] = imx_clk_gate2("ssi1_gate", "ssi1_div_post", base + MX35_CCM_CGR2, 12);
186 clk[ssi2_gate] = imx_clk_gate2("ssi2_gate", "ssi2_div_post", base + MX35_CCM_CGR2, 14);
187 clk[uart1_gate] = imx_clk_gate2("uart1_gate", "uart_div", base + MX35_CCM_CGR2, 16);
188 clk[uart2_gate] = imx_clk_gate2("uart2_gate", "uart_div", base + MX35_CCM_CGR2, 18);
189 clk[uart3_gate] = imx_clk_gate2("uart3_gate", "uart_div", base + MX35_CCM_CGR2, 20);
190 clk[usbotg_gate] = imx_clk_gate2("usbotg_gate", "ahb", base + MX35_CCM_CGR2, 22);
191 clk[wdog_gate] = imx_clk_gate2("wdog_gate", "ipg", base + MX35_CCM_CGR2, 24);
192 clk[max_gate] = imx_clk_gate2("max_gate", "dummy", base + MX35_CCM_CGR2, 26);
193 clk[admux_gate] = imx_clk_gate2("admux_gate", "ipg", base + MX35_CCM_CGR2, 30);
194
195 clk[csi_gate] = imx_clk_gate2("csi_gate", "ipg", base + MX35_CCM_CGR3, 0);
196 clk[iim_gate] = imx_clk_gate2("iim_gate", "ipg", base + MX35_CCM_CGR3, 2);
197 clk[gpu2d_gate] = imx_clk_gate2("gpu2d_gate", "ahb", base + MX35_CCM_CGR3, 4);
198
199 for (i = 0; i < ARRAY_SIZE(clk); i++)
200 if (IS_ERR(clk[i]))
201 pr_err("i.MX35 clk %d: register failed with %ld\n",
202 i, PTR_ERR(clk[i]));
203
204
205 clk_register_clkdev(clk[pata_gate], NULL, "pata_imx");
206 clk_register_clkdev(clk[can1_gate], NULL, "flexcan.0");
207 clk_register_clkdev(clk[can2_gate], NULL, "flexcan.1");
208 clk_register_clkdev(clk[cspi1_gate], "per", "imx35-cspi.0");
209 clk_register_clkdev(clk[cspi1_gate], "ipg", "imx35-cspi.0");
210 clk_register_clkdev(clk[cspi2_gate], "per", "imx35-cspi.1");
211 clk_register_clkdev(clk[cspi2_gate], "ipg", "imx35-cspi.1");
212 clk_register_clkdev(clk[epit1_gate], NULL, "imx-epit.0");
213 clk_register_clkdev(clk[epit2_gate], NULL, "imx-epit.1");
214 clk_register_clkdev(clk[esdhc1_gate], "per", "sdhci-esdhc-imx35.0");
215 clk_register_clkdev(clk[ipg], "ipg", "sdhci-esdhc-imx35.0");
216 clk_register_clkdev(clk[ahb], "ahb", "sdhci-esdhc-imx35.0");
217 clk_register_clkdev(clk[esdhc2_gate], "per", "sdhci-esdhc-imx35.1");
218 clk_register_clkdev(clk[ipg], "ipg", "sdhci-esdhc-imx35.1");
219 clk_register_clkdev(clk[ahb], "ahb", "sdhci-esdhc-imx35.1");
220 clk_register_clkdev(clk[esdhc3_gate], "per", "sdhci-esdhc-imx35.2");
221 clk_register_clkdev(clk[ipg], "ipg", "sdhci-esdhc-imx35.2");
222 clk_register_clkdev(clk[ahb], "ahb", "sdhci-esdhc-imx35.2");
223 /* i.mx35 has the i.mx27 type fec */
224 clk_register_clkdev(clk[fec_gate], NULL, "imx27-fec.0");
225 clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
226 clk_register_clkdev(clk[ipg], "ipg", "imx-gpt.0");
227 clk_register_clkdev(clk[i2c1_gate], NULL, "imx-i2c.0");
228 clk_register_clkdev(clk[i2c2_gate], NULL, "imx-i2c.1");
229 clk_register_clkdev(clk[i2c3_gate], NULL, "imx-i2c.2");
230 clk_register_clkdev(clk[ipu_gate], NULL, "ipu-core");
231 clk_register_clkdev(clk[ipu_gate], NULL, "mx3_sdc_fb");
232 clk_register_clkdev(clk[owire_gate], NULL, "mxc_w1");
233 clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
234 clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.0");
235 clk_register_clkdev(clk[ssi1_div_post], "per", "imx-ssi.0");
236 clk_register_clkdev(clk[ipg], "ipg", "imx-ssi.1");
237 clk_register_clkdev(clk[ssi2_div_post], "per", "imx-ssi.1");
238 /* i.mx35 has the i.mx21 type uart */
239 clk_register_clkdev(clk[uart1_gate], "per", "imx21-uart.0");
240 clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.0");
241 clk_register_clkdev(clk[uart2_gate], "per", "imx21-uart.1");
242 clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.1");
243 clk_register_clkdev(clk[uart3_gate], "per", "imx21-uart.2");
244 clk_register_clkdev(clk[ipg], "ipg", "imx21-uart.2");
245 clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.0");
246 clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.0");
247 clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.0");
248 clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.1");
249 clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.1");
250 clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.1");
251 clk_register_clkdev(clk[usb_div], "per", "mxc-ehci.2");
252 clk_register_clkdev(clk[ipg], "ipg", "mxc-ehci.2");
253 clk_register_clkdev(clk[usbotg_gate], "ahb", "mxc-ehci.2");
254 clk_register_clkdev(clk[usb_div], "per", "fsl-usb2-udc");
255 clk_register_clkdev(clk[ipg], "ipg", "fsl-usb2-udc");
256 clk_register_clkdev(clk[usbotg_gate], "ahb", "fsl-usb2-udc");
257 clk_register_clkdev(clk[wdog_gate], NULL, "imx2-wdt.0");
258 clk_register_clkdev(clk[nfc_div], NULL, "mxc_nand.0");
259
260 clk_prepare_enable(clk[spba_gate]);
261 clk_prepare_enable(clk[gpio1_gate]);
262 clk_prepare_enable(clk[gpio2_gate]);
263 clk_prepare_enable(clk[gpio3_gate]);
264 clk_prepare_enable(clk[iim_gate]);
265 clk_prepare_enable(clk[emi_gate]);
266
267 imx_print_silicon_rev("i.MX35", mx35_revision());
268
269#ifdef CONFIG_MXC_USE_EPIT
270 epit_timer_init(&epit1_clk,
271 MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
272#else
273 mxc_timer_init(NULL, MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR),
274 MX35_INT_GPT);
275#endif
276
277 return 0;
278}
diff --git a/arch/arm/mach-imx/clk-imx51-imx53.c b/arch/arm/mach-imx/clk-imx51-imx53.c
new file mode 100644
index 000000000000..fcd94f3b0f0e
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx51-imx53.c
@@ -0,0 +1,506 @@
1/*
2 * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 */
9#include <linux/mm.h>
10#include <linux/delay.h>
11#include <linux/clk.h>
12#include <linux/io.h>
13#include <linux/clkdev.h>
14#include <linux/of.h>
15#include <linux/err.h>
16
17#include <mach/hardware.h>
18#include <mach/common.h>
19
20#include "crm-regs-imx5.h"
21#include "clk.h"
22
/* Low-power Audio Playback Mode clock */
static const char *lp_apm_sel[] = { "osc", };

/* This is used multiple times */
static const char *standard_pll_sel[] = { "pll1_sw", "pll2_sw", "pll3_sw", "lp_apm", };
static const char *periph_apm_sel[] = { "pll1_sw", "pll3_sw", "lp_apm", };
static const char *main_bus_sel[] = { "pll2_sw", "periph_apm", };
static const char *per_lp_apm_sel[] = { "main_bus", "lp_apm", };
static const char *per_root_sel[] = { "per_podf", "ipg", };
static const char *esdhc_c_sel[] = { "esdhc_a_podf", "esdhc_b_podf", };
static const char *esdhc_d_sel[] = { "esdhc_a_podf", "esdhc_b_podf", };
/*
 * Fix: the middle parent must be "lp_apm" — the name registered via
 * imx_clk_mux() in mx5_clocks_common_init().  The previous "lp_amp"
 * spelling matched no registered clock, so the ssi_apm mux could never
 * resolve that parent.
 */
static const char *ssi_apm_sels[] = { "ckih1", "lp_apm", "ckih2", };
static const char *ssi_clk_sels[] = { "pll1_sw", "pll2_sw", "pll3_sw", "ssi_apm", };
static const char *ssi3_clk_sels[] = { "ssi1_root_gate", "ssi2_root_gate", };
static const char *ssi_ext1_com_sels[] = { "ssi_ext1_podf", "ssi1_root_gate", };
static const char *ssi_ext2_com_sels[] = { "ssi_ext2_podf", "ssi2_root_gate", };
static const char *emi_slow_sel[] = { "main_bus", "ahb", };
static const char *usb_phy_sel_str[] = { "osc", "usb_phy_podf", };
static const char *mx51_ipu_di0_sel[] = { "di_pred", "osc", "ckih1", "tve_di", };
static const char *mx53_ipu_di0_sel[] = { "di_pred", "osc", "ckih1", "di_pll4_podf", "dummy", "ldb_di0", };
static const char *mx53_ldb_di0_sel[] = { "pll3_sw", "pll4_sw", };
static const char *mx51_ipu_di1_sel[] = { "di_pred", "osc", "ckih1", "tve_di", "ipp_di1", };
static const char *mx53_ipu_di1_sel[] = { "di_pred", "osc", "ckih1", "tve_di", "ipp_di1", "ldb_di1", };
static const char *mx53_ldb_di1_sel[] = { "pll3_sw", "pll4_sw", };
static const char *mx51_tve_ext_sel[] = { "osc", "ckih1", };
static const char *mx53_tve_ext_sel[] = { "pll4_sw", "ckih1", };
static const char *tve_sel[] = { "tve_pred", "tve_ext_sel", };
static const char *ipu_sel[] = { "axi_a", "axi_b", "emi_slow_gate", "ahb", };
static const char *vpu_sel[] = { "axi_a", "axi_b", "emi_slow_gate", "ahb", };
52
/*
 * Indices into the clk[] array below — one slot per clock provided by
 * the i.MX51/i.MX53 CCM.  The enumerators are used purely as array
 * indices, so their order is arbitrary but must not be changed once
 * in use; clk_max must remain the last entry as it sizes the array.
 */
enum imx5_clks {
	dummy, ckil, osc, ckih1, ckih2, ahb, ipg, axi_a, axi_b, uart_pred,
	uart_root, esdhc_a_pred, esdhc_b_pred, esdhc_c_s, esdhc_d_s,
	emi_sel, emi_slow_podf, nfc_podf, ecspi_pred, ecspi_podf, usboh3_pred,
	usboh3_podf, usb_phy_pred, usb_phy_podf, cpu_podf, di_pred, tve_di,
	tve_s, uart1_ipg_gate, uart1_per_gate, uart2_ipg_gate,
	uart2_per_gate, uart3_ipg_gate, uart3_per_gate, i2c1_gate, i2c2_gate,
	gpt_ipg_gate, pwm1_ipg_gate, pwm1_hf_gate, pwm2_ipg_gate, pwm2_hf_gate,
	gpt_gate, fec_gate, usboh3_per_gate, esdhc1_ipg_gate, esdhc2_ipg_gate,
	esdhc3_ipg_gate, esdhc4_ipg_gate, ssi1_ipg_gate, ssi2_ipg_gate,
	ssi3_ipg_gate, ecspi1_ipg_gate, ecspi1_per_gate, ecspi2_ipg_gate,
	ecspi2_per_gate, cspi_ipg_gate, sdma_gate, emi_slow_gate, ipu_s,
	ipu_gate, nfc_gate, ipu_di1_gate, vpu_s, vpu_gate,
	vpu_reference_gate, uart4_ipg_gate, uart4_per_gate, uart5_ipg_gate,
	uart5_per_gate, tve_gate, tve_pred, esdhc1_per_gate, esdhc2_per_gate,
	esdhc3_per_gate, esdhc4_per_gate, usb_phy_gate, hsi2c_gate,
	mipi_hsc1_gate, mipi_hsc2_gate, mipi_esc_gate, mipi_hsp_gate,
	ldb_di1_div_3_5, ldb_di1_div, ldb_di0_div_3_5, ldb_di0_div,
	ldb_di1_gate, can2_serial_gate, can2_ipg_gate, i2c3_gate, lp_apm,
	periph_apm, main_bus, ahb_max, aips_tz1, aips_tz2, tmax1, tmax2,
	tmax3, spba, uart_sel, esdhc_a_sel, esdhc_b_sel, esdhc_a_podf,
	esdhc_b_podf, ecspi_sel, usboh3_sel, usb_phy_sel, iim_gate,
	usboh3_gate, emi_fast_gate, ipu_di0_gate,gpc_dvfs, pll1_sw, pll2_sw,
	pll3_sw, ipu_di0_sel, ipu_di1_sel, tve_ext_sel, mx51_mipi, pll4_sw,
	ldb_di1_sel, di_pll4_podf, ldb_di0_sel, ldb_di0_gate, usb_phy1_gate,
	usb_phy2_gate, per_lp_apm, per_pred1, per_pred2, per_podf, per_root,
	ssi_apm, ssi1_root_sel, ssi2_root_sel, ssi3_root_sel, ssi_ext1_sel,
	ssi_ext2_sel, ssi_ext1_com_sel, ssi_ext2_com_sel, ssi1_root_pred,
	ssi1_root_podf, ssi2_root_pred, ssi2_root_podf, ssi_ext1_pred,
	ssi_ext1_podf, ssi_ext2_pred, ssi_ext2_podf, ssi1_root_gate,
	ssi2_root_gate, ssi3_root_gate, ssi_ext1_gate, ssi_ext2_gate,
	clk_max
};

/* All registered clocks, indexed by enum imx5_clks. */
static struct clk *clk[clk_max];
88
/*
 * Register the clocks shared between i.MX51 and i.MX53 and install the
 * common clkdev lookups.
 *
 * @rate_ckil:  rate of the ckil (32 kHz class) fixed clock
 * @rate_osc:   rate of the main oscillator
 * @rate_ckih1: rate of the external high-frequency clock 1
 * @rate_ckih2: rate of the external high-frequency clock 2
 *
 * The SoC-specific clocks referenced here only by name (e.g. "pll1_sw",
 * "pll3_sw") are registered by the mx51/mx53 callers before this runs;
 * the common clock framework resolves parents by name, so registration
 * order does not matter.
 */
static void __init mx5_clocks_common_init(unsigned long rate_ckil,
		unsigned long rate_osc, unsigned long rate_ckih1,
		unsigned long rate_ckih2)
{
	int i;

	/* Fixed-rate roots; rates are supplied by the board / device tree. */
	clk[dummy] = imx_clk_fixed("dummy", 0);
	clk[ckil] = imx_clk_fixed("ckil", rate_ckil);
	clk[osc] = imx_clk_fixed("osc", rate_osc);
	clk[ckih1] = imx_clk_fixed("ckih1", rate_ckih1);
	clk[ckih2] = imx_clk_fixed("ckih2", rate_ckih2);

	/* Bus and low-power muxes/dividers (CCM CCSR/CBCMR/CBCDR). */
	clk[lp_apm] = imx_clk_mux("lp_apm", MXC_CCM_CCSR, 9, 1,
				lp_apm_sel, ARRAY_SIZE(lp_apm_sel));
	clk[periph_apm] = imx_clk_mux("periph_apm", MXC_CCM_CBCMR, 12, 2,
				periph_apm_sel, ARRAY_SIZE(periph_apm_sel));
	clk[main_bus] = imx_clk_mux("main_bus", MXC_CCM_CBCDR, 25, 1,
				main_bus_sel, ARRAY_SIZE(main_bus_sel));
	clk[per_lp_apm] = imx_clk_mux("per_lp_apm", MXC_CCM_CBCDR, 1, 1,
				per_lp_apm_sel, ARRAY_SIZE(per_lp_apm_sel));
	clk[per_pred1] = imx_clk_divider("per_pred1", "per_lp_apm", MXC_CCM_CBCDR, 6, 2);
	clk[per_pred2] = imx_clk_divider("per_pred2", "per_pred1", MXC_CCM_CBCDR, 3, 3);
	clk[per_podf] = imx_clk_divider("per_podf", "per_pred2", MXC_CCM_CBCDR, 0, 3);
	/*
	 * NOTE(review): a mux field width of 0 yields an empty bitfield,
	 * and per_lp_apm above already uses CBCDR bit 1 — these shift/width
	 * arguments look suspicious; verify against the i.MX5 CCM CBCDR/
	 * CBCMR register layout in the reference manual.
	 */
	clk[per_root] = imx_clk_mux("per_root", MXC_CCM_CBCDR, 1, 0,
				per_root_sel, ARRAY_SIZE(per_root_sel));
	clk[ahb] = imx_clk_divider("ahb", "main_bus", MXC_CCM_CBCDR, 10, 3);
	clk[ahb_max] = imx_clk_gate2("ahb_max", "ahb", MXC_CCM_CCGR0, 28);
	clk[aips_tz1] = imx_clk_gate2("aips_tz1", "ahb", MXC_CCM_CCGR0, 24);
	clk[aips_tz2] = imx_clk_gate2("aips_tz2", "ahb", MXC_CCM_CCGR0, 26);
	clk[tmax1] = imx_clk_gate2("tmax1", "ahb", MXC_CCM_CCGR1, 0);
	clk[tmax2] = imx_clk_gate2("tmax2", "ahb", MXC_CCM_CCGR1, 2);
	clk[tmax3] = imx_clk_gate2("tmax3", "ahb", MXC_CCM_CCGR1, 4);
	clk[spba] = imx_clk_gate2("spba", "ipg", MXC_CCM_CCGR5, 0);
	clk[ipg] = imx_clk_divider("ipg", "ahb", MXC_CCM_CBCDR, 8, 2);
	clk[axi_a] = imx_clk_divider("axi_a", "main_bus", MXC_CCM_CBCDR, 16, 3);
	clk[axi_b] = imx_clk_divider("axi_b", "main_bus", MXC_CCM_CBCDR, 19, 3);

	/* UART clock tree: mux -> pre-divider -> post-divider. */
	clk[uart_sel] = imx_clk_mux("uart_sel", MXC_CCM_CSCMR1, 24, 2,
				standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
	clk[uart_pred] = imx_clk_divider("uart_pred", "uart_sel", MXC_CCM_CSCDR1, 3, 3);
	clk[uart_root] = imx_clk_divider("uart_root", "uart_pred", MXC_CCM_CSCDR1, 0, 3);

	/* eSDHC: two divider chains (a/b) plus two muxes (c/d) over them. */
	clk[esdhc_a_sel] = imx_clk_mux("esdhc_a_sel", MXC_CCM_CSCMR1, 20, 2,
				standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
	clk[esdhc_b_sel] = imx_clk_mux("esdhc_b_sel", MXC_CCM_CSCMR1, 16, 2,
				standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
	clk[esdhc_a_pred] = imx_clk_divider("esdhc_a_pred", "esdhc_a_sel", MXC_CCM_CSCDR1, 16, 3);
	clk[esdhc_a_podf] = imx_clk_divider("esdhc_a_podf", "esdhc_a_pred", MXC_CCM_CSCDR1, 11, 3);
	clk[esdhc_b_pred] = imx_clk_divider("esdhc_b_pred", "esdhc_b_sel", MXC_CCM_CSCDR1, 22, 3);
	clk[esdhc_b_podf] = imx_clk_divider("esdhc_b_podf", "esdhc_b_pred", MXC_CCM_CSCDR1, 19, 3);
	clk[esdhc_c_s] = imx_clk_mux("esdhc_c_sel", MXC_CCM_CSCMR1, 19, 1, esdhc_c_sel, ARRAY_SIZE(esdhc_c_sel));
	clk[esdhc_d_s] = imx_clk_mux("esdhc_d_sel", MXC_CCM_CSCMR1, 18, 1, esdhc_d_sel, ARRAY_SIZE(esdhc_d_sel));

	/* EMI / NFC / eCSPI / USB / misc peripheral trees. */
	clk[emi_sel] = imx_clk_mux("emi_sel", MXC_CCM_CBCDR, 26, 1,
				emi_slow_sel, ARRAY_SIZE(emi_slow_sel));
	clk[emi_slow_podf] = imx_clk_divider("emi_slow_podf", "emi_sel", MXC_CCM_CBCDR, 22, 3);
	clk[nfc_podf] = imx_clk_divider("nfc_podf", "emi_slow_podf", MXC_CCM_CBCDR, 13, 3);
	clk[ecspi_sel] = imx_clk_mux("ecspi_sel", MXC_CCM_CSCMR1, 4, 2,
				standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
	clk[ecspi_pred] = imx_clk_divider("ecspi_pred", "ecspi_sel", MXC_CCM_CSCDR2, 25, 3);
	clk[ecspi_podf] = imx_clk_divider("ecspi_podf", "ecspi_pred", MXC_CCM_CSCDR2, 19, 6);
	clk[usboh3_sel] = imx_clk_mux("usboh3_sel", MXC_CCM_CSCMR1, 22, 2,
				standard_pll_sel, ARRAY_SIZE(standard_pll_sel));
	clk[usboh3_pred] = imx_clk_divider("usboh3_pred", "usboh3_sel", MXC_CCM_CSCDR1, 8, 3);
	clk[usboh3_podf] = imx_clk_divider("usboh3_podf", "usboh3_pred", MXC_CCM_CSCDR1, 6, 2);
	clk[usb_phy_pred] = imx_clk_divider("usb_phy_pred", "pll3_sw", MXC_CCM_CDCDR, 3, 3);
	clk[usb_phy_podf] = imx_clk_divider("usb_phy_podf", "usb_phy_pred", MXC_CCM_CDCDR, 0, 3);
	clk[usb_phy_sel] = imx_clk_mux("usb_phy_sel", MXC_CCM_CSCMR1, 26, 1,
				usb_phy_sel_str, ARRAY_SIZE(usb_phy_sel_str));
	clk[cpu_podf] = imx_clk_divider("cpu_podf", "pll1_sw", MXC_CCM_CACRR, 0, 3);
	clk[di_pred] = imx_clk_divider("di_pred", "pll3_sw", MXC_CCM_CDCDR, 6, 3);
	clk[tve_di] = imx_clk_fixed("tve_di", 65000000); /* FIXME */
	clk[tve_s] = imx_clk_mux("tve_sel", MXC_CCM_CSCMR1, 7, 1, tve_sel, ARRAY_SIZE(tve_sel));

	/* Peripheral clock gates (CCGR0..CCGR7). */
	clk[iim_gate] = imx_clk_gate2("iim_gate", "ipg", MXC_CCM_CCGR0, 30);
	clk[uart1_ipg_gate] = imx_clk_gate2("uart1_ipg_gate", "ipg", MXC_CCM_CCGR1, 6);
	clk[uart1_per_gate] = imx_clk_gate2("uart1_per_gate", "uart_root", MXC_CCM_CCGR1, 8);
	clk[uart2_ipg_gate] = imx_clk_gate2("uart2_ipg_gate", "ipg", MXC_CCM_CCGR1, 10);
	clk[uart2_per_gate] = imx_clk_gate2("uart2_per_gate", "uart_root", MXC_CCM_CCGR1, 12);
	clk[uart3_ipg_gate] = imx_clk_gate2("uart3_ipg_gate", "ipg", MXC_CCM_CCGR1, 14);
	clk[uart3_per_gate] = imx_clk_gate2("uart3_per_gate", "uart_root", MXC_CCM_CCGR1, 16);
	clk[i2c1_gate] = imx_clk_gate2("i2c1_gate", "per_root", MXC_CCM_CCGR1, 18);
	clk[i2c2_gate] = imx_clk_gate2("i2c2_gate", "per_root", MXC_CCM_CCGR1, 20);
	clk[gpt_ipg_gate] = imx_clk_gate2("gpt_ipg_gate", "ipg", MXC_CCM_CCGR2, 20);
	clk[pwm1_ipg_gate] = imx_clk_gate2("pwm1_ipg_gate", "ipg", MXC_CCM_CCGR2, 10);
	clk[pwm1_hf_gate] = imx_clk_gate2("pwm1_hf_gate", "ipg", MXC_CCM_CCGR2, 12);
	clk[pwm2_ipg_gate] = imx_clk_gate2("pwm2_ipg_gate", "ipg", MXC_CCM_CCGR2, 14);
	clk[pwm2_hf_gate] = imx_clk_gate2("pwm2_hf_gate", "ipg", MXC_CCM_CCGR2, 16);
	clk[gpt_gate] = imx_clk_gate2("gpt_gate", "ipg", MXC_CCM_CCGR2, 18);
	clk[fec_gate] = imx_clk_gate2("fec_gate", "ipg", MXC_CCM_CCGR2, 24);
	clk[usboh3_gate] = imx_clk_gate2("usboh3_gate", "ipg", MXC_CCM_CCGR2, 26);
	clk[usboh3_per_gate] = imx_clk_gate2("usboh3_per_gate", "usboh3_podf", MXC_CCM_CCGR2, 28);
	clk[esdhc1_ipg_gate] = imx_clk_gate2("esdhc1_ipg_gate", "ipg", MXC_CCM_CCGR3, 0);
	clk[esdhc2_ipg_gate] = imx_clk_gate2("esdhc2_ipg_gate", "ipg", MXC_CCM_CCGR3, 4);
	clk[esdhc3_ipg_gate] = imx_clk_gate2("esdhc3_ipg_gate", "ipg", MXC_CCM_CCGR3, 8);
	clk[esdhc4_ipg_gate] = imx_clk_gate2("esdhc4_ipg_gate", "ipg", MXC_CCM_CCGR3, 12);
	clk[ssi1_ipg_gate] = imx_clk_gate2("ssi1_ipg_gate", "ipg", MXC_CCM_CCGR3, 16);
	clk[ssi2_ipg_gate] = imx_clk_gate2("ssi2_ipg_gate", "ipg", MXC_CCM_CCGR3, 20);
	clk[ssi3_ipg_gate] = imx_clk_gate2("ssi3_ipg_gate", "ipg", MXC_CCM_CCGR3, 24);
	clk[ecspi1_ipg_gate] = imx_clk_gate2("ecspi1_ipg_gate", "ipg", MXC_CCM_CCGR4, 18);
	clk[ecspi1_per_gate] = imx_clk_gate2("ecspi1_per_gate", "ecspi_podf", MXC_CCM_CCGR4, 20);
	clk[ecspi2_ipg_gate] = imx_clk_gate2("ecspi2_ipg_gate", "ipg", MXC_CCM_CCGR4, 22);
	clk[ecspi2_per_gate] = imx_clk_gate2("ecspi2_per_gate", "ecspi_podf", MXC_CCM_CCGR4, 24);
	clk[cspi_ipg_gate] = imx_clk_gate2("cspi_ipg_gate", "ipg", MXC_CCM_CCGR4, 26);
	clk[sdma_gate] = imx_clk_gate2("sdma_gate", "ipg", MXC_CCM_CCGR4, 30);
	clk[emi_fast_gate] = imx_clk_gate2("emi_fast_gate", "dummy", MXC_CCM_CCGR5, 14);
	clk[emi_slow_gate] = imx_clk_gate2("emi_slow_gate", "emi_slow_podf", MXC_CCM_CCGR5, 16);
	clk[ipu_s] = imx_clk_mux("ipu_sel", MXC_CCM_CBCMR, 6, 2, ipu_sel, ARRAY_SIZE(ipu_sel));
	clk[ipu_gate] = imx_clk_gate2("ipu_gate", "ipu_sel", MXC_CCM_CCGR5, 10);
	clk[nfc_gate] = imx_clk_gate2("nfc_gate", "nfc_podf", MXC_CCM_CCGR5, 20);
	clk[ipu_di0_gate] = imx_clk_gate2("ipu_di0_gate", "ipu_di0_sel", MXC_CCM_CCGR6, 10);
	clk[ipu_di1_gate] = imx_clk_gate2("ipu_di1_gate", "ipu_di1_sel", MXC_CCM_CCGR6, 12);
	clk[vpu_s] = imx_clk_mux("vpu_sel", MXC_CCM_CBCMR, 14, 2, vpu_sel, ARRAY_SIZE(vpu_sel));
	clk[vpu_gate] = imx_clk_gate2("vpu_gate", "vpu_sel", MXC_CCM_CCGR5, 6);
	clk[vpu_reference_gate] = imx_clk_gate2("vpu_reference_gate", "osc", MXC_CCM_CCGR5, 8);
	clk[uart4_ipg_gate] = imx_clk_gate2("uart4_ipg_gate", "ipg", MXC_CCM_CCGR7, 8);
	clk[uart4_per_gate] = imx_clk_gate2("uart4_per_gate", "uart_root", MXC_CCM_CCGR7, 10);
	clk[uart5_ipg_gate] = imx_clk_gate2("uart5_ipg_gate", "ipg", MXC_CCM_CCGR7, 12);
	clk[uart5_per_gate] = imx_clk_gate2("uart5_per_gate", "uart_root", MXC_CCM_CCGR7, 14);
	clk[gpc_dvfs] = imx_clk_gate2("gpc_dvfs", "dummy", MXC_CCM_CCGR5, 24);

	/* SSI clock tree: muxes, pre/post dividers and root gates. */
	clk[ssi_apm] = imx_clk_mux("ssi_apm", MXC_CCM_CSCMR1, 8, 2, ssi_apm_sels, ARRAY_SIZE(ssi_apm_sels));
	clk[ssi1_root_sel] = imx_clk_mux("ssi1_root_sel", MXC_CCM_CSCMR1, 14, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
	clk[ssi2_root_sel] = imx_clk_mux("ssi2_root_sel", MXC_CCM_CSCMR1, 12, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
	clk[ssi3_root_sel] = imx_clk_mux("ssi3_root_sel", MXC_CCM_CSCMR1, 11, 1, ssi3_clk_sels, ARRAY_SIZE(ssi3_clk_sels));
	clk[ssi_ext1_sel] = imx_clk_mux("ssi_ext1_sel", MXC_CCM_CSCMR1, 28, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
	clk[ssi_ext2_sel] = imx_clk_mux("ssi_ext2_sel", MXC_CCM_CSCMR1, 30, 2, ssi_clk_sels, ARRAY_SIZE(ssi_clk_sels));
	clk[ssi_ext1_com_sel] = imx_clk_mux("ssi_ext1_com_sel", MXC_CCM_CSCMR1, 0, 1, ssi_ext1_com_sels, ARRAY_SIZE(ssi_ext1_com_sels));
	clk[ssi_ext2_com_sel] = imx_clk_mux("ssi_ext2_com_sel", MXC_CCM_CSCMR1, 1, 1, ssi_ext2_com_sels, ARRAY_SIZE(ssi_ext2_com_sels));
	clk[ssi1_root_pred] = imx_clk_divider("ssi1_root_pred", "ssi1_root_sel", MXC_CCM_CS1CDR, 6, 3);
	clk[ssi1_root_podf] = imx_clk_divider("ssi1_root_podf", "ssi1_root_pred", MXC_CCM_CS1CDR, 0, 6);
	clk[ssi2_root_pred] = imx_clk_divider("ssi2_root_pred", "ssi2_root_sel", MXC_CCM_CS2CDR, 6, 3);
	clk[ssi2_root_podf] = imx_clk_divider("ssi2_root_podf", "ssi2_root_pred", MXC_CCM_CS2CDR, 0, 6);
	clk[ssi_ext1_pred] = imx_clk_divider("ssi_ext1_pred", "ssi_ext1_sel", MXC_CCM_CS1CDR, 22, 3);
	clk[ssi_ext1_podf] = imx_clk_divider("ssi_ext1_podf", "ssi_ext1_pred", MXC_CCM_CS1CDR, 16, 6);
	clk[ssi_ext2_pred] = imx_clk_divider("ssi_ext2_pred", "ssi_ext2_sel", MXC_CCM_CS2CDR, 22, 3);
	clk[ssi_ext2_podf] = imx_clk_divider("ssi_ext2_podf", "ssi_ext2_pred", MXC_CCM_CS2CDR, 16, 6);
	clk[ssi1_root_gate] = imx_clk_gate2("ssi1_root_gate", "ssi1_root_podf", MXC_CCM_CCGR3, 18);
	clk[ssi2_root_gate] = imx_clk_gate2("ssi2_root_gate", "ssi2_root_podf", MXC_CCM_CCGR3, 22);
	clk[ssi3_root_gate] = imx_clk_gate2("ssi3_root_gate", "ssi3_root_sel", MXC_CCM_CCGR3, 26);
	clk[ssi_ext1_gate] = imx_clk_gate2("ssi_ext1_gate", "ssi_ext1_com_sel", MXC_CCM_CCGR3, 28);
	clk[ssi_ext2_gate] = imx_clk_gate2("ssi_ext2_gate", "ssi_ext2_com_sel", MXC_CCM_CCGR3, 30);

	/* Registration failures are reported but not fatal. */
	for (i = 0; i < ARRAY_SIZE(clk); i++)
		if (IS_ERR(clk[i]))
			pr_err("i.MX5 clk %d: register failed with %ld\n",
				i, PTR_ERR(clk[i]));

	/* clkdev lookups shared by i.MX51 and i.MX53 platform devices. */
	clk_register_clkdev(clk[gpt_gate], "per", "imx-gpt.0");
	clk_register_clkdev(clk[gpt_ipg_gate], "ipg", "imx-gpt.0");
	clk_register_clkdev(clk[uart1_per_gate], "per", "imx21-uart.0");
	clk_register_clkdev(clk[uart1_ipg_gate], "ipg", "imx21-uart.0");
	clk_register_clkdev(clk[uart2_per_gate], "per", "imx21-uart.1");
	clk_register_clkdev(clk[uart2_ipg_gate], "ipg", "imx21-uart.1");
	clk_register_clkdev(clk[uart3_per_gate], "per", "imx21-uart.2");
	clk_register_clkdev(clk[uart3_ipg_gate], "ipg", "imx21-uart.2");
	clk_register_clkdev(clk[uart4_per_gate], "per", "imx21-uart.3");
	clk_register_clkdev(clk[uart4_ipg_gate], "ipg", "imx21-uart.3");
	clk_register_clkdev(clk[uart5_per_gate], "per", "imx21-uart.4");
	clk_register_clkdev(clk[uart5_ipg_gate], "ipg", "imx21-uart.4");
	clk_register_clkdev(clk[ecspi1_per_gate], "per", "imx51-ecspi.0");
	clk_register_clkdev(clk[ecspi1_ipg_gate], "ipg", "imx51-ecspi.0");
	clk_register_clkdev(clk[ecspi2_per_gate], "per", "imx51-ecspi.1");
	clk_register_clkdev(clk[ecspi2_ipg_gate], "ipg", "imx51-ecspi.1");
	clk_register_clkdev(clk[cspi_ipg_gate], NULL, "imx51-cspi.0");
	clk_register_clkdev(clk[pwm1_ipg_gate], "pwm", "mxc_pwm.0");
	clk_register_clkdev(clk[pwm2_ipg_gate], "pwm", "mxc_pwm.1");
	clk_register_clkdev(clk[i2c1_gate], NULL, "imx-i2c.0");
	clk_register_clkdev(clk[i2c2_gate], NULL, "imx-i2c.1");
	clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.0");
	clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.0");
	clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.0");
	clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.1");
	clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.1");
	clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.1");
	clk_register_clkdev(clk[usboh3_per_gate], "per", "mxc-ehci.2");
	clk_register_clkdev(clk[usboh3_gate], "ipg", "mxc-ehci.2");
	clk_register_clkdev(clk[usboh3_gate], "ahb", "mxc-ehci.2");
	clk_register_clkdev(clk[usboh3_per_gate], "per", "fsl-usb2-udc");
	clk_register_clkdev(clk[usboh3_gate], "ipg", "fsl-usb2-udc");
	clk_register_clkdev(clk[usboh3_gate], "ahb", "fsl-usb2-udc");
	clk_register_clkdev(clk[nfc_gate], NULL, "mxc_nand");
	clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "imx-ssi.0");
	clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "imx-ssi.1");
	clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "imx-ssi.2");
	clk_register_clkdev(clk[ssi_ext1_gate], "ssi_ext1", NULL);
	clk_register_clkdev(clk[ssi_ext2_gate], "ssi_ext2", NULL);
	clk_register_clkdev(clk[sdma_gate], NULL, "imx35-sdma");
	clk_register_clkdev(clk[cpu_podf], "cpu", NULL);
	clk_register_clkdev(clk[iim_gate], "iim", NULL);
	clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.0");
	clk_register_clkdev(clk[dummy], NULL, "imx2-wdt.1");
	clk_register_clkdev(clk[dummy], NULL, "imx-keypad");
	clk_register_clkdev(clk[tve_gate], NULL, "imx-tve.0");
	clk_register_clkdev(clk[ipu_di1_gate], "di1", "imx-tve.0");

	/* Set SDHC parents to be PLL2 */
	clk_set_parent(clk[esdhc_a_sel], clk[pll2_sw]);
	clk_set_parent(clk[esdhc_b_sel], clk[pll2_sw]);

	/* move usb phy clk to 24MHz */
	clk_set_parent(clk[usb_phy_sel], clk[osc]);

	/* Clocks that must stay on for basic system operation. */
	clk_prepare_enable(clk[gpc_dvfs]);
	clk_prepare_enable(clk[ahb_max]); /* esdhc3 */
	clk_prepare_enable(clk[aips_tz1]);
	clk_prepare_enable(clk[aips_tz2]); /* fec */
	clk_prepare_enable(clk[spba]);
	clk_prepare_enable(clk[emi_fast_gate]); /* fec */
	clk_prepare_enable(clk[tmax1]);
	clk_prepare_enable(clk[tmax2]); /* esdhc2, fec */
	clk_prepare_enable(clk[tmax3]); /* esdhc1, esdhc4 */
}
300
/*
 * Register the i.MX51-specific clocks, run the common i.MX5 setup and
 * install i.MX51-specific clkdev lookups.
 *
 * @rate_ckil/@rate_osc/@rate_ckih1/@rate_ckih2: rates of the fixed
 * input clocks, as provided by the board or device tree.
 *
 * Always returns 0.
 */
int __init mx51_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
			unsigned long rate_ckih1, unsigned long rate_ckih2)
{
	int i;

	/* PLLs must exist before the common code references them by name. */
	clk[pll1_sw] = imx_clk_pllv2("pll1_sw", "osc", MX51_DPLL1_BASE);
	clk[pll2_sw] = imx_clk_pllv2("pll2_sw", "osc", MX51_DPLL2_BASE);
	clk[pll3_sw] = imx_clk_pllv2("pll3_sw", "osc", MX51_DPLL3_BASE);
	/* Display/TVE/eSDHC/USB-PHY routing differs between i.MX51 and i.MX53. */
	clk[ipu_di0_sel] = imx_clk_mux("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3,
				mx51_ipu_di0_sel, ARRAY_SIZE(mx51_ipu_di0_sel));
	clk[ipu_di1_sel] = imx_clk_mux("ipu_di1_sel", MXC_CCM_CSCMR2, 29, 3,
				mx51_ipu_di1_sel, ARRAY_SIZE(mx51_ipu_di1_sel));
	clk[tve_ext_sel] = imx_clk_mux("tve_ext_sel", MXC_CCM_CSCMR1, 6, 1,
				mx51_tve_ext_sel, ARRAY_SIZE(mx51_tve_ext_sel));
	clk[tve_gate] = imx_clk_gate2("tve_gate", "tve_sel", MXC_CCM_CCGR2, 30);
	clk[tve_pred] = imx_clk_divider("tve_pred", "pll3_sw", MXC_CCM_CDCDR, 28, 3);
	clk[esdhc1_per_gate] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
	clk[esdhc2_per_gate] = imx_clk_gate2("esdhc2_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 6);
	clk[esdhc3_per_gate] = imx_clk_gate2("esdhc3_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 10);
	clk[esdhc4_per_gate] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
	clk[usb_phy_gate] = imx_clk_gate2("usb_phy_gate", "usb_phy_sel", MXC_CCM_CCGR2, 0);
	clk[hsi2c_gate] = imx_clk_gate2("hsi2c_gate", "ipg", MXC_CCM_CCGR1, 22);
	clk[mipi_hsc1_gate] = imx_clk_gate2("mipi_hsc1_gate", "ipg", MXC_CCM_CCGR4, 6);
	clk[mipi_hsc2_gate] = imx_clk_gate2("mipi_hsc2_gate", "ipg", MXC_CCM_CCGR4, 8);
	clk[mipi_esc_gate] = imx_clk_gate2("mipi_esc_gate", "ipg", MXC_CCM_CCGR4, 10);
	clk[mipi_hsp_gate] = imx_clk_gate2("mipi_hsp_gate", "ipg", MXC_CCM_CCGR4, 12);

	/*
	 * Only the entries registered above are checked here; the common
	 * init below performs the same check for the shared clocks.
	 */
	for (i = 0; i < ARRAY_SIZE(clk); i++)
		if (IS_ERR(clk[i]))
			pr_err("i.MX51 clk %d: register failed with %ld\n",
				i, PTR_ERR(clk[i]));

	mx5_clocks_common_init(rate_ckil, rate_osc, rate_ckih1, rate_ckih2);

	clk_register_clkdev(clk[hsi2c_gate], NULL, "imx-i2c.2");
	/*
	 * NOTE(review): clk[mx51_mipi] is never assigned anywhere in this
	 * file, so this registers a NULL clock for "mipi_hsp" — verify
	 * whether mipi_hsp_gate (or similar) was intended here.
	 */
	clk_register_clkdev(clk[mx51_mipi], "mipi_hsp", NULL);
	clk_register_clkdev(clk[vpu_gate], NULL, "imx51-vpu.0");
	clk_register_clkdev(clk[fec_gate], NULL, "imx27-fec.0");
	clk_register_clkdev(clk[gpc_dvfs], "gpc_dvfs", NULL);
	clk_register_clkdev(clk[ipu_gate], "bus", "imx51-ipu");
	clk_register_clkdev(clk[ipu_di0_gate], "di0", "imx51-ipu");
	clk_register_clkdev(clk[ipu_di1_gate], "di1", "imx51-ipu");
	clk_register_clkdev(clk[ipu_gate], "hsp", "imx51-ipu");
	clk_register_clkdev(clk[usb_phy_gate], "phy", "mxc-ehci.0");
	clk_register_clkdev(clk[esdhc1_ipg_gate], "ipg", "sdhci-esdhc-imx51.0");
	clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.0");
	clk_register_clkdev(clk[esdhc1_per_gate], "per", "sdhci-esdhc-imx51.0");
	clk_register_clkdev(clk[esdhc2_ipg_gate], "ipg", "sdhci-esdhc-imx51.1");
	clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.1");
	clk_register_clkdev(clk[esdhc2_per_gate], "per", "sdhci-esdhc-imx51.1");
	clk_register_clkdev(clk[esdhc3_ipg_gate], "ipg", "sdhci-esdhc-imx51.2");
	clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.2");
	clk_register_clkdev(clk[esdhc3_per_gate], "per", "sdhci-esdhc-imx51.2");
	clk_register_clkdev(clk[esdhc4_ipg_gate], "ipg", "sdhci-esdhc-imx51.3");
	clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx51.3");
	clk_register_clkdev(clk[esdhc4_per_gate], "per", "sdhci-esdhc-imx51.3");
	/* Device-tree (unit-address based) lookups for the SSI blocks. */
	clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "83fcc000.ssi");
	clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "70014000.ssi");
	clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "83fe8000.ssi");

	/* set the usboh3 parent to pll2_sw */
	clk_set_parent(clk[usboh3_sel], clk[pll2_sw]);

	/* set SDHC root clock to 166.25MHZ*/
	clk_set_rate(clk[esdhc_a_podf], 166250000);
	clk_set_rate(clk[esdhc_b_podf], 166250000);

	/* System timer */
	mxc_timer_init(NULL, MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR),
		MX51_INT_GPT);

	/* IIM must be clocked while reading the silicon revision fuses. */
	clk_prepare_enable(clk[iim_gate]);
	imx_print_silicon_rev("i.MX51", mx51_revision());
	clk_disable_unprepare(clk[iim_gate]);

	return 0;
}
378
/*
 * Register the i.MX53-specific clocks, run the common i.MX5 setup and
 * install i.MX53-specific clkdev lookups.
 *
 * @rate_ckil/@rate_osc/@rate_ckih1/@rate_ckih2: rates of the fixed
 * input clocks, as provided by the board or device tree.
 *
 * Always returns 0.
 */
int __init mx53_clocks_init(unsigned long rate_ckil, unsigned long rate_osc,
			unsigned long rate_ckih1, unsigned long rate_ckih2)
{
	int i;
	unsigned long r;

	/* i.MX53 has a fourth PLL (used for display/LDB clocks). */
	clk[pll1_sw] = imx_clk_pllv2("pll1_sw", "osc", MX53_DPLL1_BASE);
	clk[pll2_sw] = imx_clk_pllv2("pll2_sw", "osc", MX53_DPLL2_BASE);
	clk[pll3_sw] = imx_clk_pllv2("pll3_sw", "osc", MX53_DPLL3_BASE);
	clk[pll4_sw] = imx_clk_pllv2("pll4_sw", "osc", MX53_DPLL4_BASE);

	/* LDB display clocks: mux -> fixed /3.5 (2/7) -> optional divider. */
	clk[ldb_di1_sel] = imx_clk_mux("ldb_di1_sel", MXC_CCM_CSCMR2, 9, 1,
				mx53_ldb_di1_sel, ARRAY_SIZE(mx53_ldb_di1_sel));
	clk[ldb_di1_div_3_5] = imx_clk_fixed_factor("ldb_di1_div_3_5", "ldb_di1_sel", 2, 7);
	clk[ldb_di1_div] = imx_clk_divider("ldb_di1_div", "ldb_di1_div_3_5", MXC_CCM_CSCMR2, 11, 1);
	clk[di_pll4_podf] = imx_clk_divider("di_pll4_podf", "pll4_sw", MXC_CCM_CDCDR, 16, 3);
	clk[ldb_di0_sel] = imx_clk_mux("ldb_di0_sel", MXC_CCM_CSCMR2, 8, 1,
				mx53_ldb_di0_sel, ARRAY_SIZE(mx53_ldb_di0_sel));
	clk[ldb_di0_div_3_5] = imx_clk_fixed_factor("ldb_di0_div_3_5", "ldb_di0_sel", 2, 7);
	clk[ldb_di0_div] = imx_clk_divider("ldb_di0_div", "ldb_di0_div_3_5", MXC_CCM_CSCMR2, 10, 1);
	clk[ldb_di0_gate] = imx_clk_gate2("ldb_di0_gate", "ldb_di0_div", MXC_CCM_CCGR6, 28);
	clk[ldb_di1_gate] = imx_clk_gate2("ldb_di1_gate", "ldb_di1_div", MXC_CCM_CCGR6, 30);
	clk[ipu_di0_sel] = imx_clk_mux("ipu_di0_sel", MXC_CCM_CSCMR2, 26, 3,
				mx53_ipu_di0_sel, ARRAY_SIZE(mx53_ipu_di0_sel));
	clk[ipu_di1_sel] = imx_clk_mux("ipu_di1_sel", MXC_CCM_CSCMR2, 29, 3,
				mx53_ipu_di1_sel, ARRAY_SIZE(mx53_ipu_di1_sel));
	clk[tve_ext_sel] = imx_clk_mux("tve_ext_sel", MXC_CCM_CSCMR1, 6, 1,
				mx53_tve_ext_sel, ARRAY_SIZE(mx53_tve_ext_sel));
	/*
	 * NOTE(review): unlike i.MX51 (tve_gate <- tve_sel, tve_pred <-
	 * pll3_sw), here the gate bypasses the tve_sel mux registered in
	 * the common code.  Looks intentional (different TVE routing on
	 * i.MX53) — confirm against the reference manual.
	 */
	clk[tve_gate] = imx_clk_gate2("tve_gate", "tve_pred", MXC_CCM_CCGR2, 30);
	clk[tve_pred] = imx_clk_divider("tve_pred", "tve_ext_sel", MXC_CCM_CDCDR, 28, 3);
	/* eSDHC2/3 routing is swapped relative to i.MX51 (c_sel/b_podf). */
	clk[esdhc1_per_gate] = imx_clk_gate2("esdhc1_per_gate", "esdhc_a_podf", MXC_CCM_CCGR3, 2);
	clk[esdhc2_per_gate] = imx_clk_gate2("esdhc2_per_gate", "esdhc_c_sel", MXC_CCM_CCGR3, 6);
	clk[esdhc3_per_gate] = imx_clk_gate2("esdhc3_per_gate", "esdhc_b_podf", MXC_CCM_CCGR3, 10);
	clk[esdhc4_per_gate] = imx_clk_gate2("esdhc4_per_gate", "esdhc_d_sel", MXC_CCM_CCGR3, 14);
	clk[usb_phy1_gate] = imx_clk_gate2("usb_phy1_gate", "usb_phy_sel", MXC_CCM_CCGR4, 10);
	clk[usb_phy2_gate] = imx_clk_gate2("usb_phy2_gate", "usb_phy_sel", MXC_CCM_CCGR4, 12);
	clk[can2_serial_gate] = imx_clk_gate2("can2_serial_gate", "ipg", MXC_CCM_CCGR4, 6);
	clk[can2_ipg_gate] = imx_clk_gate2("can2_ipg_gate", "ipg", MXC_CCM_CCGR4, 8);
	clk[i2c3_gate] = imx_clk_gate2("i2c3_gate", "per_root", MXC_CCM_CCGR1, 22);

	/* Report any registration failures among the entries set above. */
	for (i = 0; i < ARRAY_SIZE(clk); i++)
		if (IS_ERR(clk[i]))
			pr_err("i.MX53 clk %d: register failed with %ld\n",
				i, PTR_ERR(clk[i]));

	mx5_clocks_common_init(rate_ckil, rate_osc, rate_ckih1, rate_ckih2);

	clk_register_clkdev(clk[vpu_gate], NULL, "imx53-vpu.0");
	clk_register_clkdev(clk[i2c3_gate], NULL, "imx-i2c.2");
	clk_register_clkdev(clk[fec_gate], NULL, "imx25-fec.0");
	clk_register_clkdev(clk[ipu_gate], "bus", "imx53-ipu");
	clk_register_clkdev(clk[ipu_di0_gate], "di0", "imx53-ipu");
	clk_register_clkdev(clk[ipu_di1_gate], "di1", "imx53-ipu");
	clk_register_clkdev(clk[ipu_gate], "hsp", "imx53-ipu");
	clk_register_clkdev(clk[usb_phy1_gate], "usb_phy1", "mxc-ehci.0");
	clk_register_clkdev(clk[esdhc1_ipg_gate], "ipg", "sdhci-esdhc-imx53.0");
	clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.0");
	clk_register_clkdev(clk[esdhc1_per_gate], "per", "sdhci-esdhc-imx53.0");
	clk_register_clkdev(clk[esdhc2_ipg_gate], "ipg", "sdhci-esdhc-imx53.1");
	clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.1");
	clk_register_clkdev(clk[esdhc2_per_gate], "per", "sdhci-esdhc-imx53.1");
	clk_register_clkdev(clk[esdhc3_ipg_gate], "ipg", "sdhci-esdhc-imx53.2");
	clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.2");
	clk_register_clkdev(clk[esdhc3_per_gate], "per", "sdhci-esdhc-imx53.2");
	clk_register_clkdev(clk[esdhc4_ipg_gate], "ipg", "sdhci-esdhc-imx53.3");
	clk_register_clkdev(clk[dummy], "ahb", "sdhci-esdhc-imx53.3");
	clk_register_clkdev(clk[esdhc4_per_gate], "per", "sdhci-esdhc-imx53.3");
	/* Device-tree (unit-address based) lookups for the SSI blocks. */
	clk_register_clkdev(clk[ssi1_ipg_gate], NULL, "63fcc000.ssi");
	clk_register_clkdev(clk[ssi2_ipg_gate], NULL, "50014000.ssi");
	clk_register_clkdev(clk[ssi3_ipg_gate], NULL, "63fd0000.ssi");

	/* set SDHC root clock to 200MHZ*/
	clk_set_rate(clk[esdhc_a_podf], 200000000);
	clk_set_rate(clk[esdhc_b_podf], 200000000);

	/* System timer */
	mxc_timer_init(NULL, MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR),
		MX53_INT_GPT);

	/* IIM must be clocked while reading the silicon revision fuses. */
	clk_prepare_enable(clk[iim_gate]);
	imx_print_silicon_rev("i.MX53", mx53_revision());
	clk_disable_unprepare(clk[iim_gate]);

	/* Snap usboh3_per to the closest achievable rate to 54 MHz. */
	r = clk_round_rate(clk[usboh3_per_gate], 54000000);
	clk_set_rate(clk[usboh3_per_gate], r);

	return 0;
}
467
468#ifdef CONFIG_OF
/*
 * Read the rates of the fixed input clocks from the device tree.
 *
 * @ckil/@osc/@ckih1/@ckih2: output rate pointers, written only when a
 * matching "fixed-clock" node with a "clock-frequency" property is
 * found.  Outputs for absent nodes are left UNTOUCHED — callers must
 * initialize them before calling.
 */
static void __init clk_get_freq_dt(unsigned long *ckil, unsigned long *osc,
				   unsigned long *ckih1, unsigned long *ckih2)
{
	struct device_node *np;

	/* retrieve the frequency of fixed clocks from device tree */
	for_each_compatible_node(np, NULL, "fixed-clock") {
		u32 rate;
		if (of_property_read_u32(np, "clock-frequency", &rate))
			continue;

		if (of_device_is_compatible(np, "fsl,imx-ckil"))
			*ckil = rate;
		else if (of_device_is_compatible(np, "fsl,imx-osc"))
			*osc = rate;
		else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
			*ckih1 = rate;
		else if (of_device_is_compatible(np, "fsl,imx-ckih2"))
			*ckih2 = rate;
	}
}
490
491int __init mx51_clocks_init_dt(void)
492{
493 unsigned long ckil, osc, ckih1, ckih2;
494
495 clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
496 return mx51_clocks_init(ckil, osc, ckih1, ckih2);
497}
498
499int __init mx53_clocks_init_dt(void)
500{
501 unsigned long ckil, osc, ckih1, ckih2;
502
503 clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
504 return mx53_clocks_init(ckil, osc, ckih1, ckih2);
505}
506#endif
diff --git a/arch/arm/mach-imx/clk-imx6q.c b/arch/arm/mach-imx/clk-imx6q.c
new file mode 100644
index 000000000000..cab02d0a15d6
--- /dev/null
+++ b/arch/arm/mach-imx/clk-imx6q.c
@@ -0,0 +1,444 @@
1/*
2 * Copyright 2011 Freescale Semiconductor, Inc.
3 * Copyright 2011 Linaro Ltd.
4 *
5 * The code contained herein is licensed under the GNU General Public
6 * License. You may obtain a copy of the GNU General Public License
7 * Version 2 or later at the following locations:
8 *
9 * http://www.opensource.org/licenses/gpl-license.html
10 * http://www.gnu.org/copyleft/gpl.html
11 */
12
13#include <linux/init.h>
14#include <linux/types.h>
15#include <linux/clk.h>
16#include <linux/clkdev.h>
17#include <linux/err.h>
18#include <linux/io.h>
19#include <linux/of.h>
20#include <linux/of_address.h>
21#include <linux/of_irq.h>
22#include <mach/common.h>
23#include "clk.h"
24
25#define CCGR0 0x68
26#define CCGR1 0x6c
27#define CCGR2 0x70
28#define CCGR3 0x74
29#define CCGR4 0x78
30#define CCGR5 0x7c
31#define CCGR6 0x80
32#define CCGR7 0x84
33
34#define CLPCR 0x54
35#define BP_CLPCR_LPM 0
36#define BM_CLPCR_LPM (0x3 << 0)
37#define BM_CLPCR_BYPASS_PMIC_READY (0x1 << 2)
38#define BM_CLPCR_ARM_CLK_DIS_ON_LPM (0x1 << 5)
39#define BM_CLPCR_SBYOS (0x1 << 6)
40#define BM_CLPCR_DIS_REF_OSC (0x1 << 7)
41#define BM_CLPCR_VSTBY (0x1 << 8)
42#define BP_CLPCR_STBY_COUNT 9
43#define BM_CLPCR_STBY_COUNT (0x3 << 9)
44#define BM_CLPCR_COSC_PWRDOWN (0x1 << 11)
45#define BM_CLPCR_WB_PER_AT_LPM (0x1 << 16)
46#define BM_CLPCR_WB_CORE_AT_LPM (0x1 << 17)
47#define BM_CLPCR_BYP_MMDC_CH0_LPM_HS (0x1 << 19)
48#define BM_CLPCR_BYP_MMDC_CH1_LPM_HS (0x1 << 21)
49#define BM_CLPCR_MASK_CORE0_WFI (0x1 << 22)
50#define BM_CLPCR_MASK_CORE1_WFI (0x1 << 23)
51#define BM_CLPCR_MASK_CORE2_WFI (0x1 << 24)
52#define BM_CLPCR_MASK_CORE3_WFI (0x1 << 25)
53#define BM_CLPCR_MASK_SCU_IDLE (0x1 << 26)
54#define BM_CLPCR_MASK_L2CC_IDLE (0x1 << 27)
55
/* Virtual base of the CCM register block; set once in mx6q_clocks_init(). */
static void __iomem *ccm_base;

/* Nothing to map statically: the CCM is ioremapped from the device tree. */
void __init imx6q_clock_map_io(void) { }
59
/*
 * imx6q_set_lpm - program the CCM low-power mode control register (CLPCR)
 * @mode: requested CPU power mode
 *
 * Read-modify-writes the 2-bit LPM field and, for the deeper modes, the
 * standby-count / VSTBY / SBYOS bits.  Returns 0 on success or -EINVAL
 * for a mode this SoC does not support.
 */
int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
{
	u32 val = readl_relaxed(ccm_base + CLPCR);

	/* Clear the LPM field first; WAIT_CLOCKED leaves it at 0 (run mode). */
	val &= ~BM_CLPCR_LPM;
	switch (mode) {
	case WAIT_CLOCKED:
		break;
	case WAIT_UNCLOCKED:
		val |= 0x1 << BP_CLPCR_LPM;	/* LPM = WAIT */
		break;
	case STOP_POWER_ON:
		val |= 0x2 << BP_CLPCR_LPM;	/* LPM = STOP, power kept on */
		break;
	case WAIT_UNCLOCKED_POWER_OFF:
		val |= 0x1 << BP_CLPCR_LPM;
		/* no standby voltage or oscillator shutdown in this mode */
		val &= ~BM_CLPCR_VSTBY;
		val &= ~BM_CLPCR_SBYOS;
		break;
	case STOP_POWER_OFF:
		val |= 0x2 << BP_CLPCR_LPM;
		/* maximum standby count plus standby voltage / osc off */
		val |= 0x3 << BP_CLPCR_STBY_COUNT;
		val |= BM_CLPCR_VSTBY;
		val |= BM_CLPCR_SBYOS;
		break;
	default:
		return -EINVAL;
	}

	writel_relaxed(val, ccm_base + CLPCR);

	return 0;
}
93
/*
 * Parent-name tables for the CCM muxes registered in mx6q_clocks_init();
 * the index of a name in each array is the selector value written to the
 * corresponding mux field.  "dummy" marks selector values with no usable
 * parent.
 */
static const char *step_sels[]	= { "osc", "pll2_pfd2_396m", };
static const char *pll1_sw_sels[]	= { "pll1_sys", "step", };
static const char *periph_pre_sels[]	= { "pll2_bus", "pll2_pfd2_396m", "pll2_pfd0_352m", "pll2_198m", };
static const char *periph_clk2_sels[]	= { "pll3_usb_otg", "osc", };
static const char *periph_sels[]	= { "periph_pre", "periph_clk2", };
static const char *periph2_sels[]	= { "periph2_pre", "periph2_clk2", };
static const char *axi_sels[]		= { "periph", "pll2_pfd2_396m", "pll3_pfd1_540m", };
static const char *audio_sels[]	= { "pll4_audio", "pll3_pfd2_508m", "pll3_pfd3_454m", "pll3_usb_otg", };
static const char *gpu_axi_sels[]	= { "axi", "ahb", };
static const char *gpu2d_core_sels[]	= { "axi", "pll3_usb_otg", "pll2_pfd0_352m", "pll2_pfd2_396m", };
static const char *gpu3d_core_sels[]	= { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd2_396m", };
static const char *gpu3d_shader_sels[] = { "mmdc_ch0_axi", "pll3_usb_otg", "pll2_pfd1_594m", "pll2_pfd9_720m", };
static const char *ipu_sels[]		= { "mmdc_ch0_axi", "pll2_pfd2_396m", "pll3_120m", "pll3_pfd1_540m", };
static const char *ldb_di_sels[]	= { "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
static const char *ipu_di_pre_sels[]	= { "mmdc_ch0_axi", "pll3_usb_otg", "pll5_video", "pll2_pfd0_352m", "pll2_pfd2_396m", "pll3_pfd1_540m", };
static const char *ipu1_di0_sels[]	= { "ipu1_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
static const char *ipu1_di1_sels[]	= { "ipu1_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
static const char *ipu2_di0_sels[]	= { "ipu2_di0_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
static const char *ipu2_di1_sels[]	= { "ipu2_di1_pre", "dummy", "dummy", "ldb_di0", "ldb_di1", };
static const char *hsi_tx_sels[]	= { "pll3_120m", "pll2_pfd2_396m", };
static const char *pcie_axi_sels[]	= { "axi", "ahb", };
static const char *ssi_sels[]		= { "pll3_pfd2_508m", "pll3_pfd3_454m", "pll4_audio", };
static const char *usdhc_sels[]	= { "pll2_pfd2_396m", "pll2_pfd0_352m", };
static const char *enfc_sels[]	= { "pll2_pfd0_352m", "pll2_bus", "pll3_usb_otg", "pll2_pfd2_396m", };
static const char *emi_sels[]		= { "axi", "pll3_usb_otg", "pll2_pfd2_396m", "pll2_pfd0_352m", };
static const char *vdo_axi_sels[]	= { "axi", "ahb", };
static const char *vpu_axi_sels[]	= { "axi", "pll2_pfd2_396m", "pll2_pfd0_352m", };
static const char *cko1_sels[]	= { "pll3_usb_otg", "pll2_bus", "pll1_sys", "pll5_video",
				    "dummy", "axi", "enfc", "ipu1_di0", "ipu1_di1", "ipu2_di0",
				    "ipu2_di1", "ahb", "ipg", "ipg_per", "ckil", "pll4_audio", };

/* Clocks that are enabled once at init and must stay running. */
static const char * const clks_init_on[] __initconst = {
	"mmdc_ch0_axi", "mmdc_ch1_axi", "usboh3",
};
128
/*
 * Index of every i.MX6Q clock in the clk[] lookup table below;
 * clk_max is the number of entries.
 */
enum mx6q_clks {
	dummy, ckil, ckih, osc, pll2_pfd0_352m, pll2_pfd1_594m, pll2_pfd2_396m,
	pll3_pfd0_720m, pll3_pfd1_540m, pll3_pfd2_508m, pll3_pfd3_454m,
	pll2_198m, pll3_120m, pll3_80m, pll3_60m, twd, step, pll1_sw,
	periph_pre, periph2_pre, periph_clk2_sel, periph2_clk2_sel, axi_sel,
	esai_sel, asrc_sel, spdif_sel, gpu2d_axi, gpu3d_axi, gpu2d_core_sel,
	gpu3d_core_sel, gpu3d_shader_sel, ipu1_sel, ipu2_sel, ldb_di0_sel,
	ldb_di1_sel, ipu1_di0_pre_sel, ipu1_di1_pre_sel, ipu2_di0_pre_sel,
	ipu2_di1_pre_sel, ipu1_di0_sel, ipu1_di1_sel, ipu2_di0_sel,
	ipu2_di1_sel, hsi_tx_sel, pcie_axi_sel, ssi1_sel, ssi2_sel, ssi3_sel,
	usdhc1_sel, usdhc2_sel, usdhc3_sel, usdhc4_sel, enfc_sel, emi_sel,
	emi_slow_sel, vdo_axi_sel, vpu_axi_sel, cko1_sel, periph, periph2,
	periph_clk2, periph2_clk2, ipg, ipg_per, esai_pred, esai_podf,
	asrc_pred, asrc_podf, spdif_pred, spdif_podf, can_root, ecspi_root,
	gpu2d_core_podf, gpu3d_core_podf, gpu3d_shader, ipu1_podf, ipu2_podf,
	ldb_di0_podf, ldb_di1_podf, ipu1_di0_pre, ipu1_di1_pre, ipu2_di0_pre,
	ipu2_di1_pre, hsi_tx_podf, ssi1_pred, ssi1_podf, ssi2_pred, ssi2_podf,
	ssi3_pred, ssi3_podf, uart_serial_podf, usdhc1_podf, usdhc2_podf,
	usdhc3_podf, usdhc4_podf, enfc_pred, enfc_podf, emi_podf,
	emi_slow_podf, vpu_axi_podf, cko1_podf, axi, mmdc_ch0_axi_podf,
	mmdc_ch1_axi_podf, arm, ahb, apbh_dma, asrc, can1_ipg, can1_serial,
	can2_ipg, can2_serial, ecspi1, ecspi2, ecspi3, ecspi4, ecspi5, enet,
	esai, gpt_ipg, gpt_ipg_per, gpu2d_core, gpu3d_core, hdmi_iahb,
	hdmi_isfr, i2c1, i2c2, i2c3, iim, enfc, ipu1, ipu1_di0, ipu1_di1, ipu2,
	ipu2_di0, ldb_di0, ldb_di1, ipu2_di1, hsi_tx, mlb, mmdc_ch0_axi,
	mmdc_ch1_axi, ocram, openvg_axi, pcie_axi, pwm1, pwm2, pwm3, pwm4,
	gpmi_bch_apb, gpmi_bch, gpmi_io, gpmi_apb, sata, sdma, spba, ssi1,
	ssi2, ssi3, uart_ipg, uart_serial, usboh3, usdhc1, usdhc2, usdhc3,
	usdhc4, vdo_axi, vpu_axi, cko1, pll1_sys, pll2_bus, pll3_usb_otg,
	pll4_audio, pll5_video, pll6_mlb, pll7_usb_host, pll8_enet, ssi1_ipg,
	ssi2_ipg, ssi3_ipg, clk_max
};

/* Every registered clock, indexed by enum mx6q_clks. */
static struct clk *clk[clk_max];
163
/*
 * mx6q_clocks_init - register the entire i.MX6Q clock tree
 *
 * Reads the fixed reference clocks, the anatop (PLL/PFD) block and the CCM
 * from the device tree, registers every clock listed in enum mx6q_clks,
 * creates clkdev lookups for the devices that need them, enables the
 * always-on clocks and finally sets up the GPT-based system timer.
 *
 * Returns 0 on success or a negative errno if an always-on clock cannot
 * be looked up.
 */
int __init mx6q_clocks_init(void)
{
	struct device_node *np;
	void __iomem *base;
	struct clk *c;
	int i, irq;

	/* placeholder parent for mux inputs that are not usable */
	clk[dummy] = imx_clk_fixed("dummy", 0);

	/* retrieve the frequency of fixed clocks from device tree */
	for_each_compatible_node(np, NULL, "fixed-clock") {
		u32 rate;
		if (of_property_read_u32(np, "clock-frequency", &rate))
			continue;

		if (of_device_is_compatible(np, "fsl,imx-ckil"))
			clk[ckil] = imx_clk_fixed("ckil", rate);
		else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
			clk[ckih] = imx_clk_fixed("ckih", rate);
		else if (of_device_is_compatible(np, "fsl,imx-osc"))
			clk[osc] = imx_clk_fixed("osc", rate);
	}

	/* the PLLs and PFDs live in the anatop register block */
	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-anatop");
	base = of_iomap(np, 0);
	WARN_ON(!base);

	/* type name parent_name base gate_mask div_mask */
	clk[pll1_sys] = imx_clk_pllv3(IMX_PLLV3_SYS, "pll1_sys", "osc", base, 0x2000, 0x7f);
	clk[pll2_bus] = imx_clk_pllv3(IMX_PLLV3_GENERIC, "pll2_bus", "osc", base + 0x30, 0x2000, 0x1);
	clk[pll3_usb_otg] = imx_clk_pllv3(IMX_PLLV3_USB, "pll3_usb_otg", "osc", base + 0x10, 0x2000, 0x3);
	clk[pll4_audio] = imx_clk_pllv3(IMX_PLLV3_AV, "pll4_audio", "osc", base + 0x70, 0x2000, 0x7f);
	clk[pll5_video] = imx_clk_pllv3(IMX_PLLV3_AV, "pll5_video", "osc", base + 0xa0, 0x2000, 0x7f);
	clk[pll6_mlb] = imx_clk_pllv3(IMX_PLLV3_MLB, "pll6_mlb", "osc", base + 0xd0, 0x2000, 0x0);
	clk[pll7_usb_host] = imx_clk_pllv3(IMX_PLLV3_USB, "pll7_usb_host","osc", base + 0x20, 0x2000, 0x3);
	clk[pll8_enet] = imx_clk_pllv3(IMX_PLLV3_ENET, "pll8_enet", "osc", base + 0xe0, 0x182000, 0x3);

	/* name parent_name reg idx */
	clk[pll2_pfd0_352m] = imx_clk_pfd("pll2_pfd0_352m", "pll2_bus", base + 0x100, 0);
	clk[pll2_pfd1_594m] = imx_clk_pfd("pll2_pfd1_594m", "pll2_bus", base + 0x100, 1);
	clk[pll2_pfd2_396m] = imx_clk_pfd("pll2_pfd2_396m", "pll2_bus", base + 0x100, 2);
	clk[pll3_pfd0_720m] = imx_clk_pfd("pll3_pfd0_720m", "pll3_usb_otg", base + 0xf0, 0);
	clk[pll3_pfd1_540m] = imx_clk_pfd("pll3_pfd1_540m", "pll3_usb_otg", base + 0xf0, 1);
	clk[pll3_pfd2_508m] = imx_clk_pfd("pll3_pfd2_508m", "pll3_usb_otg", base + 0xf0, 2);
	clk[pll3_pfd3_454m] = imx_clk_pfd("pll3_pfd3_454m", "pll3_usb_otg", base + 0xf0, 3);

	/* name parent_name mult div */
	clk[pll2_198m] = imx_clk_fixed_factor("pll2_198m", "pll2_pfd2_396m", 1, 2);
	clk[pll3_120m] = imx_clk_fixed_factor("pll3_120m", "pll3_usb_otg", 1, 4);
	clk[pll3_80m]  = imx_clk_fixed_factor("pll3_80m",  "pll3_usb_otg", 1, 6);
	clk[pll3_60m]  = imx_clk_fixed_factor("pll3_60m",  "pll3_usb_otg", 1, 8);
	clk[twd]       = imx_clk_fixed_factor("twd",       "arm",          1, 2);

	/* the muxes, dividers and gates below live in the CCM block */
	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-ccm");
	base = of_iomap(np, 0);
	WARN_ON(!base);
	ccm_base = base;

	/* name reg shift width parent_names num_parents */
	clk[step]             = imx_clk_mux("step",	        base + 0xc,  8,  1, step_sels,	       ARRAY_SIZE(step_sels));
	clk[pll1_sw]          = imx_clk_mux("pll1_sw",	        base + 0xc,  2,  1, pll1_sw_sels,      ARRAY_SIZE(pll1_sw_sels));
	clk[periph_pre]       = imx_clk_mux("periph_pre",       base + 0x18, 18, 2, periph_pre_sels,   ARRAY_SIZE(periph_pre_sels));
	clk[periph2_pre]      = imx_clk_mux("periph2_pre",      base + 0x18, 21, 2, periph_pre_sels,   ARRAY_SIZE(periph_pre_sels));
	clk[periph_clk2_sel]  = imx_clk_mux("periph_clk2_sel",  base + 0x18, 12, 1, periph_clk2_sels,  ARRAY_SIZE(periph_clk2_sels));
	clk[periph2_clk2_sel] = imx_clk_mux("periph2_clk2_sel", base + 0x18, 20, 1, periph_clk2_sels,  ARRAY_SIZE(periph_clk2_sels));
	clk[axi_sel]          = imx_clk_mux("axi_sel",          base + 0x14, 6,  2, axi_sels,          ARRAY_SIZE(axi_sels));
	clk[esai_sel]         = imx_clk_mux("esai_sel",         base + 0x20, 19, 2, audio_sels,        ARRAY_SIZE(audio_sels));
	clk[asrc_sel]         = imx_clk_mux("asrc_sel",         base + 0x30, 7,  2, audio_sels,        ARRAY_SIZE(audio_sels));
	clk[spdif_sel]        = imx_clk_mux("spdif_sel",        base + 0x30, 20, 2, audio_sels,        ARRAY_SIZE(audio_sels));
	clk[gpu2d_axi]        = imx_clk_mux("gpu2d_axi",        base + 0x18, 0,  1, gpu_axi_sels,      ARRAY_SIZE(gpu_axi_sels));
	clk[gpu3d_axi]        = imx_clk_mux("gpu3d_axi",        base + 0x18, 1,  1, gpu_axi_sels,      ARRAY_SIZE(gpu_axi_sels));
	clk[gpu2d_core_sel]   = imx_clk_mux("gpu2d_core_sel",   base + 0x18, 16, 2, gpu2d_core_sels,   ARRAY_SIZE(gpu2d_core_sels));
	clk[gpu3d_core_sel]   = imx_clk_mux("gpu3d_core_sel",   base + 0x18, 4,  2, gpu3d_core_sels,   ARRAY_SIZE(gpu3d_core_sels));
	clk[gpu3d_shader_sel] = imx_clk_mux("gpu3d_shader_sel", base + 0x18, 8,  2, gpu3d_shader_sels, ARRAY_SIZE(gpu3d_shader_sels));
	clk[ipu1_sel]         = imx_clk_mux("ipu1_sel",         base + 0x3c, 9,  2, ipu_sels,          ARRAY_SIZE(ipu_sels));
	clk[ipu2_sel]         = imx_clk_mux("ipu2_sel",         base + 0x3c, 14, 2, ipu_sels,          ARRAY_SIZE(ipu_sels));
	clk[ldb_di0_sel]      = imx_clk_mux("ldb_di0_sel",      base + 0x2c, 9,  3, ldb_di_sels,       ARRAY_SIZE(ldb_di_sels));
	clk[ldb_di1_sel]      = imx_clk_mux("ldb_di1_sel",      base + 0x2c, 12, 3, ldb_di_sels,       ARRAY_SIZE(ldb_di_sels));
	clk[ipu1_di0_pre_sel] = imx_clk_mux("ipu1_di0_pre_sel", base + 0x34, 6,  3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels));
	clk[ipu1_di1_pre_sel] = imx_clk_mux("ipu1_di1_pre_sel", base + 0x34, 15, 3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels));
	clk[ipu2_di0_pre_sel] = imx_clk_mux("ipu2_di0_pre_sel", base + 0x38, 6,  3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels));
	clk[ipu2_di1_pre_sel] = imx_clk_mux("ipu2_di1_pre_sel", base + 0x38, 15, 3, ipu_di_pre_sels,   ARRAY_SIZE(ipu_di_pre_sels));
	clk[ipu1_di0_sel]     = imx_clk_mux("ipu1_di0_sel",     base + 0x34, 0,  3, ipu1_di0_sels,     ARRAY_SIZE(ipu1_di0_sels));
	clk[ipu1_di1_sel]     = imx_clk_mux("ipu1_di1_sel",     base + 0x34, 9,  3, ipu1_di1_sels,     ARRAY_SIZE(ipu1_di1_sels));
	clk[ipu2_di0_sel]     = imx_clk_mux("ipu2_di0_sel",     base + 0x38, 0,  3, ipu2_di0_sels,     ARRAY_SIZE(ipu2_di0_sels));
	clk[ipu2_di1_sel]     = imx_clk_mux("ipu2_di1_sel",     base + 0x38, 9,  3, ipu2_di1_sels,     ARRAY_SIZE(ipu2_di1_sels));
	clk[hsi_tx_sel]       = imx_clk_mux("hsi_tx_sel",       base + 0x30, 28, 1, hsi_tx_sels,       ARRAY_SIZE(hsi_tx_sels));
	clk[pcie_axi_sel]     = imx_clk_mux("pcie_axi_sel",     base + 0x18, 10, 1, pcie_axi_sels,     ARRAY_SIZE(pcie_axi_sels));
	clk[ssi1_sel]         = imx_clk_mux("ssi1_sel",         base + 0x1c, 10, 2, ssi_sels,          ARRAY_SIZE(ssi_sels));
	clk[ssi2_sel]         = imx_clk_mux("ssi2_sel",         base + 0x1c, 12, 2, ssi_sels,          ARRAY_SIZE(ssi_sels));
	clk[ssi3_sel]         = imx_clk_mux("ssi3_sel",         base + 0x1c, 14, 2, ssi_sels,          ARRAY_SIZE(ssi_sels));
	clk[usdhc1_sel]       = imx_clk_mux("usdhc1_sel",       base + 0x1c, 16, 1, usdhc_sels,        ARRAY_SIZE(usdhc_sels));
	clk[usdhc2_sel]       = imx_clk_mux("usdhc2_sel",       base + 0x1c, 17, 1, usdhc_sels,        ARRAY_SIZE(usdhc_sels));
	clk[usdhc3_sel]       = imx_clk_mux("usdhc3_sel",       base + 0x1c, 18, 1, usdhc_sels,        ARRAY_SIZE(usdhc_sels));
	clk[usdhc4_sel]       = imx_clk_mux("usdhc4_sel",       base + 0x1c, 19, 1, usdhc_sels,        ARRAY_SIZE(usdhc_sels));
	clk[enfc_sel]         = imx_clk_mux("enfc_sel",         base + 0x2c, 16, 2, enfc_sels,         ARRAY_SIZE(enfc_sels));
	clk[emi_sel]          = imx_clk_mux("emi_sel",          base + 0x1c, 27, 2, emi_sels,          ARRAY_SIZE(emi_sels));
	clk[emi_slow_sel]     = imx_clk_mux("emi_slow_sel",     base + 0x1c, 29, 2, emi_sels,          ARRAY_SIZE(emi_sels));
	clk[vdo_axi_sel]      = imx_clk_mux("vdo_axi_sel",      base + 0x18, 11, 1, vdo_axi_sels,      ARRAY_SIZE(vdo_axi_sels));
	clk[vpu_axi_sel]      = imx_clk_mux("vpu_axi_sel",      base + 0x18, 14, 2, vpu_axi_sels,      ARRAY_SIZE(vpu_axi_sels));
	clk[cko1_sel]         = imx_clk_mux("cko1_sel",         base + 0x60, 0,  4, cko1_sels,         ARRAY_SIZE(cko1_sels));

	/* name reg shift width busy: reg, shift parent_names num_parents */
	clk[periph]  = imx_clk_busy_mux("periph",  base + 0x14, 25, 1, base + 0x48, 5, periph_sels,  ARRAY_SIZE(periph_sels));
	clk[periph2] = imx_clk_busy_mux("periph2", base + 0x14, 26, 1, base + 0x48, 3, periph2_sels, ARRAY_SIZE(periph2_sels));

	/* name parent_name reg shift width */
	clk[periph_clk2]      = imx_clk_divider("periph_clk2",      "periph_clk2_sel",   base + 0x14, 27, 3);
	clk[periph2_clk2]     = imx_clk_divider("periph2_clk2",     "periph2_clk2_sel",  base + 0x14, 0,  3);
	clk[ipg]              = imx_clk_divider("ipg",              "ahb",               base + 0x14, 8,  2);
	clk[ipg_per]          = imx_clk_divider("ipg_per",          "ipg",               base + 0x1c, 0,  6);
	clk[esai_pred]        = imx_clk_divider("esai_pred",        "esai_sel",          base + 0x28, 9,  3);
	clk[esai_podf]        = imx_clk_divider("esai_podf",        "esai_pred",         base + 0x28, 25, 3);
	clk[asrc_pred]        = imx_clk_divider("asrc_pred",        "asrc_sel",          base + 0x30, 12, 3);
	clk[asrc_podf]        = imx_clk_divider("asrc_podf",        "asrc_pred",         base + 0x30, 9,  3);
	clk[spdif_pred]       = imx_clk_divider("spdif_pred",       "spdif_sel",         base + 0x30, 25, 3);
	clk[spdif_podf]       = imx_clk_divider("spdif_podf",       "spdif_pred",        base + 0x30, 22, 3);
	clk[can_root]         = imx_clk_divider("can_root",         "pll3_usb_otg",      base + 0x20, 2,  6);
	clk[ecspi_root]       = imx_clk_divider("ecspi_root",       "pll3_60m",          base + 0x38, 19, 6);
	clk[gpu2d_core_podf]  = imx_clk_divider("gpu2d_core_podf",  "gpu2d_core_sel",    base + 0x18, 23, 3);
	clk[gpu3d_core_podf]  = imx_clk_divider("gpu3d_core_podf",  "gpu3d_core_sel",    base + 0x18, 26, 3);
	clk[gpu3d_shader]     = imx_clk_divider("gpu3d_shader",     "gpu3d_shader_sel",  base + 0x18, 29, 3);
	clk[ipu1_podf]        = imx_clk_divider("ipu1_podf",        "ipu1_sel",          base + 0x3c, 11, 3);
	clk[ipu2_podf]        = imx_clk_divider("ipu2_podf",        "ipu2_sel",          base + 0x3c, 16, 3);
	clk[ldb_di0_podf]     = imx_clk_divider("ldb_di0_podf",     "ldb_di0_sel",       base + 0x20, 10, 1);
	clk[ldb_di1_podf]     = imx_clk_divider("ldb_di1_podf",     "ldb_di1_sel",       base + 0x20, 11, 1);
	clk[ipu1_di0_pre]     = imx_clk_divider("ipu1_di0_pre",     "ipu1_di0_pre_sel",  base + 0x34, 3,  3);
	clk[ipu1_di1_pre]     = imx_clk_divider("ipu1_di1_pre",     "ipu1_di1_pre_sel",  base + 0x34, 12, 3);
	clk[ipu2_di0_pre]     = imx_clk_divider("ipu2_di0_pre",     "ipu2_di0_pre_sel",  base + 0x38, 3,  3);
	clk[ipu2_di1_pre]     = imx_clk_divider("ipu2_di1_pre",     "ipu2_di1_pre_sel",  base + 0x38, 12, 3);
	clk[hsi_tx_podf]      = imx_clk_divider("hsi_tx_podf",      "hsi_tx_sel",        base + 0x30, 29, 3);
	clk[ssi1_pred]        = imx_clk_divider("ssi1_pred",        "ssi1_sel",          base + 0x28, 6,  3);
	clk[ssi1_podf]        = imx_clk_divider("ssi1_podf",        "ssi1_pred",         base + 0x28, 0,  6);
	clk[ssi2_pred]        = imx_clk_divider("ssi2_pred",        "ssi2_sel",          base + 0x2c, 6,  3);
	clk[ssi2_podf]        = imx_clk_divider("ssi2_podf",        "ssi2_pred",         base + 0x2c, 0,  6);
	clk[ssi3_pred]        = imx_clk_divider("ssi3_pred",        "ssi3_sel",          base + 0x28, 22, 3);
	clk[ssi3_podf]        = imx_clk_divider("ssi3_podf",        "ssi3_pred",         base + 0x28, 16, 6);
	clk[uart_serial_podf] = imx_clk_divider("uart_serial_podf", "pll3_80m",          base + 0x24, 0,  6);
	clk[usdhc1_podf]      = imx_clk_divider("usdhc1_podf",      "usdhc1_sel",        base + 0x24, 11, 3);
	clk[usdhc2_podf]      = imx_clk_divider("usdhc2_podf",      "usdhc2_sel",        base + 0x24, 16, 3);
	clk[usdhc3_podf]      = imx_clk_divider("usdhc3_podf",      "usdhc3_sel",        base + 0x24, 19, 3);
	clk[usdhc4_podf]      = imx_clk_divider("usdhc4_podf",      "usdhc4_sel",        base + 0x24, 22, 3);
	clk[enfc_pred]        = imx_clk_divider("enfc_pred",        "enfc_sel",          base + 0x2c, 18, 3);
	clk[enfc_podf]        = imx_clk_divider("enfc_podf",        "enfc_pred",         base + 0x2c, 21, 6);
	clk[emi_podf]         = imx_clk_divider("emi_podf",         "emi_sel",           base + 0x1c, 20, 3);
	clk[emi_slow_podf]    = imx_clk_divider("emi_slow_podf",    "emi_slow_sel",      base + 0x1c, 23, 3);
	clk[vpu_axi_podf]     = imx_clk_divider("vpu_axi_podf",     "vpu_axi_sel",       base + 0x24, 25, 3);
	clk[cko1_podf]        = imx_clk_divider("cko1_podf",        "cko1_sel",          base + 0x60, 4,  3);

	/* name parent_name reg shift width busy: reg, shift */
	clk[axi]               = imx_clk_busy_divider("axi",               "axi_sel",  base + 0x14, 16, 3, base + 0x48, 0);
	clk[mmdc_ch0_axi_podf] = imx_clk_busy_divider("mmdc_ch0_axi_podf", "periph",   base + 0x14, 19, 3, base + 0x48, 4);
	clk[mmdc_ch1_axi_podf] = imx_clk_busy_divider("mmdc_ch1_axi_podf", "periph2",  base + 0x14, 3,  3, base + 0x48, 2);
	clk[arm]               = imx_clk_busy_divider("arm",               "pll1_sw",  base + 0x10, 0,  3, base + 0x48, 16);
	clk[ahb]               = imx_clk_busy_divider("ahb",               "periph",   base + 0x14, 10, 3, base + 0x48, 1);

	/* name parent_name reg shift */
	clk[apbh_dma]     = imx_clk_gate2("apbh_dma",     "ahb",              base + 0x68, 4);
	clk[asrc]         = imx_clk_gate2("asrc",         "asrc_podf",        base + 0x68, 6);
	clk[can1_ipg]     = imx_clk_gate2("can1_ipg",     "ipg",              base + 0x68, 14);
	clk[can1_serial]  = imx_clk_gate2("can1_serial",  "can_root",         base + 0x68, 16);
	clk[can2_ipg]     = imx_clk_gate2("can2_ipg",     "ipg",              base + 0x68, 18);
	clk[can2_serial]  = imx_clk_gate2("can2_serial",  "can_root",         base + 0x68, 20);
	clk[ecspi1]       = imx_clk_gate2("ecspi1",       "ecspi_root",       base + 0x6c, 0);
	clk[ecspi2]       = imx_clk_gate2("ecspi2",       "ecspi_root",       base + 0x6c, 2);
	clk[ecspi3]       = imx_clk_gate2("ecspi3",       "ecspi_root",       base + 0x6c, 4);
	clk[ecspi4]       = imx_clk_gate2("ecspi4",       "ecspi_root",       base + 0x6c, 6);
	clk[ecspi5]       = imx_clk_gate2("ecspi5",       "ecspi_root",       base + 0x6c, 8);
	clk[enet]         = imx_clk_gate2("enet",         "ipg",              base + 0x6c, 10);
	clk[esai]         = imx_clk_gate2("esai",         "esai_podf",        base + 0x6c, 16);
	clk[gpt_ipg]      = imx_clk_gate2("gpt_ipg",      "ipg",              base + 0x6c, 20);
	clk[gpt_ipg_per]  = imx_clk_gate2("gpt_ipg_per",  "ipg_per",          base + 0x6c, 22);
	clk[gpu2d_core]   = imx_clk_gate2("gpu2d_core",   "gpu2d_core_podf",  base + 0x6c, 24);
	clk[gpu3d_core]   = imx_clk_gate2("gpu3d_core",   "gpu3d_core_podf",  base + 0x6c, 26);
	clk[hdmi_iahb]    = imx_clk_gate2("hdmi_iahb",    "ahb",              base + 0x70, 0);
	clk[hdmi_isfr]    = imx_clk_gate2("hdmi_isfr",    "pll3_pfd1_540m",   base + 0x70, 4);
	clk[i2c1]         = imx_clk_gate2("i2c1",         "ipg_per",          base + 0x70, 6);
	clk[i2c2]         = imx_clk_gate2("i2c2",         "ipg_per",          base + 0x70, 8);
	clk[i2c3]         = imx_clk_gate2("i2c3",         "ipg_per",          base + 0x70, 10);
	clk[iim]          = imx_clk_gate2("iim",          "ipg",              base + 0x70, 12);
	clk[enfc]         = imx_clk_gate2("enfc",         "enfc_podf",        base + 0x70, 14);
	clk[ipu1]         = imx_clk_gate2("ipu1",         "ipu1_podf",        base + 0x74, 0);
	clk[ipu1_di0]     = imx_clk_gate2("ipu1_di0",     "ipu1_di0_sel",     base + 0x74, 2);
	clk[ipu1_di1]     = imx_clk_gate2("ipu1_di1",     "ipu1_di1_sel",     base + 0x74, 4);
	clk[ipu2]         = imx_clk_gate2("ipu2",         "ipu2_podf",        base + 0x74, 6);
	clk[ipu2_di0]     = imx_clk_gate2("ipu2_di0",     "ipu2_di0_sel",     base + 0x74, 8);
	clk[ldb_di0]      = imx_clk_gate2("ldb_di0",      "ldb_di0_podf",     base + 0x74, 12);
	clk[ldb_di1]      = imx_clk_gate2("ldb_di1",      "ldb_di1_podf",     base + 0x74, 14);
	clk[ipu2_di1]     = imx_clk_gate2("ipu2_di1",     "ipu2_di1_sel",     base + 0x74, 10);
	clk[hsi_tx]       = imx_clk_gate2("hsi_tx",       "hsi_tx_podf",      base + 0x74, 16);
	clk[mlb]          = imx_clk_gate2("mlb",          "pll6_mlb",         base + 0x74, 18);
	clk[mmdc_ch0_axi] = imx_clk_gate2("mmdc_ch0_axi", "mmdc_ch0_axi_podf", base + 0x74, 20);
	clk[mmdc_ch1_axi] = imx_clk_gate2("mmdc_ch1_axi", "mmdc_ch1_axi_podf", base + 0x74, 22);
	clk[ocram]        = imx_clk_gate2("ocram",        "ahb",              base + 0x74, 28);
	clk[openvg_axi]   = imx_clk_gate2("openvg_axi",   "axi",              base + 0x74, 30);
	clk[pcie_axi]     = imx_clk_gate2("pcie_axi",     "pcie_axi_sel",     base + 0x78, 0);
	clk[pwm1]         = imx_clk_gate2("pwm1",         "ipg_per",          base + 0x78, 16);
	clk[pwm2]         = imx_clk_gate2("pwm2",         "ipg_per",          base + 0x78, 18);
	clk[pwm3]         = imx_clk_gate2("pwm3",         "ipg_per",          base + 0x78, 20);
	clk[pwm4]         = imx_clk_gate2("pwm4",         "ipg_per",          base + 0x78, 22);
	clk[gpmi_bch_apb] = imx_clk_gate2("gpmi_bch_apb", "usdhc3",           base + 0x78, 24);
	clk[gpmi_bch]     = imx_clk_gate2("gpmi_bch",     "usdhc4",           base + 0x78, 26);
	clk[gpmi_io]      = imx_clk_gate2("gpmi_io",      "enfc",             base + 0x78, 28);
	clk[gpmi_apb]     = imx_clk_gate2("gpmi_apb",     "usdhc3",           base + 0x78, 30);
	clk[sata]         = imx_clk_gate2("sata",         "ipg",              base + 0x7c, 4);
	clk[sdma]         = imx_clk_gate2("sdma",         "ahb",              base + 0x7c, 6);
	clk[spba]         = imx_clk_gate2("spba",         "ipg",              base + 0x7c, 12);
	clk[ssi1_ipg]     = imx_clk_gate2("ssi1_ipg",     "ipg",              base + 0x7c, 18);
	clk[ssi2_ipg]     = imx_clk_gate2("ssi2_ipg",     "ipg",              base + 0x7c, 20);
	clk[ssi3_ipg]     = imx_clk_gate2("ssi3_ipg",     "ipg",              base + 0x7c, 22);
	clk[uart_ipg]     = imx_clk_gate2("uart_ipg",     "ipg",              base + 0x7c, 24);
	clk[uart_serial]  = imx_clk_gate2("uart_serial",  "uart_serial_podf", base + 0x7c, 26);
	clk[usboh3]       = imx_clk_gate2("usboh3",       "ipg",              base + 0x80, 0);
	clk[usdhc1]       = imx_clk_gate2("usdhc1",       "usdhc1_podf",      base + 0x80, 2);
	clk[usdhc2]       = imx_clk_gate2("usdhc2",       "usdhc2_podf",      base + 0x80, 4);
	clk[usdhc3]       = imx_clk_gate2("usdhc3",       "usdhc3_podf",      base + 0x80, 6);
	clk[usdhc4]       = imx_clk_gate2("usdhc4",       "usdhc4_podf",      base + 0x80, 8);
	clk[vdo_axi]      = imx_clk_gate2("vdo_axi",      "vdo_axi_sel",      base + 0x80, 12);
	clk[vpu_axi]      = imx_clk_gate2("vpu_axi",      "vpu_axi_podf",     base + 0x80, 14);
	clk[cko1]         = imx_clk_gate("cko1",          "cko1_podf",        base + 0x60, 7);

	/* complain about any clock that failed to register */
	for (i = 0; i < ARRAY_SIZE(clk); i++)
		if (IS_ERR(clk[i]))
			pr_err("i.MX6q clk %d: register failed with %ld\n",
				i, PTR_ERR(clk[i]));

	/* clkdev lookups so devices and platform code can clk_get() these */
	clk_register_clkdev(clk[mmdc_ch0_axi], NULL, "mmdc_ch0_axi");
	clk_register_clkdev(clk[mmdc_ch1_axi], NULL, "mmdc_ch1_axi");
	clk_register_clkdev(clk[gpt_ipg], "ipg", "imx-gpt.0");
	clk_register_clkdev(clk[gpt_ipg_per], "per", "imx-gpt.0");
	clk_register_clkdev(clk[twd], NULL, "smp_twd");
	clk_register_clkdev(clk[usboh3], NULL, "usboh3");
	clk_register_clkdev(clk[uart_serial], "per", "2020000.serial");
	clk_register_clkdev(clk[uart_ipg], "ipg", "2020000.serial");
	clk_register_clkdev(clk[uart_serial], "per", "21e8000.serial");
	clk_register_clkdev(clk[uart_ipg], "ipg", "21e8000.serial");
	clk_register_clkdev(clk[uart_serial], "per", "21ec000.serial");
	clk_register_clkdev(clk[uart_ipg], "ipg", "21ec000.serial");
	clk_register_clkdev(clk[uart_serial], "per", "21f0000.serial");
	clk_register_clkdev(clk[uart_ipg], "ipg", "21f0000.serial");
	clk_register_clkdev(clk[uart_serial], "per", "21f4000.serial");
	clk_register_clkdev(clk[uart_ipg], "ipg", "21f4000.serial");
	clk_register_clkdev(clk[enet], NULL, "2188000.ethernet");
	clk_register_clkdev(clk[usdhc1], NULL, "2190000.usdhc");
	clk_register_clkdev(clk[usdhc2], NULL, "2194000.usdhc");
	clk_register_clkdev(clk[usdhc3], NULL, "2198000.usdhc");
	clk_register_clkdev(clk[usdhc4], NULL, "219c000.usdhc");
	clk_register_clkdev(clk[i2c1], NULL, "21a0000.i2c");
	clk_register_clkdev(clk[i2c2], NULL, "21a4000.i2c");
	clk_register_clkdev(clk[i2c3], NULL, "21a8000.i2c");
	clk_register_clkdev(clk[ecspi1], NULL, "2008000.ecspi");
	clk_register_clkdev(clk[ecspi2], NULL, "200c000.ecspi");
	clk_register_clkdev(clk[ecspi3], NULL, "2010000.ecspi");
	clk_register_clkdev(clk[ecspi4], NULL, "2014000.ecspi");
	clk_register_clkdev(clk[ecspi5], NULL, "2018000.ecspi");
	clk_register_clkdev(clk[sdma], NULL, "20ec000.sdma");
	clk_register_clkdev(clk[dummy], NULL, "20bc000.wdog");
	clk_register_clkdev(clk[dummy], NULL, "20c0000.wdog");
	clk_register_clkdev(clk[ssi1_ipg], NULL, "2028000.ssi");
	clk_register_clkdev(clk[cko1_sel], "cko1_sel", NULL);
	clk_register_clkdev(clk[ahb], "ahb", NULL);
	clk_register_clkdev(clk[cko1], "cko1", NULL);

	/* turn on the clocks that must stay enabled (see clks_init_on) */
	for (i = 0; i < ARRAY_SIZE(clks_init_on); i++) {
		c = clk_get_sys(clks_init_on[i], NULL);
		if (IS_ERR(c)) {
			pr_err("%s: failed to get clk %s", __func__,
			       clks_init_on[i]);
			return PTR_ERR(c);
		}
		clk_prepare_enable(c);
	}

	/* System timer: GPT base address and interrupt come from the DT */
	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
	base = of_iomap(np, 0);
	WARN_ON(!base);
	irq = irq_of_parse_and_map(np, 0);
	mxc_timer_init(NULL, base, irq);

	return 0;
}
diff --git a/arch/arm/mach-imx/clk-pfd.c b/arch/arm/mach-imx/clk-pfd.c
new file mode 100644
index 000000000000..e2ed4160f329
--- /dev/null
+++ b/arch/arm/mach-imx/clk-pfd.c
@@ -0,0 +1,147 @@
1/*
2 * Copyright 2012 Freescale Semiconductor, Inc.
3 * Copyright 2012 Linaro Ltd.
4 *
5 * The code contained herein is licensed under the GNU General Public
6 * License. You may obtain a copy of the GNU General Public License
7 * Version 2 or later at the following locations:
8 *
9 * http://www.opensource.org/licenses/gpl-license.html
10 * http://www.gnu.org/copyleft/gpl.html
11 */
12
13#include <linux/clk.h>
14#include <linux/clk-provider.h>
15#include <linux/io.h>
16#include <linux/slab.h>
17#include <linux/err.h>
18#include "clk.h"
19
/**
 * struct clk_pfd - IMX PFD clock
 * @hw:  clock source
 * @reg: PFD register address
 * @idx: the index of PFD encoded in the register
 *
 * PFD clock found on i.MX6 series. Each register for PFD has 4 clk_pfd
 * data encoded, and member idx is used to specify the one. And each
 * register has SET, CLR and TOG registers at offset 0x4 0x8 and 0xc.
 */
struct clk_pfd {
	struct clk_hw hw;
	void __iomem *reg;
	u8 idx;
};

/* Map a clk_hw pointer back to its containing struct clk_pfd. */
#define to_clk_pfd(_hw) container_of(_hw, struct clk_pfd, hw)

/* Offsets of the hardware's self-clearing SET/CLR/toggle shadow registers. */
#define SET	0x4
#define CLR	0x8
#define OTG	0xc	/* NOTE(review): the 0xc offset is the TOG (toggle) register per the comment above; "OTG" looks like a typo. Unused here. */
41
42static int clk_pfd_enable(struct clk_hw *hw)
43{
44 struct clk_pfd *pfd = to_clk_pfd(hw);
45
46 writel_relaxed(1 << ((pfd->idx + 1) * 8 - 1), pfd->reg + CLR);
47
48 return 0;
49}
50
51static void clk_pfd_disable(struct clk_hw *hw)
52{
53 struct clk_pfd *pfd = to_clk_pfd(hw);
54
55 writel_relaxed(1 << ((pfd->idx + 1) * 8 - 1), pfd->reg + SET);
56}
57
/*
 * rate = parent_rate * 18 / frac, where frac is this PFD's 6-bit
 * fractional divider field (bits idx*8 .. idx*8+5 of the register).
 */
static unsigned long clk_pfd_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct clk_pfd *pfd = to_clk_pfd(hw);
	u64 tmp = parent_rate;
	/* extract the 6-bit frac field for this PFD slot */
	u8 frac = (readl_relaxed(pfd->reg) >> (pfd->idx * 8)) & 0x3f;

	tmp *= 18;
	do_div(tmp, frac);	/* NOTE(review): frac == 0 would divide by zero; presumably hardware resets it nonzero — confirm */

	return tmp;
}
70
71static long clk_pfd_round_rate(struct clk_hw *hw, unsigned long rate,
72 unsigned long *prate)
73{
74 u64 tmp = *prate;
75 u8 frac;
76
77 tmp = tmp * 18 + rate / 2;
78 do_div(tmp, rate);
79 frac = tmp;
80 if (frac < 12)
81 frac = 12;
82 else if (frac > 35)
83 frac = 35;
84 tmp = *prate;
85 tmp *= 18;
86 do_div(tmp, frac);
87
88 return tmp;
89}
90
91static int clk_pfd_set_rate(struct clk_hw *hw, unsigned long rate,
92 unsigned long parent_rate)
93{
94 struct clk_pfd *pfd = to_clk_pfd(hw);
95 u64 tmp = parent_rate;
96 u8 frac;
97
98 tmp = tmp * 18 + rate / 2;
99 do_div(tmp, rate);
100 frac = tmp;
101 if (frac < 12)
102 frac = 12;
103 else if (frac > 35)
104 frac = 35;
105
106 writel_relaxed(0x3f << (pfd->idx * 8), pfd->reg + CLR);
107 writel_relaxed(frac << (pfd->idx * 8), pfd->reg + SET);
108
109 return 0;
110}
111
/* clk_ops for the PFD clock type: gating plus the 18/frac rate scaling. */
static const struct clk_ops clk_pfd_ops = {
	.enable = clk_pfd_enable,
	.disable = clk_pfd_disable,
	.recalc_rate = clk_pfd_recalc_rate,
	.round_rate = clk_pfd_round_rate,
	.set_rate = clk_pfd_set_rate,
};
119
120struct clk *imx_clk_pfd(const char *name, const char *parent_name,
121 void __iomem *reg, u8 idx)
122{
123 struct clk_pfd *pfd;
124 struct clk *clk;
125 struct clk_init_data init;
126
127 pfd = kzalloc(sizeof(*pfd), GFP_KERNEL);
128 if (!pfd)
129 return ERR_PTR(-ENOMEM);
130
131 pfd->reg = reg;
132 pfd->idx = idx;
133
134 init.name = name;
135 init.ops = &clk_pfd_ops;
136 init.flags = 0;
137 init.parent_names = &parent_name;
138 init.num_parents = 1;
139
140 pfd->hw.init = &init;
141
142 clk = clk_register(NULL, &pfd->hw);
143 if (IS_ERR(clk))
144 kfree(pfd);
145
146 return clk;
147}
diff --git a/arch/arm/mach-imx/clk-pllv1.c b/arch/arm/mach-imx/clk-pllv1.c
new file mode 100644
index 000000000000..2d856f9ccf59
--- /dev/null
+++ b/arch/arm/mach-imx/clk-pllv1.c
@@ -0,0 +1,66 @@
1#include <linux/clk.h>
2#include <linux/clk-provider.h>
3#include <linux/io.h>
4#include <linux/slab.h>
5#include <linux/kernel.h>
6#include <linux/err.h>
7#include <mach/common.h>
8#include <mach/hardware.h>
9#include <mach/clock.h>
10#include "clk.h"
11
12/**
13 * pll v1
14 *
15 * @clk_hw clock source
16 * @parent the parent clock name
17 * @base base address of pll registers
18 *
19 * PLL clock version 1, found on i.MX1/21/25/27/31/35
20 */
21struct clk_pllv1 {
22 struct clk_hw hw;
23 void __iomem *base;
24};
25
26#define to_clk_pllv1(clk) (container_of(clk, struct clk_pllv1, clk))
27
/*
 * Read the PLL register and let mxc_decode_pll() convert its contents
 * into an output rate based on the parent (reference) rate.
 */
static unsigned long clk_pllv1_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct clk_pllv1 *pll = to_clk_pllv1(hw);

	return mxc_decode_pll(readl(pll->base), parent_rate);
}
35
36struct clk_ops clk_pllv1_ops = {
37 .recalc_rate = clk_pllv1_recalc_rate,
38};
39
40struct clk *imx_clk_pllv1(const char *name, const char *parent,
41 void __iomem *base)
42{
43 struct clk_pllv1 *pll;
44 struct clk *clk;
45 struct clk_init_data init;
46
47 pll = kmalloc(sizeof(*pll), GFP_KERNEL);
48 if (!pll)
49 return ERR_PTR(-ENOMEM);
50
51 pll->base = base;
52
53 init.name = name;
54 init.ops = &clk_pllv1_ops;
55 init.flags = 0;
56 init.parent_names = &parent;
57 init.num_parents = 1;
58
59 pll->hw.init = &init;
60
61 clk = clk_register(NULL, &pll->hw);
62 if (IS_ERR(clk))
63 kfree(pll);
64
65 return clk;
66}
diff --git a/arch/arm/mach-imx/clk-pllv2.c b/arch/arm/mach-imx/clk-pllv2.c
new file mode 100644
index 000000000000..4685919deb63
--- /dev/null
+++ b/arch/arm/mach-imx/clk-pllv2.c
@@ -0,0 +1,249 @@
1#include <linux/kernel.h>
2#include <linux/clk.h>
3#include <linux/io.h>
4#include <linux/errno.h>
5#include <linux/delay.h>
6#include <linux/slab.h>
7#include <linux/err.h>
8
9#include <asm/div64.h>
10
11#include "clk.h"
12
13#define to_clk_pllv2(clk) (container_of(clk, struct clk_pllv2, clk))
14
15/* PLL Register Offsets */
16#define MXC_PLL_DP_CTL 0x00
17#define MXC_PLL_DP_CONFIG 0x04
18#define MXC_PLL_DP_OP 0x08
19#define MXC_PLL_DP_MFD 0x0C
20#define MXC_PLL_DP_MFN 0x10
21#define MXC_PLL_DP_MFNMINUS 0x14
22#define MXC_PLL_DP_MFNPLUS 0x18
23#define MXC_PLL_DP_HFS_OP 0x1C
24#define MXC_PLL_DP_HFS_MFD 0x20
25#define MXC_PLL_DP_HFS_MFN 0x24
26#define MXC_PLL_DP_MFN_TOGC 0x28
27#define MXC_PLL_DP_DESTAT 0x2c
28
29/* PLL Register Bit definitions */
30#define MXC_PLL_DP_CTL_MUL_CTRL 0x2000
31#define MXC_PLL_DP_CTL_DPDCK0_2_EN 0x1000
32#define MXC_PLL_DP_CTL_DPDCK0_2_OFFSET 12
33#define MXC_PLL_DP_CTL_ADE 0x800
34#define MXC_PLL_DP_CTL_REF_CLK_DIV 0x400
35#define MXC_PLL_DP_CTL_REF_CLK_SEL_MASK (3 << 8)
36#define MXC_PLL_DP_CTL_REF_CLK_SEL_OFFSET 8
37#define MXC_PLL_DP_CTL_HFSM 0x80
38#define MXC_PLL_DP_CTL_PRE 0x40
39#define MXC_PLL_DP_CTL_UPEN 0x20
40#define MXC_PLL_DP_CTL_RST 0x10
41#define MXC_PLL_DP_CTL_RCP 0x8
42#define MXC_PLL_DP_CTL_PLM 0x4
43#define MXC_PLL_DP_CTL_BRM0 0x2
44#define MXC_PLL_DP_CTL_LRF 0x1
45
46#define MXC_PLL_DP_CONFIG_BIST 0x8
47#define MXC_PLL_DP_CONFIG_SJC_CE 0x4
48#define MXC_PLL_DP_CONFIG_AREN 0x2
49#define MXC_PLL_DP_CONFIG_LDREQ 0x1
50
51#define MXC_PLL_DP_OP_MFI_OFFSET 4
52#define MXC_PLL_DP_OP_MFI_MASK (0xF << 4)
53#define MXC_PLL_DP_OP_PDF_OFFSET 0
54#define MXC_PLL_DP_OP_PDF_MASK 0xF
55
56#define MXC_PLL_DP_MFD_OFFSET 0
57#define MXC_PLL_DP_MFD_MASK 0x07FFFFFF
58
59#define MXC_PLL_DP_MFN_OFFSET 0x0
60#define MXC_PLL_DP_MFN_MASK 0x07FFFFFF
61
62#define MXC_PLL_DP_MFN_TOGC_TOG_DIS (1 << 17)
63#define MXC_PLL_DP_MFN_TOGC_TOG_EN (1 << 16)
64#define MXC_PLL_DP_MFN_TOGC_CNT_OFFSET 0x0
65#define MXC_PLL_DP_MFN_TOGC_CNT_MASK 0xFFFF
66
67#define MXC_PLL_DP_DESTAT_TOG_SEL (1 << 31)
68#define MXC_PLL_DP_DESTAT_MFN 0x07FFFFFF
69
70#define MAX_DPLL_WAIT_TRIES 1000 /* 1000 * udelay(1) = 1ms */
71
72struct clk_pllv2 {
73 struct clk_hw hw;
74 void __iomem *base;
75};
76
/*
 * Compute the DPLL output rate from the MFI/MFN/MFD/PDF register fields:
 *
 *   rate = ref * MFI + ref * MFN / (MFD + 1),  ref = 2 * parent / (PDF + 1)
 *
 * ref is doubled once more when the DPDCK0_2 doubler bit is set.  When
 * the HFSM bit is set, the HFS_* shadow register set is read instead of
 * the normal one.
 */
static unsigned long clk_pllv2_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	long mfi, mfn, mfd, pdf, ref_clk, mfn_abs;
	unsigned long dp_op, dp_mfd, dp_mfn, dp_ctl, pll_hfsm, dbl;
	void __iomem *pllbase;
	s64 temp;
	struct clk_pllv2 *pll = to_clk_pllv2(hw);

	pllbase = pll->base;

	dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
	pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
	dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;

	/* pick the register set that is currently active */
	if (pll_hfsm == 0) {
		dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
		dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
		dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
	} else {
		dp_op = __raw_readl(pllbase + MXC_PLL_DP_HFS_OP);
		dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFD);
		dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFN);
	}
	pdf = dp_op & MXC_PLL_DP_OP_PDF_MASK;
	mfi = (dp_op & MXC_PLL_DP_OP_MFI_MASK) >> MXC_PLL_DP_OP_MFI_OFFSET;
	/* MFI values below 5 are treated as 5 */
	mfi = (mfi <= 5) ? 5 : mfi;
	mfd = dp_mfd & MXC_PLL_DP_MFD_MASK;
	mfn = mfn_abs = dp_mfn & MXC_PLL_DP_MFN_MASK;
	/* Sign extend to 32-bits: MFN is a 27-bit two's complement field */
	if (mfn >= 0x04000000) {
		mfn |= 0xFC000000;
		mfn_abs = -mfn;
	}

	ref_clk = 2 * parent_rate;
	if (dbl != 0)
		ref_clk *= 2;

	ref_clk /= (pdf + 1);
	/* fractional part computed on the absolute value, sign re-applied */
	temp = (u64) ref_clk * mfn_abs;
	do_div(temp, mfd + 1);
	if (mfn < 0)
		temp = -temp;
	temp = (ref_clk * mfi) + temp;

	return temp;
}
125
/*
 * Program the DPLL for @rate.
 *
 * Searches for the smallest predivider (pdf, 0..15) that yields an
 * integer multiplier mfi >= 5, then derives the fractional numerator
 * (mfn) against the fixed denominator mfd = 999999.
 *
 * NOTE(review): rate * (pdf + 1) and quad_parent_rate * mfi are computed
 * in unsigned long arithmetic before the s64 assignment; on 32-bit this
 * can overflow for very large rates - confirm the callers' rate range.
 */
static int clk_pllv2_set_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long parent_rate)
{
	struct clk_pllv2 *pll = to_clk_pllv2(hw);
	u32 reg;
	void __iomem *pllbase;
	long mfi, pdf, mfn, mfd = 999999;
	s64 temp64;
	unsigned long quad_parent_rate;
	unsigned long pll_hfsm, dp_ctl;

	pllbase = pll->base;

	quad_parent_rate = 4 * parent_rate;
	pdf = mfi = -1;
	/* find the first pdf for which the integer factor reaches 5 */
	while (++pdf < 16 && mfi < 5)
		mfi = rate * (pdf+1) / quad_parent_rate;
	if (mfi > 15)
		return -EINVAL;
	pdf--;

	/* remainder scaled by 1e6 gives mfn against mfd = 999999 */
	temp64 = rate * (pdf+1) - quad_parent_rate * mfi;
	do_div(temp64, quad_parent_rate/1000000);
	mfn = (long)temp64;

	dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
	/* use dpdck0_2 */
	__raw_writel(dp_ctl | 0x1000L, pllbase + MXC_PLL_DP_CTL);
	pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
	/* write to whichever register set is currently active */
	if (pll_hfsm == 0) {
		reg = mfi << 4 | pdf;
		__raw_writel(reg, pllbase + MXC_PLL_DP_OP);
		__raw_writel(mfd, pllbase + MXC_PLL_DP_MFD);
		__raw_writel(mfn, pllbase + MXC_PLL_DP_MFN);
	} else {
		reg = mfi << 4 | pdf;
		__raw_writel(reg, pllbase + MXC_PLL_DP_HFS_OP);
		__raw_writel(mfd, pllbase + MXC_PLL_DP_HFS_MFD);
		__raw_writel(mfn, pllbase + MXC_PLL_DP_HFS_MFN);
	}

	return 0;
}
169
/* Pass the request through unchanged; constraints are applied in set_rate. */
static long clk_pllv2_round_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long *prate)
{
	return (long)rate;
}
175
176static int clk_pllv2_prepare(struct clk_hw *hw)
177{
178 struct clk_pllv2 *pll = to_clk_pllv2(hw);
179 u32 reg;
180 void __iomem *pllbase;
181 int i = 0;
182
183 pllbase = pll->base;
184 reg = __raw_readl(pllbase + MXC_PLL_DP_CTL) | MXC_PLL_DP_CTL_UPEN;
185 __raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
186
187 /* Wait for lock */
188 do {
189 reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
190 if (reg & MXC_PLL_DP_CTL_LRF)
191 break;
192
193 udelay(1);
194 } while (++i < MAX_DPLL_WAIT_TRIES);
195
196 if (i == MAX_DPLL_WAIT_TRIES) {
197 pr_err("MX5: pll locking failed\n");
198 return -EINVAL;
199 }
200
201 return 0;
202}
203
204static void clk_pllv2_unprepare(struct clk_hw *hw)
205{
206 struct clk_pllv2 *pll = to_clk_pllv2(hw);
207 u32 reg;
208 void __iomem *pllbase;
209
210 pllbase = pll->base;
211 reg = __raw_readl(pllbase + MXC_PLL_DP_CTL) & ~MXC_PLL_DP_CTL_UPEN;
212 __raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
213}
214
215struct clk_ops clk_pllv2_ops = {
216 .prepare = clk_pllv2_prepare,
217 .unprepare = clk_pllv2_unprepare,
218 .recalc_rate = clk_pllv2_recalc_rate,
219 .round_rate = clk_pllv2_round_rate,
220 .set_rate = clk_pllv2_set_rate,
221};
222
223struct clk *imx_clk_pllv2(const char *name, const char *parent,
224 void __iomem *base)
225{
226 struct clk_pllv2 *pll;
227 struct clk *clk;
228 struct clk_init_data init;
229
230 pll = kzalloc(sizeof(*pll), GFP_KERNEL);
231 if (!pll)
232 return ERR_PTR(-ENOMEM);
233
234 pll->base = base;
235
236 init.name = name;
237 init.ops = &clk_pllv2_ops;
238 init.flags = 0;
239 init.parent_names = &parent;
240 init.num_parents = 1;
241
242 pll->hw.init = &init;
243
244 clk = clk_register(NULL, &pll->hw);
245 if (IS_ERR(clk))
246 kfree(pll);
247
248 return clk;
249}
diff --git a/arch/arm/mach-imx/clk-pllv3.c b/arch/arm/mach-imx/clk-pllv3.c
new file mode 100644
index 000000000000..36aac947bce1
--- /dev/null
+++ b/arch/arm/mach-imx/clk-pllv3.c
@@ -0,0 +1,419 @@
1/*
2 * Copyright 2012 Freescale Semiconductor, Inc.
3 * Copyright 2012 Linaro Ltd.
4 *
5 * The code contained herein is licensed under the GNU General Public
6 * License. You may obtain a copy of the GNU General Public License
7 * Version 2 or later at the following locations:
8 *
9 * http://www.opensource.org/licenses/gpl-license.html
10 * http://www.gnu.org/copyleft/gpl.html
11 */
12
13#include <linux/clk.h>
14#include <linux/clk-provider.h>
15#include <linux/io.h>
16#include <linux/slab.h>
17#include <linux/jiffies.h>
18#include <linux/err.h>
19#include "clk.h"
20
/* Offsets of the fractional NUM/DENOM registers relative to the PLL base */
#define PLL_NUM_OFFSET		0x10
#define PLL_DENOM_OFFSET	0x20

/* Bits in the PLL control register (at offset 0x0 from the base) */
#define BM_PLL_POWER		(0x1 << 12)
#define BM_PLL_ENABLE		(0x1 << 13)
#define BM_PLL_BYPASS		(0x1 << 16)
#define BM_PLL_LOCK		(0x1 << 31)
28
/**
 * struct clk_pllv3 - IMX PLL clock version 3
 * @hw:		clock source handle used by the common clock framework
 * @base:	base address of PLL registers
 * @powerup_set: set POWER bit to power up the PLL (when false the bit is
 *		 cleared to power up instead - see clk_pllv3_prepare())
 * @gate_mask:	mask of gate bits
 * @div_mask:	mask of divider bits
 *
 * IMX PLL clock version 3, found on i.MX6 series. Divider for pllv3
 * is actually a multiplier, and always sits at bit 0.
 */
struct clk_pllv3 {
	struct clk_hw hw;
	void __iomem *base;
	bool powerup_set;
	u32 gate_mask;
	u32 div_mask;
};

#define to_clk_pllv3(_hw) container_of(_hw, struct clk_pllv3, hw)
49
50static int clk_pllv3_prepare(struct clk_hw *hw)
51{
52 struct clk_pllv3 *pll = to_clk_pllv3(hw);
53 unsigned long timeout = jiffies + msecs_to_jiffies(10);
54 u32 val;
55
56 val = readl_relaxed(pll->base);
57 val &= ~BM_PLL_BYPASS;
58 if (pll->powerup_set)
59 val |= BM_PLL_POWER;
60 else
61 val &= ~BM_PLL_POWER;
62 writel_relaxed(val, pll->base);
63
64 /* Wait for PLL to lock */
65 while (!(readl_relaxed(pll->base) & BM_PLL_LOCK))
66 if (time_after(jiffies, timeout))
67 return -ETIMEDOUT;
68
69 return 0;
70}
71
72static void clk_pllv3_unprepare(struct clk_hw *hw)
73{
74 struct clk_pllv3 *pll = to_clk_pllv3(hw);
75 u32 val;
76
77 val = readl_relaxed(pll->base);
78 val |= BM_PLL_BYPASS;
79 if (pll->powerup_set)
80 val &= ~BM_PLL_POWER;
81 else
82 val |= BM_PLL_POWER;
83 writel_relaxed(val, pll->base);
84}
85
86static int clk_pllv3_enable(struct clk_hw *hw)
87{
88 struct clk_pllv3 *pll = to_clk_pllv3(hw);
89 u32 val;
90
91 val = readl_relaxed(pll->base);
92 val |= pll->gate_mask;
93 writel_relaxed(val, pll->base);
94
95 return 0;
96}
97
98static void clk_pllv3_disable(struct clk_hw *hw)
99{
100 struct clk_pllv3 *pll = to_clk_pllv3(hw);
101 u32 val;
102
103 val = readl_relaxed(pll->base);
104 val &= ~pll->gate_mask;
105 writel_relaxed(val, pll->base);
106}
107
108static unsigned long clk_pllv3_recalc_rate(struct clk_hw *hw,
109 unsigned long parent_rate)
110{
111 struct clk_pllv3 *pll = to_clk_pllv3(hw);
112 u32 div = readl_relaxed(pll->base) & pll->div_mask;
113
114 return (div == 1) ? parent_rate * 22 : parent_rate * 20;
115}
116
/* Only x20 and x22 are reachable; prefer x22 when the request allows it. */
static long clk_pllv3_round_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long *prate)
{
	if (rate >= *prate * 22)
		return *prate * 22;

	return *prate * 20;
}
125
126static int clk_pllv3_set_rate(struct clk_hw *hw, unsigned long rate,
127 unsigned long parent_rate)
128{
129 struct clk_pllv3 *pll = to_clk_pllv3(hw);
130 u32 val, div;
131
132 if (rate == parent_rate * 22)
133 div = 1;
134 else if (rate == parent_rate * 20)
135 div = 0;
136 else
137 return -EINVAL;
138
139 val = readl_relaxed(pll->base);
140 val &= ~pll->div_mask;
141 val |= div;
142 writel_relaxed(val, pll->base);
143
144 return 0;
145}
146
/* Generic pllv3 ops (also used for the USB variant, with powerup_set). */
static const struct clk_ops clk_pllv3_ops = {
	.prepare = clk_pllv3_prepare,
	.unprepare = clk_pllv3_unprepare,
	.enable = clk_pllv3_enable,
	.disable = clk_pllv3_disable,
	.recalc_rate = clk_pllv3_recalc_rate,
	.round_rate = clk_pllv3_round_rate,
	.set_rate = clk_pllv3_set_rate,
};
156
157static unsigned long clk_pllv3_sys_recalc_rate(struct clk_hw *hw,
158 unsigned long parent_rate)
159{
160 struct clk_pllv3 *pll = to_clk_pllv3(hw);
161 u32 div = readl_relaxed(pll->base) & pll->div_mask;
162
163 return parent_rate * div / 2;
164}
165
166static long clk_pllv3_sys_round_rate(struct clk_hw *hw, unsigned long rate,
167 unsigned long *prate)
168{
169 unsigned long parent_rate = *prate;
170 unsigned long min_rate = parent_rate * 54 / 2;
171 unsigned long max_rate = parent_rate * 108 / 2;
172 u32 div;
173
174 if (rate > max_rate)
175 rate = max_rate;
176 else if (rate < min_rate)
177 rate = min_rate;
178 div = rate * 2 / parent_rate;
179
180 return parent_rate * div / 2;
181}
182
183static int clk_pllv3_sys_set_rate(struct clk_hw *hw, unsigned long rate,
184 unsigned long parent_rate)
185{
186 struct clk_pllv3 *pll = to_clk_pllv3(hw);
187 unsigned long min_rate = parent_rate * 54 / 2;
188 unsigned long max_rate = parent_rate * 108 / 2;
189 u32 val, div;
190
191 if (rate < min_rate || rate > max_rate)
192 return -EINVAL;
193
194 div = rate * 2 / parent_rate;
195 val = readl_relaxed(pll->base);
196 val &= ~pll->div_mask;
197 val |= div;
198 writel_relaxed(val, pll->base);
199
200 return 0;
201}
202
/* SYS variant: integer multiplier in half steps (x27..x54 of the parent). */
static const struct clk_ops clk_pllv3_sys_ops = {
	.prepare = clk_pllv3_prepare,
	.unprepare = clk_pllv3_unprepare,
	.enable = clk_pllv3_enable,
	.disable = clk_pllv3_disable,
	.recalc_rate = clk_pllv3_sys_recalc_rate,
	.round_rate = clk_pllv3_sys_round_rate,
	.set_rate = clk_pllv3_sys_set_rate,
};
212
213static unsigned long clk_pllv3_av_recalc_rate(struct clk_hw *hw,
214 unsigned long parent_rate)
215{
216 struct clk_pllv3 *pll = to_clk_pllv3(hw);
217 u32 mfn = readl_relaxed(pll->base + PLL_NUM_OFFSET);
218 u32 mfd = readl_relaxed(pll->base + PLL_DENOM_OFFSET);
219 u32 div = readl_relaxed(pll->base) & pll->div_mask;
220
221 return (parent_rate * div) + ((parent_rate / mfd) * mfn);
222}
223
224static long clk_pllv3_av_round_rate(struct clk_hw *hw, unsigned long rate,
225 unsigned long *prate)
226{
227 unsigned long parent_rate = *prate;
228 unsigned long min_rate = parent_rate * 27;
229 unsigned long max_rate = parent_rate * 54;
230 u32 div;
231 u32 mfn, mfd = 1000000;
232 s64 temp64;
233
234 if (rate > max_rate)
235 rate = max_rate;
236 else if (rate < min_rate)
237 rate = min_rate;
238
239 div = rate / parent_rate;
240 temp64 = (u64) (rate - div * parent_rate);
241 temp64 *= mfd;
242 do_div(temp64, parent_rate);
243 mfn = temp64;
244
245 return parent_rate * div + parent_rate / mfd * mfn;
246}
247
248static int clk_pllv3_av_set_rate(struct clk_hw *hw, unsigned long rate,
249 unsigned long parent_rate)
250{
251 struct clk_pllv3 *pll = to_clk_pllv3(hw);
252 unsigned long min_rate = parent_rate * 27;
253 unsigned long max_rate = parent_rate * 54;
254 u32 val, div;
255 u32 mfn, mfd = 1000000;
256 s64 temp64;
257
258 if (rate < min_rate || rate > max_rate)
259 return -EINVAL;
260
261 div = rate / parent_rate;
262 temp64 = (u64) (rate - div * parent_rate);
263 temp64 *= mfd;
264 do_div(temp64, parent_rate);
265 mfn = temp64;
266
267 val = readl_relaxed(pll->base);
268 val &= ~pll->div_mask;
269 val |= div;
270 writel_relaxed(val, pll->base);
271 writel_relaxed(mfn, pll->base + PLL_NUM_OFFSET);
272 writel_relaxed(mfd, pll->base + PLL_DENOM_OFFSET);
273
274 return 0;
275}
276
/* Audio/Video variant: integer multiplier plus mfn/mfd fractional part. */
static const struct clk_ops clk_pllv3_av_ops = {
	.prepare = clk_pllv3_prepare,
	.unprepare = clk_pllv3_unprepare,
	.enable = clk_pllv3_enable,
	.disable = clk_pllv3_disable,
	.recalc_rate = clk_pllv3_av_recalc_rate,
	.round_rate = clk_pllv3_av_round_rate,
	.set_rate = clk_pllv3_av_set_rate,
};
286
287static unsigned long clk_pllv3_enet_recalc_rate(struct clk_hw *hw,
288 unsigned long parent_rate)
289{
290 struct clk_pllv3 *pll = to_clk_pllv3(hw);
291 u32 div = readl_relaxed(pll->base) & pll->div_mask;
292
293 switch (div) {
294 case 0:
295 return 25000000;
296 case 1:
297 return 50000000;
298 case 2:
299 return 100000000;
300 case 3:
301 return 125000000;
302 }
303
304 return 0;
305}
306
/* Round down to the nearest supported fixed rate (minimum 25 MHz). */
static long clk_pllv3_enet_round_rate(struct clk_hw *hw, unsigned long rate,
		unsigned long *prate)
{
	if (rate < 50000000)
		return 25000000;
	if (rate < 100000000)
		return 50000000;
	if (rate < 125000000)
		return 100000000;

	return 125000000;
}
320
321static int clk_pllv3_enet_set_rate(struct clk_hw *hw, unsigned long rate,
322 unsigned long parent_rate)
323{
324 struct clk_pllv3 *pll = to_clk_pllv3(hw);
325 u32 val, div;
326
327 switch (rate) {
328 case 25000000:
329 div = 0;
330 break;
331 case 50000000:
332 div = 1;
333 break;
334 case 100000000:
335 div = 2;
336 break;
337 case 125000000:
338 div = 3;
339 break;
340 default:
341 return -EINVAL;
342 }
343
344 val = readl_relaxed(pll->base);
345 val &= ~pll->div_mask;
346 val |= div;
347 writel_relaxed(val, pll->base);
348
349 return 0;
350}
351
/* ENET variant: fixed-rate table driven by the divider field. */
static const struct clk_ops clk_pllv3_enet_ops = {
	.prepare = clk_pllv3_prepare,
	.unprepare = clk_pllv3_unprepare,
	.enable = clk_pllv3_enable,
	.disable = clk_pllv3_disable,
	.recalc_rate = clk_pllv3_enet_recalc_rate,
	.round_rate = clk_pllv3_enet_round_rate,
	.set_rate = clk_pllv3_enet_set_rate,
};

/* MLB variant: gate/power control only; no rate operations implemented. */
static const struct clk_ops clk_pllv3_mlb_ops = {
	.prepare = clk_pllv3_prepare,
	.unprepare = clk_pllv3_unprepare,
	.enable = clk_pllv3_enable,
	.disable = clk_pllv3_disable,
};
368
369struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
370 const char *parent_name, void __iomem *base,
371 u32 gate_mask, u32 div_mask)
372{
373 struct clk_pllv3 *pll;
374 const struct clk_ops *ops;
375 struct clk *clk;
376 struct clk_init_data init;
377
378 pll = kzalloc(sizeof(*pll), GFP_KERNEL);
379 if (!pll)
380 return ERR_PTR(-ENOMEM);
381
382 switch (type) {
383 case IMX_PLLV3_SYS:
384 ops = &clk_pllv3_sys_ops;
385 break;
386 case IMX_PLLV3_USB:
387 ops = &clk_pllv3_ops;
388 pll->powerup_set = true;
389 break;
390 case IMX_PLLV3_AV:
391 ops = &clk_pllv3_av_ops;
392 break;
393 case IMX_PLLV3_ENET:
394 ops = &clk_pllv3_enet_ops;
395 break;
396 case IMX_PLLV3_MLB:
397 ops = &clk_pllv3_mlb_ops;
398 break;
399 default:
400 ops = &clk_pllv3_ops;
401 }
402 pll->base = base;
403 pll->gate_mask = gate_mask;
404 pll->div_mask = div_mask;
405
406 init.name = name;
407 init.ops = ops;
408 init.flags = 0;
409 init.parent_names = &parent_name;
410 init.num_parents = 1;
411
412 pll->hw.init = &init;
413
414 clk = clk_register(NULL, &pll->hw);
415 if (IS_ERR(clk))
416 kfree(pll);
417
418 return clk;
419}
diff --git a/arch/arm/mach-imx/clk.h b/arch/arm/mach-imx/clk.h
new file mode 100644
index 000000000000..1bf64fe2523c
--- /dev/null
+++ b/arch/arm/mach-imx/clk.h
@@ -0,0 +1,83 @@
#ifndef __MACH_IMX_CLK_H
#define __MACH_IMX_CLK_H

#include <linux/spinlock.h>
#include <linux/clk-provider.h>
#include <mach/clock.h>

/* PLL v1, found on i.MX1/21/25/27/31/35 (rate is read-only) */
struct clk *imx_clk_pllv1(const char *name, const char *parent,
		void __iomem *base);

/* PLL v2 (DPLL) */
struct clk *imx_clk_pllv2(const char *name, const char *parent,
		void __iomem *base);

/* pllv3 (i.MX6) variants; selects which clk_ops table is used */
enum imx_pllv3_type {
	IMX_PLLV3_GENERIC,
	IMX_PLLV3_SYS,
	IMX_PLLV3_USB,
	IMX_PLLV3_AV,
	IMX_PLLV3_ENET,
	IMX_PLLV3_MLB,
};

struct clk *imx_clk_pllv3(enum imx_pllv3_type type, const char *name,
		const char *parent_name, void __iomem *base, u32 gate_mask,
		u32 div_mask);

/* gate variant with a two-bit field starting at @bit_idx (defined elsewhere) */
struct clk *clk_register_gate2(struct device *dev, const char *name,
		const char *parent_name, unsigned long flags,
		void __iomem *reg, u8 bit_idx,
		u8 clk_gate_flags, spinlock_t *lock);

/*
 * NOTE(review): imx_ccm_lock is expected to be declared by mach/clock.h
 * (not visible in this chunk) - confirm.
 */
static inline struct clk *imx_clk_gate2(const char *name, const char *parent,
		void __iomem *reg, u8 shift)
{
	return clk_register_gate2(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
			shift, 0, &imx_ccm_lock);
}

struct clk *imx_clk_pfd(const char *name, const char *parent_name,
		void __iomem *reg, u8 idx);

/* divider that must wait for a busy flag to clear after reprogramming */
struct clk *imx_clk_busy_divider(const char *name, const char *parent_name,
		void __iomem *reg, u8 shift, u8 width,
		void __iomem *busy_reg, u8 busy_shift);

struct clk *imx_clk_busy_mux(const char *name, void __iomem *reg, u8 shift,
		u8 width, void __iomem *busy_reg, u8 busy_shift,
		const char **parent_names, int num_parents);

/* fixed-rate root clock (no parent) */
static inline struct clk *imx_clk_fixed(const char *name, int rate)
{
	return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
}

static inline struct clk *imx_clk_divider(const char *name, const char *parent,
		void __iomem *reg, u8 shift, u8 width)
{
	return clk_register_divider(NULL, name, parent, CLK_SET_RATE_PARENT,
			reg, shift, width, 0, &imx_ccm_lock);
}

static inline struct clk *imx_clk_gate(const char *name, const char *parent,
		void __iomem *reg, u8 shift)
{
	return clk_register_gate(NULL, name, parent, CLK_SET_RATE_PARENT, reg,
			shift, 0, &imx_ccm_lock);
}

static inline struct clk *imx_clk_mux(const char *name, void __iomem *reg,
		u8 shift, u8 width, const char **parents, int num_parents)
{
	return clk_register_mux(NULL, name, parents, num_parents, 0, reg, shift,
			width, 0, &imx_ccm_lock);
}

/* fixed mult/div factor clock, e.g. for internal x2 or /2 stages */
static inline struct clk *imx_clk_fixed_factor(const char *name,
		const char *parent, unsigned int mult, unsigned int div)
{
	return clk_register_fixed_factor(NULL, name, parent,
			CLK_SET_RATE_PARENT, mult, div);
}

#endif
diff --git a/arch/arm/mach-imx/clock-imx1.c b/arch/arm/mach-imx/clock-imx1.c
deleted file mode 100644
index 4aabeb241563..000000000000
--- a/arch/arm/mach-imx/clock-imx1.c
+++ /dev/null
@@ -1,636 +0,0 @@
1/*
2 * Copyright (C) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License along
14 * with this program; if not, write to the Free Software Foundation, Inc.,
15 * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
16 */
17
18#include <linux/kernel.h>
19#include <linux/init.h>
20#include <linux/list.h>
21#include <linux/math64.h>
22#include <linux/err.h>
23#include <linux/clk.h>
24#include <linux/io.h>
25#include <linux/clkdev.h>
26
27#include <mach/clock.h>
28#include <mach/hardware.h>
29#include <mach/common.h>
30
/* i.MX1 legacy clock driver: CCM and SCM register map and bit fields */
#define IO_ADDR_CCM(off)	(MX1_IO_ADDRESS(MX1_CCM_BASE_ADDR + (off)))

/* CCM register addresses */
#define CCM_CSCR	IO_ADDR_CCM(0x0)
#define CCM_MPCTL0	IO_ADDR_CCM(0x4)
#define CCM_SPCTL0	IO_ADDR_CCM(0xc)
#define CCM_PCDR	IO_ADDR_CCM(0x20)

#define CCM_CSCR_CLKO_OFFSET	29
#define CCM_CSCR_CLKO_MASK	(0x7 << 29)
#define CCM_CSCR_USB_OFFSET	26
#define CCM_CSCR_USB_MASK	(0x7 << 26)
#define CCM_CSCR_OSC_EN_SHIFT	17
#define CCM_CSCR_SYSTEM_SEL	(1 << 16)
#define CCM_CSCR_BCLK_OFFSET	10
#define CCM_CSCR_BCLK_MASK	(0xf << 10)
#define CCM_CSCR_PRESC		(1 << 15)

#define CCM_PCDR_PCLK3_OFFSET	16
#define CCM_PCDR_PCLK3_MASK	(0x7f << 16)
#define CCM_PCDR_PCLK2_OFFSET	4
#define CCM_PCDR_PCLK2_MASK	(0xf << 4)
#define CCM_PCDR_PCLK1_OFFSET	0
#define CCM_PCDR_PCLK1_MASK	0xf

#define IO_ADDR_SCM(off)	(MX1_IO_ADDRESS(MX1_SCM_BASE_ADDR + (off)))

/* SCM register addresses */
#define SCM_GCCR	IO_ADDR_SCM(0xc)

#define SCM_GCCR_DMA_CLK_EN_OFFSET	3
#define SCM_GCCR_CSI_CLK_EN_OFFSET	2
#define SCM_GCCR_MMA_CLK_EN_OFFSET	1
#define SCM_GCCR_USBD_CLK_EN_OFFSET	0
65
/* Set the clock's enable bit in its enable register. */
static int _clk_enable(struct clk *clk)
{
	unsigned int reg;

	reg = __raw_readl(clk->enable_reg);
	reg |= 1 << clk->enable_shift;
	__raw_writel(reg, clk->enable_reg);

	return 0;
}

/* Clear the clock's enable bit. */
static void _clk_disable(struct clk *clk)
{
	unsigned int reg;

	reg = __raw_readl(clk->enable_reg);
	reg &= ~(1 << clk->enable_shift);
	__raw_writel(reg, clk->enable_reg);
}

/* Return the index of @parent in @clk_arr, or -EINVAL if not present. */
static int _clk_can_use_parent(const struct clk *clk_arr[], unsigned int size,
			       struct clk *parent)
{
	int i;

	for (i = 0; i < size; i++)
		if (parent == clk_arr[i])
			return i;

	return -EINVAL;
}

/*
 * Pick divider = ceil(parent_rate / rate), capped at @limit, and return
 * the rate that divider actually produces.
 */
static unsigned long
_clk_simple_round_rate(struct clk *clk, unsigned long rate, unsigned int limit)
{
	int div;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;
	if (parent_rate % rate)
		div++;

	if (div > limit)
		div = limit;

	return parent_rate / div;
}

/* Forward round_rate/set_rate to the parent clock. */
static unsigned long _clk_parent_round_rate(struct clk *clk, unsigned long rate)
{
	return clk->parent->round_rate(clk->parent, rate);
}

static int _clk_parent_set_rate(struct clk *clk, unsigned long rate)
{
	return clk->parent->set_rate(clk->parent, rate);
}
125
/* 16 MHz oscillator; gated via the CSCR OSC enable bit. */
static unsigned long clk16m_get_rate(struct clk *clk)
{
	return 16000000;
}

static struct clk clk16m = {
	.get_rate = clk16m_get_rate,
	.enable = _clk_enable,
	.enable_reg = CCM_CSCR,
	.enable_shift = CCM_CSCR_OSC_EN_SHIFT,
	.disable = _clk_disable,
};

/* in Hz; presumably assigned during clock init (not visible here) */
static unsigned long clk32_rate;

static unsigned long clk32_get_rate(struct clk *clk)
{
	return clk32_rate;
}

static struct clk clk32 = {
	.get_rate = clk32_get_rate,
};

/* clk32 premultiplied by 512; feeds the PLLs via prem_clk/mcu_clk. */
static unsigned long clk32_premult_get_rate(struct clk *clk)
{
	return clk_get_rate(clk->parent) * 512;
}

static struct clk clk32_premult = {
	.parent = &clk32,
	.get_rate = clk32_premult_get_rate,
};

/* Possible parents of prem_clk, indexed by the CSCR SYSTEM_SEL bit. */
static const struct clk *prem_clk_clocks[] = {
	&clk32_premult,
	&clk16m,
};

/* Select the premultiplier clock parent via CSCR SYSTEM_SEL. */
static int prem_clk_set_parent(struct clk *clk, struct clk *parent)
{
	int i;
	unsigned int reg = __raw_readl(CCM_CSCR);

	i = _clk_can_use_parent(prem_clk_clocks, ARRAY_SIZE(prem_clk_clocks),
				parent);

	switch (i) {
	case 0:
		reg &= ~CCM_CSCR_SYSTEM_SEL;
		break;
	case 1:
		reg |= CCM_CSCR_SYSTEM_SEL;
		break;
	default:
		/* parent not in the table: propagate -EINVAL */
		return i;
	}

	__raw_writel(reg, CCM_CSCR);

	return 0;
}

static struct clk prem_clk = {
	.set_parent = prem_clk_set_parent,
};
193
/* System PLL: rate decoded from SPCTL0 against the premultiplier clock. */
static unsigned long system_clk_get_rate(struct clk *clk)
{
	return mxc_decode_pll(__raw_readl(CCM_SPCTL0),
			      clk_get_rate(clk->parent));
}

static struct clk system_clk = {
	.parent = &prem_clk,
	.get_rate = system_clk_get_rate,
};

/* MCU PLL: rate decoded from MPCTL0. */
static unsigned long mcu_clk_get_rate(struct clk *clk)
{
	return mxc_decode_pll(__raw_readl(CCM_MPCTL0),
			      clk_get_rate(clk->parent));
}

static struct clk mcu_clk = {
	.parent = &clk32_premult,
	.get_rate = mcu_clk_get_rate,
};

/* fclk: MCU PLL output, optionally halved by the CSCR PRESC bit. */
static unsigned long fclk_get_rate(struct clk *clk)
{
	unsigned long fclk = clk_get_rate(clk->parent);

	if (__raw_readl(CCM_CSCR) & CCM_CSCR_PRESC)
		fclk /= 2;

	return fclk;
}

static struct clk fclk = {
	.parent = &mcu_clk,
	.get_rate = fclk_get_rate,
};
230
/*
 * get hclk ( SDRAM, CSI, Memory Stick, I2C, DMA )
 */
static unsigned long hclk_get_rate(struct clk *clk)
{
	return clk_get_rate(clk->parent) / (((__raw_readl(CCM_CSCR) &
		CCM_CSCR_BCLK_MASK) >> CCM_CSCR_BCLK_OFFSET) + 1);
}

static unsigned long hclk_round_rate(struct clk *clk, unsigned long rate)
{
	return _clk_simple_round_rate(clk, rate, 16);
}

/* Program the CSCR BCLK divider (1..16); rate must divide exactly. */
static int hclk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned int div;
	unsigned int reg;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	if (div > 16 || div < 1 || ((parent_rate / div) != rate))
		return -EINVAL;

	div--;

	reg = __raw_readl(CCM_CSCR);
	reg &= ~CCM_CSCR_BCLK_MASK;
	reg |= div << CCM_CSCR_BCLK_OFFSET;
	__raw_writel(reg, CCM_CSCR);

	return 0;
}

static struct clk hclk = {
	.parent = &system_clk,
	.get_rate = hclk_get_rate,
	.round_rate = hclk_round_rate,
	.set_rate = hclk_set_rate,
};

/* 48 MHz class clock (used by usbd_clk); CSCR USB divider, 1..8. */
static unsigned long clk48m_get_rate(struct clk *clk)
{
	return clk_get_rate(clk->parent) / (((__raw_readl(CCM_CSCR) &
		CCM_CSCR_USB_MASK) >> CCM_CSCR_USB_OFFSET) + 1);
}

static unsigned long clk48m_round_rate(struct clk *clk, unsigned long rate)
{
	return _clk_simple_round_rate(clk, rate, 8);
}

/* Program the CSCR USB divider (1..8); rate must divide exactly. */
static int clk48m_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned int div;
	unsigned int reg;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	if (div > 8 || div < 1 || ((parent_rate / div) != rate))
		return -EINVAL;

	div--;

	reg = __raw_readl(CCM_CSCR);
	reg &= ~CCM_CSCR_USB_MASK;
	reg |= div << CCM_CSCR_USB_OFFSET;
	__raw_writel(reg, CCM_CSCR);

	return 0;
}

static struct clk clk48m = {
	.parent = &system_clk,
	.get_rate = clk48m_get_rate,
	.round_rate = clk48m_round_rate,
	.set_rate = clk48m_set_rate,
};
315
/*
 * get peripheral clock 1 ( UART[12], Timer[12], PWM )
 */
static unsigned long perclk1_get_rate(struct clk *clk)
{
	return clk_get_rate(clk->parent) / (((__raw_readl(CCM_PCDR) &
		CCM_PCDR_PCLK1_MASK) >> CCM_PCDR_PCLK1_OFFSET) + 1);
}

static unsigned long perclk1_round_rate(struct clk *clk, unsigned long rate)
{
	return _clk_simple_round_rate(clk, rate, 16);
}

/* Program the PCDR PCLK1 divider (1..16); rate must divide exactly. */
static int perclk1_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned int div;
	unsigned int reg;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	if (div > 16 || div < 1 || ((parent_rate / div) != rate))
		return -EINVAL;

	div--;

	reg = __raw_readl(CCM_PCDR);
	reg &= ~CCM_PCDR_PCLK1_MASK;
	reg |= div << CCM_PCDR_PCLK1_OFFSET;
	__raw_writel(reg, CCM_PCDR);

	return 0;
}

/*
 * get peripheral clock 2 ( LCD, SD, SPI[12] )
 */
static unsigned long perclk2_get_rate(struct clk *clk)
{
	return clk_get_rate(clk->parent) / (((__raw_readl(CCM_PCDR) &
		CCM_PCDR_PCLK2_MASK) >> CCM_PCDR_PCLK2_OFFSET) + 1);
}

static unsigned long perclk2_round_rate(struct clk *clk, unsigned long rate)
{
	return _clk_simple_round_rate(clk, rate, 16);
}

/* Program the PCDR PCLK2 divider (1..16); rate must divide exactly. */
static int perclk2_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned int div;
	unsigned int reg;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	if (div > 16 || div < 1 || ((parent_rate / div) != rate))
		return -EINVAL;

	div--;

	reg = __raw_readl(CCM_PCDR);
	reg &= ~CCM_PCDR_PCLK2_MASK;
	reg |= div << CCM_PCDR_PCLK2_OFFSET;
	__raw_writel(reg, CCM_PCDR);

	return 0;
}

/*
 * get peripheral clock 3 ( SSI )
 */
static unsigned long perclk3_get_rate(struct clk *clk)
{
	return clk_get_rate(clk->parent) / (((__raw_readl(CCM_PCDR) &
		CCM_PCDR_PCLK3_MASK) >> CCM_PCDR_PCLK3_OFFSET) + 1);
}

static unsigned long perclk3_round_rate(struct clk *clk, unsigned long rate)
{
	return _clk_simple_round_rate(clk, rate, 128);
}

/* Program the PCDR PCLK3 divider (1..128); rate must divide exactly. */
static int perclk3_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned int div;
	unsigned int reg;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	if (div > 128 || div < 1 || ((parent_rate / div) != rate))
		return -EINVAL;

	div--;

	reg = __raw_readl(CCM_PCDR);
	reg &= ~CCM_PCDR_PCLK3_MASK;
	reg |= div << CCM_PCDR_PCLK3_OFFSET;
	__raw_writel(reg, CCM_PCDR);

	return 0;
}
426
/* The three peripheral clocks, all divided down from the system PLL. */
static struct clk perclk[] = {
	{
		.id = 0,
		.parent = &system_clk,
		.get_rate = perclk1_get_rate,
		.round_rate = perclk1_round_rate,
		.set_rate = perclk1_set_rate,
	}, {
		.id = 1,
		.parent = &system_clk,
		.get_rate = perclk2_get_rate,
		.round_rate = perclk2_round_rate,
		.set_rate = perclk2_set_rate,
	}, {
		.id = 2,
		.parent = &system_clk,
		.get_rate = perclk3_get_rate,
		.round_rate = perclk3_round_rate,
		.set_rate = perclk3_set_rate,
	}
};

/* Candidate sources for CLKO, indexed by the CSCR CLKO field value. */
static const struct clk *clko_clocks[] = {
	&perclk[0],
	&hclk,
	&clk48m,
	&clk16m,
	&prem_clk,
	&fclk,
};

/* Route one of clko_clocks[] to CLKO via the CSCR CLKO field. */
static int clko_set_parent(struct clk *clk, struct clk *parent)
{
	int i;
	unsigned int reg;

	i = _clk_can_use_parent(clko_clocks, ARRAY_SIZE(clko_clocks), parent);
	if (i < 0)
		return i;

	reg = __raw_readl(CCM_CSCR) & ~CCM_CSCR_CLKO_MASK;
	reg |= i << CCM_CSCR_CLKO_OFFSET;
	__raw_writel(reg, CCM_CSCR);

	/* only forward rate ops when the selected parent supports both */
	if (clko_clocks[i]->set_rate && clko_clocks[i]->round_rate) {
		clk->set_rate = _clk_parent_set_rate;
		clk->round_rate = _clk_parent_round_rate;
	} else {
		clk->set_rate = NULL;
		clk->round_rate = NULL;
	}

	return 0;
}

static struct clk clko_clk = {
	.set_parent = clko_set_parent,
};
485
486static struct clk dma_clk = {
487 .parent = &hclk,
488 .round_rate = _clk_parent_round_rate,
489 .set_rate = _clk_parent_set_rate,
490 .enable = _clk_enable,
491 .enable_reg = SCM_GCCR,
492 .enable_shift = SCM_GCCR_DMA_CLK_EN_OFFSET,
493 .disable = _clk_disable,
494};
495
496static struct clk csi_clk = {
497 .parent = &hclk,
498 .round_rate = _clk_parent_round_rate,
499 .set_rate = _clk_parent_set_rate,
500 .enable = _clk_enable,
501 .enable_reg = SCM_GCCR,
502 .enable_shift = SCM_GCCR_CSI_CLK_EN_OFFSET,
503 .disable = _clk_disable,
504};
505
506static struct clk mma_clk = {
507 .parent = &hclk,
508 .round_rate = _clk_parent_round_rate,
509 .set_rate = _clk_parent_set_rate,
510 .enable = _clk_enable,
511 .enable_reg = SCM_GCCR,
512 .enable_shift = SCM_GCCR_MMA_CLK_EN_OFFSET,
513 .disable = _clk_disable,
514};
515
516static struct clk usbd_clk = {
517 .parent = &clk48m,
518 .round_rate = _clk_parent_round_rate,
519 .set_rate = _clk_parent_set_rate,
520 .enable = _clk_enable,
521 .enable_reg = SCM_GCCR,
522 .enable_shift = SCM_GCCR_USBD_CLK_EN_OFFSET,
523 .disable = _clk_disable,
524};
525
526static struct clk gpt_clk = {
527 .parent = &perclk[0],
528 .round_rate = _clk_parent_round_rate,
529 .set_rate = _clk_parent_set_rate,
530};
531
532static struct clk uart_clk = {
533 .parent = &perclk[0],
534 .round_rate = _clk_parent_round_rate,
535 .set_rate = _clk_parent_set_rate,
536};
537
538static struct clk i2c_clk = {
539 .parent = &hclk,
540 .round_rate = _clk_parent_round_rate,
541 .set_rate = _clk_parent_set_rate,
542};
543
544static struct clk spi_clk = {
545 .parent = &perclk[1],
546 .round_rate = _clk_parent_round_rate,
547 .set_rate = _clk_parent_set_rate,
548};
549
550static struct clk sdhc_clk = {
551 .parent = &perclk[1],
552 .round_rate = _clk_parent_round_rate,
553 .set_rate = _clk_parent_set_rate,
554};
555
556static struct clk lcdc_clk = {
557 .parent = &perclk[1],
558 .round_rate = _clk_parent_round_rate,
559 .set_rate = _clk_parent_set_rate,
560};
561
562static struct clk mshc_clk = {
563 .parent = &hclk,
564 .round_rate = _clk_parent_round_rate,
565 .set_rate = _clk_parent_set_rate,
566};
567
568static struct clk ssi_clk = {
569 .parent = &perclk[2],
570 .round_rate = _clk_parent_round_rate,
571 .set_rate = _clk_parent_set_rate,
572};
573
574static struct clk rtc_clk = {
575 .parent = &clk32,
576};
577
578#define _REGISTER_CLOCK(d, n, c) \
579 { \
580 .dev_id = d, \
581 .con_id = n, \
582 .clk = &c, \
583 },
584static struct clk_lookup lookups[] __initdata = {
585 _REGISTER_CLOCK(NULL, "dma", dma_clk)
586 _REGISTER_CLOCK("mx1-camera.0", NULL, csi_clk)
587 _REGISTER_CLOCK(NULL, "mma", mma_clk)
588 _REGISTER_CLOCK("imx_udc.0", NULL, usbd_clk)
589 _REGISTER_CLOCK(NULL, "gpt", gpt_clk)
590 _REGISTER_CLOCK("imx1-uart.0", NULL, uart_clk)
591 _REGISTER_CLOCK("imx1-uart.1", NULL, uart_clk)
592 _REGISTER_CLOCK("imx1-uart.2", NULL, uart_clk)
593 _REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
594 _REGISTER_CLOCK("imx1-cspi.0", NULL, spi_clk)
595 _REGISTER_CLOCK("imx1-cspi.1", NULL, spi_clk)
596 _REGISTER_CLOCK("imx-mmc.0", NULL, sdhc_clk)
597 _REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk)
598 _REGISTER_CLOCK(NULL, "mshc", mshc_clk)
599 _REGISTER_CLOCK(NULL, "ssi", ssi_clk)
600 _REGISTER_CLOCK("mxc_rtc.0", NULL, rtc_clk)
601};
602
603int __init mx1_clocks_init(unsigned long fref)
604{
605 unsigned int reg;
606
607 /* disable clocks we are able to */
608 __raw_writel(0, SCM_GCCR);
609
610 clk32_rate = fref;
611 reg = __raw_readl(CCM_CSCR);
612
613 /* detect clock reference for system PLL */
614 if (reg & CCM_CSCR_SYSTEM_SEL) {
615 prem_clk.parent = &clk16m;
616 } else {
617 /* ensure that oscillator is disabled */
618 reg &= ~(1 << CCM_CSCR_OSC_EN_SHIFT);
619 __raw_writel(reg, CCM_CSCR);
620 prem_clk.parent = &clk32_premult;
621 }
622
623 /* detect reference for CLKO */
624 reg = (reg & CCM_CSCR_CLKO_MASK) >> CCM_CSCR_CLKO_OFFSET;
625 clko_clk.parent = (struct clk *)clko_clocks[reg];
626
627 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
628
629 clk_enable(&hclk);
630 clk_enable(&fclk);
631
632 mxc_timer_init(&gpt_clk, MX1_IO_ADDRESS(MX1_TIM1_BASE_ADDR),
633 MX1_TIM1_INT);
634
635 return 0;
636}
diff --git a/arch/arm/mach-imx/clock-imx21.c b/arch/arm/mach-imx/clock-imx21.c
deleted file mode 100644
index ee15d8c9db08..000000000000
--- a/arch/arm/mach-imx/clock-imx21.c
+++ /dev/null
@@ -1,1239 +0,0 @@
1/*
2 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
3 * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
4 * Copyright 2008 Martin Fuzzey, mfuzzey@gmail.com
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
18 * MA 02110-1301, USA.
19 */
20
21#include <linux/clk.h>
22#include <linux/io.h>
23#include <linux/module.h>
24#include <linux/clkdev.h>
25
26#include <mach/clock.h>
27#include <mach/hardware.h>
28#include <mach/common.h>
29#include <asm/div64.h>
30
31#define IO_ADDR_CCM(off) (MX21_IO_ADDRESS(MX21_CCM_BASE_ADDR + (off)))
32
33/* Register offsets */
34#define CCM_CSCR IO_ADDR_CCM(0x0)
35#define CCM_MPCTL0 IO_ADDR_CCM(0x4)
36#define CCM_MPCTL1 IO_ADDR_CCM(0x8)
37#define CCM_SPCTL0 IO_ADDR_CCM(0xc)
38#define CCM_SPCTL1 IO_ADDR_CCM(0x10)
39#define CCM_OSC26MCTL IO_ADDR_CCM(0x14)
40#define CCM_PCDR0 IO_ADDR_CCM(0x18)
41#define CCM_PCDR1 IO_ADDR_CCM(0x1c)
42#define CCM_PCCR0 IO_ADDR_CCM(0x20)
43#define CCM_PCCR1 IO_ADDR_CCM(0x24)
44#define CCM_CCSR IO_ADDR_CCM(0x28)
45#define CCM_PMCTL IO_ADDR_CCM(0x2c)
46#define CCM_PMCOUNT IO_ADDR_CCM(0x30)
47#define CCM_WKGDCTL IO_ADDR_CCM(0x34)
48
49#define CCM_CSCR_PRESC_OFFSET 29
50#define CCM_CSCR_PRESC_MASK (0x7 << CCM_CSCR_PRESC_OFFSET)
51
52#define CCM_CSCR_USB_OFFSET 26
53#define CCM_CSCR_USB_MASK (0x7 << CCM_CSCR_USB_OFFSET)
54#define CCM_CSCR_SD_OFFSET 24
55#define CCM_CSCR_SD_MASK (0x3 << CCM_CSCR_SD_OFFSET)
56#define CCM_CSCR_SPLLRES (1 << 22)
57#define CCM_CSCR_MPLLRES (1 << 21)
58#define CCM_CSCR_SSI2_OFFSET 20
59#define CCM_CSCR_SSI2 (1 << CCM_CSCR_SSI2_OFFSET)
60#define CCM_CSCR_SSI1_OFFSET 19
61#define CCM_CSCR_SSI1 (1 << CCM_CSCR_SSI1_OFFSET)
62#define CCM_CSCR_FIR_OFFSET 18
63#define CCM_CSCR_FIR (1 << CCM_CSCR_FIR_OFFSET)
64#define CCM_CSCR_SP (1 << 17)
65#define CCM_CSCR_MCU (1 << 16)
66#define CCM_CSCR_BCLK_OFFSET 10
67#define CCM_CSCR_BCLK_MASK (0xf << CCM_CSCR_BCLK_OFFSET)
68#define CCM_CSCR_IPDIV_OFFSET 9
69#define CCM_CSCR_IPDIV (1 << CCM_CSCR_IPDIV_OFFSET)
70
71#define CCM_CSCR_OSC26MDIV (1 << 4)
72#define CCM_CSCR_OSC26M (1 << 3)
73#define CCM_CSCR_FPM (1 << 2)
74#define CCM_CSCR_SPEN (1 << 1)
75#define CCM_CSCR_MPEN 1
76
77#define CCM_MPCTL0_CPLM (1 << 31)
78#define CCM_MPCTL0_PD_OFFSET 26
79#define CCM_MPCTL0_PD_MASK (0xf << 26)
80#define CCM_MPCTL0_MFD_OFFSET 16
81#define CCM_MPCTL0_MFD_MASK (0x3ff << 16)
82#define CCM_MPCTL0_MFI_OFFSET 10
83#define CCM_MPCTL0_MFI_MASK (0xf << 10)
84#define CCM_MPCTL0_MFN_OFFSET 0
85#define CCM_MPCTL0_MFN_MASK 0x3ff
86
87#define CCM_MPCTL1_LF (1 << 15)
88#define CCM_MPCTL1_BRMO (1 << 6)
89
90#define CCM_SPCTL0_CPLM (1 << 31)
91#define CCM_SPCTL0_PD_OFFSET 26
92#define CCM_SPCTL0_PD_MASK (0xf << 26)
93#define CCM_SPCTL0_MFD_OFFSET 16
94#define CCM_SPCTL0_MFD_MASK (0x3ff << 16)
95#define CCM_SPCTL0_MFI_OFFSET 10
96#define CCM_SPCTL0_MFI_MASK (0xf << 10)
97#define CCM_SPCTL0_MFN_OFFSET 0
98#define CCM_SPCTL0_MFN_MASK 0x3ff
99
100#define CCM_SPCTL1_LF (1 << 15)
101#define CCM_SPCTL1_BRMO (1 << 6)
102
103#define CCM_OSC26MCTL_PEAK_OFFSET 16
104#define CCM_OSC26MCTL_PEAK_MASK (0x3 << 16)
105#define CCM_OSC26MCTL_AGC_OFFSET 8
106#define CCM_OSC26MCTL_AGC_MASK (0x3f << 8)
107#define CCM_OSC26MCTL_ANATEST_OFFSET 0
108#define CCM_OSC26MCTL_ANATEST_MASK 0x3f
109
110#define CCM_PCDR0_SSI2BAUDDIV_OFFSET 26
111#define CCM_PCDR0_SSI2BAUDDIV_MASK (0x3f << 26)
112#define CCM_PCDR0_SSI1BAUDDIV_OFFSET 16
113#define CCM_PCDR0_SSI1BAUDDIV_MASK (0x3f << 16)
114#define CCM_PCDR0_NFCDIV_OFFSET 12
115#define CCM_PCDR0_NFCDIV_MASK (0xf << 12)
116#define CCM_PCDR0_48MDIV_OFFSET 5
117#define CCM_PCDR0_48MDIV_MASK (0x7 << CCM_PCDR0_48MDIV_OFFSET)
118#define CCM_PCDR0_FIRIDIV_OFFSET 0
119#define CCM_PCDR0_FIRIDIV_MASK 0x1f
120#define CCM_PCDR1_PERDIV4_OFFSET 24
121#define CCM_PCDR1_PERDIV4_MASK (0x3f << 24)
122#define CCM_PCDR1_PERDIV3_OFFSET 16
123#define CCM_PCDR1_PERDIV3_MASK (0x3f << 16)
124#define CCM_PCDR1_PERDIV2_OFFSET 8
125#define CCM_PCDR1_PERDIV2_MASK (0x3f << 8)
126#define CCM_PCDR1_PERDIV1_OFFSET 0
127#define CCM_PCDR1_PERDIV1_MASK 0x3f
128
129#define CCM_PCCR_HCLK_CSI_OFFSET 31
130#define CCM_PCCR_HCLK_CSI_REG CCM_PCCR0
131#define CCM_PCCR_HCLK_DMA_OFFSET 30
132#define CCM_PCCR_HCLK_DMA_REG CCM_PCCR0
133#define CCM_PCCR_HCLK_BROM_OFFSET 28
134#define CCM_PCCR_HCLK_BROM_REG CCM_PCCR0
135#define CCM_PCCR_HCLK_EMMA_OFFSET 27
136#define CCM_PCCR_HCLK_EMMA_REG CCM_PCCR0
137#define CCM_PCCR_HCLK_LCDC_OFFSET 26
138#define CCM_PCCR_HCLK_LCDC_REG CCM_PCCR0
139#define CCM_PCCR_HCLK_SLCDC_OFFSET 25
140#define CCM_PCCR_HCLK_SLCDC_REG CCM_PCCR0
141#define CCM_PCCR_HCLK_USBOTG_OFFSET 24
142#define CCM_PCCR_HCLK_USBOTG_REG CCM_PCCR0
143#define CCM_PCCR_HCLK_BMI_OFFSET 23
144#define CCM_PCCR_BMI_MASK (1 << CCM_PCCR_BMI_MASK)
145#define CCM_PCCR_HCLK_BMI_REG CCM_PCCR0
146#define CCM_PCCR_PERCLK4_OFFSET 22
147#define CCM_PCCR_PERCLK4_REG CCM_PCCR0
148#define CCM_PCCR_SLCDC_OFFSET 21
149#define CCM_PCCR_SLCDC_REG CCM_PCCR0
150#define CCM_PCCR_FIRI_BAUD_OFFSET 20
151#define CCM_PCCR_FIRI_BAUD_MASK (1 << CCM_PCCR_FIRI_BAUD_MASK)
152#define CCM_PCCR_FIRI_BAUD_REG CCM_PCCR0
153#define CCM_PCCR_NFC_OFFSET 19
154#define CCM_PCCR_NFC_REG CCM_PCCR0
155#define CCM_PCCR_LCDC_OFFSET 18
156#define CCM_PCCR_LCDC_REG CCM_PCCR0
157#define CCM_PCCR_SSI1_BAUD_OFFSET 17
158#define CCM_PCCR_SSI1_BAUD_REG CCM_PCCR0
159#define CCM_PCCR_SSI2_BAUD_OFFSET 16
160#define CCM_PCCR_SSI2_BAUD_REG CCM_PCCR0
161#define CCM_PCCR_EMMA_OFFSET 15
162#define CCM_PCCR_EMMA_REG CCM_PCCR0
163#define CCM_PCCR_USBOTG_OFFSET 14
164#define CCM_PCCR_USBOTG_REG CCM_PCCR0
165#define CCM_PCCR_DMA_OFFSET 13
166#define CCM_PCCR_DMA_REG CCM_PCCR0
167#define CCM_PCCR_I2C1_OFFSET 12
168#define CCM_PCCR_I2C1_REG CCM_PCCR0
169#define CCM_PCCR_GPIO_OFFSET 11
170#define CCM_PCCR_GPIO_REG CCM_PCCR0
171#define CCM_PCCR_SDHC2_OFFSET 10
172#define CCM_PCCR_SDHC2_REG CCM_PCCR0
173#define CCM_PCCR_SDHC1_OFFSET 9
174#define CCM_PCCR_SDHC1_REG CCM_PCCR0
175#define CCM_PCCR_FIRI_OFFSET 8
176#define CCM_PCCR_FIRI_MASK (1 << CCM_PCCR_BAUD_MASK)
177#define CCM_PCCR_FIRI_REG CCM_PCCR0
178#define CCM_PCCR_SSI2_IPG_OFFSET 7
179#define CCM_PCCR_SSI2_REG CCM_PCCR0
180#define CCM_PCCR_SSI1_IPG_OFFSET 6
181#define CCM_PCCR_SSI1_REG CCM_PCCR0
182#define CCM_PCCR_CSPI2_OFFSET 5
183#define CCM_PCCR_CSPI2_REG CCM_PCCR0
184#define CCM_PCCR_CSPI1_OFFSET 4
185#define CCM_PCCR_CSPI1_REG CCM_PCCR0
186#define CCM_PCCR_UART4_OFFSET 3
187#define CCM_PCCR_UART4_REG CCM_PCCR0
188#define CCM_PCCR_UART3_OFFSET 2
189#define CCM_PCCR_UART3_REG CCM_PCCR0
190#define CCM_PCCR_UART2_OFFSET 1
191#define CCM_PCCR_UART2_REG CCM_PCCR0
192#define CCM_PCCR_UART1_OFFSET 0
193#define CCM_PCCR_UART1_REG CCM_PCCR0
194
195#define CCM_PCCR_OWIRE_OFFSET 31
196#define CCM_PCCR_OWIRE_REG CCM_PCCR1
197#define CCM_PCCR_KPP_OFFSET 30
198#define CCM_PCCR_KPP_REG CCM_PCCR1
199#define CCM_PCCR_RTC_OFFSET 29
200#define CCM_PCCR_RTC_REG CCM_PCCR1
201#define CCM_PCCR_PWM_OFFSET 28
202#define CCM_PCCR_PWM_REG CCM_PCCR1
203#define CCM_PCCR_GPT3_OFFSET 27
204#define CCM_PCCR_GPT3_REG CCM_PCCR1
205#define CCM_PCCR_GPT2_OFFSET 26
206#define CCM_PCCR_GPT2_REG CCM_PCCR1
207#define CCM_PCCR_GPT1_OFFSET 25
208#define CCM_PCCR_GPT1_REG CCM_PCCR1
209#define CCM_PCCR_WDT_OFFSET 24
210#define CCM_PCCR_WDT_REG CCM_PCCR1
211#define CCM_PCCR_CSPI3_OFFSET 23
212#define CCM_PCCR_CSPI3_REG CCM_PCCR1
213
214#define CCM_PCCR_CSPI1_MASK (1 << CCM_PCCR_CSPI1_OFFSET)
215#define CCM_PCCR_CSPI2_MASK (1 << CCM_PCCR_CSPI2_OFFSET)
216#define CCM_PCCR_CSPI3_MASK (1 << CCM_PCCR_CSPI3_OFFSET)
217#define CCM_PCCR_DMA_MASK (1 << CCM_PCCR_DMA_OFFSET)
218#define CCM_PCCR_EMMA_MASK (1 << CCM_PCCR_EMMA_OFFSET)
219#define CCM_PCCR_GPIO_MASK (1 << CCM_PCCR_GPIO_OFFSET)
220#define CCM_PCCR_GPT1_MASK (1 << CCM_PCCR_GPT1_OFFSET)
221#define CCM_PCCR_GPT2_MASK (1 << CCM_PCCR_GPT2_OFFSET)
222#define CCM_PCCR_GPT3_MASK (1 << CCM_PCCR_GPT3_OFFSET)
223#define CCM_PCCR_HCLK_BROM_MASK (1 << CCM_PCCR_HCLK_BROM_OFFSET)
224#define CCM_PCCR_HCLK_CSI_MASK (1 << CCM_PCCR_HCLK_CSI_OFFSET)
225#define CCM_PCCR_HCLK_DMA_MASK (1 << CCM_PCCR_HCLK_DMA_OFFSET)
226#define CCM_PCCR_HCLK_EMMA_MASK (1 << CCM_PCCR_HCLK_EMMA_OFFSET)
227#define CCM_PCCR_HCLK_LCDC_MASK (1 << CCM_PCCR_HCLK_LCDC_OFFSET)
228#define CCM_PCCR_HCLK_SLCDC_MASK (1 << CCM_PCCR_HCLK_SLCDC_OFFSET)
229#define CCM_PCCR_HCLK_USBOTG_MASK (1 << CCM_PCCR_HCLK_USBOTG_OFFSET)
230#define CCM_PCCR_I2C1_MASK (1 << CCM_PCCR_I2C1_OFFSET)
231#define CCM_PCCR_KPP_MASK (1 << CCM_PCCR_KPP_OFFSET)
232#define CCM_PCCR_LCDC_MASK (1 << CCM_PCCR_LCDC_OFFSET)
233#define CCM_PCCR_NFC_MASK (1 << CCM_PCCR_NFC_OFFSET)
234#define CCM_PCCR_OWIRE_MASK (1 << CCM_PCCR_OWIRE_OFFSET)
235#define CCM_PCCR_PERCLK4_MASK (1 << CCM_PCCR_PERCLK4_OFFSET)
236#define CCM_PCCR_PWM_MASK (1 << CCM_PCCR_PWM_OFFSET)
237#define CCM_PCCR_RTC_MASK (1 << CCM_PCCR_RTC_OFFSET)
238#define CCM_PCCR_SDHC1_MASK (1 << CCM_PCCR_SDHC1_OFFSET)
239#define CCM_PCCR_SDHC2_MASK (1 << CCM_PCCR_SDHC2_OFFSET)
240#define CCM_PCCR_SLCDC_MASK (1 << CCM_PCCR_SLCDC_OFFSET)
241#define CCM_PCCR_SSI1_BAUD_MASK (1 << CCM_PCCR_SSI1_BAUD_OFFSET)
242#define CCM_PCCR_SSI1_IPG_MASK (1 << CCM_PCCR_SSI1_IPG_OFFSET)
243#define CCM_PCCR_SSI2_BAUD_MASK (1 << CCM_PCCR_SSI2_BAUD_OFFSET)
244#define CCM_PCCR_SSI2_IPG_MASK (1 << CCM_PCCR_SSI2_IPG_OFFSET)
245#define CCM_PCCR_UART1_MASK (1 << CCM_PCCR_UART1_OFFSET)
246#define CCM_PCCR_UART2_MASK (1 << CCM_PCCR_UART2_OFFSET)
247#define CCM_PCCR_UART3_MASK (1 << CCM_PCCR_UART3_OFFSET)
248#define CCM_PCCR_UART4_MASK (1 << CCM_PCCR_UART4_OFFSET)
249#define CCM_PCCR_USBOTG_MASK (1 << CCM_PCCR_USBOTG_OFFSET)
250#define CCM_PCCR_WDT_MASK (1 << CCM_PCCR_WDT_OFFSET)
251
252#define CCM_CCSR_32KSR (1 << 15)
253
254#define CCM_CCSR_CLKMODE1 (1 << 9)
255#define CCM_CCSR_CLKMODE0 (1 << 8)
256
257#define CCM_CCSR_CLKOSEL_OFFSET 0
258#define CCM_CCSR_CLKOSEL_MASK 0x1f
259
260#define SYS_FMCR 0x14 /* Functional Muxing Control Reg */
261#define SYS_CHIP_ID 0x00 /* The offset of CHIP ID register */
262
263static int _clk_enable(struct clk *clk)
264{
265 u32 reg;
266
267 reg = __raw_readl(clk->enable_reg);
268 reg |= 1 << clk->enable_shift;
269 __raw_writel(reg, clk->enable_reg);
270 return 0;
271}
272
273static void _clk_disable(struct clk *clk)
274{
275 u32 reg;
276
277 reg = __raw_readl(clk->enable_reg);
278 reg &= ~(1 << clk->enable_shift);
279 __raw_writel(reg, clk->enable_reg);
280}
281
282static unsigned long _clk_generic_round_rate(struct clk *clk,
283 unsigned long rate,
284 u32 max_divisor)
285{
286 u32 div;
287 unsigned long parent_rate;
288
289 parent_rate = clk_get_rate(clk->parent);
290
291 div = parent_rate / rate;
292 if (parent_rate % rate)
293 div++;
294
295 if (div > max_divisor)
296 div = max_divisor;
297
298 return parent_rate / div;
299}
300
301static int _clk_spll_enable(struct clk *clk)
302{
303 u32 reg;
304
305 reg = __raw_readl(CCM_CSCR);
306 reg |= CCM_CSCR_SPEN;
307 __raw_writel(reg, CCM_CSCR);
308
309 while ((__raw_readl(CCM_SPCTL1) & CCM_SPCTL1_LF) == 0)
310 ;
311 return 0;
312}
313
314static void _clk_spll_disable(struct clk *clk)
315{
316 u32 reg;
317
318 reg = __raw_readl(CCM_CSCR);
319 reg &= ~CCM_CSCR_SPEN;
320 __raw_writel(reg, CCM_CSCR);
321}
322
323
324#define CSCR() (__raw_readl(CCM_CSCR))
325#define PCDR0() (__raw_readl(CCM_PCDR0))
326#define PCDR1() (__raw_readl(CCM_PCDR1))
327
328static unsigned long _clk_perclkx_round_rate(struct clk *clk,
329 unsigned long rate)
330{
331 return _clk_generic_round_rate(clk, rate, 64);
332}
333
334static int _clk_perclkx_set_rate(struct clk *clk, unsigned long rate)
335{
336 u32 reg;
337 u32 div;
338 unsigned long parent_rate;
339
340 parent_rate = clk_get_rate(clk->parent);
341
342 if (clk->id < 0 || clk->id > 3)
343 return -EINVAL;
344
345 div = parent_rate / rate;
346 if (div > 64 || div < 1 || ((parent_rate / div) != rate))
347 return -EINVAL;
348 div--;
349
350 reg =
351 __raw_readl(CCM_PCDR1) & ~(CCM_PCDR1_PERDIV1_MASK <<
352 (clk->id << 3));
353 reg |= div << (clk->id << 3);
354 __raw_writel(reg, CCM_PCDR1);
355
356 return 0;
357}
358
359static unsigned long _clk_usb_recalc(struct clk *clk)
360{
361 unsigned long usb_pdf;
362 unsigned long parent_rate;
363
364 parent_rate = clk_get_rate(clk->parent);
365
366 usb_pdf = (CSCR() & CCM_CSCR_USB_MASK) >> CCM_CSCR_USB_OFFSET;
367
368 return parent_rate / (usb_pdf + 1U);
369}
370
371static unsigned long _clk_usb_round_rate(struct clk *clk,
372 unsigned long rate)
373{
374 return _clk_generic_round_rate(clk, rate, 8);
375}
376
377static int _clk_usb_set_rate(struct clk *clk, unsigned long rate)
378{
379 u32 reg;
380 u32 div;
381 unsigned long parent_rate;
382
383 parent_rate = clk_get_rate(clk->parent);
384
385 div = parent_rate / rate;
386 if (div > 8 || div < 1 || ((parent_rate / div) != rate))
387 return -EINVAL;
388 div--;
389
390 reg = CSCR() & ~CCM_CSCR_USB_MASK;
391 reg |= div << CCM_CSCR_USB_OFFSET;
392 __raw_writel(reg, CCM_CSCR);
393
394 return 0;
395}
396
397static unsigned long _clk_ssix_recalc(struct clk *clk, unsigned long pdf)
398{
399 unsigned long parent_rate;
400
401 parent_rate = clk_get_rate(clk->parent);
402
403 pdf = (pdf < 2) ? 124UL : pdf; /* MX21 & MX27 TO1 */
404
405 return 2UL * parent_rate / pdf;
406}
407
408static unsigned long _clk_ssi1_recalc(struct clk *clk)
409{
410 return _clk_ssix_recalc(clk,
411 (PCDR0() & CCM_PCDR0_SSI1BAUDDIV_MASK)
412 >> CCM_PCDR0_SSI1BAUDDIV_OFFSET);
413}
414
415static unsigned long _clk_ssi2_recalc(struct clk *clk)
416{
417 return _clk_ssix_recalc(clk,
418 (PCDR0() & CCM_PCDR0_SSI2BAUDDIV_MASK) >>
419 CCM_PCDR0_SSI2BAUDDIV_OFFSET);
420}
421
422static unsigned long _clk_nfc_recalc(struct clk *clk)
423{
424 unsigned long nfc_pdf;
425 unsigned long parent_rate;
426
427 parent_rate = clk_get_rate(clk->parent);
428
429 nfc_pdf = (PCDR0() & CCM_PCDR0_NFCDIV_MASK)
430 >> CCM_PCDR0_NFCDIV_OFFSET;
431
432 return parent_rate / (nfc_pdf + 1);
433}
434
435static unsigned long _clk_parent_round_rate(struct clk *clk, unsigned long rate)
436{
437 return clk->parent->round_rate(clk->parent, rate);
438}
439
440static int _clk_parent_set_rate(struct clk *clk, unsigned long rate)
441{
442 return clk->parent->set_rate(clk->parent, rate);
443}
444
445static unsigned long external_high_reference; /* in Hz */
446
447static unsigned long get_high_reference_clock_rate(struct clk *clk)
448{
449 return external_high_reference;
450}
451
452/*
453 * the high frequency external clock reference
454 * Default case is 26MHz.
455 */
456static struct clk ckih_clk = {
457 .get_rate = get_high_reference_clock_rate,
458};
459
460static unsigned long external_low_reference; /* in Hz */
461
462static unsigned long get_low_reference_clock_rate(struct clk *clk)
463{
464 return external_low_reference;
465}
466
467/*
468 * the low frequency external clock reference
469 * Default case is 32.768kHz.
470 */
471static struct clk ckil_clk = {
472 .get_rate = get_low_reference_clock_rate,
473};
474
475
476static unsigned long _clk_fpm_recalc(struct clk *clk)
477{
478 return clk_get_rate(clk->parent) * 512;
479}
480
481/* Output of frequency pre multiplier */
482static struct clk fpm_clk = {
483 .parent = &ckil_clk,
484 .get_rate = _clk_fpm_recalc,
485};
486
487static unsigned long get_mpll_clk(struct clk *clk)
488{
489 uint32_t reg;
490 unsigned long ref_clk;
491 unsigned long mfi = 0, mfn = 0, mfd = 0, pdf = 0;
492 unsigned long long temp;
493
494 ref_clk = clk_get_rate(clk->parent);
495
496 reg = __raw_readl(CCM_MPCTL0);
497 pdf = (reg & CCM_MPCTL0_PD_MASK) >> CCM_MPCTL0_PD_OFFSET;
498 mfd = (reg & CCM_MPCTL0_MFD_MASK) >> CCM_MPCTL0_MFD_OFFSET;
499 mfi = (reg & CCM_MPCTL0_MFI_MASK) >> CCM_MPCTL0_MFI_OFFSET;
500 mfn = (reg & CCM_MPCTL0_MFN_MASK) >> CCM_MPCTL0_MFN_OFFSET;
501
502 mfi = (mfi <= 5) ? 5 : mfi;
503 temp = 2LL * ref_clk * mfn;
504 do_div(temp, mfd + 1);
505 temp = 2LL * ref_clk * mfi + temp;
506 do_div(temp, pdf + 1);
507
508 return (unsigned long)temp;
509}
510
511static struct clk mpll_clk = {
512 .parent = &ckih_clk,
513 .get_rate = get_mpll_clk,
514};
515
516static unsigned long _clk_fclk_get_rate(struct clk *clk)
517{
518 unsigned long parent_rate;
519 u32 div;
520
521 div = (CSCR() & CCM_CSCR_PRESC_MASK) >> CCM_CSCR_PRESC_OFFSET;
522 parent_rate = clk_get_rate(clk->parent);
523
524 return parent_rate / (div+1);
525}
526
527static struct clk fclk_clk = {
528 .parent = &mpll_clk,
529 .get_rate = _clk_fclk_get_rate
530};
531
532static unsigned long get_spll_clk(struct clk *clk)
533{
534 uint32_t reg;
535 unsigned long ref_clk;
536 unsigned long mfi = 0, mfn = 0, mfd = 0, pdf = 0;
537 unsigned long long temp;
538
539 ref_clk = clk_get_rate(clk->parent);
540
541 reg = __raw_readl(CCM_SPCTL0);
542 pdf = (reg & CCM_SPCTL0_PD_MASK) >> CCM_SPCTL0_PD_OFFSET;
543 mfd = (reg & CCM_SPCTL0_MFD_MASK) >> CCM_SPCTL0_MFD_OFFSET;
544 mfi = (reg & CCM_SPCTL0_MFI_MASK) >> CCM_SPCTL0_MFI_OFFSET;
545 mfn = (reg & CCM_SPCTL0_MFN_MASK) >> CCM_SPCTL0_MFN_OFFSET;
546
547 mfi = (mfi <= 5) ? 5 : mfi;
548 temp = 2LL * ref_clk * mfn;
549 do_div(temp, mfd + 1);
550 temp = 2LL * ref_clk * mfi + temp;
551 do_div(temp, pdf + 1);
552
553 return (unsigned long)temp;
554}
555
556static struct clk spll_clk = {
557 .parent = &ckih_clk,
558 .get_rate = get_spll_clk,
559 .enable = _clk_spll_enable,
560 .disable = _clk_spll_disable,
561};
562
563static unsigned long get_hclk_clk(struct clk *clk)
564{
565 unsigned long rate;
566 unsigned long bclk_pdf;
567
568 bclk_pdf = (CSCR() & CCM_CSCR_BCLK_MASK)
569 >> CCM_CSCR_BCLK_OFFSET;
570
571 rate = clk_get_rate(clk->parent);
572 return rate / (bclk_pdf + 1);
573}
574
575static struct clk hclk_clk = {
576 .parent = &fclk_clk,
577 .get_rate = get_hclk_clk,
578};
579
580static unsigned long get_ipg_clk(struct clk *clk)
581{
582 unsigned long rate;
583 unsigned long ipg_pdf;
584
585 ipg_pdf = (CSCR() & CCM_CSCR_IPDIV) >> CCM_CSCR_IPDIV_OFFSET;
586
587 rate = clk_get_rate(clk->parent);
588 return rate / (ipg_pdf + 1);
589}
590
591static struct clk ipg_clk = {
592 .parent = &hclk_clk,
593 .get_rate = get_ipg_clk,
594};
595
596static unsigned long _clk_perclkx_recalc(struct clk *clk)
597{
598 unsigned long perclk_pdf;
599 unsigned long parent_rate;
600
601 parent_rate = clk_get_rate(clk->parent);
602
603 if (clk->id < 0 || clk->id > 3)
604 return 0;
605
606 perclk_pdf = (PCDR1() >> (clk->id << 3)) & CCM_PCDR1_PERDIV1_MASK;
607
608 return parent_rate / (perclk_pdf + 1);
609}
610
611static struct clk per_clk[] = {
612 {
613 .id = 0,
614 .parent = &mpll_clk,
615 .get_rate = _clk_perclkx_recalc,
616 }, {
617 .id = 1,
618 .parent = &mpll_clk,
619 .get_rate = _clk_perclkx_recalc,
620 }, {
621 .id = 2,
622 .parent = &mpll_clk,
623 .round_rate = _clk_perclkx_round_rate,
624 .set_rate = _clk_perclkx_set_rate,
625 .get_rate = _clk_perclkx_recalc,
626 /* Enable/Disable done via lcd_clkc[1] */
627 }, {
628 .id = 3,
629 .parent = &mpll_clk,
630 .round_rate = _clk_perclkx_round_rate,
631 .set_rate = _clk_perclkx_set_rate,
632 .get_rate = _clk_perclkx_recalc,
633 /* Enable/Disable done via csi_clk[1] */
634 },
635};
636
637static struct clk uart_ipg_clk[];
638
639static struct clk uart_clk[] = {
640 {
641 .id = 0,
642 .parent = &per_clk[0],
643 .secondary = &uart_ipg_clk[0],
644 }, {
645 .id = 1,
646 .parent = &per_clk[0],
647 .secondary = &uart_ipg_clk[1],
648 }, {
649 .id = 2,
650 .parent = &per_clk[0],
651 .secondary = &uart_ipg_clk[2],
652 }, {
653 .id = 3,
654 .parent = &per_clk[0],
655 .secondary = &uart_ipg_clk[3],
656 },
657};
658
659static struct clk uart_ipg_clk[] = {
660 {
661 .id = 0,
662 .parent = &ipg_clk,
663 .enable = _clk_enable,
664 .enable_reg = CCM_PCCR_UART1_REG,
665 .enable_shift = CCM_PCCR_UART1_OFFSET,
666 .disable = _clk_disable,
667 }, {
668 .id = 1,
669 .parent = &ipg_clk,
670 .enable = _clk_enable,
671 .enable_reg = CCM_PCCR_UART2_REG,
672 .enable_shift = CCM_PCCR_UART2_OFFSET,
673 .disable = _clk_disable,
674 }, {
675 .id = 2,
676 .parent = &ipg_clk,
677 .enable = _clk_enable,
678 .enable_reg = CCM_PCCR_UART3_REG,
679 .enable_shift = CCM_PCCR_UART3_OFFSET,
680 .disable = _clk_disable,
681 }, {
682 .id = 3,
683 .parent = &ipg_clk,
684 .enable = _clk_enable,
685 .enable_reg = CCM_PCCR_UART4_REG,
686 .enable_shift = CCM_PCCR_UART4_OFFSET,
687 .disable = _clk_disable,
688 },
689};
690
691static struct clk gpt_ipg_clk[];
692
693static struct clk gpt_clk[] = {
694 {
695 .id = 0,
696 .parent = &per_clk[0],
697 .secondary = &gpt_ipg_clk[0],
698 }, {
699 .id = 1,
700 .parent = &per_clk[0],
701 .secondary = &gpt_ipg_clk[1],
702 }, {
703 .id = 2,
704 .parent = &per_clk[0],
705 .secondary = &gpt_ipg_clk[2],
706 },
707};
708
709static struct clk gpt_ipg_clk[] = {
710 {
711 .id = 0,
712 .parent = &ipg_clk,
713 .enable = _clk_enable,
714 .enable_reg = CCM_PCCR_GPT1_REG,
715 .enable_shift = CCM_PCCR_GPT1_OFFSET,
716 .disable = _clk_disable,
717 }, {
718 .id = 1,
719 .parent = &ipg_clk,
720 .enable = _clk_enable,
721 .enable_reg = CCM_PCCR_GPT2_REG,
722 .enable_shift = CCM_PCCR_GPT2_OFFSET,
723 .disable = _clk_disable,
724 }, {
725 .id = 2,
726 .parent = &ipg_clk,
727 .enable = _clk_enable,
728 .enable_reg = CCM_PCCR_GPT3_REG,
729 .enable_shift = CCM_PCCR_GPT3_OFFSET,
730 .disable = _clk_disable,
731 },
732};
733
734static struct clk pwm_clk[] = {
735 {
736 .parent = &per_clk[0],
737 .secondary = &pwm_clk[1],
738 }, {
739 .parent = &ipg_clk,
740 .enable = _clk_enable,
741 .enable_reg = CCM_PCCR_PWM_REG,
742 .enable_shift = CCM_PCCR_PWM_OFFSET,
743 .disable = _clk_disable,
744 },
745};
746
747static struct clk sdhc_ipg_clk[];
748
749static struct clk sdhc_clk[] = {
750 {
751 .id = 0,
752 .parent = &per_clk[1],
753 .secondary = &sdhc_ipg_clk[0],
754 }, {
755 .id = 1,
756 .parent = &per_clk[1],
757 .secondary = &sdhc_ipg_clk[1],
758 },
759};
760
761static struct clk sdhc_ipg_clk[] = {
762 {
763 .id = 0,
764 .parent = &ipg_clk,
765 .enable = _clk_enable,
766 .enable_reg = CCM_PCCR_SDHC1_REG,
767 .enable_shift = CCM_PCCR_SDHC1_OFFSET,
768 .disable = _clk_disable,
769 }, {
770 .id = 1,
771 .parent = &ipg_clk,
772 .enable = _clk_enable,
773 .enable_reg = CCM_PCCR_SDHC2_REG,
774 .enable_shift = CCM_PCCR_SDHC2_OFFSET,
775 .disable = _clk_disable,
776 },
777};
778
779static struct clk cspi_ipg_clk[];
780
781static struct clk cspi_clk[] = {
782 {
783 .id = 0,
784 .parent = &per_clk[1],
785 .secondary = &cspi_ipg_clk[0],
786 }, {
787 .id = 1,
788 .parent = &per_clk[1],
789 .secondary = &cspi_ipg_clk[1],
790 }, {
791 .id = 2,
792 .parent = &per_clk[1],
793 .secondary = &cspi_ipg_clk[2],
794 },
795};
796
797static struct clk cspi_ipg_clk[] = {
798 {
799 .id = 0,
800 .parent = &ipg_clk,
801 .enable = _clk_enable,
802 .enable_reg = CCM_PCCR_CSPI1_REG,
803 .enable_shift = CCM_PCCR_CSPI1_OFFSET,
804 .disable = _clk_disable,
805 }, {
806 .id = 1,
807 .parent = &ipg_clk,
808 .enable = _clk_enable,
809 .enable_reg = CCM_PCCR_CSPI2_REG,
810 .enable_shift = CCM_PCCR_CSPI2_OFFSET,
811 .disable = _clk_disable,
812 }, {
813 .id = 3,
814 .parent = &ipg_clk,
815 .enable = _clk_enable,
816 .enable_reg = CCM_PCCR_CSPI3_REG,
817 .enable_shift = CCM_PCCR_CSPI3_OFFSET,
818 .disable = _clk_disable,
819 },
820};
821
822static struct clk lcdc_clk[] = {
823 {
824 .parent = &per_clk[2],
825 .secondary = &lcdc_clk[1],
826 .round_rate = _clk_parent_round_rate,
827 .set_rate = _clk_parent_set_rate,
828 }, {
829 .parent = &ipg_clk,
830 .secondary = &lcdc_clk[2],
831 .enable = _clk_enable,
832 .enable_reg = CCM_PCCR_LCDC_REG,
833 .enable_shift = CCM_PCCR_LCDC_OFFSET,
834 .disable = _clk_disable,
835 }, {
836 .parent = &hclk_clk,
837 .enable = _clk_enable,
838 .enable_reg = CCM_PCCR_HCLK_LCDC_REG,
839 .enable_shift = CCM_PCCR_HCLK_LCDC_OFFSET,
840 .disable = _clk_disable,
841 },
842};
843
844static struct clk csi_clk[] = {
845 {
846 .parent = &per_clk[3],
847 .secondary = &csi_clk[1],
848 .round_rate = _clk_parent_round_rate,
849 .set_rate = _clk_parent_set_rate,
850 }, {
851 .parent = &hclk_clk,
852 .enable = _clk_enable,
853 .enable_reg = CCM_PCCR_HCLK_CSI_REG,
854 .enable_shift = CCM_PCCR_HCLK_CSI_OFFSET,
855 .disable = _clk_disable,
856 },
857};
858
859static struct clk usb_clk[] = {
860 {
861 .parent = &spll_clk,
862 .secondary = &usb_clk[1],
863 .get_rate = _clk_usb_recalc,
864 .enable = _clk_enable,
865 .enable_reg = CCM_PCCR_USBOTG_REG,
866 .enable_shift = CCM_PCCR_USBOTG_OFFSET,
867 .disable = _clk_disable,
868 .round_rate = _clk_usb_round_rate,
869 .set_rate = _clk_usb_set_rate,
870 }, {
871 .parent = &hclk_clk,
872 .enable = _clk_enable,
873 .enable_reg = CCM_PCCR_HCLK_USBOTG_REG,
874 .enable_shift = CCM_PCCR_HCLK_USBOTG_OFFSET,
875 .disable = _clk_disable,
876 }
877};
878
879static struct clk ssi_ipg_clk[];
880
881static struct clk ssi_clk[] = {
882 {
883 .id = 0,
884 .parent = &mpll_clk,
885 .secondary = &ssi_ipg_clk[0],
886 .get_rate = _clk_ssi1_recalc,
887 .enable = _clk_enable,
888 .enable_reg = CCM_PCCR_SSI1_BAUD_REG,
889 .enable_shift = CCM_PCCR_SSI1_BAUD_OFFSET,
890 .disable = _clk_disable,
891 }, {
892 .id = 1,
893 .parent = &mpll_clk,
894 .secondary = &ssi_ipg_clk[1],
895 .get_rate = _clk_ssi2_recalc,
896 .enable = _clk_enable,
897 .enable_reg = CCM_PCCR_SSI2_BAUD_REG,
898 .enable_shift = CCM_PCCR_SSI2_BAUD_OFFSET,
899 .disable = _clk_disable,
900 },
901};
902
903static struct clk ssi_ipg_clk[] = {
904 {
905 .id = 0,
906 .parent = &ipg_clk,
907 .enable = _clk_enable,
908 .enable_reg = CCM_PCCR_SSI1_REG,
909 .enable_shift = CCM_PCCR_SSI1_IPG_OFFSET,
910 .disable = _clk_disable,
911 }, {
912 .id = 1,
913 .parent = &ipg_clk,
914 .enable = _clk_enable,
915 .enable_reg = CCM_PCCR_SSI2_REG,
916 .enable_shift = CCM_PCCR_SSI2_IPG_OFFSET,
917 .disable = _clk_disable,
918 },
919};
920
921
922static struct clk nfc_clk = {
923 .parent = &fclk_clk,
924 .get_rate = _clk_nfc_recalc,
925 .enable = _clk_enable,
926 .enable_reg = CCM_PCCR_NFC_REG,
927 .enable_shift = CCM_PCCR_NFC_OFFSET,
928 .disable = _clk_disable,
929};
930
931static struct clk dma_clk[] = {
932 {
933 .parent = &hclk_clk,
934 .enable = _clk_enable,
935 .enable_reg = CCM_PCCR_DMA_REG,
936 .enable_shift = CCM_PCCR_DMA_OFFSET,
937 .disable = _clk_disable,
938 .secondary = &dma_clk[1],
939 }, {
940 .enable = _clk_enable,
941 .enable_reg = CCM_PCCR_HCLK_DMA_REG,
942 .enable_shift = CCM_PCCR_HCLK_DMA_OFFSET,
943 .disable = _clk_disable,
944 },
945};
946
947static struct clk brom_clk = {
948 .parent = &hclk_clk,
949 .enable = _clk_enable,
950 .enable_reg = CCM_PCCR_HCLK_BROM_REG,
951 .enable_shift = CCM_PCCR_HCLK_BROM_OFFSET,
952 .disable = _clk_disable,
953};
954
/*
 * eMMA (multimedia accelerator) clocks: emma_clk[0] gates the module
 * clock and chains emma_clk[1] (HCLK_EMMA gate) as its secondary.
 */
static struct clk emma_clk[] = {
	{
		.parent = &hclk_clk,
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_EMMA_REG,
		.enable_shift = CCM_PCCR_EMMA_OFFSET,
		.disable = _clk_disable,
		.secondary = &emma_clk[1],
	}, {
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_HCLK_EMMA_REG,
		.enable_shift = CCM_PCCR_HCLK_EMMA_OFFSET,
		.disable = _clk_disable,
	}
};
970
/*
 * Smart LCD controller clocks: slcdc_clk[0] gates the module clock and
 * chains slcdc_clk[1] (HCLK_SLCDC gate) as its secondary.
 */
static struct clk slcdc_clk[] = {
	{
		.parent = &hclk_clk,
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_SLCDC_REG,
		.enable_shift = CCM_PCCR_SLCDC_OFFSET,
		.disable = _clk_disable,
		.secondary = &slcdc_clk[1],
	}, {
		.enable = _clk_enable,
		.enable_reg = CCM_PCCR_HCLK_SLCDC_REG,
		.enable_shift = CCM_PCCR_HCLK_SLCDC_OFFSET,
		.disable = _clk_disable,
	}
};
986
/* Simple IPG-gated peripheral clocks: one PCCR enable bit each, no
 * divider of their own. */

static struct clk wdog_clk = {
	.parent = &ipg_clk,
	.enable = _clk_enable,
	.enable_reg = CCM_PCCR_WDT_REG,
	.enable_shift = CCM_PCCR_WDT_OFFSET,
	.disable = _clk_disable,
};

static struct clk gpio_clk = {
	.parent = &ipg_clk,
	.enable = _clk_enable,
	.enable_reg = CCM_PCCR_GPIO_REG,
	.enable_shift = CCM_PCCR_GPIO_OFFSET,
	.disable = _clk_disable,
};

/* Single clock shared by the I2C controller (id 0 / I2C1 gate). */
static struct clk i2c_clk = {
	.id = 0,
	.parent = &ipg_clk,
	.enable = _clk_enable,
	.enable_reg = CCM_PCCR_I2C1_REG,
	.enable_shift = CCM_PCCR_I2C1_OFFSET,
	.disable = _clk_disable,
};

static struct clk kpp_clk = {
	.parent = &ipg_clk,
	.enable = _clk_enable,
	.enable_reg = CCM_PCCR_KPP_REG,
	.enable_shift = CCM_PCCR_KPP_OFFSET,
	.disable = _clk_disable,
};

static struct clk owire_clk = {
	.parent = &ipg_clk,
	.enable = _clk_enable,
	.enable_reg = CCM_PCCR_OWIRE_REG,
	.enable_shift = CCM_PCCR_OWIRE_OFFSET,
	.disable = _clk_disable,
};

static struct clk rtc_clk = {
	.parent = &ipg_clk,
	.enable = _clk_enable,
	.enable_reg = CCM_PCCR_RTC_REG,
	.enable_shift = CCM_PCCR_RTC_OFFSET,
	.disable = _clk_disable,
};
1035
/* Round a CLKO rate request to what the divider allows (max divide by
 * 8, matching the 48MDIV field programmed in _clk_clko_set_rate). */
static unsigned long _clk_clko_round_rate(struct clk *clk, unsigned long rate)
{
	return _clk_generic_round_rate(clk, rate, 8);
}
1040
/*
 * Program the CLKO output divider.  Only dividers 1..8 that divide the
 * parent rate exactly are accepted.
 *
 * NOTE(review): when the parent is not usb_clk[0], PCDR0 is written
 * back unmodified and 0 is returned — the request "succeeds" without
 * changing anything.  Confirm whether other parents should return
 * -EINVAL instead.
 */
static int _clk_clko_set_rate(struct clk *clk, unsigned long rate)
{
	u32 reg;
	u32 div;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;

	if (div > 8 || div < 1 || ((parent_rate / div) != rate))
		return -EINVAL;
	div--;	/* the hardware field stores divider - 1 */

	reg = __raw_readl(CCM_PCDR0);

	if (clk->parent == &usb_clk[0]) {
		reg &= ~CCM_PCDR0_48MDIV_MASK;
		reg |= div << CCM_PCDR0_48MDIV_OFFSET;
	}
	__raw_writel(reg, CCM_PCDR0);

	return 0;
}
1065
1066static unsigned long _clk_clko_recalc(struct clk *clk)
1067{
1068 u32 div = 0;
1069 unsigned long parent_rate;
1070
1071 parent_rate = clk_get_rate(clk->parent);
1072
1073 if (clk->parent == &usb_clk[0]) /* 48M */
1074 div = __raw_readl(CCM_PCDR0) & CCM_PCDR0_48MDIV_MASK
1075 >> CCM_PCDR0_48MDIV_OFFSET;
1076 div++;
1077
1078 return parent_rate / div;
1079}
1080
1081static struct clk clko_clk;
1082
1083static int _clk_clko_set_parent(struct clk *clk, struct clk *parent)
1084{
1085 u32 reg;
1086
1087 reg = __raw_readl(CCM_CCSR) & ~CCM_CCSR_CLKOSEL_MASK;
1088
1089 if (parent == &ckil_clk)
1090 reg |= 0 << CCM_CCSR_CLKOSEL_OFFSET;
1091 else if (parent == &fpm_clk)
1092 reg |= 1 << CCM_CCSR_CLKOSEL_OFFSET;
1093 else if (parent == &ckih_clk)
1094 reg |= 2 << CCM_CCSR_CLKOSEL_OFFSET;
1095 else if (parent == mpll_clk.parent)
1096 reg |= 3 << CCM_CCSR_CLKOSEL_OFFSET;
1097 else if (parent == spll_clk.parent)
1098 reg |= 4 << CCM_CCSR_CLKOSEL_OFFSET;
1099 else if (parent == &mpll_clk)
1100 reg |= 5 << CCM_CCSR_CLKOSEL_OFFSET;
1101 else if (parent == &spll_clk)
1102 reg |= 6 << CCM_CCSR_CLKOSEL_OFFSET;
1103 else if (parent == &fclk_clk)
1104 reg |= 7 << CCM_CCSR_CLKOSEL_OFFSET;
1105 else if (parent == &hclk_clk)
1106 reg |= 8 << CCM_CCSR_CLKOSEL_OFFSET;
1107 else if (parent == &ipg_clk)
1108 reg |= 9 << CCM_CCSR_CLKOSEL_OFFSET;
1109 else if (parent == &per_clk[0])
1110 reg |= 0xA << CCM_CCSR_CLKOSEL_OFFSET;
1111 else if (parent == &per_clk[1])
1112 reg |= 0xB << CCM_CCSR_CLKOSEL_OFFSET;
1113 else if (parent == &per_clk[2])
1114 reg |= 0xC << CCM_CCSR_CLKOSEL_OFFSET;
1115 else if (parent == &per_clk[3])
1116 reg |= 0xD << CCM_CCSR_CLKOSEL_OFFSET;
1117 else if (parent == &ssi_clk[0])
1118 reg |= 0xE << CCM_CCSR_CLKOSEL_OFFSET;
1119 else if (parent == &ssi_clk[1])
1120 reg |= 0xF << CCM_CCSR_CLKOSEL_OFFSET;
1121 else if (parent == &nfc_clk)
1122 reg |= 0x10 << CCM_CCSR_CLKOSEL_OFFSET;
1123 else if (parent == &usb_clk[0])
1124 reg |= 0x14 << CCM_CCSR_CLKOSEL_OFFSET;
1125 else if (parent == &clko_clk)
1126 reg |= 0x15 << CCM_CCSR_CLKOSEL_OFFSET;
1127 else
1128 return -EINVAL;
1129
1130 __raw_writel(reg, CCM_CCSR);
1131
1132 return 0;
1133}
1134
/*
 * The CLKO pin output clock.  No .parent is set here: the source is
 * selected at runtime through _clk_clko_set_parent().
 */
static struct clk clko_clk = {
	.get_rate = _clk_clko_recalc,
	.set_rate = _clk_clko_set_rate,
	.round_rate = _clk_clko_round_rate,
	.set_parent = _clk_clko_set_parent,
};
1141
1142
/* Build one clk_lookup table entry; the expansion includes the
 * trailing comma, so invocations are written without separators. */
#define _REGISTER_CLOCK(d, n, c) \
	{ \
		.dev_id = d, \
		.con_id = n, \
		.clk = &c, \
	},
1149static struct clk_lookup lookups[] = {
1150/* It's unlikely that any driver wants one of them directly:
1151 _REGISTER_CLOCK(NULL, "ckih", ckih_clk)
1152 _REGISTER_CLOCK(NULL, "ckil", ckil_clk)
1153 _REGISTER_CLOCK(NULL, "fpm", fpm_clk)
1154 _REGISTER_CLOCK(NULL, "mpll", mpll_clk)
1155 _REGISTER_CLOCK(NULL, "spll", spll_clk)
1156 _REGISTER_CLOCK(NULL, "fclk", fclk_clk)
1157 _REGISTER_CLOCK(NULL, "hclk", hclk_clk)
1158 _REGISTER_CLOCK(NULL, "ipg", ipg_clk)
1159*/
1160 _REGISTER_CLOCK(NULL, "perclk1", per_clk[0])
1161 _REGISTER_CLOCK(NULL, "perclk2", per_clk[1])
1162 _REGISTER_CLOCK(NULL, "perclk3", per_clk[2])
1163 _REGISTER_CLOCK(NULL, "perclk4", per_clk[3])
1164 _REGISTER_CLOCK(NULL, "clko", clko_clk)
1165 _REGISTER_CLOCK("imx21-uart.0", NULL, uart_clk[0])
1166 _REGISTER_CLOCK("imx21-uart.1", NULL, uart_clk[1])
1167 _REGISTER_CLOCK("imx21-uart.2", NULL, uart_clk[2])
1168 _REGISTER_CLOCK("imx21-uart.3", NULL, uart_clk[3])
1169 _REGISTER_CLOCK(NULL, "gpt1", gpt_clk[0])
1170 _REGISTER_CLOCK(NULL, "gpt1", gpt_clk[1])
1171 _REGISTER_CLOCK(NULL, "gpt1", gpt_clk[2])
1172 _REGISTER_CLOCK(NULL, "pwm", pwm_clk[0])
1173 _REGISTER_CLOCK(NULL, "sdhc1", sdhc_clk[0])
1174 _REGISTER_CLOCK(NULL, "sdhc2", sdhc_clk[1])
1175 _REGISTER_CLOCK("imx21-cspi.0", NULL, cspi_clk[0])
1176 _REGISTER_CLOCK("imx21-cspi.1", NULL, cspi_clk[1])
1177 _REGISTER_CLOCK("imx21-cspi.2", NULL, cspi_clk[2])
1178 _REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk[0])
1179 _REGISTER_CLOCK(NULL, "csi", csi_clk[0])
1180 _REGISTER_CLOCK("imx21-hcd.0", NULL, usb_clk[0])
1181 _REGISTER_CLOCK(NULL, "ssi1", ssi_clk[0])
1182 _REGISTER_CLOCK(NULL, "ssi2", ssi_clk[1])
1183 _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
1184 _REGISTER_CLOCK(NULL, "dma", dma_clk[0])
1185 _REGISTER_CLOCK(NULL, "brom", brom_clk)
1186 _REGISTER_CLOCK(NULL, "emma", emma_clk[0])
1187 _REGISTER_CLOCK(NULL, "slcdc", slcdc_clk[0])
1188 _REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
1189 _REGISTER_CLOCK(NULL, "gpio", gpio_clk)
1190 _REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
1191 _REGISTER_CLOCK("mxc-keypad", NULL, kpp_clk)
1192 _REGISTER_CLOCK(NULL, "owire", owire_clk)
1193 _REGISTER_CLOCK(NULL, "rtc", rtc_clk)
1194};
1195
/*
 * must be called very early to get information about the
 * available clock rate when the timer framework starts
 */
int __init mx21_clocks_init(unsigned long lref, unsigned long href)
{
	u32 cscr;

	/* lref/href: board-provided rates (Hz) of the low (32 kHz) and
	 * high (26 MHz class) external references */
	external_low_reference = lref;
	external_high_reference = href;

	/* detect clock reference for both system PLLs */
	cscr = CSCR();
	if (cscr & CCM_CSCR_MCU)
		mpll_clk.parent = &ckih_clk;
	else
		mpll_clk.parent = &fpm_clk;

	if (cscr & CCM_CSCR_SP)
		spll_clk.parent = &ckih_clk;
	else
		spll_clk.parent = &fpm_clk;

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	/* Turn off all clock gates */
	__raw_writel(0, CCM_PCCR0);
	__raw_writel(CCM_PCCR_GPT1_MASK, CCM_PCCR1);

	/* This turns off the serial PLL as well */
	spll_clk.disable(&spll_clk);

	/* This will propagate to all children and init all the clock rates. */
	clk_enable(&per_clk[0]);
	clk_enable(&gpio_clk);

#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
	/* keep the low-level debug UART clocked */
	clk_enable(&uart_clk[0]);
#endif

	mxc_timer_init(&gpt_clk[0], MX21_IO_ADDRESS(MX21_GPT1_BASE_ADDR),
			MX21_INT_GPT1);
	return 0;
}
diff --git a/arch/arm/mach-imx/clock-imx25.c b/arch/arm/mach-imx/clock-imx25.c
deleted file mode 100644
index b0fec74c8c91..000000000000
--- a/arch/arm/mach-imx/clock-imx25.c
+++ /dev/null
@@ -1,346 +0,0 @@
1/*
2 * Copyright (C) 2009 by Sascha Hauer, Pengutronix
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
16 * MA 02110-1301, USA.
17 */
18
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/list.h>
22#include <linux/clk.h>
23#include <linux/io.h>
24#include <linux/clkdev.h>
25
26#include <mach/clock.h>
27#include <mach/hardware.h>
28#include <mach/common.h>
29#include <mach/mx25.h>
30
31#define CRM_BASE MX25_IO_ADDRESS(MX25_CRM_BASE_ADDR)
32
33#define CCM_MPCTL 0x00
34#define CCM_UPCTL 0x04
35#define CCM_CCTL 0x08
36#define CCM_CGCR0 0x0C
37#define CCM_CGCR1 0x10
38#define CCM_CGCR2 0x14
39#define CCM_PCDR0 0x18
40#define CCM_PCDR1 0x1C
41#define CCM_PCDR2 0x20
42#define CCM_PCDR3 0x24
43#define CCM_RCSR 0x28
44#define CCM_CRDR 0x2C
45#define CCM_DCVR0 0x30
46#define CCM_DCVR1 0x34
47#define CCM_DCVR2 0x38
48#define CCM_DCVR3 0x3c
49#define CCM_LTR0 0x40
50#define CCM_LTR1 0x44
51#define CCM_LTR2 0x48
52#define CCM_LTR3 0x4c
53
54static unsigned long get_rate_mpll(void)
55{
56 ulong mpctl = __raw_readl(CRM_BASE + CCM_MPCTL);
57
58 return mxc_decode_pll(mpctl, 24000000);
59}
60
61static unsigned long get_rate_upll(void)
62{
63 ulong mpctl = __raw_readl(CRM_BASE + CCM_UPCTL);
64
65 return mxc_decode_pll(mpctl, 24000000);
66}
67
/*
 * Rate of the ARM core clock, derived from the MCU PLL.
 * CCTL bit 14 selects a 3/4 pre-scale of the PLL output; CCTL[31:30]
 * holds the ARM divider (divide by field + 1).
 * Not static — presumably referenced from outside this file; confirm
 * before changing linkage.
 */
unsigned long get_rate_arm(struct clk *clk)
{
	unsigned long cctl = readl(CRM_BASE + CCM_CCTL);
	unsigned long rate = get_rate_mpll();

	if (cctl & (1 << 14))
		rate = (rate * 3) >> 2;	/* 3/4 pre-scale */

	return rate / ((cctl >> 30) + 1);
}
78
79static unsigned long get_rate_ahb(struct clk *clk)
80{
81 unsigned long cctl = readl(CRM_BASE + CCM_CCTL);
82
83 return get_rate_arm(NULL) / (((cctl >> 28) & 0x3) + 1);
84}
85
86static unsigned long get_rate_ipg(struct clk *clk)
87{
88 return get_rate_ahb(NULL) >> 1;
89}
90
/*
 * Rate of per-clock number <per>.  The 6-bit dividers are packed four
 * to a register in PCDR0..PCDR3, 8 bits apart.  The source is either
 * the USB PLL or AHB, selected per clock by a bit in the register at
 * offset 0x64 (assumed to be the per-clock mux control — confirm name
 * against the i.MX25 reference manual).
 */
static unsigned long get_rate_per(int per)
{
	unsigned long ofs = (per & 0x3) * 8;	/* bit offset within register */
	unsigned long reg = per & ~0x3;		/* which PCDR register */
	unsigned long val = (readl(CRM_BASE + CCM_PCDR0 + reg) >> ofs) & 0x3f;
	unsigned long fref;

	if (readl(CRM_BASE + 0x64) & (1 << per))
		fref = get_rate_upll();
	else
		fref = get_rate_ahb(NULL);

	return fref / (val + 1);
}
105
/*
 * Thin wrappers binding each peripheral to its per-clock index for
 * get_rate_per(); the index selects the divider slot in PCDR0..3.
 */
static unsigned long get_rate_uart(struct clk *clk)
{
	return get_rate_per(15);
}

static unsigned long get_rate_ssi2(struct clk *clk)
{
	return get_rate_per(14);
}

static unsigned long get_rate_ssi1(struct clk *clk)
{
	return get_rate_per(13);
}

static unsigned long get_rate_i2c(struct clk *clk)
{
	return get_rate_per(6);
}

static unsigned long get_rate_nfc(struct clk *clk)
{
	return get_rate_per(8);
}

static unsigned long get_rate_gpt(struct clk *clk)
{
	return get_rate_per(5);
}

static unsigned long get_rate_lcdc(struct clk *clk)
{
	return get_rate_per(7);
}

static unsigned long get_rate_esdhc1(struct clk *clk)
{
	return get_rate_per(3);
}

static unsigned long get_rate_esdhc2(struct clk *clk)
{
	return get_rate_per(4);
}

static unsigned long get_rate_csi(struct clk *clk)
{
	return get_rate_per(0);
}
155
/*
 * USB OTG clock: USB PLL divided by CCTL[21:16] + 1, or 0 when the
 * divider is gated off (CCTL bit 23).
 */
static unsigned long get_rate_otg(struct clk *clk)
{
	unsigned long cctl = readl(CRM_BASE + CCM_CCTL);
	unsigned long rate = get_rate_upll();

	return (cctl & (1 << 23)) ? 0 : rate / ((0x3F & (cctl >> 16)) + 1);
}
163
164static int clk_cgcr_enable(struct clk *clk)
165{
166 u32 reg;
167
168 reg = __raw_readl(clk->enable_reg);
169 reg |= 1 << clk->enable_shift;
170 __raw_writel(reg, clk->enable_reg);
171
172 return 0;
173}
174
175static void clk_cgcr_disable(struct clk *clk)
176{
177 u32 reg;
178
179 reg = __raw_readl(clk->enable_reg);
180 reg &= ~(1 << clk->enable_shift);
181 __raw_writel(reg, clk->enable_reg);
182}
183
/* Declare one gated clock: id, CGCR enable register/bit, rate ops and
 * an optional secondary clock enabled alongside it. */
#define DEFINE_CLOCK(name, i, er, es, gr, sr, s)	\
	static struct clk name = {			\
		.id		= i,			\
		.enable_reg	= CRM_BASE + er,	\
		.enable_shift	= es,			\
		.get_rate	= gr,			\
		.set_rate	= sr,			\
		.enable		= clk_cgcr_enable,	\
		.disable	= clk_cgcr_disable,	\
		.secondary	= s,			\
	}

/*
 * Note: the following IPG clock gating bits are wrongly marked "Reserved" in
 * the i.MX25 Reference Manual Rev 1, table 15-13. The information below is
 * taken from the Freescale released BSP.
 *
 * bit	reg	offset	clock
 *
 * 0	CGCR1	0	AUDMUX
 * 12	CGCR1	12	ESAI
 * 16	CGCR1	16	GPIO1
 * 17	CGCR1	17	GPIO2
 * 18	CGCR1	18	GPIO3
 * 23	CGCR1	23	I2C1
 * 24	CGCR1	24	I2C2
 * 25	CGCR1	25	I2C3
 * 27	CGCR1	27	IOMUXC
 * 28	CGCR1	28	KPP
 * 30	CGCR1	30	OWIRE
 * 36	CGCR2	4	RTIC
 * 51	CGCR2	19	WDOG
 */

/* The clock tree: *_ahb_clk are bus gates chained as secondaries of
 * the corresponding *_per_clk / module clocks. */
DEFINE_CLOCK(gpt_clk, 0, CCM_CGCR0, 5, get_rate_gpt, NULL, NULL);
DEFINE_CLOCK(uart_per_clk, 0, CCM_CGCR0, 15, get_rate_uart, NULL, NULL);
DEFINE_CLOCK(ssi1_per_clk, 0, CCM_CGCR0, 13, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(ssi2_per_clk, 0, CCM_CGCR0, 14, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(cspi1_clk, 0, CCM_CGCR1, 5, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(cspi2_clk, 0, CCM_CGCR1, 6, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(cspi3_clk, 0, CCM_CGCR1, 7, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(esdhc1_ahb_clk, 0, CCM_CGCR0, 21, get_rate_esdhc1, NULL, NULL);
DEFINE_CLOCK(esdhc1_per_clk, 0, CCM_CGCR0, 3, get_rate_esdhc1, NULL,
		&esdhc1_ahb_clk);
DEFINE_CLOCK(esdhc2_ahb_clk, 0, CCM_CGCR0, 22, get_rate_esdhc2, NULL, NULL);
DEFINE_CLOCK(esdhc2_per_clk, 0, CCM_CGCR0, 4, get_rate_esdhc2, NULL,
		&esdhc2_ahb_clk);
DEFINE_CLOCK(sdma_ahb_clk, 0, CCM_CGCR0, 26, NULL, NULL, NULL);
DEFINE_CLOCK(fec_ahb_clk, 0, CCM_CGCR0, 23, NULL, NULL, NULL);
DEFINE_CLOCK(lcdc_ahb_clk, 0, CCM_CGCR0, 24, NULL, NULL, NULL);
DEFINE_CLOCK(lcdc_per_clk, 0, CCM_CGCR0, 7, NULL, NULL, &lcdc_ahb_clk);
DEFINE_CLOCK(csi_ahb_clk, 0, CCM_CGCR0, 18, get_rate_csi, NULL, NULL);
DEFINE_CLOCK(csi_per_clk, 0, CCM_CGCR0, 0, get_rate_csi, NULL, &csi_ahb_clk);
DEFINE_CLOCK(uart1_clk, 0, CCM_CGCR2, 14, get_rate_uart, NULL, &uart_per_clk);
DEFINE_CLOCK(uart2_clk, 0, CCM_CGCR2, 15, get_rate_uart, NULL, &uart_per_clk);
DEFINE_CLOCK(uart3_clk, 0, CCM_CGCR2, 16, get_rate_uart, NULL, &uart_per_clk);
DEFINE_CLOCK(uart4_clk, 0, CCM_CGCR2, 17, get_rate_uart, NULL, &uart_per_clk);
DEFINE_CLOCK(uart5_clk, 0, CCM_CGCR2, 18, get_rate_uart, NULL, &uart_per_clk);
DEFINE_CLOCK(nfc_clk, 0, CCM_CGCR0, 8, get_rate_nfc, NULL, NULL);
DEFINE_CLOCK(usbotg_clk, 0, CCM_CGCR0, 28, get_rate_otg, NULL, NULL);
DEFINE_CLOCK(pwm1_clk, 0, CCM_CGCR1, 31, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(pwm2_clk, 0, CCM_CGCR2, 0, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(pwm3_clk, 0, CCM_CGCR2, 1, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(pwm4_clk, 0, CCM_CGCR2, 2, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(kpp_clk, 0, CCM_CGCR1, 28, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(tsc_clk, 0, CCM_CGCR2, 13, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(i2c_clk, 0, CCM_CGCR0, 6, get_rate_i2c, NULL, NULL);
DEFINE_CLOCK(fec_clk, 0, CCM_CGCR1, 15, get_rate_ipg, NULL, &fec_ahb_clk);
DEFINE_CLOCK(dryice_clk, 0, CCM_CGCR1, 8, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(lcdc_clk, 0, CCM_CGCR1, 29, get_rate_lcdc, NULL, &lcdc_per_clk);
DEFINE_CLOCK(wdt_clk, 0, CCM_CGCR2, 19, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(ssi1_clk, 0, CCM_CGCR2, 11, get_rate_ssi1, NULL, &ssi1_per_clk);
DEFINE_CLOCK(ssi2_clk, 1, CCM_CGCR2, 12, get_rate_ssi2, NULL, &ssi2_per_clk);
DEFINE_CLOCK(sdma_clk, 0, CCM_CGCR2, 6, get_rate_ipg, NULL, &sdma_ahb_clk);
DEFINE_CLOCK(esdhc1_clk, 0, CCM_CGCR1, 13, get_rate_esdhc1, NULL,
		&esdhc1_per_clk);
DEFINE_CLOCK(esdhc2_clk, 1, CCM_CGCR1, 14, get_rate_esdhc2, NULL,
		&esdhc2_per_clk);
DEFINE_CLOCK(audmux_clk, 0, CCM_CGCR1, 0, NULL, NULL, NULL);
DEFINE_CLOCK(csi_clk, 0, CCM_CGCR1, 4, get_rate_csi, NULL, &csi_per_clk);
DEFINE_CLOCK(can1_clk, 0, CCM_CGCR1, 2, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(can2_clk, 1, CCM_CGCR1, 3, get_rate_ipg, NULL, NULL);
DEFINE_CLOCK(iim_clk, 0, CCM_CGCR1, 26, NULL, NULL, NULL);
267
/* Build one clk_lookup table entry; the expansion includes the
 * trailing comma. */
#define _REGISTER_CLOCK(d, n, c) \
	{ \
		.dev_id = d, \
		.con_id = n, \
		.clk = &c, \
	},

/* clkdev lookup table mapping (dev_id, con_id) to clocks. */
static struct clk_lookup lookups[] = {
	/* i.mx25 has the i.mx21 type uart */
	_REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
	_REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
	_REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
	_REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
	_REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
	/* all USB controllers share the single OTG clock */
	_REGISTER_CLOCK("mxc-ehci.0", "usb", usbotg_clk)
	_REGISTER_CLOCK("mxc-ehci.1", "usb", usbotg_clk)
	_REGISTER_CLOCK("mxc-ehci.2", "usb", usbotg_clk)
	_REGISTER_CLOCK("fsl-usb2-udc", "usb", usbotg_clk)
	_REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
	/* i.mx25 has the i.mx35 type cspi */
	_REGISTER_CLOCK("imx35-cspi.0", NULL, cspi1_clk)
	_REGISTER_CLOCK("imx35-cspi.1", NULL, cspi2_clk)
	_REGISTER_CLOCK("imx35-cspi.2", NULL, cspi3_clk)
	_REGISTER_CLOCK("mxc_pwm.0", NULL, pwm1_clk)
	_REGISTER_CLOCK("mxc_pwm.1", NULL, pwm2_clk)
	_REGISTER_CLOCK("mxc_pwm.2", NULL, pwm3_clk)
	_REGISTER_CLOCK("mxc_pwm.3", NULL, pwm4_clk)
	_REGISTER_CLOCK("imx-keypad", NULL, kpp_clk)
	_REGISTER_CLOCK("mx25-adc", NULL, tsc_clk)
	/* the three I2C buses share one gate (i2c_clk) */
	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c_clk)
	_REGISTER_CLOCK("imx-i2c.1", NULL, i2c_clk)
	_REGISTER_CLOCK("imx-i2c.2", NULL, i2c_clk)
	_REGISTER_CLOCK("imx25-fec.0", NULL, fec_clk)
	_REGISTER_CLOCK("imxdi_rtc.0", NULL, dryice_clk)
	_REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk)
	_REGISTER_CLOCK("imx2-wdt.0", NULL, wdt_clk)
	_REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
	_REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
	_REGISTER_CLOCK("sdhci-esdhc-imx25.0", NULL, esdhc1_clk)
	_REGISTER_CLOCK("sdhci-esdhc-imx25.1", NULL, esdhc2_clk)
	_REGISTER_CLOCK("mx2-camera.0", NULL, csi_clk)
	_REGISTER_CLOCK(NULL, "audmux", audmux_clk)
	_REGISTER_CLOCK("flexcan.0", NULL, can1_clk)
	_REGISTER_CLOCK("flexcan.1", NULL, can2_clk)
	/* i.mx25 has the i.mx35 type sdma */
	_REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
	_REGISTER_CLOCK(NULL, "iim", iim_clk)
};
316
/*
 * Early i.MX25 clock setup: register the clkdev table, gate off unused
 * clocks, pick clock sources and start the GPT system timer.
 */
int __init mx25_clocks_init(void)
{
	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	/* Turn off all clocks except the ones we need to survive, namely:
	 * EMI, GPIO1-3 (CCM_CGCR1[18:16]), GPT1, IOMUXC (CCM_CGCR1[27]), IIM,
	 * SCC
	 */
	__raw_writel((1 << 19), CRM_BASE + CCM_CGCR0);
	__raw_writel((0xf << 16) | (3 << 26), CRM_BASE + CCM_CGCR1);
	__raw_writel((1 << 5), CRM_BASE + CCM_CGCR2);
#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
	/* keep the low-level debug UART clocked */
	clk_enable(&uart1_clk);
#endif

	/* Clock source for lcdc and csi is upll */
	/* (offset 0x64 is the per-clock source mux register — confirm
	 * its documented name against the reference manual) */
	__raw_writel(__raw_readl(CRM_BASE+0x64) | (1 << 7) | (1 << 0),
			CRM_BASE + 0x64);

	/* Clock source for gpt is ahb_div */
	__raw_writel(__raw_readl(CRM_BASE+0x64) & ~(1 << 5), CRM_BASE + 0x64);

	/* the IIM fuse box must be clocked to read the silicon revision */
	clk_enable(&iim_clk);
	imx_print_silicon_rev("i.MX25", mx25_revision());
	clk_disable(&iim_clk);

	mxc_timer_init(&gpt_clk, MX25_IO_ADDRESS(MX25_GPT1_BASE_ADDR), 54);

	return 0;
}
diff --git a/arch/arm/mach-imx/clock-imx27.c b/arch/arm/mach-imx/clock-imx27.c
deleted file mode 100644
index 98e04f5a87dd..000000000000
--- a/arch/arm/mach-imx/clock-imx27.c
+++ /dev/null
@@ -1,785 +0,0 @@
1/*
2 * Copyright 2004-2007 Freescale Semiconductor, Inc. All Rights Reserved.
3 * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
4 * Copyright 2008 Martin Fuzzey, mfuzzey@gmail.com
5 *
6 * This program is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU General Public License
8 * as published by the Free Software Foundation; either version 2
9 * of the License, or (at your option) any later version.
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software
17 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
18 * MA 02110-1301, USA.
19 */
20
21#include <linux/clk.h>
22#include <linux/io.h>
23#include <linux/module.h>
24#include <linux/clkdev.h>
25#include <linux/of.h>
26
27#include <asm/div64.h>
28
29#include <mach/clock.h>
30#include <mach/common.h>
31#include <mach/hardware.h>
32
33#define IO_ADDR_CCM(off) (MX27_IO_ADDRESS(MX27_CCM_BASE_ADDR + (off)))
34
35/* Register offsets */
36#define CCM_CSCR IO_ADDR_CCM(0x0)
37#define CCM_MPCTL0 IO_ADDR_CCM(0x4)
38#define CCM_MPCTL1 IO_ADDR_CCM(0x8)
39#define CCM_SPCTL0 IO_ADDR_CCM(0xc)
40#define CCM_SPCTL1 IO_ADDR_CCM(0x10)
41#define CCM_OSC26MCTL IO_ADDR_CCM(0x14)
42#define CCM_PCDR0 IO_ADDR_CCM(0x18)
43#define CCM_PCDR1 IO_ADDR_CCM(0x1c)
44#define CCM_PCCR0 IO_ADDR_CCM(0x20)
45#define CCM_PCCR1 IO_ADDR_CCM(0x24)
46#define CCM_CCSR IO_ADDR_CCM(0x28)
47#define CCM_PMCTL IO_ADDR_CCM(0x2c)
48#define CCM_PMCOUNT IO_ADDR_CCM(0x30)
49#define CCM_WKGDCTL IO_ADDR_CCM(0x34)
50
51#define CCM_CSCR_UPDATE_DIS (1 << 31)
52#define CCM_CSCR_SSI2 (1 << 23)
53#define CCM_CSCR_SSI1 (1 << 22)
54#define CCM_CSCR_VPU (1 << 21)
55#define CCM_CSCR_MSHC (1 << 20)
56#define CCM_CSCR_SPLLRES (1 << 19)
57#define CCM_CSCR_MPLLRES (1 << 18)
58#define CCM_CSCR_SP (1 << 17)
59#define CCM_CSCR_MCU (1 << 16)
60#define CCM_CSCR_OSC26MDIV (1 << 4)
61#define CCM_CSCR_OSC26M (1 << 3)
62#define CCM_CSCR_FPM (1 << 2)
63#define CCM_CSCR_SPEN (1 << 1)
64#define CCM_CSCR_MPEN (1 << 0)
65
66/* i.MX27 TO 2+ */
67#define CCM_CSCR_ARM_SRC (1 << 15)
68
69#define CCM_SPCTL1_LF (1 << 15)
70#define CCM_SPCTL1_BRMO (1 << 6)
71
72static struct clk mpll_main1_clk, mpll_main2_clk;
73
74static int clk_pccr_enable(struct clk *clk)
75{
76 unsigned long reg;
77
78 if (!clk->enable_reg)
79 return 0;
80
81 reg = __raw_readl(clk->enable_reg);
82 reg |= 1 << clk->enable_shift;
83 __raw_writel(reg, clk->enable_reg);
84
85 return 0;
86}
87
88static void clk_pccr_disable(struct clk *clk)
89{
90 unsigned long reg;
91
92 if (!clk->enable_reg)
93 return;
94
95 reg = __raw_readl(clk->enable_reg);
96 reg &= ~(1 << clk->enable_shift);
97 __raw_writel(reg, clk->enable_reg);
98}
99
/*
 * Power up the serial PLL and busy-wait until it reports lock
 * (SPCTL1.LF).
 * NOTE(review): the wait loop has no timeout and no cpu_relax(); it
 * spins forever if the PLL never locks.
 */
static int clk_spll_enable(struct clk *clk)
{
	unsigned long reg;

	reg = __raw_readl(CCM_CSCR);
	reg |= CCM_CSCR_SPEN;
	__raw_writel(reg, CCM_CSCR);

	/* wait for the PLL lock flag */
	while (!(__raw_readl(CCM_SPCTL1) & CCM_SPCTL1_LF));

	return 0;
}

/* Power the serial PLL back down by clearing SPEN. */
static void clk_spll_disable(struct clk *clk)
{
	unsigned long reg;

	reg = __raw_readl(CCM_CSCR);
	reg &= ~CCM_CSCR_SPEN;
	__raw_writel(reg, CCM_CSCR);
}
121
122static int clk_cpu_set_parent(struct clk *clk, struct clk *parent)
123{
124 int cscr = __raw_readl(CCM_CSCR);
125
126 if (clk->parent == parent)
127 return 0;
128
129 if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
130 if (parent == &mpll_main1_clk) {
131 cscr |= CCM_CSCR_ARM_SRC;
132 } else {
133 if (parent == &mpll_main2_clk)
134 cscr &= ~CCM_CSCR_ARM_SRC;
135 else
136 return -EINVAL;
137 }
138 __raw_writel(cscr, CCM_CSCR);
139 clk->parent = parent;
140 return 0;
141 }
142 return -ENODEV;
143}
144
145static unsigned long round_rate_cpu(struct clk *clk, unsigned long rate)
146{
147 int div;
148 unsigned long parent_rate;
149
150 parent_rate = clk_get_rate(clk->parent);
151
152 div = parent_rate / rate;
153 if (parent_rate % rate)
154 div++;
155
156 if (div > 4)
157 div = 4;
158
159 return parent_rate / div;
160}
161
162static int set_rate_cpu(struct clk *clk, unsigned long rate)
163{
164 unsigned int div;
165 uint32_t reg;
166 unsigned long parent_rate;
167
168 parent_rate = clk_get_rate(clk->parent);
169
170 div = parent_rate / rate;
171
172 if (div > 4 || div < 1 || ((parent_rate / div) != rate))
173 return -EINVAL;
174
175 div--;
176
177 reg = __raw_readl(CCM_CSCR);
178 if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
179 reg &= ~(3 << 12);
180 reg |= div << 12;
181 reg &= ~(CCM_CSCR_FPM | CCM_CSCR_SPEN);
182 __raw_writel(reg | CCM_CSCR_UPDATE_DIS, CCM_CSCR);
183 } else {
184 printk(KERN_ERR "Can't set CPU frequency!\n");
185 }
186
187 return 0;
188}
189
/* Round a PERCLK rate request to what the 6-bit divider allows:
 * divider rounded up, clamped at 64. */
static unsigned long round_rate_per(struct clk *clk, unsigned long rate)
{
	u32 div;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;
	if (parent_rate % rate)
		div++;

	if (div > 64)
		div = 64;

	return parent_rate / div;
}

/*
 * Program the divider of PERCLK<id+1>.  Each of the four per-clocks
 * owns an 8-bit slot (6 divider bits) in PCDR1; only exact dividers
 * of 1..64 are accepted.
 */
static int set_rate_per(struct clk *clk, unsigned long rate)
{
	u32 reg;
	u32 div;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	if (clk->id < 0 || clk->id > 3)
		return -EINVAL;

	div = parent_rate / rate;
	if (div > 64 || div < 1 || ((parent_rate / div) != rate))
		return -EINVAL;
	div--;	/* the field stores divider - 1 */

	reg = __raw_readl(CCM_PCDR1) & ~(0x3f << (clk->id << 3));
	reg |= div << (clk->id << 3);
	__raw_writel(reg, CCM_PCDR1);

	return 0;
}
229
230static unsigned long get_rate_usb(struct clk *clk)
231{
232 unsigned long usb_pdf;
233 unsigned long parent_rate;
234
235 parent_rate = clk_get_rate(clk->parent);
236
237 usb_pdf = (__raw_readl(CCM_CSCR) >> 28) & 0x7;
238
239 return parent_rate / (usb_pdf + 1U);
240}
241
/*
 * Common SSI rate computation: rate = 2 * parent / pdf, with the raw
 * predivider field adjusted per silicon revision (TO2+ adds 4; on
 * MX21/MX27-TO1 values below 2 select the maximum divider of 124).
 */
static unsigned long get_rate_ssix(struct clk *clk, unsigned long pdf)
{
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
		pdf += 4;  /* MX27 TO2+ */
	else
		pdf = (pdf < 2) ? 124UL : pdf; /* MX21 & MX27 TO1 */

	return 2UL * parent_rate / pdf;
}

/* SSI1 predivider field: PCDR0[21:16]. */
static unsigned long get_rate_ssi1(struct clk *clk)
{
	return get_rate_ssix(clk, (__raw_readl(CCM_PCDR0) >> 16) & 0x3f);
}

/* SSI2 predivider field: PCDR0[31:26]. */
static unsigned long get_rate_ssi2(struct clk *clk)
{
	return get_rate_ssix(clk, (__raw_readl(CCM_PCDR0) >> 26) & 0x3f);
}

/* NFC rate: parent / (pdf + 1); the field location moved between
 * silicon revisions. */
static unsigned long get_rate_nfc(struct clk *clk)
{
	unsigned long nfc_pdf;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
		nfc_pdf = (__raw_readl(CCM_PCDR0) >> 6) & 0xf;
	else
		nfc_pdf = (__raw_readl(CCM_PCDR0) >> 12) & 0xf;

	return parent_rate / (nfc_pdf + 1);
}

/* VPU rate: 2 * parent / pdf, with the same revision-dependent field
 * adjustment scheme as the SSI clocks. */
static unsigned long get_rate_vpu(struct clk *clk)
{
	unsigned long vpu_pdf;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
		vpu_pdf = (__raw_readl(CCM_PCDR0) >> 10) & 0x3f;
		vpu_pdf += 4;
	} else {
		vpu_pdf = (__raw_readl(CCM_PCDR0) >> 8) & 0xf;
		vpu_pdf = (vpu_pdf < 2) ? 124 : vpu_pdf;
	}

	return 2UL * parent_rate / vpu_pdf;
}
298
/* Delegate round_rate to the parent clock.
 * NOTE(review): dereferences clk->parent->round_rate without a NULL
 * check — only safe on clks whose parent implements it. */
static unsigned long round_rate_parent(struct clk *clk, unsigned long rate)
{
	return clk->parent->round_rate(clk->parent, rate);
}

/* Report the parent's rate unchanged (1:1 pass-through). */
static unsigned long get_rate_parent(struct clk *clk)
{
	return clk_get_rate(clk->parent);
}

/* Delegate set_rate to the parent clock.
 * NOTE(review): same unchecked function-pointer caveat as
 * round_rate_parent above. */
static int set_rate_parent(struct clk *clk, unsigned long rate)
{
	return clk->parent->set_rate(clk->parent, rate);
}
313
/* in Hz; default 26 MHz, may be overridden by board init code */
static unsigned long external_high_reference = 26000000;

/* Rate of the high-frequency external reference crystal. */
static unsigned long get_rate_high_reference(struct clk *clk)
{
	return external_high_reference;
}

/* in Hz; default 32.768 kHz */
static unsigned long external_low_reference = 32768;

/* Rate of the low-frequency external reference crystal. */
static unsigned long get_rate_low_reference(struct clk *clk)
{
	return external_low_reference;
}

/* Frequency pre-multiplier output: 1024 x the 32 kHz reference. */
static unsigned long get_rate_fpm(struct clk *clk)
{
	return clk_get_rate(clk->parent) * 1024;
}
334
/* MCU PLL rate, decoded from MPCTL0 against the parent reference. */
static unsigned long get_rate_mpll(struct clk *clk)
{
	return mxc_decode_pll(__raw_readl(CCM_MPCTL0),
			clk_get_rate(clk->parent));
}

/* Rate of one of the two MPLL-derived ARM source paths (see the
 * clk->id comment below). */
static unsigned long get_rate_mpll_main(struct clk *clk)
{
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	/* i.MX27 TO2:
	 * clk->id == 0: arm clock source path 1 which is from 2 * MPLL / 2
	 * clk->id == 1: arm clock source path 2 which is from 2 * MPLL / 3
	 */
	if (mx27_revision() >= IMX_CHIP_REVISION_2_0 && clk->id == 1)
		return 2UL * parent_rate / 3UL;

	return parent_rate;
}

/* Serial PLL rate, decoded from SPCTL0. */
static unsigned long get_rate_spll(struct clk *clk)
{
	uint32_t reg;
	unsigned long rate;

	rate = clk_get_rate(clk->parent);

	reg = __raw_readl(CCM_SPCTL0);

	/* On TO2 we have to write the value back. Otherwise we
	 * read 0 from this register the next time.
	 */
	if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
		__raw_writel(reg, CCM_SPCTL0);

	return mxc_decode_pll(reg, rate);
}
374
375static unsigned long get_rate_cpu(struct clk *clk)
376{
377 u32 div;
378 unsigned long rate;
379
380 if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
381 div = (__raw_readl(CCM_CSCR) >> 12) & 0x3;
382 else
383 div = (__raw_readl(CCM_CSCR) >> 13) & 0x7;
384
385 rate = clk_get_rate(clk->parent);
386 return rate / (div + 1);
387}
388
/* AHB bus rate: parent divided by the BCLK divider field, whose
 * position/width in CSCR differs between silicon revisions. */
static unsigned long get_rate_ahb(struct clk *clk)
{
	unsigned long rate, bclk_pdf;

	if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
		bclk_pdf = (__raw_readl(CCM_CSCR) >> 8) & 0x3;
	else
		bclk_pdf = (__raw_readl(CCM_CSCR) >> 9) & 0xf;

	rate = clk_get_rate(clk->parent);
	return rate / (bclk_pdf + 1);
}

/* IPG bus rate: equal to AHB on TO2+; on TO1 a one-bit divider
 * (CSCR[8]) may halve it. */
static unsigned long get_rate_ipg(struct clk *clk)
{
	unsigned long rate, ipg_pdf;

	if (mx27_revision() >= IMX_CHIP_REVISION_2_0)
		return clk_get_rate(clk->parent);
	else
		ipg_pdf = (__raw_readl(CCM_CSCR) >> 8) & 1;

	rate = clk_get_rate(clk->parent);
	return rate / (ipg_pdf + 1);
}
414
415static unsigned long get_rate_per(struct clk *clk)
416{
417 unsigned long perclk_pdf, parent_rate;
418
419 parent_rate = clk_get_rate(clk->parent);
420
421 if (clk->id < 0 || clk->id > 3)
422 return 0;
423
424 perclk_pdf = (__raw_readl(CCM_PCDR1) >> (clk->id << 3)) & 0x3f;
425
426 return parent_rate / (perclk_pdf + 1);
427}
428
/*
 * the high frequency external clock reference
 * Default case is 26MHz. Could be changed at runtime
 * with a call to change_external_high_reference()
 */
static struct clk ckih_clk = {
	.get_rate = get_rate_high_reference,
};

/* MCU PLL, fed from the high-frequency reference. */
static struct clk mpll_clk = {
	.parent = &ckih_clk,
	.get_rate = get_rate_mpll,
};

/* For i.MX27 TO2, it is the MPLL path 1 of ARM core
 * It provides the clock source whose rate is same as MPLL
 */
static struct clk mpll_main1_clk = {
	.id = 0,
	.parent = &mpll_clk,
	.get_rate = get_rate_mpll_main,
};

/* For i.MX27 TO2, it is the MPLL path 2 of ARM core
 * It provides the clock source whose rate is same MPLL * 2 / 3
 */
static struct clk mpll_main2_clk = {
	.id = 1,
	.parent = &mpll_clk,
	.get_rate = get_rate_mpll_main,
};

/* AHB bus clock, derived from MPLL path 2. */
static struct clk ahb_clk = {
	.parent = &mpll_main2_clk,
	.get_rate = get_rate_ahb,
};

/* IPG peripheral bus clock, derived from AHB. */
static struct clk ipg_clk = {
	.parent = &ahb_clk,
	.get_rate = get_rate_ipg,
};

/* ARM core clock; its parent can be switched between the two MPLL
 * paths at runtime (clk_cpu_set_parent). */
static struct clk cpu_clk = {
	.parent = &mpll_main2_clk,
	.set_parent = clk_cpu_set_parent,
	.round_rate = round_rate_cpu,
	.get_rate = get_rate_cpu,
	.set_rate = set_rate_cpu,
};

/* Serial PLL; can be powered up/down on demand. */
static struct clk spll_clk = {
	.parent = &ckih_clk,
	.get_rate = get_rate_spll,
	.enable = clk_spll_enable,
	.disable = clk_spll_disable,
};

/*
 * the low frequency external clock reference
 * Default case is 32.768kHz.
 */
static struct clk ckil_clk = {
	.get_rate = get_rate_low_reference,
};

/* Output of frequency pre multiplier */
static struct clk fpm_clk = {
	.parent = &ckil_clk,
	.get_rate = get_rate_fpm,
};
499
/* Short aliases so the DEFINE_CLOCK lists below fit on one line */
#define PCCR0 CCM_PCCR0
#define PCCR1 CCM_PCCR1

/*
 * Declare a gateable clock: 'er'/'es' name the PCCR enable register
 * and bit, 's' an optional secondary clock enabled alongside.
 */
#define DEFINE_CLOCK(name, i, er, es, gr, s, p)		\
	static struct clk name = {			\
		.id		= i,			\
		.enable_reg	= er,			\
		.enable_shift	= es,			\
		.get_rate	= gr,			\
		.enable		= clk_pccr_enable,	\
		.disable	= clk_pccr_disable,	\
		.secondary	= s,			\
		.parent		= p,			\
	}

/*
 * Like DEFINE_CLOCK, but wires the get_rate_<x>/set_rate_<x>/
 * round_rate_<x> function triple instead of a bare get_rate.
 */
#define DEFINE_CLOCK1(name, i, er, es, getsetround, s, p)	\
	static struct clk name = {				\
		.id		= i,				\
		.enable_reg	= er,				\
		.enable_shift	= es,				\
		.get_rate	= get_rate_##getsetround,	\
		.set_rate	= set_rate_##getsetround,	\
		.round_rate	= round_rate_##getsetround,	\
		.enable		= clk_pccr_enable,		\
		.disable	= clk_pccr_disable,		\
		.secondary	= s,				\
		.parent		= p,				\
	}
528
529/* Forward declaration to keep the following list in order */
530static struct clk slcdc_clk1, sahara2_clk1, rtic_clk1, fec_clk1, emma_clk1,
531 dma_clk1, lcdc_clk2, vpu_clk1;
532
533/* All clocks we can gate through PCCRx in the order of PCCRx bits */
534DEFINE_CLOCK(ssi2_clk1, 1, PCCR0, 0, NULL, NULL, &ipg_clk);
535DEFINE_CLOCK(ssi1_clk1, 0, PCCR0, 1, NULL, NULL, &ipg_clk);
536DEFINE_CLOCK(slcdc_clk, 0, PCCR0, 2, NULL, &slcdc_clk1, &ahb_clk);
537DEFINE_CLOCK(sdhc3_clk1, 0, PCCR0, 3, NULL, NULL, &ipg_clk);
538DEFINE_CLOCK(sdhc2_clk1, 0, PCCR0, 4, NULL, NULL, &ipg_clk);
539DEFINE_CLOCK(sdhc1_clk1, 0, PCCR0, 5, NULL, NULL, &ipg_clk);
540DEFINE_CLOCK(scc_clk, 0, PCCR0, 6, NULL, NULL, &ipg_clk);
541DEFINE_CLOCK(sahara2_clk, 0, PCCR0, 7, NULL, &sahara2_clk1, &ahb_clk);
542DEFINE_CLOCK(rtic_clk, 0, PCCR0, 8, NULL, &rtic_clk1, &ahb_clk);
543DEFINE_CLOCK(rtc_clk, 0, PCCR0, 9, NULL, NULL, &ipg_clk);
544DEFINE_CLOCK(pwm_clk1, 0, PCCR0, 11, NULL, NULL, &ipg_clk);
545DEFINE_CLOCK(owire_clk, 0, PCCR0, 12, NULL, NULL, &ipg_clk);
546DEFINE_CLOCK(mstick_clk1, 0, PCCR0, 13, NULL, NULL, &ipg_clk);
547DEFINE_CLOCK(lcdc_clk1, 0, PCCR0, 14, NULL, &lcdc_clk2, &ipg_clk);
548DEFINE_CLOCK(kpp_clk, 0, PCCR0, 15, NULL, NULL, &ipg_clk);
549DEFINE_CLOCK(iim_clk, 0, PCCR0, 16, NULL, NULL, &ipg_clk);
550DEFINE_CLOCK(i2c2_clk, 1, PCCR0, 17, NULL, NULL, &ipg_clk);
551DEFINE_CLOCK(i2c1_clk, 0, PCCR0, 18, NULL, NULL, &ipg_clk);
552DEFINE_CLOCK(gpt6_clk1, 0, PCCR0, 29, NULL, NULL, &ipg_clk);
553DEFINE_CLOCK(gpt5_clk1, 0, PCCR0, 20, NULL, NULL, &ipg_clk);
554DEFINE_CLOCK(gpt4_clk1, 0, PCCR0, 21, NULL, NULL, &ipg_clk);
555DEFINE_CLOCK(gpt3_clk1, 0, PCCR0, 22, NULL, NULL, &ipg_clk);
556DEFINE_CLOCK(gpt2_clk1, 0, PCCR0, 23, NULL, NULL, &ipg_clk);
557DEFINE_CLOCK(gpt1_clk1, 0, PCCR0, 24, NULL, NULL, &ipg_clk);
558DEFINE_CLOCK(gpio_clk, 0, PCCR0, 25, NULL, NULL, &ipg_clk);
559DEFINE_CLOCK(fec_clk, 0, PCCR0, 26, NULL, &fec_clk1, &ahb_clk);
560DEFINE_CLOCK(emma_clk, 0, PCCR0, 27, NULL, &emma_clk1, &ahb_clk);
561DEFINE_CLOCK(dma_clk, 0, PCCR0, 28, NULL, &dma_clk1, &ahb_clk);
562DEFINE_CLOCK(cspi13_clk1, 0, PCCR0, 29, NULL, NULL, &ipg_clk);
563DEFINE_CLOCK(cspi2_clk1, 0, PCCR0, 30, NULL, NULL, &ipg_clk);
564DEFINE_CLOCK(cspi1_clk1, 0, PCCR0, 31, NULL, NULL, &ipg_clk);
565
/* Gates in CCM_PCCR1, again listed in bit order */
DEFINE_CLOCK(mstick_clk, 0, PCCR1, 2, NULL, &mstick_clk1, &ipg_clk);
DEFINE_CLOCK(nfc_clk, 0, PCCR1, 3, get_rate_nfc, NULL, &cpu_clk);
DEFINE_CLOCK(ssi2_clk, 1, PCCR1, 4, get_rate_ssi2, &ssi2_clk1, &mpll_main2_clk);
DEFINE_CLOCK(ssi1_clk, 0, PCCR1, 5, get_rate_ssi1, &ssi1_clk1, &mpll_main2_clk);
DEFINE_CLOCK(vpu_clk, 0, PCCR1, 6, get_rate_vpu, &vpu_clk1, &mpll_main2_clk);
DEFINE_CLOCK1(per4_clk, 3, PCCR1, 7, per, NULL, &mpll_main2_clk);
DEFINE_CLOCK1(per3_clk, 2, PCCR1, 8, per, NULL, &mpll_main2_clk);
DEFINE_CLOCK1(per2_clk, 1, PCCR1, 9, per, NULL, &mpll_main2_clk);
DEFINE_CLOCK1(per1_clk, 0, PCCR1, 10, per, NULL, &mpll_main2_clk);
DEFINE_CLOCK(usb_clk1, 0, PCCR1, 11, NULL, NULL, &ahb_clk);
DEFINE_CLOCK(slcdc_clk1, 0, PCCR1, 12, NULL, NULL, &ahb_clk);
DEFINE_CLOCK(sahara2_clk1, 0, PCCR1, 13, NULL, NULL, &ahb_clk);
DEFINE_CLOCK(rtic_clk1, 0, PCCR1, 14, NULL, NULL, &ahb_clk);
DEFINE_CLOCK(lcdc_clk2, 0, PCCR1, 15, NULL, NULL, &ahb_clk);
DEFINE_CLOCK(vpu_clk1, 0, PCCR1, 16, NULL, NULL, &ahb_clk);
DEFINE_CLOCK(fec_clk1, 0, PCCR1, 17, NULL, NULL, &ahb_clk);
DEFINE_CLOCK(emma_clk1, 0, PCCR1, 18, NULL, NULL, &ahb_clk);
DEFINE_CLOCK(emi_clk, 0, PCCR1, 19, NULL, NULL, &ahb_clk);
DEFINE_CLOCK(dma_clk1, 0, PCCR1, 20, NULL, NULL, &ahb_clk);
DEFINE_CLOCK(csi_clk1, 0, PCCR1, 21, NULL, NULL, &ahb_clk);
DEFINE_CLOCK(brom_clk, 0, PCCR1, 22, NULL, NULL, &ahb_clk);
DEFINE_CLOCK(pata_clk, 0, PCCR1, 23, NULL, NULL, &ahb_clk);
DEFINE_CLOCK(wdog_clk, 0, PCCR1, 24, NULL, NULL, &ipg_clk);
DEFINE_CLOCK(usb_clk, 0, PCCR1, 25, get_rate_usb, &usb_clk1, &spll_clk);
DEFINE_CLOCK(uart6_clk1, 0, PCCR1, 26, NULL, NULL, &ipg_clk);
DEFINE_CLOCK(uart5_clk1, 0, PCCR1, 27, NULL, NULL, &ipg_clk);
DEFINE_CLOCK(uart4_clk1, 0, PCCR1, 28, NULL, NULL, &ipg_clk);
DEFINE_CLOCK(uart3_clk1, 0, PCCR1, 29, NULL, NULL, &ipg_clk);
DEFINE_CLOCK(uart2_clk1, 0, PCCR1, 30, NULL, NULL, &ipg_clk);
DEFINE_CLOCK(uart1_clk1, 0, PCCR1, 31, NULL, NULL, &ipg_clk);
596
/* Clocks we cannot directly gate, but drivers need their rates */
/* (each names a gateable *_clk1 secondary that carries the PCCRx bit) */
DEFINE_CLOCK(cspi1_clk, 0, NULL, 0, NULL, &cspi1_clk1, &per2_clk);
DEFINE_CLOCK(cspi2_clk, 1, NULL, 0, NULL, &cspi2_clk1, &per2_clk);
DEFINE_CLOCK(cspi3_clk, 2, NULL, 0, NULL, &cspi13_clk1, &per2_clk);
DEFINE_CLOCK(sdhc1_clk, 0, NULL, 0, NULL, &sdhc1_clk1, &per2_clk);
DEFINE_CLOCK(sdhc2_clk, 1, NULL, 0, NULL, &sdhc2_clk1, &per2_clk);
DEFINE_CLOCK(sdhc3_clk, 2, NULL, 0, NULL, &sdhc3_clk1, &per2_clk);
DEFINE_CLOCK(pwm_clk, 0, NULL, 0, NULL, &pwm_clk1, &per1_clk);
DEFINE_CLOCK(gpt1_clk, 0, NULL, 0, NULL, &gpt1_clk1, &per1_clk);
DEFINE_CLOCK(gpt2_clk, 1, NULL, 0, NULL, &gpt2_clk1, &per1_clk);
DEFINE_CLOCK(gpt3_clk, 2, NULL, 0, NULL, &gpt3_clk1, &per1_clk);
DEFINE_CLOCK(gpt4_clk, 3, NULL, 0, NULL, &gpt4_clk1, &per1_clk);
DEFINE_CLOCK(gpt5_clk, 4, NULL, 0, NULL, &gpt5_clk1, &per1_clk);
DEFINE_CLOCK(gpt6_clk, 5, NULL, 0, NULL, &gpt6_clk1, &per1_clk);
DEFINE_CLOCK(uart1_clk, 0, NULL, 0, NULL, &uart1_clk1, &per1_clk);
DEFINE_CLOCK(uart2_clk, 1, NULL, 0, NULL, &uart2_clk1, &per1_clk);
DEFINE_CLOCK(uart3_clk, 2, NULL, 0, NULL, &uart3_clk1, &per1_clk);
DEFINE_CLOCK(uart4_clk, 3, NULL, 0, NULL, &uart4_clk1, &per1_clk);
DEFINE_CLOCK(uart5_clk, 4, NULL, 0, NULL, &uart5_clk1, &per1_clk);
DEFINE_CLOCK(uart6_clk, 5, NULL, 0, NULL, &uart6_clk1, &per1_clk);
DEFINE_CLOCK1(lcdc_clk, 0, NULL, 0, parent, &lcdc_clk1, &per3_clk);
DEFINE_CLOCK1(csi_clk, 0, NULL, 0, parent, &csi_clk1, &per4_clk);
619
/* Build one clkdev table entry (dev_id/con_id matched by clk_get()) */
#define _REGISTER_CLOCK(d, n, c) \
	{ \
		.dev_id = d, \
		.con_id = n, \
		.clk = &c, \
	},

/* Device/connection-id to clock mapping, registered in mx27_clocks_init() */
static struct clk_lookup lookups[] = {
	/* i.mx27 has the i.mx21 type uart */
	_REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
	_REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
	_REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
	_REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
	_REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
	_REGISTER_CLOCK("imx21-uart.5", NULL, uart6_clk)
	_REGISTER_CLOCK(NULL, "gpt1", gpt1_clk)
	_REGISTER_CLOCK(NULL, "gpt2", gpt2_clk)
	_REGISTER_CLOCK(NULL, "gpt3", gpt3_clk)
	_REGISTER_CLOCK(NULL, "gpt4", gpt4_clk)
	_REGISTER_CLOCK(NULL, "gpt5", gpt5_clk)
	_REGISTER_CLOCK(NULL, "gpt6", gpt6_clk)
	_REGISTER_CLOCK("mxc_pwm.0", NULL, pwm_clk)
	_REGISTER_CLOCK("mxc-mmc.0", NULL, sdhc1_clk)
	_REGISTER_CLOCK("mxc-mmc.1", NULL, sdhc2_clk)
	_REGISTER_CLOCK("mxc-mmc.2", NULL, sdhc3_clk)
	_REGISTER_CLOCK("imx27-cspi.0", NULL, cspi1_clk)
	_REGISTER_CLOCK("imx27-cspi.1", NULL, cspi2_clk)
	_REGISTER_CLOCK("imx27-cspi.2", NULL, cspi3_clk)
	_REGISTER_CLOCK("imx-fb.0", NULL, lcdc_clk)
	_REGISTER_CLOCK("mx2-camera.0", NULL, csi_clk)
	_REGISTER_CLOCK("fsl-usb2-udc", "usb", usb_clk)
	_REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", usb_clk1)
	_REGISTER_CLOCK("mxc-ehci.0", "usb", usb_clk)
	_REGISTER_CLOCK("mxc-ehci.0", "usb_ahb", usb_clk1)
	_REGISTER_CLOCK("mxc-ehci.1", "usb", usb_clk)
	_REGISTER_CLOCK("mxc-ehci.1", "usb_ahb", usb_clk1)
	_REGISTER_CLOCK("mxc-ehci.2", "usb", usb_clk)
	_REGISTER_CLOCK("mxc-ehci.2", "usb_ahb", usb_clk1)
	_REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
	_REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
	_REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
	_REGISTER_CLOCK(NULL, "vpu", vpu_clk)
	_REGISTER_CLOCK(NULL, "dma", dma_clk)
	_REGISTER_CLOCK(NULL, "rtic", rtic_clk)
	_REGISTER_CLOCK(NULL, "brom", brom_clk)
	_REGISTER_CLOCK(NULL, "emma", emma_clk)
	_REGISTER_CLOCK("m2m-emmaprp.0", NULL, emma_clk)
	_REGISTER_CLOCK(NULL, "slcdc", slcdc_clk)
	_REGISTER_CLOCK("imx27-fec.0", NULL, fec_clk)
	_REGISTER_CLOCK(NULL, "emi", emi_clk)
	_REGISTER_CLOCK(NULL, "sahara2", sahara2_clk)
	_REGISTER_CLOCK("pata_imx", NULL, pata_clk)
	_REGISTER_CLOCK(NULL, "mstick", mstick_clk)
	_REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
	_REGISTER_CLOCK(NULL, "gpio", gpio_clk)
	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
	_REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
	_REGISTER_CLOCK(NULL, "iim", iim_clk)
	_REGISTER_CLOCK(NULL, "kpp", kpp_clk)
	_REGISTER_CLOCK("mxc_w1.0", NULL, owire_clk)
	_REGISTER_CLOCK(NULL, "rtc", rtc_clk)
	_REGISTER_CLOCK(NULL, "scc", scc_clk)
};
683
684/* Adjust the clock path for TO2 and later */
685static void __init to2_adjust_clocks(void)
686{
687 unsigned long cscr = __raw_readl(CCM_CSCR);
688
689 if (mx27_revision() >= IMX_CHIP_REVISION_2_0) {
690 if (cscr & CCM_CSCR_ARM_SRC)
691 cpu_clk.parent = &mpll_main1_clk;
692
693 if (!(cscr & CCM_CSCR_SSI2))
694 ssi1_clk.parent = &spll_clk;
695
696 if (!(cscr & CCM_CSCR_SSI1))
697 ssi1_clk.parent = &spll_clk;
698
699 if (!(cscr & CCM_CSCR_VPU))
700 vpu_clk.parent = &spll_clk;
701 } else {
702 cpu_clk.parent = &mpll_clk;
703 cpu_clk.set_parent = NULL;
704 cpu_clk.round_rate = NULL;
705 cpu_clk.set_rate = NULL;
706 ahb_clk.parent = &mpll_clk;
707
708 per1_clk.parent = &mpll_clk;
709 per2_clk.parent = &mpll_clk;
710 per3_clk.parent = &mpll_clk;
711 per4_clk.parent = &mpll_clk;
712
713 ssi1_clk.parent = &mpll_clk;
714 ssi2_clk.parent = &mpll_clk;
715
716 vpu_clk.parent = &mpll_clk;
717 }
718}
719
/*
 * must be called very early to get information about the
 * available clock rate when the timer framework starts
 */
int __init mx27_clocks_init(unsigned long fref)
{
	u32 cscr = __raw_readl(CCM_CSCR);

	/* fref is the board's high-frequency reference (CKIH) rate */
	external_high_reference = fref;

	/* detect clock reference for both system PLLs */
	if (cscr & CCM_CSCR_MCU)
		mpll_clk.parent = &ckih_clk;
	else
		mpll_clk.parent = &fpm_clk;

	if (cscr & CCM_CSCR_SP)
		spll_clk.parent = &ckih_clk;
	else
		spll_clk.parent = &fpm_clk;

	/* rework the tree for TO2+ muxing before registering lookups */
	to2_adjust_clocks();

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	/* Turn off all clocks we do not need */
	__raw_writel(0, CCM_PCCR0);
	/* keep PER1 (PCCR1 bit 10) and EMI (bit 19) gates open */
	__raw_writel((1 << 10) | (1 << 19), CCM_PCCR1);

	spll_clk.disable(&spll_clk);

	/* enable basic clocks */
	clk_enable(&per1_clk);
	clk_enable(&gpio_clk);
	clk_enable(&emi_clk);
	/* IIM clock is only kept on while reading the silicon revision */
	clk_enable(&iim_clk);
	imx_print_silicon_rev("i.MX27", mx27_revision());
	clk_disable(&iim_clk);

#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
	clk_enable(&uart1_clk);
#endif

	/* hand GPT1 to the system timer framework */
	mxc_timer_init(&gpt1_clk, MX27_IO_ADDRESS(MX27_GPT1_BASE_ADDR),
			MX27_INT_GPT1);

	return 0;
}
768
#ifdef CONFIG_OF
/*
 * Device-tree entry point: read the oscillator frequency from the
 * "fsl,imx-osc26m" fixed-clock node, then do the common init.
 */
int __init mx27_clocks_init_dt(void)
{
	struct device_node *np;
	u32 fref = 26000000; /* default */

	for_each_compatible_node(np, NULL, "fixed-clock") {
		if (!of_device_is_compatible(np, "fsl,imx-osc26m"))
			continue;

		/* of_property_read_u32() returns 0 on success */
		if (!of_property_read_u32(np, "clock-frequency", &fref))
			break;
	}

	return mx27_clocks_init(fref);
}
#endif
diff --git a/arch/arm/mach-imx/clock-imx31.c b/arch/arm/mach-imx/clock-imx31.c
deleted file mode 100644
index 3a943cd4159f..000000000000
--- a/arch/arm/mach-imx/clock-imx31.c
+++ /dev/null
@@ -1,630 +0,0 @@
1/*
2 * Copyright 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
3 * Copyright (C) 2008 by Sascha Hauer <kernel@pengutronix.de>
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
17 * MA 02110-1301, USA.
18 */
19
20#include <linux/module.h>
21#include <linux/spinlock.h>
22#include <linux/delay.h>
23#include <linux/clk.h>
24#include <linux/err.h>
25#include <linux/io.h>
26#include <linux/clkdev.h>
27
28#include <asm/div64.h>
29
30#include <mach/clock.h>
31#include <mach/hardware.h>
32#include <mach/mx31.h>
33#include <mach/common.h>
34
35#include "crmregs-imx3.h"
36
37#define PRE_DIV_MIN_FREQ 10000000 /* Minimum Frequency after Predivider */
38
39static void __calc_pre_post_dividers(u32 div, u32 *pre, u32 *post)
40{
41 u32 min_pre, temp_pre, old_err, err;
42
43 if (div >= 512) {
44 *pre = 8;
45 *post = 64;
46 } else if (div >= 64) {
47 min_pre = (div - 1) / 64 + 1;
48 old_err = 8;
49 for (temp_pre = 8; temp_pre >= min_pre; temp_pre--) {
50 err = div % temp_pre;
51 if (err == 0) {
52 *pre = temp_pre;
53 break;
54 }
55 err = temp_pre - err;
56 if (err < old_err) {
57 old_err = err;
58 *pre = temp_pre;
59 }
60 }
61 *post = (div + *pre - 1) / *pre;
62 } else if (div <= 8) {
63 *pre = div;
64 *post = 1;
65 } else {
66 *pre = 1;
67 *post = div;
68 }
69}
70
/* Forward declarations: these clocks are referenced before definition */
static struct clk mcu_pll_clk;
static struct clk serial_pll_clk;
static struct clk ipg_clk;
static struct clk ckih_clk;
75
76static int cgr_enable(struct clk *clk)
77{
78 u32 reg;
79
80 if (!clk->enable_reg)
81 return 0;
82
83 reg = __raw_readl(clk->enable_reg);
84 reg |= 3 << clk->enable_shift;
85 __raw_writel(reg, clk->enable_reg);
86
87 return 0;
88}
89
90static void cgr_disable(struct clk *clk)
91{
92 u32 reg;
93
94 if (!clk->enable_reg)
95 return;
96
97 reg = __raw_readl(clk->enable_reg);
98 reg &= ~(3 << clk->enable_shift);
99
100 /* special case for EMI clock */
101 if (clk->enable_reg == MXC_CCM_CGR2 && clk->enable_shift == 8)
102 reg |= (1 << clk->enable_shift);
103
104 __raw_writel(reg, clk->enable_reg);
105}
106
107static unsigned long pll_ref_get_rate(void)
108{
109 unsigned long ccmr;
110 unsigned int prcs;
111
112 ccmr = __raw_readl(MXC_CCM_CCMR);
113 prcs = (ccmr & MXC_CCM_CCMR_PRCS_MASK) >> MXC_CCM_CCMR_PRCS_OFFSET;
114 if (prcs == 0x1)
115 return CKIL_CLK_FREQ * 1024;
116 else
117 return clk_get_rate(&ckih_clk);
118}
119
120static unsigned long usb_pll_get_rate(struct clk *clk)
121{
122 unsigned long reg;
123
124 reg = __raw_readl(MXC_CCM_UPCTL);
125
126 return mxc_decode_pll(reg, pll_ref_get_rate());
127}
128
129static unsigned long serial_pll_get_rate(struct clk *clk)
130{
131 unsigned long reg;
132
133 reg = __raw_readl(MXC_CCM_SRPCTL);
134
135 return mxc_decode_pll(reg, pll_ref_get_rate());
136}
137
138static unsigned long mcu_pll_get_rate(struct clk *clk)
139{
140 unsigned long reg, ccmr;
141
142 ccmr = __raw_readl(MXC_CCM_CCMR);
143
144 if (!(ccmr & MXC_CCM_CCMR_MPE) || (ccmr & MXC_CCM_CCMR_MDS))
145 return clk_get_rate(&ckih_clk);
146
147 reg = __raw_readl(MXC_CCM_MPCTL);
148
149 return mxc_decode_pll(reg, pll_ref_get_rate());
150}
151
152static int usb_pll_enable(struct clk *clk)
153{
154 u32 reg;
155
156 reg = __raw_readl(MXC_CCM_CCMR);
157 reg |= MXC_CCM_CCMR_UPE;
158 __raw_writel(reg, MXC_CCM_CCMR);
159
160 /* No lock bit on MX31, so using max time from spec */
161 udelay(80);
162
163 return 0;
164}
165
166static void usb_pll_disable(struct clk *clk)
167{
168 u32 reg;
169
170 reg = __raw_readl(MXC_CCM_CCMR);
171 reg &= ~MXC_CCM_CCMR_UPE;
172 __raw_writel(reg, MXC_CCM_CCMR);
173}
174
175static int serial_pll_enable(struct clk *clk)
176{
177 u32 reg;
178
179 reg = __raw_readl(MXC_CCM_CCMR);
180 reg |= MXC_CCM_CCMR_SPE;
181 __raw_writel(reg, MXC_CCM_CCMR);
182
183 /* No lock bit on MX31, so using max time from spec */
184 udelay(80);
185
186 return 0;
187}
188
189static void serial_pll_disable(struct clk *clk)
190{
191 u32 reg;
192
193 reg = __raw_readl(MXC_CCM_CCMR);
194 reg &= ~MXC_CCM_CCMR_SPE;
195 __raw_writel(reg, MXC_CCM_CCMR);
196}
197
/* Extract a divider field from the post-divider registers PDR0..PDR2 */
#define PDR0(mask, off) ((__raw_readl(MXC_CCM_PDR0) & mask) >> off)
#define PDR1(mask, off) ((__raw_readl(MXC_CCM_PDR1) & mask) >> off)
#define PDR2(mask, off) ((__raw_readl(MXC_CCM_PDR2) & mask) >> off)
201
202static unsigned long mcu_main_get_rate(struct clk *clk)
203{
204 u32 pmcr0 = __raw_readl(MXC_CCM_PMCR0);
205
206 if ((pmcr0 & MXC_CCM_PMCR0_DFSUP1) == MXC_CCM_PMCR0_DFSUP1_SPLL)
207 return clk_get_rate(&serial_pll_clk);
208 else
209 return clk_get_rate(&mcu_pll_clk);
210}
211
212static unsigned long ahb_get_rate(struct clk *clk)
213{
214 unsigned long max_pdf;
215
216 max_pdf = PDR0(MXC_CCM_PDR0_MAX_PODF_MASK,
217 MXC_CCM_PDR0_MAX_PODF_OFFSET);
218 return clk_get_rate(clk->parent) / (max_pdf + 1);
219}
220
221static unsigned long ipg_get_rate(struct clk *clk)
222{
223 unsigned long ipg_pdf;
224
225 ipg_pdf = PDR0(MXC_CCM_PDR0_IPG_PODF_MASK,
226 MXC_CCM_PDR0_IPG_PODF_OFFSET);
227 return clk_get_rate(clk->parent) / (ipg_pdf + 1);
228}
229
230static unsigned long nfc_get_rate(struct clk *clk)
231{
232 unsigned long nfc_pdf;
233
234 nfc_pdf = PDR0(MXC_CCM_PDR0_NFC_PODF_MASK,
235 MXC_CCM_PDR0_NFC_PODF_OFFSET);
236 return clk_get_rate(clk->parent) / (nfc_pdf + 1);
237}
238
239static unsigned long hsp_get_rate(struct clk *clk)
240{
241 unsigned long hsp_pdf;
242
243 hsp_pdf = PDR0(MXC_CCM_PDR0_HSP_PODF_MASK,
244 MXC_CCM_PDR0_HSP_PODF_OFFSET);
245 return clk_get_rate(clk->parent) / (hsp_pdf + 1);
246}
247
248static unsigned long usb_get_rate(struct clk *clk)
249{
250 unsigned long usb_pdf, usb_prepdf;
251
252 usb_pdf = PDR1(MXC_CCM_PDR1_USB_PODF_MASK,
253 MXC_CCM_PDR1_USB_PODF_OFFSET);
254 usb_prepdf = PDR1(MXC_CCM_PDR1_USB_PRDF_MASK,
255 MXC_CCM_PDR1_USB_PRDF_OFFSET);
256 return clk_get_rate(clk->parent) / (usb_prepdf + 1) / (usb_pdf + 1);
257}
258
259static unsigned long csi_get_rate(struct clk *clk)
260{
261 u32 reg, pre, post;
262
263 reg = __raw_readl(MXC_CCM_PDR0);
264 pre = (reg & MXC_CCM_PDR0_CSI_PRDF_MASK) >>
265 MXC_CCM_PDR0_CSI_PRDF_OFFSET;
266 pre++;
267 post = (reg & MXC_CCM_PDR0_CSI_PODF_MASK) >>
268 MXC_CCM_PDR0_CSI_PODF_OFFSET;
269 post++;
270 return clk_get_rate(clk->parent) / (pre * post);
271}
272
273static unsigned long csi_round_rate(struct clk *clk, unsigned long rate)
274{
275 u32 pre, post, parent = clk_get_rate(clk->parent);
276 u32 div = parent / rate;
277
278 if (parent % rate)
279 div++;
280
281 __calc_pre_post_dividers(div, &pre, &post);
282
283 return parent / (pre * post);
284}
285
286static int csi_set_rate(struct clk *clk, unsigned long rate)
287{
288 u32 reg, div, pre, post, parent = clk_get_rate(clk->parent);
289
290 div = parent / rate;
291
292 if ((parent / div) != rate)
293 return -EINVAL;
294
295 __calc_pre_post_dividers(div, &pre, &post);
296
297 /* Set CSI clock divider */
298 reg = __raw_readl(MXC_CCM_PDR0) &
299 ~(MXC_CCM_PDR0_CSI_PODF_MASK | MXC_CCM_PDR0_CSI_PRDF_MASK);
300 reg |= (post - 1) << MXC_CCM_PDR0_CSI_PODF_OFFSET;
301 reg |= (pre - 1) << MXC_CCM_PDR0_CSI_PRDF_OFFSET;
302 __raw_writel(reg, MXC_CCM_PDR0);
303
304 return 0;
305}
306
307static unsigned long ssi1_get_rate(struct clk *clk)
308{
309 unsigned long ssi1_pdf, ssi1_prepdf;
310
311 ssi1_pdf = PDR1(MXC_CCM_PDR1_SSI1_PODF_MASK,
312 MXC_CCM_PDR1_SSI1_PODF_OFFSET);
313 ssi1_prepdf = PDR1(MXC_CCM_PDR1_SSI1_PRE_PODF_MASK,
314 MXC_CCM_PDR1_SSI1_PRE_PODF_OFFSET);
315 return clk_get_rate(clk->parent) / (ssi1_prepdf + 1) / (ssi1_pdf + 1);
316}
317
318static unsigned long ssi2_get_rate(struct clk *clk)
319{
320 unsigned long ssi2_pdf, ssi2_prepdf;
321
322 ssi2_pdf = PDR1(MXC_CCM_PDR1_SSI2_PODF_MASK,
323 MXC_CCM_PDR1_SSI2_PODF_OFFSET);
324 ssi2_prepdf = PDR1(MXC_CCM_PDR1_SSI2_PRE_PODF_MASK,
325 MXC_CCM_PDR1_SSI2_PRE_PODF_OFFSET);
326 return clk_get_rate(clk->parent) / (ssi2_prepdf + 1) / (ssi2_pdf + 1);
327}
328
329static unsigned long firi_get_rate(struct clk *clk)
330{
331 unsigned long firi_pdf, firi_prepdf;
332
333 firi_pdf = PDR1(MXC_CCM_PDR1_FIRI_PODF_MASK,
334 MXC_CCM_PDR1_FIRI_PODF_OFFSET);
335 firi_prepdf = PDR1(MXC_CCM_PDR1_FIRI_PRE_PODF_MASK,
336 MXC_CCM_PDR1_FIRI_PRE_PODF_OFFSET);
337 return clk_get_rate(clk->parent) / (firi_prepdf + 1) / (firi_pdf + 1);
338}
339
340static unsigned long firi_round_rate(struct clk *clk, unsigned long rate)
341{
342 u32 pre, post;
343 u32 parent = clk_get_rate(clk->parent);
344 u32 div = parent / rate;
345
346 if (parent % rate)
347 div++;
348
349 __calc_pre_post_dividers(div, &pre, &post);
350
351 return parent / (pre * post);
352
353}
354
355static int firi_set_rate(struct clk *clk, unsigned long rate)
356{
357 u32 reg, div, pre, post, parent = clk_get_rate(clk->parent);
358
359 div = parent / rate;
360
361 if ((parent / div) != rate)
362 return -EINVAL;
363
364 __calc_pre_post_dividers(div, &pre, &post);
365
366 /* Set FIRI clock divider */
367 reg = __raw_readl(MXC_CCM_PDR1) &
368 ~(MXC_CCM_PDR1_FIRI_PODF_MASK | MXC_CCM_PDR1_FIRI_PRE_PODF_MASK);
369 reg |= (pre - 1) << MXC_CCM_PDR1_FIRI_PRE_PODF_OFFSET;
370 reg |= (post - 1) << MXC_CCM_PDR1_FIRI_PODF_OFFSET;
371 __raw_writel(reg, MXC_CCM_PDR1);
372
373 return 0;
374}
375
/* MBX always runs at half its parent (AHB) rate; no divider register */
static unsigned long mbx_get_rate(struct clk *clk)
{
	return clk_get_rate(clk->parent) / 2;
}
380
381static unsigned long mstick1_get_rate(struct clk *clk)
382{
383 unsigned long msti_pdf;
384
385 msti_pdf = PDR2(MXC_CCM_PDR2_MST1_PDF_MASK,
386 MXC_CCM_PDR2_MST1_PDF_OFFSET);
387 return clk_get_rate(clk->parent) / (msti_pdf + 1);
388}
389
390static unsigned long mstick2_get_rate(struct clk *clk)
391{
392 unsigned long msti_pdf;
393
394 msti_pdf = PDR2(MXC_CCM_PDR2_MST2_PDF_MASK,
395 MXC_CCM_PDR2_MST2_PDF_OFFSET);
396 return clk_get_rate(clk->parent) / (msti_pdf + 1);
397}
398
/* CKIH rate as passed to mx31_clocks_init(); board specific */
static unsigned long ckih_rate;

static unsigned long clk_ckih_get_rate(struct clk *clk)
{
	return ckih_rate;
}

/* Fixed low-frequency reference (CKIL_CLK_FREQ) */
static unsigned long clk_ckil_get_rate(struct clk *clk)
{
	return CKIL_CLK_FREQ;
}
410
/* High-frequency external reference; rate set by mx31_clocks_init() */
static struct clk ckih_clk = {
	.get_rate = clk_ckih_get_rate,
};

/* MCU PLL, default source of the MCU main clock */
static struct clk mcu_pll_clk = {
	.parent = &ckih_clk,
	.get_rate = mcu_pll_get_rate,
};

/* MCU main clock: MCU PLL or serial PLL (see mcu_main_get_rate()) */
static struct clk mcu_main_clk = {
	.parent = &mcu_pll_clk,
	.get_rate = mcu_main_get_rate,
};

/* Serial PLL; feeds SSI and CSI in the lists below */
static struct clk serial_pll_clk = {
	.parent = &ckih_clk,
	.get_rate = serial_pll_get_rate,
	.enable = serial_pll_enable,
	.disable = serial_pll_disable,
};

/* USB PLL; feeds the USB and Memory Stick clocks below */
static struct clk usb_pll_clk = {
	.parent = &ckih_clk,
	.get_rate = usb_pll_get_rate,
	.enable = usb_pll_enable,
	.disable = usb_pll_disable,
};

/* AHB bus clock, divided down from the MCU main clock */
static struct clk ahb_clk = {
	.parent = &mcu_main_clk,
	.get_rate = ahb_get_rate,
};
443
/*
 * Declare a gateable clock: 'er'/'es' give the CGR register and shift
 * of its 2-bit gate, 's' an optional secondary clock.
 */
#define DEFINE_CLOCK(name, i, er, es, gr, s, p)		\
	static struct clk name = {			\
		.id		= i,			\
		.enable_reg	= er,			\
		.enable_shift	= es,			\
		.get_rate	= gr,			\
		.enable		= cgr_enable,		\
		.disable	= cgr_disable,		\
		.secondary	= s,			\
		.parent		= p,			\
	}

/*
 * Like DEFINE_CLOCK, but wires the <x>_get_rate/<x>_set_rate/
 * <x>_round_rate function triple.
 */
#define DEFINE_CLOCK1(name, i, er, es, getsetround, s, p)	\
	static struct clk name = {				\
		.id		= i,				\
		.enable_reg	= er,				\
		.enable_shift	= es,				\
		.get_rate	= getsetround##_get_rate,	\
		.set_rate	= getsetround##_set_rate,	\
		.round_rate	= getsetround##_round_rate,	\
		.enable		= cgr_enable,			\
		.disable	= cgr_disable,			\
		.secondary	= s,				\
		.parent		= p,				\
	}
469
/* Ungated helper clocks */
DEFINE_CLOCK(perclk_clk, 0, NULL, 0, NULL, NULL, &ipg_clk);
DEFINE_CLOCK(ckil_clk, 0, NULL, 0, clk_ckil_get_rate, NULL, NULL);

/* Gates in MXC_CCM_CGR0 (two bits per clock, see cgr_enable()) */
DEFINE_CLOCK(sdhc1_clk, 0, MXC_CCM_CGR0, 0, NULL, NULL, &perclk_clk);
DEFINE_CLOCK(sdhc2_clk, 1, MXC_CCM_CGR0, 2, NULL, NULL, &perclk_clk);
DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CGR0, 4, NULL, NULL, &perclk_clk);
DEFINE_CLOCK(epit1_clk, 0, MXC_CCM_CGR0, 6, NULL, NULL, &perclk_clk);
DEFINE_CLOCK(epit2_clk, 1, MXC_CCM_CGR0, 8, NULL, NULL, &perclk_clk);
DEFINE_CLOCK(iim_clk, 0, MXC_CCM_CGR0, 10, NULL, NULL, &ipg_clk);
DEFINE_CLOCK(pata_clk, 0, MXC_CCM_CGR0, 12, NULL, NULL, &ipg_clk);
DEFINE_CLOCK(sdma_clk1, 0, MXC_CCM_CGR0, 14, NULL, NULL, &ahb_clk);
DEFINE_CLOCK(cspi3_clk, 2, MXC_CCM_CGR0, 16, NULL, NULL, &ipg_clk);
DEFINE_CLOCK(rng_clk, 0, MXC_CCM_CGR0, 18, NULL, NULL, &ipg_clk);
DEFINE_CLOCK(uart1_clk, 0, MXC_CCM_CGR0, 20, NULL, NULL, &perclk_clk);
DEFINE_CLOCK(uart2_clk, 1, MXC_CCM_CGR0, 22, NULL, NULL, &perclk_clk);
DEFINE_CLOCK(ssi1_clk, 0, MXC_CCM_CGR0, 24, ssi1_get_rate, NULL, &serial_pll_clk);
DEFINE_CLOCK(i2c1_clk, 0, MXC_CCM_CGR0, 26, NULL, NULL, &perclk_clk);
DEFINE_CLOCK(i2c2_clk, 1, MXC_CCM_CGR0, 28, NULL, NULL, &perclk_clk);
DEFINE_CLOCK(i2c3_clk, 2, MXC_CCM_CGR0, 30, NULL, NULL, &perclk_clk);

/* Gates in MXC_CCM_CGR1 */
DEFINE_CLOCK(mpeg4_clk, 0, MXC_CCM_CGR1, 0, NULL, NULL, &ahb_clk);
DEFINE_CLOCK(mstick1_clk, 0, MXC_CCM_CGR1, 2, mstick1_get_rate, NULL, &usb_pll_clk);
DEFINE_CLOCK(mstick2_clk, 1, MXC_CCM_CGR1, 4, mstick2_get_rate, NULL, &usb_pll_clk);
DEFINE_CLOCK1(csi_clk, 0, MXC_CCM_CGR1, 6, csi, NULL, &serial_pll_clk);
DEFINE_CLOCK(rtc_clk, 0, MXC_CCM_CGR1, 8, NULL, NULL, &ckil_clk);
DEFINE_CLOCK(wdog_clk, 0, MXC_CCM_CGR1, 10, NULL, NULL, &ipg_clk);
DEFINE_CLOCK(pwm_clk, 0, MXC_CCM_CGR1, 12, NULL, NULL, &perclk_clk);
DEFINE_CLOCK(usb_clk2, 0, MXC_CCM_CGR1, 18, usb_get_rate, NULL, &ahb_clk);
DEFINE_CLOCK(kpp_clk, 0, MXC_CCM_CGR1, 20, NULL, NULL, &ipg_clk);
DEFINE_CLOCK(ipu_clk, 0, MXC_CCM_CGR1, 22, hsp_get_rate, NULL, &mcu_main_clk);
DEFINE_CLOCK(uart3_clk, 2, MXC_CCM_CGR1, 24, NULL, NULL, &perclk_clk);
DEFINE_CLOCK(uart4_clk, 3, MXC_CCM_CGR1, 26, NULL, NULL, &perclk_clk);
DEFINE_CLOCK(uart5_clk, 4, MXC_CCM_CGR1, 28, NULL, NULL, &perclk_clk);
DEFINE_CLOCK(owire_clk, 0, MXC_CCM_CGR1, 30, NULL, NULL, &perclk_clk);

/* Gates in MXC_CCM_CGR2 */
DEFINE_CLOCK(ssi2_clk, 1, MXC_CCM_CGR2, 0, ssi2_get_rate, NULL, &serial_pll_clk);
DEFINE_CLOCK(cspi1_clk, 0, MXC_CCM_CGR2, 2, NULL, NULL, &ipg_clk);
DEFINE_CLOCK(cspi2_clk, 1, MXC_CCM_CGR2, 4, NULL, NULL, &ipg_clk);
DEFINE_CLOCK(mbx_clk, 0, MXC_CCM_CGR2, 6, mbx_get_rate, NULL, &ahb_clk);
DEFINE_CLOCK(emi_clk, 0, MXC_CCM_CGR2, 8, NULL, NULL, &ahb_clk);
DEFINE_CLOCK(rtic_clk, 0, MXC_CCM_CGR2, 10, NULL, NULL, &ahb_clk);
DEFINE_CLOCK1(firi_clk, 0, MXC_CCM_CGR2, 12, firi, NULL, &usb_pll_clk);

/* Clocks without a CGR gate; rates only */
DEFINE_CLOCK(sdma_clk2, 0, NULL, 0, NULL, NULL, &ipg_clk);
DEFINE_CLOCK(usb_clk1, 0, NULL, 0, usb_get_rate, NULL, &usb_pll_clk);
DEFINE_CLOCK(nfc_clk, 0, NULL, 0, nfc_get_rate, NULL, &ahb_clk);
DEFINE_CLOCK(scc_clk, 0, NULL, 0, NULL, NULL, &ipg_clk);
DEFINE_CLOCK(ipg_clk, 0, NULL, 0, ipg_get_rate, NULL, &ahb_clk);
518
/* Build one clkdev table entry (dev_id/con_id matched by clk_get()) */
#define _REGISTER_CLOCK(d, n, c) \
	{ \
		.dev_id = d, \
		.con_id = n, \
		.clk = &c, \
	},

static struct clk_lookup lookups[] = {
	_REGISTER_CLOCK(NULL, "emi", emi_clk)
	_REGISTER_CLOCK("imx31-cspi.0", NULL, cspi1_clk)
	_REGISTER_CLOCK("imx31-cspi.1", NULL, cspi2_clk)
	_REGISTER_CLOCK("imx31-cspi.2", NULL, cspi3_clk)
	_REGISTER_CLOCK(NULL, "gpt", gpt_clk)
	_REGISTER_CLOCK(NULL, "pwm", pwm_clk)
	_REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
	_REGISTER_CLOCK(NULL, "rtc", rtc_clk)
	/*
	 * NOTE(review): "epit" and, below, "mstick" are registered twice
	 * with identical ids — clk_get() returns the first match; verify
	 * the second entries are intentional.
	 */
	_REGISTER_CLOCK(NULL, "epit", epit1_clk)
	_REGISTER_CLOCK(NULL, "epit", epit2_clk)
	_REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
	_REGISTER_CLOCK("ipu-core", NULL, ipu_clk)
	_REGISTER_CLOCK("mx3_sdc_fb", NULL, ipu_clk)
	_REGISTER_CLOCK(NULL, "kpp", kpp_clk)
	_REGISTER_CLOCK("mxc-ehci.0", "usb", usb_clk1)
	_REGISTER_CLOCK("mxc-ehci.0", "usb_ahb", usb_clk2)
	_REGISTER_CLOCK("mxc-ehci.1", "usb", usb_clk1)
	_REGISTER_CLOCK("mxc-ehci.1", "usb_ahb", usb_clk2)
	_REGISTER_CLOCK("mxc-ehci.2", "usb", usb_clk1)
	_REGISTER_CLOCK("mxc-ehci.2", "usb_ahb", usb_clk2)
	_REGISTER_CLOCK("fsl-usb2-udc", "usb", usb_clk1)
	_REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", usb_clk2)
	_REGISTER_CLOCK("mx3-camera.0", NULL, csi_clk)
	/* i.mx31 has the i.mx21 type uart */
	_REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
	_REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
	_REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
	_REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
	_REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
	_REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
	_REGISTER_CLOCK("imx-i2c.2", NULL, i2c3_clk)
	_REGISTER_CLOCK("mxc_w1.0", NULL, owire_clk)
	_REGISTER_CLOCK("mxc-mmc.0", NULL, sdhc1_clk)
	_REGISTER_CLOCK("mxc-mmc.1", NULL, sdhc2_clk)
	_REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
	_REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
	_REGISTER_CLOCK(NULL, "firi", firi_clk)
	_REGISTER_CLOCK("pata_imx", NULL, pata_clk)
	_REGISTER_CLOCK(NULL, "rtic", rtic_clk)
	_REGISTER_CLOCK(NULL, "rng", rng_clk)
	_REGISTER_CLOCK("imx31-sdma", NULL, sdma_clk1)
	_REGISTER_CLOCK(NULL, "sdma_ipg", sdma_clk2)
	_REGISTER_CLOCK(NULL, "mstick", mstick1_clk)
	_REGISTER_CLOCK(NULL, "mstick", mstick2_clk)
	_REGISTER_CLOCK(NULL, "scc", scc_clk)
	_REGISTER_CLOCK(NULL, "iim", iim_clk)
	_REGISTER_CLOCK(NULL, "mpeg4", mpeg4_clk)
	_REGISTER_CLOCK(NULL, "mbx", mbx_clk)
};
577
/*
 * Early clock setup; fref is the board's CKIH crystal frequency.
 * Must run before the timer framework queries clock rates.
 */
int __init mx31_clocks_init(unsigned long fref)
{
	u32 reg;

	ckih_rate = fref;

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	/* change the csi_clk parent if necessary */
	reg = __raw_readl(MXC_CCM_CCMR);
	if (!(reg & MXC_CCM_CCMR_CSCS))
		if (clk_set_parent(&csi_clk, &usb_pll_clk))
			pr_err("%s: error changing csi_clk parent\n", __func__);


	/* Turn off all possible clocks; GPT (CGR0 bits 5:4) stays on */
	__raw_writel((3 << 4), MXC_CCM_CGR0);
	__raw_writel(0, MXC_CCM_CGR1);
	/* keep EMI (CGR2 bits 9:8) and the other listed gates open */
	__raw_writel((3 << 8) | (3 << 14) | (3 << 16)|
			1 << 27 | 1 << 28, /* Bit 27 and 28 are not defined for
					    MX32, but still required to be set */
			MXC_CCM_CGR2);

	/*
	 * Before turning off usb_pll make sure ipg_per_clk is generated
	 * by ipg_clk and not usb_pll.
	 */
	__raw_writel(__raw_readl(MXC_CCM_CCMR) | (1 << 24), MXC_CCM_CCMR);

	usb_pll_disable(&usb_pll_clk);

	pr_info("Clock input source is %ld\n", clk_get_rate(&ckih_clk));

	clk_enable(&gpt_clk);
	clk_enable(&emi_clk);
	/* IIM clock is only kept on while reading the silicon revision */
	clk_enable(&iim_clk);
	mx31_revision();
	clk_disable(&iim_clk);

	clk_enable(&serial_pll_clk);

	if (mx31_revision() >= IMX_CHIP_REVISION_2_0) {
		reg = __raw_readl(MXC_CCM_PMCR1);
		/* No PLL restart on DVFS switch; enable auto EMI handshake */
		reg |= MXC_CCM_PMCR1_PLLRDIS | MXC_CCM_PMCR1_EMIRQ_EN;
		__raw_writel(reg, MXC_CCM_PMCR1);
	}

	/* hand GPT1 (clocked from IPG) to the system timer framework */
	mxc_timer_init(&ipg_clk, MX31_IO_ADDRESS(MX31_GPT1_BASE_ADDR),
			MX31_INT_GPT);

	return 0;
}
diff --git a/arch/arm/mach-imx/clock-imx35.c b/arch/arm/mach-imx/clock-imx35.c
deleted file mode 100644
index e56c1a83eee3..000000000000
--- a/arch/arm/mach-imx/clock-imx35.c
+++ /dev/null
@@ -1,536 +0,0 @@
1/*
2 * Copyright (C) 2009 by Sascha Hauer, Pengutronix
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version 2
7 * of the License, or (at your option) any later version.
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
16 * MA 02110-1301, USA.
17 */
18
19#include <linux/kernel.h>
20#include <linux/init.h>
21#include <linux/list.h>
22#include <linux/clk.h>
23#include <linux/io.h>
24#include <linux/clkdev.h>
25
26#include <mach/clock.h>
27#include <mach/hardware.h>
28#include <mach/common.h>
29
30#include "crmregs-imx3.h"
31
32#ifdef HAVE_SET_RATE_SUPPORT
/*
 * Split an overall divider @div into a predivider (*pre, 1..8) and a
 * postdivider (*post, at most @maxpost).  The loop scans pre candidates
 * from 8 downwards, keeping the one whose remainder against @div is
 * smallest (an exact divisor wins immediately); *post is then the
 * round-up of div / *pre.
 * NOTE(review): if no loop iteration assigns *pre (possible when @div
 * exceeds 8 * @maxpost), *pre is used uninitialised at the end — the
 * visible callers appear to bound @div first, but confirm.
 */
33static void calc_dividers(u32 div, u32 *pre, u32 *post, u32 maxpost)
34{
35	u32 min_pre, temp_pre, old_err, err;
36
37	min_pre = (div - 1) / maxpost + 1;
38	old_err = 8;
39
40	for (temp_pre = 8; temp_pre >= min_pre; temp_pre--) {
41		if (div > (temp_pre * maxpost))
42			break;
43
44		if (div < (temp_pre * temp_pre))
45			continue;
46
47		err = div % temp_pre;
48
49		if (err == 0) {
50			*pre = temp_pre;
51			break;
52		}
53
54		err = temp_pre - err;
55
56		if (err < old_err) {
57			old_err = err;
58			*pre = temp_pre;
59		}
60	}
61
62	*post = (div + *pre - 1) / *pre;
63}
64
65/* get the best values for a 3-bit divider combined with a 6-bit divider */
66static void calc_dividers_3_6(u32 div, u32 *pre, u32 *post)
67{
	/* >= 512 saturates at the hardware maximum 8 * 64 */
68	if (div >= 512) {
69		*pre = 8;
70		*post = 64;
71	} else if (div >= 64) {
72		calc_dividers(div, pre, post, 64);
73	} else if (div <= 8) {
		/* small enough for the 3-bit stage alone */
74		*pre = div;
75		*post = 1;
76	} else {
		/* 9..63 fits entirely in the 6-bit stage */
77		*pre = 1;
78		*post = div;
79	}
80}
81
82/* get the best values for two cascaded 3-bit dividers */
/* get the best values for two cascaded 3-bit dividers (max 8 * 8 = 64) */
83static void calc_dividers_3_3(u32 div, u32 *pre, u32 *post)
84{
85	if (div >= 64) {
86		*pre = *post = 8;
87	} else if (div > 8) {
88		calc_dividers(div, pre, post, 8);
89	} else {
		/* 1..8 handled by the second stage alone */
90		*pre = 1;
91		*post = div;
92	}
93}
94#endif
95
/* Rate of the MCU PLL, decoded from MPCTL against the 24 MHz oscillator. */
96static unsigned long get_rate_mpll(void)
97{
98	ulong mpctl = __raw_readl(MX35_CCM_MPCTL);
99
100	return mxc_decode_pll(mpctl, 24000000);
101}
102
/* Rate of the peripheral PLL, decoded from PPCTL (24 MHz reference). */
103static unsigned long get_rate_ppll(void)
104{
105	ulong ppctl = __raw_readl(MX35_CCM_PPCTL);
106
107	return mxc_decode_pll(ppctl, 24000000);
108}
109
/*
 * ARM/AHB divider table, indexed by the 4-bit field at PDR0[19:16] (see
 * get_rate_arm()/get_rate_ahb()).  'sel' selects the 3/4-rate MPLL tap;
 * all-zero rows correspond to encodings not used by the code — NOTE(review):
 * presumably reserved in the CCM, confirm against the i.MX35 manual.
 */
110struct arm_ahb_div {
111	unsigned char arm, ahb, sel;
112};
113
114static struct arm_ahb_div clk_consumer[] = {
115	{ .arm = 1, .ahb = 4, .sel = 0},
116	{ .arm = 1, .ahb = 3, .sel = 1},
117	{ .arm = 2, .ahb = 2, .sel = 0},
118	{ .arm = 0, .ahb = 0, .sel = 0},
119	{ .arm = 0, .ahb = 0, .sel = 0},
120	{ .arm = 0, .ahb = 0, .sel = 0},
121	{ .arm = 4, .ahb = 1, .sel = 0},
122	{ .arm = 1, .ahb = 5, .sel = 0},
123	{ .arm = 1, .ahb = 8, .sel = 0},
124	{ .arm = 1, .ahb = 6, .sel = 1},
125	{ .arm = 2, .ahb = 4, .sel = 0},
126	{ .arm = 0, .ahb = 0, .sel = 0},
127	{ .arm = 0, .ahb = 0, .sel = 0},
128	{ .arm = 0, .ahb = 0, .sel = 0},
129	{ .arm = 4, .ahb = 2, .sel = 0},
130	{ .arm = 0, .ahb = 0, .sel = 0},
131};
132
/*
 * ARM core rate: MPLL (optionally scaled to 3/4 when 'sel' is set) divided
 * by the table entry chosen by PDR0[19:16].
 * NOTE(review): the all-zero clk_consumer rows have .arm == 0, so a
 * reserved PDR0 encoding would divide by zero here — confirm those
 * encodings cannot occur on real silicon.
 */
133static unsigned long get_rate_arm(void)
134{
135	unsigned long pdr0 = __raw_readl(MXC_CCM_PDR0);
136	struct arm_ahb_div *aad;
137	unsigned long fref = get_rate_mpll();
138
139	aad = &clk_consumer[(pdr0 >> 16) & 0xf];
140	if (aad->sel)
141		fref = fref * 3 / 4;
142
143	return fref / aad->arm;
144}
145
/* AHB bus rate: ARM rate divided by the 'ahb' field of the same PDR0 row.
 * The clk argument is unused; callers may pass NULL. */
146static unsigned long get_rate_ahb(struct clk *clk)
147{
148	unsigned long pdr0 = __raw_readl(MXC_CCM_PDR0);
149	struct arm_ahb_div *aad;
150	unsigned long fref = get_rate_arm();
151
152	aad = &clk_consumer[(pdr0 >> 16) & 0xf];
153
154	return fref / aad->ahb;
155}
156
/* IPG rate is fixed at half the AHB rate. */
157static unsigned long get_rate_ipg(struct clk *clk)
158{
159	return get_rate_ahb(NULL) >> 1;
160}
161
/*
 * UART rate: source is the ARM clock when PDR3 bit 14 is set, otherwise
 * the peripheral PLL; divided by the 6-bit field at PDR4[15:10] plus one.
 */
162static unsigned long get_rate_uart(struct clk *clk)
163{
164	unsigned long pdr3 = __raw_readl(MX35_CCM_PDR3);
165	unsigned long pdr4 = __raw_readl(MX35_CCM_PDR4);
166	unsigned long div = ((pdr4 >> 10) & 0x3f) + 1;
167
168	if (pdr3 & (1 << 14))
169		return get_rate_arm() / div;
170	else
171		return get_rate_ppll() / div;
172}
173
/*
 * eSDHC rate for controller clk->id (0..2): ARM or peripheral PLL source
 * (PDR3 bit 6), divided by the per-controller 6-bit field in PDR3 plus one.
 * An out-of-range id deliberately falls through to controller 0.
 */
174static unsigned long get_rate_sdhc(struct clk *clk)
175{
176	unsigned long pdr3 = __raw_readl(MX35_CCM_PDR3);
177	unsigned long div, rate;
178
179	if (pdr3 & (1 << 6))
180		rate = get_rate_arm();
181	else
182		rate = get_rate_ppll();
183
184	switch (clk->id) {
185	default:
186	case 0:
187		div = pdr3 & 0x3f;
188		break;
189	case 1:
190		div = (pdr3 >> 8) & 0x3f;
191		break;
192	case 2:
193		div = (pdr3 >> 16) & 0x3f;
194		break;
195	}
196
197	return rate / (div + 1);
198}
199
/*
 * MSHC rate: ARM or peripheral PLL source (PDR1 bit 7), divided by two
 * cascaded fields from PDR1 — 3 bits at [31:29] and 6 bits at [27:22],
 * each plus one.
 */
200static unsigned long get_rate_mshc(struct clk *clk)
201{
202	unsigned long pdr1 = __raw_readl(MXC_CCM_PDR1);
203	unsigned long div1, div2, rate;
204
205	if (pdr1 & (1 << 7))
206		rate = get_rate_arm();
207	else
208		rate = get_rate_ppll();
209
210	div1 = (pdr1 >> 29) & 0x7;
211	div2 = (pdr1 >> 22) & 0x3f;
212
213	return rate / ((div1 + 1) * (div2 + 1));
214}
215
/*
 * SSI rate for port clk->id (0 or 1): ARM or peripheral PLL source
 * (PDR2 bit 6) divided by a per-port 6-bit and 3-bit divider pair from
 * PDR2, each plus one.  Unknown ids fall through to port 0.
 */
216static unsigned long get_rate_ssi(struct clk *clk)
217{
218	unsigned long pdr2 = __raw_readl(MX35_CCM_PDR2);
219	unsigned long div1, div2, rate;
220
221	if (pdr2 & (1 << 6))
222		rate = get_rate_arm();
223	else
224		rate = get_rate_ppll();
225
226	switch (clk->id) {
227	default:
228	case 0:
229		div1 = pdr2 & 0x3f;
230		div2 = (pdr2 >> 24) & 0x7;
231		break;
232	case 1:
233		div1 = (pdr2 >> 8) & 0x3f;
234		div2 = (pdr2 >> 27) & 0x7;
235		break;
236	}
237
238	return rate / ((div1 + 1) * (div2 + 1));
239}
240
/* CSI rate: ARM or peripheral PLL (PDR2 bit 7) over PDR2[21:16] + 1. */
241static unsigned long get_rate_csi(struct clk *clk)
242{
243	unsigned long pdr2 = __raw_readl(MX35_CCM_PDR2);
244	unsigned long rate;
245
246	if (pdr2 & (1 << 7))
247		rate = get_rate_arm();
248	else
249		rate = get_rate_ppll();
250
251	return rate / (((pdr2 >> 16) & 0x3f) + 1);
252}
253
/* USB OTG rate: ARM or peripheral PLL (PDR4 bit 9) over PDR4[27:22] + 1. */
254static unsigned long get_rate_otg(struct clk *clk)
255{
256	unsigned long pdr4 = __raw_readl(MX35_CCM_PDR4);
257	unsigned long rate;
258
259	if (pdr4 & (1 << 9))
260		rate = get_rate_arm();
261	else
262		rate = get_rate_ppll();
263
264	return rate / (((pdr4 >> 22) & 0x3f) + 1);
265}
266
/*
 * IPG_PER rate: PDR0 bit 26 selects the source — ARM clock divided by
 * PDR4[21:16] + 1, or AHB clock divided by PDR0[14:12] + 1.
 */
267static unsigned long get_rate_ipg_per(struct clk *clk)
268{
269	unsigned long pdr0 = __raw_readl(MXC_CCM_PDR0);
270	unsigned long pdr4 = __raw_readl(MX35_CCM_PDR4);
271	unsigned long div;
272
273	if (pdr0 & (1 << 26)) {
274		div = (pdr4 >> 16) & 0x3f;
275		return get_rate_arm() / (div + 1);
276	} else {
277		div = (pdr0 >> 12) & 0x7;
278		return get_rate_ahb(NULL) / (div + 1);
279	}
280}
281
/*
 * HSP (IPU) rate: a fixed fraction of the MPLL chosen by the 2-bit
 * hsp_podf field at PDR0[21:20]; the divider map differs depending on
 * whether the MPLL runs above 400 MHz.  Returns 0 for field values with
 * no mapping (hsp_podf == 3, and == 3 in the low-frequency map) —
 * NOTE(review): presumably reserved encodings, confirm in the manual.
 */
284static unsigned long get_rate_hsp(struct clk *clk)
285{
286	unsigned long hsp_podf = (__raw_readl(MXC_CCM_PDR0) >> 20) & 0x03;
287	unsigned long fref = get_rate_mpll();
288
289	if (fref > 400 * 1000 * 1000) {
290		switch (hsp_podf) {
291		case 0:
292			return fref >> 2;
293		case 1:
294			return fref >> 3;
295		case 2:
296			return fref / 3;
297		}
298	} else {
299		switch (hsp_podf) {
300		case 0:
301		case 2:
302			return fref / 3;
303		case 1:
304			return fref / 6;
305		}
306	}
307
308	return 0;
309}
308
/*
 * Ungate a clock by setting both bits of its 2-bit CGR field
 * (value 3 at enable_shift).  Always returns 0.
 */
309static int clk_cgr_enable(struct clk *clk)
310{
311	u32 reg;
312
313	reg = __raw_readl(clk->enable_reg);
314	reg |= 3 << clk->enable_shift;
315	__raw_writel(reg, clk->enable_reg);
316
317	return 0;
318}
319
/* Gate a clock by clearing its 2-bit CGR field. */
320static void clk_cgr_disable(struct clk *clk)
321{
322	u32 reg;
323
324	reg = __raw_readl(clk->enable_reg);
325	reg &= ~(3 << clk->enable_shift);
326	__raw_writel(reg, clk->enable_reg);
327}
328
/*
 * Define a static struct clk gated via a CGR register field:
 * @i = clk id, @er/@es = gate register and bit shift, @gr/@sr = optional
 * get_rate/set_rate callbacks.  All clocks share clk_cgr_enable/disable.
 */
329#define DEFINE_CLOCK(name, i, er, es, gr, sr)	\
330	static struct clk name = {		\
331		.id		= i,		\
332		.enable_reg	= er,		\
333		.enable_shift	= es,		\
334		.get_rate	= gr,		\
335		.set_rate	= sr,		\
336		.enable		= clk_cgr_enable, \
337		.disable	= clk_cgr_disable, \
338	}
339
340DEFINE_CLOCK(asrc_clk, 0, MX35_CCM_CGR0, 0, NULL, NULL);
341DEFINE_CLOCK(pata_clk, 0, MX35_CCM_CGR0, 2, get_rate_ipg, NULL);
342/* DEFINE_CLOCK(audmux_clk, 0, MX35_CCM_CGR0, 4, NULL, NULL); */
343DEFINE_CLOCK(can1_clk, 0, MX35_CCM_CGR0, 6, get_rate_ipg, NULL);
344DEFINE_CLOCK(can2_clk, 1, MX35_CCM_CGR0, 8, get_rate_ipg, NULL);
345DEFINE_CLOCK(cspi1_clk, 0, MX35_CCM_CGR0, 10, get_rate_ipg, NULL);
346DEFINE_CLOCK(cspi2_clk, 1, MX35_CCM_CGR0, 12, get_rate_ipg, NULL);
347DEFINE_CLOCK(ect_clk, 0, MX35_CCM_CGR0, 14, get_rate_ipg, NULL);
348DEFINE_CLOCK(edio_clk, 0, MX35_CCM_CGR0, 16, NULL, NULL);
349DEFINE_CLOCK(emi_clk, 0, MX35_CCM_CGR0, 18, get_rate_ipg, NULL);
350DEFINE_CLOCK(epit1_clk, 0, MX35_CCM_CGR0, 20, get_rate_ipg, NULL);
351DEFINE_CLOCK(epit2_clk, 1, MX35_CCM_CGR0, 22, get_rate_ipg, NULL);
352DEFINE_CLOCK(esai_clk, 0, MX35_CCM_CGR0, 24, NULL, NULL);
353DEFINE_CLOCK(esdhc1_clk, 0, MX35_CCM_CGR0, 26, get_rate_sdhc, NULL);
354DEFINE_CLOCK(esdhc2_clk, 1, MX35_CCM_CGR0, 28, get_rate_sdhc, NULL);
355DEFINE_CLOCK(esdhc3_clk, 2, MX35_CCM_CGR0, 30, get_rate_sdhc, NULL);
356
357DEFINE_CLOCK(fec_clk, 0, MX35_CCM_CGR1, 0, get_rate_ipg, NULL);
358DEFINE_CLOCK(gpio1_clk, 0, MX35_CCM_CGR1, 2, NULL, NULL);
359DEFINE_CLOCK(gpio2_clk, 1, MX35_CCM_CGR1, 4, NULL, NULL);
360DEFINE_CLOCK(gpio3_clk, 2, MX35_CCM_CGR1, 6, NULL, NULL);
361DEFINE_CLOCK(gpt_clk, 0, MX35_CCM_CGR1, 8, get_rate_ipg, NULL);
362DEFINE_CLOCK(i2c1_clk, 0, MX35_CCM_CGR1, 10, get_rate_ipg_per, NULL);
363DEFINE_CLOCK(i2c2_clk, 1, MX35_CCM_CGR1, 12, get_rate_ipg_per, NULL);
364DEFINE_CLOCK(i2c3_clk, 2, MX35_CCM_CGR1, 14, get_rate_ipg_per, NULL);
365DEFINE_CLOCK(iomuxc_clk, 0, MX35_CCM_CGR1, 16, NULL, NULL);
366DEFINE_CLOCK(ipu_clk, 0, MX35_CCM_CGR1, 18, get_rate_hsp, NULL);
367DEFINE_CLOCK(kpp_clk, 0, MX35_CCM_CGR1, 20, get_rate_ipg, NULL);
368DEFINE_CLOCK(mlb_clk, 0, MX35_CCM_CGR1, 22, get_rate_ahb, NULL);
369DEFINE_CLOCK(mshc_clk, 0, MX35_CCM_CGR1, 24, get_rate_mshc, NULL);
370DEFINE_CLOCK(owire_clk, 0, MX35_CCM_CGR1, 26, get_rate_ipg_per, NULL);
371DEFINE_CLOCK(pwm_clk, 0, MX35_CCM_CGR1, 28, get_rate_ipg_per, NULL);
372DEFINE_CLOCK(rngc_clk, 0, MX35_CCM_CGR1, 30, get_rate_ipg, NULL);
373
374DEFINE_CLOCK(rtc_clk, 0, MX35_CCM_CGR2, 0, get_rate_ipg, NULL);
375DEFINE_CLOCK(rtic_clk, 0, MX35_CCM_CGR2, 2, get_rate_ahb, NULL);
376DEFINE_CLOCK(scc_clk, 0, MX35_CCM_CGR2, 4, get_rate_ipg, NULL);
377DEFINE_CLOCK(sdma_clk, 0, MX35_CCM_CGR2, 6, NULL, NULL);
378DEFINE_CLOCK(spba_clk, 0, MX35_CCM_CGR2, 8, get_rate_ipg, NULL);
379DEFINE_CLOCK(spdif_clk, 0, MX35_CCM_CGR2, 10, NULL, NULL);
380DEFINE_CLOCK(ssi1_clk, 0, MX35_CCM_CGR2, 12, get_rate_ssi, NULL);
381DEFINE_CLOCK(ssi2_clk, 1, MX35_CCM_CGR2, 14, get_rate_ssi, NULL);
382DEFINE_CLOCK(uart1_clk, 0, MX35_CCM_CGR2, 16, get_rate_uart, NULL);
383DEFINE_CLOCK(uart2_clk, 1, MX35_CCM_CGR2, 18, get_rate_uart, NULL);
384DEFINE_CLOCK(uart3_clk, 2, MX35_CCM_CGR2, 20, get_rate_uart, NULL);
385DEFINE_CLOCK(usbotg_clk, 0, MX35_CCM_CGR2, 22, get_rate_otg, NULL);
386DEFINE_CLOCK(wdog_clk, 0, MX35_CCM_CGR2, 24, NULL, NULL);
387DEFINE_CLOCK(max_clk, 0, MX35_CCM_CGR2, 26, NULL, NULL);
388DEFINE_CLOCK(audmux_clk, 0, MX35_CCM_CGR2, 30, NULL, NULL);
389
390DEFINE_CLOCK(csi_clk, 0, MX35_CCM_CGR3, 0, get_rate_csi, NULL);
391DEFINE_CLOCK(iim_clk, 0, MX35_CCM_CGR3, 2, NULL, NULL);
392DEFINE_CLOCK(gpu2d_clk, 0, MX35_CCM_CGR3, 4, NULL, NULL);
393
394DEFINE_CLOCK(usbahb_clk, 0, 0, 0, get_rate_ahb, NULL);
395
/* No-op enable for clocks without a software gate (e.g. NFC). */
396static int clk_dummy_enable(struct clk *clk)
397{
398	return 0;
399}
400
/* No-op disable counterpart to clk_dummy_enable(). */
401static void clk_dummy_disable(struct clk *clk)
402{
403}
404
/* NFC (NAND) rate: AHB clock divided by PDR4[31:28] + 1. */
405static unsigned long get_rate_nfc(struct clk *clk)
406{
407	unsigned long div1;
408
409	div1 = (__raw_readl(MX35_CCM_PDR4) >> 28) + 1;
410
411	return get_rate_ahb(NULL) / div1;
412}
413
414/* NAND Controller: It seems it can't be disabled */
415static struct clk nfc_clk = {
416 .id = 0,
417 .enable_reg = 0,
418 .enable_shift = 0,
419 .get_rate = get_rate_nfc,
420 .set_rate = NULL, /* set_rate_nfc, */
421 .enable = clk_dummy_enable,
422 .disable = clk_dummy_disable
423};
424
/*
 * Expand to one struct clk_lookup initialiser; the trailing comma lets
 * table entries be listed back-to-back without separators.
 */
425#define _REGISTER_CLOCK(d, n, c)	\
426	{				\
427		.dev_id = d,		\
428		.con_id = n,		\
429		.clk = &c,		\
430	},
431
432static struct clk_lookup lookups[] = {
433 _REGISTER_CLOCK(NULL, "asrc", asrc_clk)
434 _REGISTER_CLOCK("pata_imx", NULL, pata_clk)
435 _REGISTER_CLOCK("flexcan.0", NULL, can1_clk)
436 _REGISTER_CLOCK("flexcan.1", NULL, can2_clk)
437 _REGISTER_CLOCK("imx35-cspi.0", NULL, cspi1_clk)
438 _REGISTER_CLOCK("imx35-cspi.1", NULL, cspi2_clk)
439 _REGISTER_CLOCK(NULL, "ect", ect_clk)
440 _REGISTER_CLOCK(NULL, "edio", edio_clk)
441 _REGISTER_CLOCK(NULL, "emi", emi_clk)
442 _REGISTER_CLOCK("imx-epit.0", NULL, epit1_clk)
443 _REGISTER_CLOCK("imx-epit.1", NULL, epit2_clk)
444 _REGISTER_CLOCK(NULL, "esai", esai_clk)
445 _REGISTER_CLOCK("sdhci-esdhc-imx35.0", NULL, esdhc1_clk)
446 _REGISTER_CLOCK("sdhci-esdhc-imx35.1", NULL, esdhc2_clk)
447 _REGISTER_CLOCK("sdhci-esdhc-imx35.2", NULL, esdhc3_clk)
448 /* i.mx35 has the i.mx27 type fec */
449 _REGISTER_CLOCK("imx27-fec.0", NULL, fec_clk)
450 _REGISTER_CLOCK(NULL, "gpio", gpio1_clk)
451 _REGISTER_CLOCK(NULL, "gpio", gpio2_clk)
452 _REGISTER_CLOCK(NULL, "gpio", gpio3_clk)
453 _REGISTER_CLOCK("gpt.0", NULL, gpt_clk)
454 _REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
455 _REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
456 _REGISTER_CLOCK("imx-i2c.2", NULL, i2c3_clk)
457 _REGISTER_CLOCK(NULL, "iomuxc", iomuxc_clk)
458 _REGISTER_CLOCK("ipu-core", NULL, ipu_clk)
459 _REGISTER_CLOCK("mx3_sdc_fb", NULL, ipu_clk)
460 _REGISTER_CLOCK(NULL, "kpp", kpp_clk)
461 _REGISTER_CLOCK(NULL, "mlb", mlb_clk)
462 _REGISTER_CLOCK(NULL, "mshc", mshc_clk)
463 _REGISTER_CLOCK("mxc_w1", NULL, owire_clk)
464 _REGISTER_CLOCK(NULL, "pwm", pwm_clk)
465 _REGISTER_CLOCK(NULL, "rngc", rngc_clk)
466 _REGISTER_CLOCK(NULL, "rtc", rtc_clk)
467 _REGISTER_CLOCK(NULL, "rtic", rtic_clk)
468 _REGISTER_CLOCK(NULL, "scc", scc_clk)
469 _REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
470 _REGISTER_CLOCK(NULL, "spba", spba_clk)
471 _REGISTER_CLOCK(NULL, "spdif", spdif_clk)
472 _REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
473 _REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
474 /* i.mx35 has the i.mx21 type uart */
475 _REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
476 _REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
477 _REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
478 _REGISTER_CLOCK("mxc-ehci.0", "usb", usbotg_clk)
479 _REGISTER_CLOCK("mxc-ehci.1", "usb", usbotg_clk)
480 _REGISTER_CLOCK("mxc-ehci.2", "usb", usbotg_clk)
481 _REGISTER_CLOCK("fsl-usb2-udc", "usb", usbotg_clk)
482 _REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", usbahb_clk)
483 _REGISTER_CLOCK("imx2-wdt.0", NULL, wdog_clk)
484 _REGISTER_CLOCK(NULL, "max", max_clk)
485 _REGISTER_CLOCK(NULL, "audmux", audmux_clk)
486 _REGISTER_CLOCK("mx3-camera.0", NULL, csi_clk)
487 _REGISTER_CLOCK(NULL, "iim", iim_clk)
488 _REGISTER_CLOCK(NULL, "gpu2d", gpu2d_clk)
489 _REGISTER_CLOCK("mxc_nand.0", NULL, nfc_clk)
490};
491
/*
 * mx35_clocks_init - register the i.MX35 clkdev table and bring the clock
 * tree into a known state.
 *
 * Gates every clock except those needed to boot — EMI (CGR0 bit 18),
 * GPIO1-3/GPT/IOMUXC (CGR1), MAX (CGR2 bits 26/27) and, with
 * CONFIG_DEBUG_LL, UART1 (CGR2 bits 16/17) — prints the silicon revision
 * (IIM must be clocked while the fuses are read), keeps the UART1/SCC/IIM
 * clocks running when the chip booted internally so the boot ROM survives
 * a watchdog reset, and starts the system timer (EPIT1 or GPT).
 * Always returns 0.
 */
492int __init mx35_clocks_init(void)
493{
494	unsigned int cgr2 = 3 << 26;
495
496#if defined(CONFIG_DEBUG_LL) && !defined(CONFIG_DEBUG_ICEDCC)
	/* keep UART1 ungated so low-level debug output keeps working */
497	cgr2 |= 3 << 16;
498#endif
499
500	clkdev_add_table(lookups, ARRAY_SIZE(lookups));
501
502	/* Turn off all clocks except the ones we need to survive, namely:
503	 * EMI, GPIO1/2/3, GPT, IOMUX, MAX and eventually uart
504	 */
505	__raw_writel((3 << 18), MX35_CCM_CGR0);
506	__raw_writel((3 << 2) | (3 << 4) | (3 << 6) | (3 << 8) | (3 << 16),
507			MX35_CCM_CGR1);
508	__raw_writel(cgr2, MX35_CCM_CGR2);
509	__raw_writel(0, MX35_CCM_CGR3);
510
511	clk_enable(&iim_clk);
512	imx_print_silicon_rev("i.MX35", mx35_revision());
513	clk_disable(&iim_clk);
514
515	/*
516	 * Check if we came up in internal boot mode. If yes, we need some
517	 * extra clocks turned on, otherwise the MX35 boot ROM code will
518	 * hang after a watchdog reset.
519	 */
520	if (!(__raw_readl(MX35_CCM_RCSR) & (3 << 10))) {
521		/* Additionally turn on UART1, SCC, and IIM clocks */
522		clk_enable(&iim_clk);
523		clk_enable(&uart1_clk);
524		clk_enable(&scc_clk);
525	}
526
527#ifdef CONFIG_MXC_USE_EPIT
528	epit_timer_init(&epit1_clk,
529			MX35_IO_ADDRESS(MX35_EPIT1_BASE_ADDR), MX35_INT_EPIT1);
530#else
531	mxc_timer_init(&gpt_clk,
532			MX35_IO_ADDRESS(MX35_GPT1_BASE_ADDR), MX35_INT_GPT);
533#endif
534
535	return 0;
536}
diff --git a/arch/arm/mach-imx/clock-imx6q.c b/arch/arm/mach-imx/clock-imx6q.c
deleted file mode 100644
index 111c328f5420..000000000000
--- a/arch/arm/mach-imx/clock-imx6q.c
+++ /dev/null
@@ -1,2111 +0,0 @@
1/*
2 * Copyright 2011 Freescale Semiconductor, Inc.
3 * Copyright 2011 Linaro Ltd.
4 *
5 * The code contained herein is licensed under the GNU General Public
6 * License. You may obtain a copy of the GNU General Public License
7 * Version 2 or later at the following locations:
8 *
9 * http://www.opensource.org/licenses/gpl-license.html
10 * http://www.gnu.org/copyleft/gpl.html
11 */
12
13#include <linux/init.h>
14#include <linux/types.h>
15#include <linux/clk.h>
16#include <linux/clkdev.h>
17#include <linux/io.h>
18#include <linux/of.h>
19#include <linux/of_address.h>
20#include <linux/of_irq.h>
21#include <asm/div64.h>
22#include <asm/mach/map.h>
23#include <mach/clock.h>
24#include <mach/common.h>
25#include <mach/hardware.h>
26
27#define PLL_BASE IMX_IO_ADDRESS(MX6Q_ANATOP_BASE_ADDR)
28#define PLL1_SYS (PLL_BASE + 0x000)
29#define PLL2_BUS (PLL_BASE + 0x030)
30#define PLL3_USB_OTG (PLL_BASE + 0x010)
31#define PLL4_AUDIO (PLL_BASE + 0x070)
32#define PLL5_VIDEO (PLL_BASE + 0x0a0)
33#define PLL6_MLB (PLL_BASE + 0x0d0)
34#define PLL7_USB_HOST (PLL_BASE + 0x020)
35#define PLL8_ENET (PLL_BASE + 0x0e0)
36#define PFD_480 (PLL_BASE + 0x0f0)
37#define PFD_528 (PLL_BASE + 0x100)
38#define PLL_NUM_OFFSET 0x010
39#define PLL_DENOM_OFFSET 0x020
40
41#define PFD0 7
42#define PFD1 15
43#define PFD2 23
44#define PFD3 31
45#define PFD_FRAC_MASK 0x3f
46
47#define BM_PLL_BYPASS (0x1 << 16)
48#define BM_PLL_ENABLE (0x1 << 13)
49#define BM_PLL_POWER_DOWN (0x1 << 12)
50#define BM_PLL_LOCK (0x1 << 31)
51#define BP_PLL_SYS_DIV_SELECT 0
52#define BM_PLL_SYS_DIV_SELECT (0x7f << 0)
53#define BP_PLL_BUS_DIV_SELECT 0
54#define BM_PLL_BUS_DIV_SELECT (0x1 << 0)
55#define BP_PLL_USB_DIV_SELECT 0
56#define BM_PLL_USB_DIV_SELECT (0x3 << 0)
57#define BP_PLL_AV_DIV_SELECT 0
58#define BM_PLL_AV_DIV_SELECT (0x7f << 0)
59#define BP_PLL_ENET_DIV_SELECT 0
60#define BM_PLL_ENET_DIV_SELECT (0x3 << 0)
61#define BM_PLL_ENET_EN_PCIE (0x1 << 19)
62#define BM_PLL_ENET_EN_SATA (0x1 << 20)
63
64#define CCM_BASE IMX_IO_ADDRESS(MX6Q_CCM_BASE_ADDR)
65#define CCR (CCM_BASE + 0x00)
66#define CCDR (CCM_BASE + 0x04)
67#define CSR (CCM_BASE + 0x08)
68#define CCSR (CCM_BASE + 0x0c)
69#define CACRR (CCM_BASE + 0x10)
70#define CBCDR (CCM_BASE + 0x14)
71#define CBCMR (CCM_BASE + 0x18)
72#define CSCMR1 (CCM_BASE + 0x1c)
73#define CSCMR2 (CCM_BASE + 0x20)
74#define CSCDR1 (CCM_BASE + 0x24)
75#define CS1CDR (CCM_BASE + 0x28)
76#define CS2CDR (CCM_BASE + 0x2c)
77#define CDCDR (CCM_BASE + 0x30)
78#define CHSCCDR (CCM_BASE + 0x34)
79#define CSCDR2 (CCM_BASE + 0x38)
80#define CSCDR3 (CCM_BASE + 0x3c)
81#define CSCDR4 (CCM_BASE + 0x40)
82#define CWDR (CCM_BASE + 0x44)
83#define CDHIPR (CCM_BASE + 0x48)
84#define CDCR (CCM_BASE + 0x4c)
85#define CTOR (CCM_BASE + 0x50)
86#define CLPCR (CCM_BASE + 0x54)
87#define CISR (CCM_BASE + 0x58)
88#define CIMR (CCM_BASE + 0x5c)
89#define CCOSR (CCM_BASE + 0x60)
90#define CGPR (CCM_BASE + 0x64)
91#define CCGR0 (CCM_BASE + 0x68)
92#define CCGR1 (CCM_BASE + 0x6c)
93#define CCGR2 (CCM_BASE + 0x70)
94#define CCGR3 (CCM_BASE + 0x74)
95#define CCGR4 (CCM_BASE + 0x78)
96#define CCGR5 (CCM_BASE + 0x7c)
97#define CCGR6 (CCM_BASE + 0x80)
98#define CCGR7 (CCM_BASE + 0x84)
99#define CMEOR (CCM_BASE + 0x88)
100
101#define CG0 0
102#define CG1 2
103#define CG2 4
104#define CG3 6
105#define CG4 8
106#define CG5 10
107#define CG6 12
108#define CG7 14
109#define CG8 16
110#define CG9 18
111#define CG10 20
112#define CG11 22
113#define CG12 24
114#define CG13 26
115#define CG14 28
116#define CG15 30
117
118#define BM_CCSR_PLL1_SW_SEL (0x1 << 2)
119#define BM_CCSR_STEP_SEL (0x1 << 8)
120
121#define BP_CACRR_ARM_PODF 0
122#define BM_CACRR_ARM_PODF (0x7 << 0)
123
124#define BP_CBCDR_PERIPH2_CLK2_PODF 0
125#define BM_CBCDR_PERIPH2_CLK2_PODF (0x7 << 0)
126#define BP_CBCDR_MMDC_CH1_AXI_PODF 3
127#define BM_CBCDR_MMDC_CH1_AXI_PODF (0x7 << 3)
128#define BP_CBCDR_AXI_SEL 6
129#define BM_CBCDR_AXI_SEL (0x3 << 6)
130#define BP_CBCDR_IPG_PODF 8
131#define BM_CBCDR_IPG_PODF (0x3 << 8)
132#define BP_CBCDR_AHB_PODF 10
133#define BM_CBCDR_AHB_PODF (0x7 << 10)
134#define BP_CBCDR_AXI_PODF 16
135#define BM_CBCDR_AXI_PODF (0x7 << 16)
136#define BP_CBCDR_MMDC_CH0_AXI_PODF 19
137#define BM_CBCDR_MMDC_CH0_AXI_PODF (0x7 << 19)
138#define BP_CBCDR_PERIPH_CLK_SEL 25
139#define BM_CBCDR_PERIPH_CLK_SEL (0x1 << 25)
140#define BP_CBCDR_PERIPH2_CLK_SEL 26
141#define BM_CBCDR_PERIPH2_CLK_SEL (0x1 << 26)
142#define BP_CBCDR_PERIPH_CLK2_PODF 27
143#define BM_CBCDR_PERIPH_CLK2_PODF (0x7 << 27)
144
145#define BP_CBCMR_GPU2D_AXI_SEL 0
146#define BM_CBCMR_GPU2D_AXI_SEL (0x1 << 0)
147#define BP_CBCMR_GPU3D_AXI_SEL 1
148#define BM_CBCMR_GPU3D_AXI_SEL (0x1 << 1)
149#define BP_CBCMR_GPU3D_CORE_SEL 4
150#define BM_CBCMR_GPU3D_CORE_SEL (0x3 << 4)
151#define BP_CBCMR_GPU3D_SHADER_SEL 8
152#define BM_CBCMR_GPU3D_SHADER_SEL (0x3 << 8)
153#define BP_CBCMR_PCIE_AXI_SEL 10
154#define BM_CBCMR_PCIE_AXI_SEL (0x1 << 10)
155#define BP_CBCMR_VDO_AXI_SEL 11
156#define BM_CBCMR_VDO_AXI_SEL (0x1 << 11)
157#define BP_CBCMR_PERIPH_CLK2_SEL 12
158#define BM_CBCMR_PERIPH_CLK2_SEL (0x3 << 12)
159#define BP_CBCMR_VPU_AXI_SEL 14
160#define BM_CBCMR_VPU_AXI_SEL (0x3 << 14)
161#define BP_CBCMR_GPU2D_CORE_SEL 16
162#define BM_CBCMR_GPU2D_CORE_SEL (0x3 << 16)
163#define BP_CBCMR_PRE_PERIPH_CLK_SEL 18
164#define BM_CBCMR_PRE_PERIPH_CLK_SEL (0x3 << 18)
165#define BP_CBCMR_PERIPH2_CLK2_SEL 20
166#define BM_CBCMR_PERIPH2_CLK2_SEL (0x1 << 20)
167#define BP_CBCMR_PRE_PERIPH2_CLK_SEL 21
168#define BM_CBCMR_PRE_PERIPH2_CLK_SEL (0x3 << 21)
169#define BP_CBCMR_GPU2D_CORE_PODF 23
170#define BM_CBCMR_GPU2D_CORE_PODF (0x7 << 23)
171#define BP_CBCMR_GPU3D_CORE_PODF 26
172#define BM_CBCMR_GPU3D_CORE_PODF (0x7 << 26)
173#define BP_CBCMR_GPU3D_SHADER_PODF 29
174#define BM_CBCMR_GPU3D_SHADER_PODF (0x7 << 29)
175
176#define BP_CSCMR1_PERCLK_PODF 0
177#define BM_CSCMR1_PERCLK_PODF (0x3f << 0)
178#define BP_CSCMR1_SSI1_SEL 10
179#define BM_CSCMR1_SSI1_SEL (0x3 << 10)
180#define BP_CSCMR1_SSI2_SEL 12
181#define BM_CSCMR1_SSI2_SEL (0x3 << 12)
182#define BP_CSCMR1_SSI3_SEL 14
183#define BM_CSCMR1_SSI3_SEL (0x3 << 14)
184#define BP_CSCMR1_USDHC1_SEL 16
185#define BM_CSCMR1_USDHC1_SEL (0x1 << 16)
186#define BP_CSCMR1_USDHC2_SEL 17
187#define BM_CSCMR1_USDHC2_SEL (0x1 << 17)
188#define BP_CSCMR1_USDHC3_SEL 18
189#define BM_CSCMR1_USDHC3_SEL (0x1 << 18)
190#define BP_CSCMR1_USDHC4_SEL 19
191#define BM_CSCMR1_USDHC4_SEL (0x1 << 19)
192#define BP_CSCMR1_EMI_PODF 20
193#define BM_CSCMR1_EMI_PODF (0x7 << 20)
194#define BP_CSCMR1_EMI_SLOW_PODF 23
195#define BM_CSCMR1_EMI_SLOW_PODF (0x7 << 23)
196#define BP_CSCMR1_EMI_SEL 27
197#define BM_CSCMR1_EMI_SEL (0x3 << 27)
198#define BP_CSCMR1_EMI_SLOW_SEL 29
199#define BM_CSCMR1_EMI_SLOW_SEL (0x3 << 29)
200
201#define BP_CSCMR2_CAN_PODF 2
202#define BM_CSCMR2_CAN_PODF (0x3f << 2)
203#define BM_CSCMR2_LDB_DI0_IPU_DIV (0x1 << 10)
204#define BM_CSCMR2_LDB_DI1_IPU_DIV (0x1 << 11)
205#define BP_CSCMR2_ESAI_SEL 19
206#define BM_CSCMR2_ESAI_SEL (0x3 << 19)
207
208#define BP_CSCDR1_UART_PODF 0
209#define BM_CSCDR1_UART_PODF (0x3f << 0)
210#define BP_CSCDR1_USDHC1_PODF 11
211#define BM_CSCDR1_USDHC1_PODF (0x7 << 11)
212#define BP_CSCDR1_USDHC2_PODF 16
213#define BM_CSCDR1_USDHC2_PODF (0x7 << 16)
214#define BP_CSCDR1_USDHC3_PODF 19
215#define BM_CSCDR1_USDHC3_PODF (0x7 << 19)
216#define BP_CSCDR1_USDHC4_PODF 22
217#define BM_CSCDR1_USDHC4_PODF (0x7 << 22)
218#define BP_CSCDR1_VPU_AXI_PODF 25
219#define BM_CSCDR1_VPU_AXI_PODF (0x7 << 25)
220
221#define BP_CS1CDR_SSI1_PODF 0
222#define BM_CS1CDR_SSI1_PODF (0x3f << 0)
223#define BP_CS1CDR_SSI1_PRED 6
224#define BM_CS1CDR_SSI1_PRED (0x7 << 6)
225#define BP_CS1CDR_ESAI_PRED 9
226#define BM_CS1CDR_ESAI_PRED (0x7 << 9)
227#define BP_CS1CDR_SSI3_PODF 16
228#define BM_CS1CDR_SSI3_PODF (0x3f << 16)
229#define BP_CS1CDR_SSI3_PRED 22
230#define BM_CS1CDR_SSI3_PRED (0x7 << 22)
231#define BP_CS1CDR_ESAI_PODF 25
232#define BM_CS1CDR_ESAI_PODF (0x7 << 25)
233
234#define BP_CS2CDR_SSI2_PODF 0
235#define BM_CS2CDR_SSI2_PODF (0x3f << 0)
236#define BP_CS2CDR_SSI2_PRED 6
237#define BM_CS2CDR_SSI2_PRED (0x7 << 6)
238#define BP_CS2CDR_LDB_DI0_SEL 9
239#define BM_CS2CDR_LDB_DI0_SEL (0x7 << 9)
240#define BP_CS2CDR_LDB_DI1_SEL 12
241#define BM_CS2CDR_LDB_DI1_SEL (0x7 << 12)
242#define BP_CS2CDR_ENFC_SEL 16
243#define BM_CS2CDR_ENFC_SEL (0x3 << 16)
244#define BP_CS2CDR_ENFC_PRED 18
245#define BM_CS2CDR_ENFC_PRED (0x7 << 18)
246#define BP_CS2CDR_ENFC_PODF 21
247#define BM_CS2CDR_ENFC_PODF (0x3f << 21)
248
249#define BP_CDCDR_ASRC_SERIAL_SEL 7
250#define BM_CDCDR_ASRC_SERIAL_SEL (0x3 << 7)
251#define BP_CDCDR_ASRC_SERIAL_PODF 9
252#define BM_CDCDR_ASRC_SERIAL_PODF (0x7 << 9)
253#define BP_CDCDR_ASRC_SERIAL_PRED 12
254#define BM_CDCDR_ASRC_SERIAL_PRED (0x7 << 12)
255#define BP_CDCDR_SPDIF_SEL 20
256#define BM_CDCDR_SPDIF_SEL (0x3 << 20)
257#define BP_CDCDR_SPDIF_PODF 22
258#define BM_CDCDR_SPDIF_PODF (0x7 << 22)
259#define BP_CDCDR_SPDIF_PRED 25
260#define BM_CDCDR_SPDIF_PRED (0x7 << 25)
261#define BP_CDCDR_HSI_TX_PODF 29
262#define BM_CDCDR_HSI_TX_PODF (0x7 << 29)
263#define BP_CDCDR_HSI_TX_SEL 28
264#define BM_CDCDR_HSI_TX_SEL (0x1 << 28)
265
266#define BP_CHSCCDR_IPU1_DI0_SEL 0
267#define BM_CHSCCDR_IPU1_DI0_SEL (0x7 << 0)
268#define BP_CHSCCDR_IPU1_DI0_PRE_PODF 3
269#define BM_CHSCCDR_IPU1_DI0_PRE_PODF (0x7 << 3)
270#define BP_CHSCCDR_IPU1_DI0_PRE_SEL 6
271#define BM_CHSCCDR_IPU1_DI0_PRE_SEL (0x7 << 6)
272#define BP_CHSCCDR_IPU1_DI1_SEL 9
273#define BM_CHSCCDR_IPU1_DI1_SEL (0x7 << 9)
274#define BP_CHSCCDR_IPU1_DI1_PRE_PODF 12
275#define BM_CHSCCDR_IPU1_DI1_PRE_PODF (0x7 << 12)
276#define BP_CHSCCDR_IPU1_DI1_PRE_SEL 15
277#define BM_CHSCCDR_IPU1_DI1_PRE_SEL (0x7 << 15)
278
279#define BP_CSCDR2_IPU2_DI0_SEL 0
280#define BM_CSCDR2_IPU2_DI0_SEL (0x7)
281#define BP_CSCDR2_IPU2_DI0_PRE_PODF 3
282#define BM_CSCDR2_IPU2_DI0_PRE_PODF (0x7 << 3)
283#define BP_CSCDR2_IPU2_DI0_PRE_SEL 6
284#define BM_CSCDR2_IPU2_DI0_PRE_SEL (0x7 << 6)
285#define BP_CSCDR2_IPU2_DI1_SEL 9
286#define BM_CSCDR2_IPU2_DI1_SEL (0x7 << 9)
287#define BP_CSCDR2_IPU2_DI1_PRE_PODF 12
288#define BM_CSCDR2_IPU2_DI1_PRE_PODF (0x7 << 12)
289#define BP_CSCDR2_IPU2_DI1_PRE_SEL 15
290#define BM_CSCDR2_IPU2_DI1_PRE_SEL (0x7 << 15)
291#define BP_CSCDR2_ECSPI_CLK_PODF 19
292#define BM_CSCDR2_ECSPI_CLK_PODF (0x3f << 19)
293
294#define BP_CSCDR3_IPU1_HSP_SEL 9
295#define BM_CSCDR3_IPU1_HSP_SEL (0x3 << 9)
296#define BP_CSCDR3_IPU1_HSP_PODF 11
297#define BM_CSCDR3_IPU1_HSP_PODF (0x7 << 11)
298#define BP_CSCDR3_IPU2_HSP_SEL 14
299#define BM_CSCDR3_IPU2_HSP_SEL (0x3 << 14)
300#define BP_CSCDR3_IPU2_HSP_PODF 16
301#define BM_CSCDR3_IPU2_HSP_PODF (0x7 << 16)
302
303#define BM_CDHIPR_AXI_PODF_BUSY (0x1 << 0)
304#define BM_CDHIPR_AHB_PODF_BUSY (0x1 << 1)
305#define BM_CDHIPR_MMDC_CH1_PODF_BUSY (0x1 << 2)
306#define BM_CDHIPR_PERIPH2_SEL_BUSY (0x1 << 3)
307#define BM_CDHIPR_MMDC_CH0_PODF_BUSY (0x1 << 4)
308#define BM_CDHIPR_PERIPH_SEL_BUSY (0x1 << 5)
309#define BM_CDHIPR_ARM_PODF_BUSY (0x1 << 16)
310
311#define BP_CLPCR_LPM 0
312#define BM_CLPCR_LPM (0x3 << 0)
313#define BM_CLPCR_BYPASS_PMIC_READY (0x1 << 2)
314#define BM_CLPCR_ARM_CLK_DIS_ON_LPM (0x1 << 5)
315#define BM_CLPCR_SBYOS (0x1 << 6)
316#define BM_CLPCR_DIS_REF_OSC (0x1 << 7)
317#define BM_CLPCR_VSTBY (0x1 << 8)
318#define BP_CLPCR_STBY_COUNT 9
319#define BM_CLPCR_STBY_COUNT (0x3 << 9)
320#define BM_CLPCR_COSC_PWRDOWN (0x1 << 11)
321#define BM_CLPCR_WB_PER_AT_LPM (0x1 << 16)
322#define BM_CLPCR_WB_CORE_AT_LPM (0x1 << 17)
323#define BM_CLPCR_BYP_MMDC_CH0_LPM_HS (0x1 << 19)
324#define BM_CLPCR_BYP_MMDC_CH1_LPM_HS (0x1 << 21)
325#define BM_CLPCR_MASK_CORE0_WFI (0x1 << 22)
326#define BM_CLPCR_MASK_CORE1_WFI (0x1 << 23)
327#define BM_CLPCR_MASK_CORE2_WFI (0x1 << 24)
328#define BM_CLPCR_MASK_CORE3_WFI (0x1 << 25)
329#define BM_CLPCR_MASK_SCU_IDLE (0x1 << 26)
330#define BM_CLPCR_MASK_L2CC_IDLE (0x1 << 27)
331
332#define BP_CCOSR_CKO1_EN 7
333#define BP_CCOSR_CKO1_PODF 4
334#define BM_CCOSR_CKO1_PODF (0x7 << 4)
335#define BP_CCOSR_CKO1_SEL 0
336#define BM_CCOSR_CKO1_SEL (0xf << 0)
337
338#define FREQ_480M 480000000
339#define FREQ_528M 528000000
340#define FREQ_594M 594000000
341#define FREQ_650M 650000000
342#define FREQ_1300M 1300000000
343
344static struct clk pll1_sys;
345static struct clk pll2_bus;
346static struct clk pll3_usb_otg;
347static struct clk pll4_audio;
348static struct clk pll5_video;
349static struct clk pll6_mlb;
350static struct clk pll7_usb_host;
351static struct clk pll8_enet;
352static struct clk apbh_dma_clk;
353static struct clk arm_clk;
354static struct clk ipg_clk;
355static struct clk ahb_clk;
356static struct clk axi_clk;
357static struct clk mmdc_ch0_axi_clk;
358static struct clk mmdc_ch1_axi_clk;
359static struct clk periph_clk;
360static struct clk periph_pre_clk;
361static struct clk periph_clk2_clk;
362static struct clk periph2_clk;
363static struct clk periph2_pre_clk;
364static struct clk periph2_clk2_clk;
365static struct clk gpu2d_core_clk;
366static struct clk gpu3d_core_clk;
367static struct clk gpu3d_shader_clk;
368static struct clk ipg_perclk;
369static struct clk emi_clk;
370static struct clk emi_slow_clk;
371static struct clk can1_clk;
372static struct clk uart_clk;
373static struct clk usdhc1_clk;
374static struct clk usdhc2_clk;
375static struct clk usdhc3_clk;
376static struct clk usdhc4_clk;
377static struct clk vpu_clk;
378static struct clk hsi_tx_clk;
379static struct clk ipu1_di0_pre_clk;
380static struct clk ipu1_di1_pre_clk;
381static struct clk ipu2_di0_pre_clk;
382static struct clk ipu2_di1_pre_clk;
383static struct clk ipu1_clk;
384static struct clk ipu2_clk;
385static struct clk ssi1_clk;
386static struct clk ssi3_clk;
387static struct clk esai_clk;
388static struct clk ssi2_clk;
389static struct clk spdif_clk;
390static struct clk asrc_serial_clk;
391static struct clk gpu2d_axi_clk;
392static struct clk gpu3d_axi_clk;
393static struct clk pcie_clk;
394static struct clk vdo_axi_clk;
395static struct clk ldb_di0_clk;
396static struct clk ldb_di1_clk;
397static struct clk ipu1_di0_clk;
398static struct clk ipu1_di1_clk;
399static struct clk ipu2_di0_clk;
400static struct clk ipu2_di1_clk;
401static struct clk enfc_clk;
402static struct clk cko1_clk;
403static struct clk dummy_clk = {};
404
405static unsigned long external_high_reference;
406static unsigned long external_low_reference;
407static unsigned long oscillator_reference;
408
/* Return the board-supplied 24 MHz-class oscillator rate (set at init). */
409static unsigned long get_oscillator_reference_clock_rate(struct clk *clk)
410{
411	return oscillator_reference;
412}
413
/* Return the external high-frequency reference (CKIH) rate. */
414static unsigned long get_high_reference_clock_rate(struct clk *clk)
415{
416	return external_high_reference;
417}
418
/* Return the external low-frequency reference (CKIL) rate. */
419static unsigned long get_low_reference_clock_rate(struct clk *clk)
420{
421	return external_low_reference;
422}
423
424static struct clk ckil_clk = {
425 .get_rate = get_low_reference_clock_rate,
426};
427
428static struct clk ckih_clk = {
429 .get_rate = get_high_reference_clock_rate,
430};
431
432static struct clk osc_clk = {
433 .get_rate = get_oscillator_reference_clock_rate,
434};
435
/*
 * Map a PLL's struct clk to its ANATOP control register.  An unknown clk
 * is a programming error and hits BUG(); the trailing return NULL only
 * silences the compiler on that unreachable path.
 */
436static inline void __iomem *pll_get_reg_addr(struct clk *pll)
437{
438	if (pll == &pll1_sys)
439		return PLL1_SYS;
440	else if (pll == &pll2_bus)
441		return PLL2_BUS;
442	else if (pll == &pll3_usb_otg)
443		return PLL3_USB_OTG;
444	else if (pll == &pll4_audio)
445		return PLL4_AUDIO;
446	else if (pll == &pll5_video)
447		return PLL5_VIDEO;
448	else if (pll == &pll6_mlb)
449		return PLL6_MLB;
450	else if (pll == &pll7_usb_host)
451		return PLL7_USB_HOST;
452	else if (pll == &pll8_enet)
453		return PLL8_ENET;
454	else
455		BUG();
456
457	return NULL;
458}
459
/*
 * Power up a PLL: clear BYPASS and POWER_DOWN (the 480 MHz USB PLLs use
 * an inverted power bit, so POWER_DOWN is *set* for them), busy-wait for
 * the LOCK bit with a bounded spin, then set ENABLE to gate the output on.
 * Returns 0 on success or -EBUSY if the PLL never locks.
 */
460static int pll_enable(struct clk *clk)
461{
462	int timeout = 0x100000;
463	void __iomem *reg;
464	u32 val;
465
466	reg = pll_get_reg_addr(clk);
467	val = readl_relaxed(reg);
468	val &= ~BM_PLL_BYPASS;
469	val &= ~BM_PLL_POWER_DOWN;
470	/* 480MHz PLLs have the opposite definition for power bit */
471	if (clk == &pll3_usb_otg || clk == &pll7_usb_host)
472		val |= BM_PLL_POWER_DOWN;
473	writel_relaxed(val, reg);
474
475	/* Wait for PLL to lock */
476	while (!(readl_relaxed(reg) & BM_PLL_LOCK) && --timeout)
477		cpu_relax();
478
479	if (unlikely(!timeout))
480		return -EBUSY;
481
482	/* Enable the PLL output now */
483	val = readl_relaxed(reg);
484	val |= BM_PLL_ENABLE;
485	writel_relaxed(val, reg);
486
487	return 0;
488}
489
490static void pll_disable(struct clk *clk)
491{
492 void __iomem *reg;
493 u32 val;
494
495 reg = pll_get_reg_addr(clk);
496 val = readl_relaxed(reg);
497 val &= ~BM_PLL_ENABLE;
498 val |= BM_PLL_BYPASS;
499 val |= BM_PLL_POWER_DOWN;
500 if (clk == &pll3_usb_otg || clk == &pll7_usb_host)
501 val &= ~BM_PLL_POWER_DOWN;
502 writel_relaxed(val, reg);
503}
504
505static unsigned long pll1_sys_get_rate(struct clk *clk)
506{
507 u32 div = (readl_relaxed(PLL1_SYS) & BM_PLL_SYS_DIV_SELECT) >>
508 BP_PLL_SYS_DIV_SELECT;
509
510 return clk_get_rate(clk->parent) * div / 2;
511}
512
513static int pll1_sys_set_rate(struct clk *clk, unsigned long rate)
514{
515 u32 val, div;
516
517 if (rate < FREQ_650M || rate > FREQ_1300M)
518 return -EINVAL;
519
520 div = rate * 2 / clk_get_rate(clk->parent);
521 val = readl_relaxed(PLL1_SYS);
522 val &= ~BM_PLL_SYS_DIV_SELECT;
523 val |= div << BP_PLL_SYS_DIV_SELECT;
524 writel_relaxed(val, PLL1_SYS);
525
526 return 0;
527}
528
529static unsigned long pll8_enet_get_rate(struct clk *clk)
530{
531 u32 div = (readl_relaxed(PLL8_ENET) & BM_PLL_ENET_DIV_SELECT) >>
532 BP_PLL_ENET_DIV_SELECT;
533
534 switch (div) {
535 case 0:
536 return 25000000;
537 case 1:
538 return 50000000;
539 case 2:
540 return 100000000;
541 case 3:
542 return 125000000;
543 }
544
545 return 0;
546}
547
548static int pll8_enet_set_rate(struct clk *clk, unsigned long rate)
549{
550 u32 val, div;
551
552 switch (rate) {
553 case 25000000:
554 div = 0;
555 break;
556 case 50000000:
557 div = 1;
558 break;
559 case 100000000:
560 div = 2;
561 break;
562 case 125000000:
563 div = 3;
564 break;
565 default:
566 return -EINVAL;
567 }
568
569 val = readl_relaxed(PLL8_ENET);
570 val &= ~BM_PLL_ENET_DIV_SELECT;
571 val |= div << BP_PLL_ENET_DIV_SELECT;
572 writel_relaxed(val, PLL8_ENET);
573
574 return 0;
575}
576
577static unsigned long pll_av_get_rate(struct clk *clk)
578{
579 void __iomem *reg = (clk == &pll4_audio) ? PLL4_AUDIO : PLL5_VIDEO;
580 unsigned long parent_rate = clk_get_rate(clk->parent);
581 u32 mfn = readl_relaxed(reg + PLL_NUM_OFFSET);
582 u32 mfd = readl_relaxed(reg + PLL_DENOM_OFFSET);
583 u32 div = (readl_relaxed(reg) & BM_PLL_AV_DIV_SELECT) >>
584 BP_PLL_AV_DIV_SELECT;
585
586 return (parent_rate * div) + ((parent_rate / mfd) * mfn);
587}
588
589static int pll_av_set_rate(struct clk *clk, unsigned long rate)
590{
591 void __iomem *reg = (clk == &pll4_audio) ? PLL4_AUDIO : PLL5_VIDEO;
592 unsigned int parent_rate = clk_get_rate(clk->parent);
593 u32 val, div;
594 u32 mfn, mfd = 1000000;
595 s64 temp64;
596
597 if (rate < FREQ_650M || rate > FREQ_1300M)
598 return -EINVAL;
599
600 div = rate / parent_rate;
601 temp64 = (u64) (rate - div * parent_rate);
602 temp64 *= mfd;
603 do_div(temp64, parent_rate);
604 mfn = temp64;
605
606 val = readl_relaxed(reg);
607 val &= ~BM_PLL_AV_DIV_SELECT;
608 val |= div << BP_PLL_AV_DIV_SELECT;
609 writel_relaxed(val, reg);
610 writel_relaxed(mfn, reg + PLL_NUM_OFFSET);
611 writel_relaxed(mfd, reg + PLL_DENOM_OFFSET);
612
613 return 0;
614}
615
616static void __iomem *pll_get_div_reg_bit(struct clk *clk, u32 *bp, u32 *bm)
617{
618 void __iomem *reg;
619
620 if (clk == &pll2_bus) {
621 reg = PLL2_BUS;
622 *bp = BP_PLL_BUS_DIV_SELECT;
623 *bm = BM_PLL_BUS_DIV_SELECT;
624 } else if (clk == &pll3_usb_otg) {
625 reg = PLL3_USB_OTG;
626 *bp = BP_PLL_USB_DIV_SELECT;
627 *bm = BM_PLL_USB_DIV_SELECT;
628 } else if (clk == &pll7_usb_host) {
629 reg = PLL7_USB_HOST;
630 *bp = BP_PLL_USB_DIV_SELECT;
631 *bm = BM_PLL_USB_DIV_SELECT;
632 } else {
633 BUG();
634 }
635
636 return reg;
637}
638
639static unsigned long pll_get_rate(struct clk *clk)
640{
641 void __iomem *reg;
642 u32 div, bp, bm;
643
644 reg = pll_get_div_reg_bit(clk, &bp, &bm);
645 div = (readl_relaxed(reg) & bm) >> bp;
646
647 return (div == 1) ? clk_get_rate(clk->parent) * 22 :
648 clk_get_rate(clk->parent) * 20;
649}
650
651static int pll_set_rate(struct clk *clk, unsigned long rate)
652{
653 void __iomem *reg;
654 u32 val, div, bp, bm;
655
656 if (rate == FREQ_528M)
657 div = 1;
658 else if (rate == FREQ_480M)
659 div = 0;
660 else
661 return -EINVAL;
662
663 reg = pll_get_div_reg_bit(clk, &bp, &bm);
664 val = readl_relaxed(reg);
665 val &= ~bm;
666 val |= div << bp;
667 writel_relaxed(val, reg);
668
669 return 0;
670}
671
/*
 * Rate-op aliases per PLL: pll2/3/7 share the generic 20x/22x ops,
 * pll4/5 the fractional audio/video ops; pll6 has no rate ops.
 */
#define pll2_bus_get_rate pll_get_rate
#define pll2_bus_set_rate pll_set_rate
#define pll3_usb_otg_get_rate pll_get_rate
#define pll3_usb_otg_set_rate pll_set_rate
#define pll7_usb_host_get_rate pll_get_rate
#define pll7_usb_host_set_rate pll_set_rate
#define pll4_audio_get_rate pll_av_get_rate
#define pll4_audio_set_rate pll_av_set_rate
#define pll5_video_get_rate pll_av_get_rate
#define pll5_video_set_rate pll_av_set_rate
#define pll6_mlb_get_rate NULL
#define pll6_mlb_set_rate NULL

/* Define a PLL clock parented on the oscillator with per-name rate ops. */
#define DEF_PLL(name) \
	static struct clk name = { \
		.enable = pll_enable, \
		.disable = pll_disable, \
		.get_rate = name##_get_rate, \
		.set_rate = name##_set_rate, \
		.parent = &osc_clk, \
	}

DEF_PLL(pll1_sys);
DEF_PLL(pll2_bus);
DEF_PLL(pll3_usb_otg);
DEF_PLL(pll4_audio);
DEF_PLL(pll5_video);
DEF_PLL(pll6_mlb);
DEF_PLL(pll7_usb_host);
DEF_PLL(pll8_enet);
702
703static unsigned long pfd_get_rate(struct clk *clk)
704{
705 u64 tmp = (u64) clk_get_rate(clk->parent) * 18;
706 u32 frac, bp_frac;
707
708 if (apbh_dma_clk.usecount == 0)
709 apbh_dma_clk.enable(&apbh_dma_clk);
710
711 bp_frac = clk->enable_shift - 7;
712 frac = readl_relaxed(clk->enable_reg) >> bp_frac & PFD_FRAC_MASK;
713 do_div(tmp, frac);
714
715 return tmp;
716}
717
718static int pfd_set_rate(struct clk *clk, unsigned long rate)
719{
720 u32 val, frac, bp_frac;
721 u64 tmp = (u64) clk_get_rate(clk->parent) * 18;
722
723 if (apbh_dma_clk.usecount == 0)
724 apbh_dma_clk.enable(&apbh_dma_clk);
725
726 /*
727 * Round up the divider so that we don't set a rate
728 * higher than what is requested
729 */
730 tmp += rate / 2;
731 do_div(tmp, rate);
732 frac = tmp;
733 frac = (frac < 12) ? 12 : frac;
734 frac = (frac > 35) ? 35 : frac;
735
736 /*
737 * The frac field always starts from 7 bits lower
738 * position of enable bit
739 */
740 bp_frac = clk->enable_shift - 7;
741 val = readl_relaxed(clk->enable_reg);
742 val &= ~(PFD_FRAC_MASK << bp_frac);
743 val |= frac << bp_frac;
744 writel_relaxed(val, clk->enable_reg);
745
746 tmp = (u64) clk_get_rate(clk->parent) * 18;
747 do_div(tmp, frac);
748
749 if (apbh_dma_clk.usecount == 0)
750 apbh_dma_clk.disable(&apbh_dma_clk);
751
752 return 0;
753}
754
755static unsigned long pfd_round_rate(struct clk *clk, unsigned long rate)
756{
757 u32 frac;
758 u64 tmp;
759
760 tmp = (u64) clk_get_rate(clk->parent) * 18;
761 tmp += rate / 2;
762 do_div(tmp, rate);
763 frac = tmp;
764 frac = (frac < 12) ? 12 : frac;
765 frac = (frac > 35) ? 35 : frac;
766 tmp = (u64) clk_get_rate(clk->parent) * 18;
767 do_div(tmp, frac);
768
769 return tmp;
770}
771
772static int pfd_enable(struct clk *clk)
773{
774 u32 val;
775
776 if (apbh_dma_clk.usecount == 0)
777 apbh_dma_clk.enable(&apbh_dma_clk);
778
779 val = readl_relaxed(clk->enable_reg);
780 val &= ~(1 << clk->enable_shift);
781 writel_relaxed(val, clk->enable_reg);
782
783 if (apbh_dma_clk.usecount == 0)
784 apbh_dma_clk.disable(&apbh_dma_clk);
785
786 return 0;
787}
788
789static void pfd_disable(struct clk *clk)
790{
791 u32 val;
792
793 if (apbh_dma_clk.usecount == 0)
794 apbh_dma_clk.enable(&apbh_dma_clk);
795
796 val = readl_relaxed(clk->enable_reg);
797 val |= 1 << clk->enable_shift;
798 writel_relaxed(val, clk->enable_reg);
799
800 if (apbh_dma_clk.usecount == 0)
801 apbh_dma_clk.disable(&apbh_dma_clk);
802}
803
/*
 * Define a PFD clock: @er/@es locate the gate bit (the frac field sits
 * 7 bits below it), @p is the parent PLL.
 */
#define DEF_PFD(name, er, es, p) \
	static struct clk name = { \
		.enable_reg = er, \
		.enable_shift = es, \
		.enable = pfd_enable, \
		.disable = pfd_disable, \
		.get_rate = pfd_get_rate, \
		.set_rate = pfd_set_rate, \
		.round_rate = pfd_round_rate, \
		.parent = p, \
	}

DEF_PFD(pll2_pfd_352m, PFD_528, PFD0, &pll2_bus);
DEF_PFD(pll2_pfd_594m, PFD_528, PFD1, &pll2_bus);
DEF_PFD(pll2_pfd_400m, PFD_528, PFD2, &pll2_bus);
DEF_PFD(pll3_pfd_720m, PFD_480, PFD0, &pll3_usb_otg);
DEF_PFD(pll3_pfd_540m, PFD_480, PFD1, &pll3_usb_otg);
DEF_PFD(pll3_pfd_508m, PFD_480, PFD2, &pll3_usb_otg);
DEF_PFD(pll3_pfd_454m, PFD_480, PFD3, &pll3_usb_otg);
823
/* The TWD (local timer) always runs at half the ARM core clock. */
static unsigned long twd_clk_get_rate(struct clk *clk)
{
	return clk_get_rate(clk->parent) / 2;
}

static struct clk twd_clk = {
	.parent = &arm_clk,
	.get_rate = twd_clk_get_rate,
};

/* Fixed-ratio derived clocks: pll2_pfd_400m / 2. */
static unsigned long pll2_200m_get_rate(struct clk *clk)
{
	return clk_get_rate(clk->parent) / 2;
}

static struct clk pll2_200m = {
	.parent = &pll2_pfd_400m,
	.get_rate = pll2_200m_get_rate,
};

/* pll3_usb_otg / 4. */
static unsigned long pll3_120m_get_rate(struct clk *clk)
{
	return clk_get_rate(clk->parent) / 4;
}

static struct clk pll3_120m = {
	.parent = &pll3_usb_otg,
	.get_rate = pll3_120m_get_rate,
};

/* pll3_usb_otg / 6. */
static unsigned long pll3_80m_get_rate(struct clk *clk)
{
	return clk_get_rate(clk->parent) / 6;
}

static struct clk pll3_80m = {
	.parent = &pll3_usb_otg,
	.get_rate = pll3_80m_get_rate,
};

/* pll3_usb_otg / 8. */
static unsigned long pll3_60m_get_rate(struct clk *clk)
{
	return clk_get_rate(clk->parent) / 8;
}

static struct clk pll3_60m = {
	.parent = &pll3_usb_otg,
	.get_rate = pll3_60m_get_rate,
};
873
874static int pll1_sw_clk_set_parent(struct clk *clk, struct clk *parent)
875{
876 u32 val = readl_relaxed(CCSR);
877
878 if (parent == &pll1_sys) {
879 val &= ~BM_CCSR_PLL1_SW_SEL;
880 val &= ~BM_CCSR_STEP_SEL;
881 } else if (parent == &osc_clk) {
882 val |= BM_CCSR_PLL1_SW_SEL;
883 val &= ~BM_CCSR_STEP_SEL;
884 } else if (parent == &pll2_pfd_400m) {
885 val |= BM_CCSR_PLL1_SW_SEL;
886 val |= BM_CCSR_STEP_SEL;
887 } else {
888 return -EINVAL;
889 }
890
891 writel_relaxed(val, CCSR);
892
893 return 0;
894}
895
/* pll1_sw: the switchable ARM clock source, defaulting to pll1_sys. */
static struct clk pll1_sw_clk = {
	.parent = &pll1_sys,
	.set_parent = pll1_sw_clk_set_parent,
};
900
/*
 * Split an overall divider @div into a pre-divider (1..8, *pred) and a
 * post-divider (1..64, *podf) such that pred * podf >= div, preferring
 * an exact factorisation and otherwise the smallest overshoot.
 * NOTE(review): *pred/*podf stay untouched for div == 0 — callers are
 * expected to pass div >= 1.
 */
static void calc_pred_podf_dividers(u32 div, u32 *pred, u32 *podf)
{
	u32 min_pred, temp_pred, old_err, err;

	if (div >= 512) {
		/* Clamp to the largest divider the hardware can do: 8*64. */
		*pred = 8;
		*podf = 64;
	} else if (div >= 8) {
		/* Smallest pre-divider that keeps podf within 64. */
		min_pred = (div - 1) / 64 + 1;
		old_err = 8;
		for (temp_pred = 8; temp_pred >= min_pred; temp_pred--) {
			err = div % temp_pred;
			if (err == 0) {
				*pred = temp_pred;
				break;
			}
			/* Distance to the next multiple of temp_pred. */
			err = temp_pred - err;
			if (err < old_err) {
				old_err = err;
				*pred = temp_pred;
			}
		}
		/* Round podf up so pred * podf >= div. */
		*podf = (div + *pred - 1) / *pred;
	} else if (div < 8) {
		*pred = div;
		*podf = 1;
	}
}
929
930static int _clk_enable(struct clk *clk)
931{
932 u32 reg;
933 reg = readl_relaxed(clk->enable_reg);
934 reg |= 0x3 << clk->enable_shift;
935 writel_relaxed(reg, clk->enable_reg);
936
937 return 0;
938}
939
940static void _clk_disable(struct clk *clk)
941{
942 u32 reg;
943 reg = readl_relaxed(clk->enable_reg);
944 reg &= ~(0x3 << clk->enable_shift);
945 writel_relaxed(reg, clk->enable_reg);
946}
947
948static int _clk_enable_1b(struct clk *clk)
949{
950 u32 reg;
951 reg = readl_relaxed(clk->enable_reg);
952 reg |= 0x1 << clk->enable_shift;
953 writel_relaxed(reg, clk->enable_reg);
954
955 return 0;
956}
957
958static void _clk_disable_1b(struct clk *clk)
959{
960 u32 reg;
961 reg = readl_relaxed(clk->enable_reg);
962 reg &= ~(0x1 << clk->enable_shift);
963 writel_relaxed(reg, clk->enable_reg);
964}
965
/*
 * Describes a clock's divider field(s) in a CCM register: an optional
 * pre-divider (pred) and a post-divider (podf), each given as bit
 * position (bp_*) and mask (bm_*).  bm_pred == 0 means podf only.
 */
struct divider {
	struct clk *clk;
	void __iomem *reg;
	u32 bp_pred;
	u32 bm_pred;
	u32 bp_podf;
	u32 bm_podf;
};
974
/* Single-stage divider: only a PODF field in register @r, field @b. */
#define DEF_CLK_DIV1(d, c, r, b) \
	static struct divider d = { \
		.clk = c, \
		.reg = r, \
		.bp_podf = BP_##r##_##b##_PODF, \
		.bm_podf = BM_##r##_##b##_PODF, \
	}

DEF_CLK_DIV1(arm_div, &arm_clk, CACRR, ARM);
DEF_CLK_DIV1(ipg_div, &ipg_clk, CBCDR, IPG);
DEF_CLK_DIV1(ahb_div, &ahb_clk, CBCDR, AHB);
DEF_CLK_DIV1(axi_div, &axi_clk, CBCDR, AXI);
DEF_CLK_DIV1(mmdc_ch0_axi_div, &mmdc_ch0_axi_clk, CBCDR, MMDC_CH0_AXI);
DEF_CLK_DIV1(mmdc_ch1_axi_div, &mmdc_ch1_axi_clk, CBCDR, MMDC_CH1_AXI);
DEF_CLK_DIV1(periph_clk2_div, &periph_clk2_clk, CBCDR, PERIPH_CLK2);
DEF_CLK_DIV1(periph2_clk2_div, &periph2_clk2_clk, CBCDR, PERIPH2_CLK2);
DEF_CLK_DIV1(gpu2d_core_div, &gpu2d_core_clk, CBCMR, GPU2D_CORE);
DEF_CLK_DIV1(gpu3d_core_div, &gpu3d_core_clk, CBCMR, GPU3D_CORE);
DEF_CLK_DIV1(gpu3d_shader_div, &gpu3d_shader_clk, CBCMR, GPU3D_SHADER);
DEF_CLK_DIV1(ipg_perclk_div, &ipg_perclk, CSCMR1, PERCLK);
DEF_CLK_DIV1(emi_div, &emi_clk, CSCMR1, EMI);
DEF_CLK_DIV1(emi_slow_div, &emi_slow_clk, CSCMR1, EMI_SLOW);
DEF_CLK_DIV1(can_div, &can1_clk, CSCMR2, CAN);
DEF_CLK_DIV1(uart_div, &uart_clk, CSCDR1, UART);
DEF_CLK_DIV1(usdhc1_div, &usdhc1_clk, CSCDR1, USDHC1);
DEF_CLK_DIV1(usdhc2_div, &usdhc2_clk, CSCDR1, USDHC2);
DEF_CLK_DIV1(usdhc3_div, &usdhc3_clk, CSCDR1, USDHC3);
DEF_CLK_DIV1(usdhc4_div, &usdhc4_clk, CSCDR1, USDHC4);
DEF_CLK_DIV1(vpu_div, &vpu_clk, CSCDR1, VPU_AXI);
DEF_CLK_DIV1(hsi_tx_div, &hsi_tx_clk, CDCDR, HSI_TX);
DEF_CLK_DIV1(ipu1_di0_pre_div, &ipu1_di0_pre_clk, CHSCCDR, IPU1_DI0_PRE);
DEF_CLK_DIV1(ipu1_di1_pre_div, &ipu1_di1_pre_clk, CHSCCDR, IPU1_DI1_PRE);
DEF_CLK_DIV1(ipu2_di0_pre_div, &ipu2_di0_pre_clk, CSCDR2, IPU2_DI0_PRE);
DEF_CLK_DIV1(ipu2_di1_pre_div, &ipu2_di1_pre_clk, CSCDR2, IPU2_DI1_PRE);
DEF_CLK_DIV1(ipu1_div, &ipu1_clk, CSCDR3, IPU1_HSP);
DEF_CLK_DIV1(ipu2_div, &ipu2_clk, CSCDR3, IPU2_HSP);
DEF_CLK_DIV1(cko1_div, &cko1_clk, CCOSR, CKO1);
1012
/* Two-stage divider: both PRED and PODF fields in register @r, field @b. */
#define DEF_CLK_DIV2(d, c, r, b) \
	static struct divider d = { \
		.clk = c, \
		.reg = r, \
		.bp_pred = BP_##r##_##b##_PRED, \
		.bm_pred = BM_##r##_##b##_PRED, \
		.bp_podf = BP_##r##_##b##_PODF, \
		.bm_podf = BM_##r##_##b##_PODF, \
	}

DEF_CLK_DIV2(ssi1_div, &ssi1_clk, CS1CDR, SSI1);
DEF_CLK_DIV2(ssi3_div, &ssi3_clk, CS1CDR, SSI3);
DEF_CLK_DIV2(esai_div, &esai_clk, CS1CDR, ESAI);
DEF_CLK_DIV2(ssi2_div, &ssi2_clk, CS2CDR, SSI2);
DEF_CLK_DIV2(enfc_div, &enfc_clk, CS2CDR, ENFC);
DEF_CLK_DIV2(spdif_div, &spdif_clk, CDCDR, SPDIF);
DEF_CLK_DIV2(asrc_serial_div, &asrc_serial_clk, CDCDR, ASRC_SERIAL);
1030
/* Lookup table scanned by _clk_get_rate()/_clk_set_rate()/_clk_round_rate(). */
static struct divider *dividers[] = {
	&arm_div,
	&ipg_div,
	&ahb_div,
	&axi_div,
	&mmdc_ch0_axi_div,
	&mmdc_ch1_axi_div,
	&periph_clk2_div,
	&periph2_clk2_div,
	&gpu2d_core_div,
	&gpu3d_core_div,
	&gpu3d_shader_div,
	&ipg_perclk_div,
	&emi_div,
	&emi_slow_div,
	&can_div,
	&uart_div,
	&usdhc1_div,
	&usdhc2_div,
	&usdhc3_div,
	&usdhc4_div,
	&vpu_div,
	&hsi_tx_div,
	&ipu1_di0_pre_div,
	&ipu1_di1_pre_div,
	&ipu2_di0_pre_div,
	&ipu2_di1_pre_div,
	&ipu1_div,
	&ipu2_div,
	&ssi1_div,
	&ssi3_div,
	&esai_div,
	&ssi2_div,
	&enfc_div,
	&spdif_div,
	&asrc_serial_div,
	&cko1_div,
};
1069
1070static unsigned long ldb_di_clk_get_rate(struct clk *clk)
1071{
1072 u32 val = readl_relaxed(CSCMR2);
1073
1074 val &= (clk == &ldb_di0_clk) ? BM_CSCMR2_LDB_DI0_IPU_DIV :
1075 BM_CSCMR2_LDB_DI1_IPU_DIV;
1076 if (val)
1077 return clk_get_rate(clk->parent) / 7;
1078 else
1079 return clk_get_rate(clk->parent) * 2 / 7;
1080}
1081
1082static int ldb_di_clk_set_rate(struct clk *clk, unsigned long rate)
1083{
1084 unsigned long parent_rate = clk_get_rate(clk->parent);
1085 u32 val = readl_relaxed(CSCMR2);
1086
1087 if (rate * 7 <= parent_rate + parent_rate / 20)
1088 val |= BM_CSCMR2_LDB_DI0_IPU_DIV;
1089 else
1090 val &= ~BM_CSCMR2_LDB_DI0_IPU_DIV;
1091
1092 writel_relaxed(val, CSCMR2);
1093
1094 return 0;
1095}
1096
1097static unsigned long ldb_di_clk_round_rate(struct clk *clk, unsigned long rate)
1098{
1099 unsigned long parent_rate = clk_get_rate(clk->parent);
1100
1101 if (rate * 7 <= parent_rate + parent_rate / 20)
1102 return parent_rate / 7;
1103 else
1104 return 2 * parent_rate / 7;
1105}
1106
1107static unsigned long _clk_get_rate(struct clk *clk)
1108{
1109 struct divider *d;
1110 u32 val, pred, podf;
1111 int i, num;
1112
1113 if (clk == &ldb_di0_clk || clk == &ldb_di1_clk)
1114 return ldb_di_clk_get_rate(clk);
1115
1116 num = ARRAY_SIZE(dividers);
1117 for (i = 0; i < num; i++)
1118 if (dividers[i]->clk == clk) {
1119 d = dividers[i];
1120 break;
1121 }
1122 if (i == num)
1123 return clk_get_rate(clk->parent);
1124
1125 val = readl_relaxed(d->reg);
1126 pred = ((val & d->bm_pred) >> d->bp_pred) + 1;
1127 podf = ((val & d->bm_podf) >> d->bp_podf) + 1;
1128
1129 return clk_get_rate(clk->parent) / (pred * podf);
1130}
1131
1132static int clk_busy_wait(struct clk *clk)
1133{
1134 int timeout = 0x100000;
1135 u32 bm;
1136
1137 if (clk == &axi_clk)
1138 bm = BM_CDHIPR_AXI_PODF_BUSY;
1139 else if (clk == &ahb_clk)
1140 bm = BM_CDHIPR_AHB_PODF_BUSY;
1141 else if (clk == &mmdc_ch0_axi_clk)
1142 bm = BM_CDHIPR_MMDC_CH0_PODF_BUSY;
1143 else if (clk == &periph_clk)
1144 bm = BM_CDHIPR_PERIPH_SEL_BUSY;
1145 else if (clk == &arm_clk)
1146 bm = BM_CDHIPR_ARM_PODF_BUSY;
1147 else
1148 return -EINVAL;
1149
1150 while ((readl_relaxed(CDHIPR) & bm) && --timeout)
1151 cpu_relax();
1152
1153 if (unlikely(!timeout))
1154 return -EBUSY;
1155
1156 return 0;
1157}
1158
/*
 * Generic rate setting: compute an exact integer divider from the
 * parent rate, split it into pred/podf where the hardware has both
 * fields and program the register.  Returns -EINVAL when @clk has no
 * divider entry or the rate cannot be divided down exactly.
 */
static int _clk_set_rate(struct clk *clk, unsigned long rate)
{
	unsigned long parent_rate = clk_get_rate(clk->parent);
	struct divider *d;
	u32 val, div, max_div, pred = 0, podf;
	int i, num;

	/* ldb_di clocks use the dedicated 7 / 3.5 divider path. */
	if (clk == &ldb_di0_clk || clk == &ldb_di1_clk)
		return ldb_di_clk_set_rate(clk, rate);

	num = ARRAY_SIZE(dividers);
	for (i = 0; i < num; i++)
		if (dividers[i]->clk == clk) {
			d = dividers[i];
			break;
		}
	if (i == num)
		return -EINVAL;

	max_div = ((d->bm_pred >> d->bp_pred) + 1) *
		  ((d->bm_podf >> d->bp_podf) + 1);

	div = parent_rate / rate;
	if (div == 0)
		div++;

	/* Only exact divisions are accepted. */
	if ((parent_rate / div != rate) || div > max_div)
		return -EINVAL;

	if (d->bm_pred) {
		/* Two-stage divider: factor div into pred * podf. */
		calc_pred_podf_dividers(div, &pred, &podf);
	} else {
		pred = 1;
		podf = div;
	}

	val = readl_relaxed(d->reg);
	val &= ~(d->bm_pred | d->bm_podf);
	val |= (pred - 1) << d->bp_pred | (podf - 1) << d->bp_podf;
	writel_relaxed(val, d->reg);

	/* These dividers signal busy until the new setting took effect. */
	if (clk == &axi_clk || clk == &ahb_clk ||
	    clk == &mmdc_ch0_axi_clk || clk == &arm_clk)
		return clk_busy_wait(clk);

	return 0;
}
1206
/*
 * Generic round_rate: report the rate the divider would really produce
 * for @rate, rounding the divider up so the result never exceeds the
 * request, and clamping single-stage dividers to their maximum.
 * NOTE(review): returns -EINVAL through an unsigned long for unknown
 * clocks, i.e. a very large value — callers must treat it as an error.
 */
static unsigned long _clk_round_rate(struct clk *clk, unsigned long rate)
{
	unsigned long parent_rate = clk_get_rate(clk->parent);
	u32 div = parent_rate / rate;
	u32 div_max, pred = 0, podf;
	struct divider *d;
	int i, num;

	if (clk == &ldb_di0_clk || clk == &ldb_di1_clk)
		return ldb_di_clk_round_rate(clk, rate);

	num = ARRAY_SIZE(dividers);
	for (i = 0; i < num; i++)
		if (dividers[i]->clk == clk) {
			d = dividers[i];
			break;
		}
	if (i == num)
		return -EINVAL;

	/* Round up on inexact division so we never exceed @rate. */
	if (div == 0 || parent_rate % rate)
		div++;

	if (d->bm_pred) {
		/* Two-stage divider: use the achievable pred * podf. */
		calc_pred_podf_dividers(div, &pred, &podf);
		div = pred * podf;
	} else {
		div_max = (d->bm_podf >> d->bp_podf) + 1;
		if (div > div_max)
			div = div_max;
	}

	return parent_rate / div;
}
1241
/*
 * Describes a clock mux: the select field in @reg (position @bp, mask
 * @bm) and the NULL-terminated list of selectable parents, indexed by
 * the field value.  dummy_clk marks reserved selections.
 */
struct multiplexer {
	struct clk *clk;
	void __iomem *reg;
	u32 bp;
	u32 bm;
	int pnum;
	struct clk *parents[];
};
1250
/* Bus/periph clock muxes; parents[] order matches the hardware field. */
static struct multiplexer axi_mux = {
	.clk = &axi_clk,
	.reg = CBCDR,
	.bp = BP_CBCDR_AXI_SEL,
	.bm = BM_CBCDR_AXI_SEL,
	.parents = {
		&periph_clk,
		&pll2_pfd_400m,
		&pll3_pfd_540m,
		NULL
	},
};

static struct multiplexer periph_mux = {
	.clk = &periph_clk,
	.reg = CBCDR,
	.bp = BP_CBCDR_PERIPH_CLK_SEL,
	.bm = BM_CBCDR_PERIPH_CLK_SEL,
	.parents = {
		&periph_pre_clk,
		&periph_clk2_clk,
		NULL
	},
};

static struct multiplexer periph_pre_mux = {
	.clk = &periph_pre_clk,
	.reg = CBCMR,
	.bp = BP_CBCMR_PRE_PERIPH_CLK_SEL,
	.bm = BM_CBCMR_PRE_PERIPH_CLK_SEL,
	.parents = {
		&pll2_bus,
		&pll2_pfd_400m,
		&pll2_pfd_352m,
		&pll2_200m,
		NULL
	},
};

static struct multiplexer periph_clk2_mux = {
	.clk = &periph_clk2_clk,
	.reg = CBCMR,
	.bp = BP_CBCMR_PERIPH_CLK2_SEL,
	.bm = BM_CBCMR_PERIPH_CLK2_SEL,
	.parents = {
		&pll3_usb_otg,
		&osc_clk,
		NULL
	},
};

static struct multiplexer periph2_mux = {
	.clk = &periph2_clk,
	.reg = CBCDR,
	.bp = BP_CBCDR_PERIPH2_CLK_SEL,
	.bm = BM_CBCDR_PERIPH2_CLK_SEL,
	.parents = {
		&periph2_pre_clk,
		&periph2_clk2_clk,
		NULL
	},
};

static struct multiplexer periph2_pre_mux = {
	.clk = &periph2_pre_clk,
	.reg = CBCMR,
	.bp = BP_CBCMR_PRE_PERIPH2_CLK_SEL,
	.bm = BM_CBCMR_PRE_PERIPH2_CLK_SEL,
	.parents = {
		&pll2_bus,
		&pll2_pfd_400m,
		&pll2_pfd_352m,
		&pll2_200m,
		NULL
	},
};

static struct multiplexer periph2_clk2_mux = {
	.clk = &periph2_clk2_clk,
	.reg = CBCMR,
	.bp = BP_CBCMR_PERIPH2_CLK2_SEL,
	.bm = BM_CBCMR_PERIPH2_CLK2_SEL,
	.parents = {
		&pll3_usb_otg,
		&osc_clk,
		NULL
	},
};
1339
/* GPU / PCIe / video clock muxes. */
static struct multiplexer gpu2d_axi_mux = {
	.clk = &gpu2d_axi_clk,
	.reg = CBCMR,
	.bp = BP_CBCMR_GPU2D_AXI_SEL,
	.bm = BM_CBCMR_GPU2D_AXI_SEL,
	.parents = {
		&axi_clk,
		&ahb_clk,
		NULL
	},
};

static struct multiplexer gpu3d_axi_mux = {
	.clk = &gpu3d_axi_clk,
	.reg = CBCMR,
	.bp = BP_CBCMR_GPU3D_AXI_SEL,
	.bm = BM_CBCMR_GPU3D_AXI_SEL,
	.parents = {
		&axi_clk,
		&ahb_clk,
		NULL
	},
};

static struct multiplexer gpu3d_core_mux = {
	.clk = &gpu3d_core_clk,
	.reg = CBCMR,
	.bp = BP_CBCMR_GPU3D_CORE_SEL,
	.bm = BM_CBCMR_GPU3D_CORE_SEL,
	.parents = {
		&mmdc_ch0_axi_clk,
		&pll3_usb_otg,
		&pll2_pfd_594m,
		&pll2_pfd_400m,
		NULL
	},
};

static struct multiplexer gpu3d_shader_mux = {
	.clk = &gpu3d_shader_clk,
	.reg = CBCMR,
	.bp = BP_CBCMR_GPU3D_SHADER_SEL,
	.bm = BM_CBCMR_GPU3D_SHADER_SEL,
	.parents = {
		&mmdc_ch0_axi_clk,
		&pll3_usb_otg,
		&pll2_pfd_594m,
		&pll3_pfd_720m,
		NULL
	},
};

static struct multiplexer pcie_axi_mux = {
	.clk = &pcie_clk,
	.reg = CBCMR,
	.bp = BP_CBCMR_PCIE_AXI_SEL,
	.bm = BM_CBCMR_PCIE_AXI_SEL,
	.parents = {
		&axi_clk,
		&ahb_clk,
		NULL
	},
};

static struct multiplexer vdo_axi_mux = {
	.clk = &vdo_axi_clk,
	.reg = CBCMR,
	.bp = BP_CBCMR_VDO_AXI_SEL,
	.bm = BM_CBCMR_VDO_AXI_SEL,
	.parents = {
		&axi_clk,
		&ahb_clk,
		NULL
	},
};

static struct multiplexer vpu_axi_mux = {
	.clk = &vpu_clk,
	.reg = CBCMR,
	.bp = BP_CBCMR_VPU_AXI_SEL,
	.bm = BM_CBCMR_VPU_AXI_SEL,
	.parents = {
		&axi_clk,
		&pll2_pfd_400m,
		&pll2_pfd_352m,
		NULL
	},
};

static struct multiplexer gpu2d_core_mux = {
	.clk = &gpu2d_core_clk,
	.reg = CBCMR,
	.bp = BP_CBCMR_GPU2D_CORE_SEL,
	.bm = BM_CBCMR_GPU2D_CORE_SEL,
	.parents = {
		&axi_clk,
		&pll3_usb_otg,
		&pll2_pfd_352m,
		&pll2_pfd_400m,
		NULL
	},
};
1442
/* Per-instance SSI mux: all three SSIs share the same parent set. */
#define DEF_SSI_MUX(id) \
	static struct multiplexer ssi##id##_mux = { \
		.clk = &ssi##id##_clk, \
		.reg = CSCMR1, \
		.bp = BP_CSCMR1_SSI##id##_SEL, \
		.bm = BM_CSCMR1_SSI##id##_SEL, \
		.parents = { \
			&pll3_pfd_508m, \
			&pll3_pfd_454m, \
			&pll4_audio, \
			NULL \
		}, \
	}

DEF_SSI_MUX(1);
DEF_SSI_MUX(2);
DEF_SSI_MUX(3);

/* Per-instance uSDHC mux: 400MHz or 352MHz PFD of pll2. */
#define DEF_USDHC_MUX(id) \
	static struct multiplexer usdhc##id##_mux = { \
		.clk = &usdhc##id##_clk, \
		.reg = CSCMR1, \
		.bp = BP_CSCMR1_USDHC##id##_SEL, \
		.bm = BM_CSCMR1_USDHC##id##_SEL, \
		.parents = { \
			&pll2_pfd_400m, \
			&pll2_pfd_352m, \
			NULL \
		}, \
	}

DEF_USDHC_MUX(1);
DEF_USDHC_MUX(2);
DEF_USDHC_MUX(3);
DEF_USDHC_MUX(4);
1478
/* EMI / audio serial muxes. */
static struct multiplexer emi_mux = {
	.clk = &emi_clk,
	.reg = CSCMR1,
	.bp = BP_CSCMR1_EMI_SEL,
	.bm = BM_CSCMR1_EMI_SEL,
	.parents = {
		&axi_clk,
		&pll3_usb_otg,
		&pll2_pfd_400m,
		&pll2_pfd_352m,
		NULL
	},
};

static struct multiplexer emi_slow_mux = {
	.clk = &emi_slow_clk,
	.reg = CSCMR1,
	.bp = BP_CSCMR1_EMI_SLOW_SEL,
	.bm = BM_CSCMR1_EMI_SLOW_SEL,
	.parents = {
		&axi_clk,
		&pll3_usb_otg,
		&pll2_pfd_400m,
		&pll2_pfd_352m,
		NULL
	},
};

static struct multiplexer esai_mux = {
	.clk = &esai_clk,
	.reg = CSCMR2,
	.bp = BP_CSCMR2_ESAI_SEL,
	.bm = BM_CSCMR2_ESAI_SEL,
	.parents = {
		&pll4_audio,
		&pll3_pfd_508m,
		&pll3_pfd_454m,
		&pll3_usb_otg,
		NULL
	},
};
1520
/* Per-channel LVDS display bridge mux. */
#define DEF_LDB_DI_MUX(id) \
	static struct multiplexer ldb_di##id##_mux = { \
		.clk = &ldb_di##id##_clk, \
		.reg = CS2CDR, \
		.bp = BP_CS2CDR_LDB_DI##id##_SEL, \
		.bm = BM_CS2CDR_LDB_DI##id##_SEL, \
		.parents = { \
			&pll5_video, \
			&pll2_pfd_352m, \
			&pll2_pfd_400m, \
			&pll3_pfd_540m, \
			&pll3_usb_otg, \
			NULL \
		}, \
	}

DEF_LDB_DI_MUX(0);
DEF_LDB_DI_MUX(1);

static struct multiplexer enfc_mux = {
	.clk = &enfc_clk,
	.reg = CS2CDR,
	.bp = BP_CS2CDR_ENFC_SEL,
	.bm = BM_CS2CDR_ENFC_SEL,
	.parents = {
		&pll2_pfd_352m,
		&pll2_bus,
		&pll3_usb_otg,
		&pll2_pfd_400m,
		NULL
	},
};

static struct multiplexer spdif_mux = {
	.clk = &spdif_clk,
	.reg = CDCDR,
	.bp = BP_CDCDR_SPDIF_SEL,
	.bm = BM_CDCDR_SPDIF_SEL,
	.parents = {
		&pll4_audio,
		&pll3_pfd_508m,
		&pll3_pfd_454m,
		&pll3_usb_otg,
		NULL
	},
};

static struct multiplexer asrc_serial_mux = {
	.clk = &asrc_serial_clk,
	.reg = CDCDR,
	.bp = BP_CDCDR_ASRC_SERIAL_SEL,
	.bm = BM_CDCDR_ASRC_SERIAL_SEL,
	.parents = {
		&pll4_audio,
		&pll3_pfd_508m,
		&pll3_pfd_454m,
		&pll3_usb_otg,
		NULL
	},
};

static struct multiplexer hsi_tx_mux = {
	.clk = &hsi_tx_clk,
	.reg = CDCDR,
	.bp = BP_CDCDR_HSI_TX_SEL,
	.bm = BM_CDCDR_HSI_TX_SEL,
	.parents = {
		&pll3_120m,
		&pll2_pfd_400m,
		NULL
	},
};
1593
/* IPU display-interface pre-divider source mux. */
#define DEF_IPU_DI_PRE_MUX(r, i, d) \
	static struct multiplexer ipu##i##_di##d##_pre_mux = { \
		.clk = &ipu##i##_di##d##_pre_clk, \
		.reg = r, \
		.bp = BP_##r##_IPU##i##_DI##d##_PRE_SEL, \
		.bm = BM_##r##_IPU##i##_DI##d##_PRE_SEL, \
		.parents = { \
			&mmdc_ch0_axi_clk, \
			&pll3_usb_otg, \
			&pll5_video, \
			&pll2_pfd_352m, \
			&pll2_pfd_400m, \
			&pll3_pfd_540m, \
			NULL \
		}, \
	}

DEF_IPU_DI_PRE_MUX(CHSCCDR, 1, 0);
DEF_IPU_DI_PRE_MUX(CHSCCDR, 1, 1);
DEF_IPU_DI_PRE_MUX(CSCDR2, 2, 0);
DEF_IPU_DI_PRE_MUX(CSCDR2, 2, 1);

/* IPU display-interface mux; slots 1/2 are reserved (dummy_clk). */
#define DEF_IPU_DI_MUX(r, i, d) \
	static struct multiplexer ipu##i##_di##d##_mux = { \
		.clk = &ipu##i##_di##d##_clk, \
		.reg = r, \
		.bp = BP_##r##_IPU##i##_DI##d##_SEL, \
		.bm = BM_##r##_IPU##i##_DI##d##_SEL, \
		.parents = { \
			&ipu##i##_di##d##_pre_clk, \
			&dummy_clk, \
			&dummy_clk, \
			&ldb_di0_clk, \
			&ldb_di1_clk, \
			NULL \
		}, \
	}

DEF_IPU_DI_MUX(CHSCCDR, 1, 0);
DEF_IPU_DI_MUX(CHSCCDR, 1, 1);
DEF_IPU_DI_MUX(CSCDR2, 2, 0);
DEF_IPU_DI_MUX(CSCDR2, 2, 1);

/* IPU HSP (core) clock mux. */
#define DEF_IPU_MUX(id) \
	static struct multiplexer ipu##id##_mux = { \
		.clk = &ipu##id##_clk, \
		.reg = CSCDR3, \
		.bp = BP_CSCDR3_IPU##id##_HSP_SEL, \
		.bm = BM_CSCDR3_IPU##id##_HSP_SEL, \
		.parents = { \
			&mmdc_ch0_axi_clk, \
			&pll2_pfd_400m, \
			&pll3_120m, \
			&pll3_pfd_540m, \
			NULL \
		}, \
	}

DEF_IPU_MUX(1);
DEF_IPU_MUX(2);
1654
/* CKO1 debug/observation output mux; slot 4 is reserved (dummy_clk). */
static struct multiplexer cko1_mux = {
	.clk = &cko1_clk,
	.reg = CCOSR,
	.bp = BP_CCOSR_CKO1_SEL,
	.bm = BM_CCOSR_CKO1_SEL,
	.parents = {
		&pll3_usb_otg,
		&pll2_bus,
		&pll1_sys,
		&pll5_video,
		&dummy_clk,
		&axi_clk,
		&enfc_clk,
		&ipu1_di0_clk,
		&ipu1_di1_clk,
		&ipu2_di0_clk,
		&ipu2_di1_clk,
		&ahb_clk,
		&ipg_clk,
		&ipg_perclk,
		&ckil_clk,
		&pll4_audio,
		NULL
	},
};
1680
/* Lookup table scanned by _clk_set_parent(). */
static struct multiplexer *multiplexers[] = {
	&axi_mux,
	&periph_mux,
	&periph_pre_mux,
	&periph_clk2_mux,
	&periph2_mux,
	&periph2_pre_mux,
	&periph2_clk2_mux,
	&gpu2d_axi_mux,
	&gpu3d_axi_mux,
	&gpu3d_core_mux,
	&gpu3d_shader_mux,
	&pcie_axi_mux,
	&vdo_axi_mux,
	&vpu_axi_mux,
	&gpu2d_core_mux,
	&ssi1_mux,
	&ssi2_mux,
	&ssi3_mux,
	&usdhc1_mux,
	&usdhc2_mux,
	&usdhc3_mux,
	&usdhc4_mux,
	&emi_mux,
	&emi_slow_mux,
	&esai_mux,
	&ldb_di0_mux,
	&ldb_di1_mux,
	&enfc_mux,
	&spdif_mux,
	&asrc_serial_mux,
	&hsi_tx_mux,
	&ipu1_di0_pre_mux,
	&ipu1_di0_mux,
	&ipu1_di1_pre_mux,
	&ipu1_di1_mux,
	&ipu2_di0_pre_mux,
	&ipu2_di0_mux,
	&ipu2_di1_pre_mux,
	&ipu2_di1_mux,
	&ipu1_mux,
	&ipu2_mux,
	&cko1_mux,
};
1725
1726static int _clk_set_parent(struct clk *clk, struct clk *parent)
1727{
1728 struct multiplexer *m;
1729 int i, num;
1730 u32 val;
1731
1732 num = ARRAY_SIZE(multiplexers);
1733 for (i = 0; i < num; i++)
1734 if (multiplexers[i]->clk == clk) {
1735 m = multiplexers[i];
1736 break;
1737 }
1738 if (i == num)
1739 return -EINVAL;
1740
1741 i = 0;
1742 while (m->parents[i]) {
1743 if (parent == m->parents[i])
1744 break;
1745 i++;
1746 }
1747 if (!m->parents[i] || m->parents[i] == &dummy_clk)
1748 return -EINVAL;
1749
1750 val = readl_relaxed(m->reg);
1751 val &= ~m->bm;
1752 val |= i << m->bp;
1753 writel_relaxed(val, m->reg);
1754
1755 if (clk == &periph_clk)
1756 return clk_busy_wait(clk);
1757
1758 return 0;
1759}
1760
/* Non-gated clock: has rate and parent ops but no enable/disable. */
#define DEF_NG_CLK(name, p) \
	static struct clk name = { \
		.get_rate = _clk_get_rate, \
		.set_rate = _clk_set_rate, \
		.round_rate = _clk_round_rate, \
		.set_parent = _clk_set_parent, \
		.parent = p, \
	}

DEF_NG_CLK(periph_clk2_clk, &osc_clk);
DEF_NG_CLK(periph_pre_clk, &pll2_bus);
DEF_NG_CLK(periph_clk, &periph_pre_clk);
DEF_NG_CLK(periph2_clk2_clk, &osc_clk);
DEF_NG_CLK(periph2_pre_clk, &pll2_bus);
DEF_NG_CLK(periph2_clk, &periph2_pre_clk);
DEF_NG_CLK(axi_clk, &periph_clk);
DEF_NG_CLK(emi_clk, &axi_clk);
DEF_NG_CLK(arm_clk, &pll1_sw_clk);
DEF_NG_CLK(ahb_clk, &periph_clk);
DEF_NG_CLK(ipg_clk, &ahb_clk);
DEF_NG_CLK(ipg_perclk, &ipg_clk);
DEF_NG_CLK(ipu1_di0_pre_clk, &pll3_pfd_540m);
DEF_NG_CLK(ipu1_di1_pre_clk, &pll3_pfd_540m);
DEF_NG_CLK(ipu2_di0_pre_clk, &pll3_pfd_540m);
DEF_NG_CLK(ipu2_di1_pre_clk, &pll3_pfd_540m);
DEF_NG_CLK(asrc_serial_clk, &pll3_usb_otg);
1787
/*
 * Define a gated clock: 2-bit CCGR gate at (enable_reg, enable_shift),
 * full rate/parent ops, optional secondary clock @s enabled alongside.
 */
#define DEF_CLK(name, er, es, p, s)		\
	static struct clk name = {		\
		.enable_reg	= er,			\
		.enable_shift	= es,			\
		.enable		= _clk_enable,		\
		.disable	= _clk_disable,		\
		.get_rate	= _clk_get_rate,	\
		.set_rate	= _clk_set_rate,	\
		.round_rate	= _clk_round_rate,	\
		.set_parent	= _clk_set_parent,	\
		.parent		= p,			\
		.secondary	= s,			\
	}
1801
/*
 * Same as DEF_CLK but for clocks gated by a single enable bit
 * (e.g. CCOSR CKO1) rather than a 2-bit CCGR field.
 */
#define DEF_CLK_1B(name, er, es, p, s)		\
	static struct clk name = {		\
		.enable_reg	= er,			\
		.enable_shift	= es,			\
		.enable		= _clk_enable_1b,	\
		.disable	= _clk_disable_1b,	\
		.get_rate	= _clk_get_rate,	\
		.set_rate	= _clk_set_rate,	\
		.round_rate	= _clk_round_rate,	\
		.set_parent	= _clk_set_parent,	\
		.parent		= p,			\
		.secondary	= s,			\
	}
1815
1816DEF_CLK(aips_tz1_clk, CCGR0, CG0, &ahb_clk, NULL);
1817DEF_CLK(aips_tz2_clk, CCGR0, CG1, &ahb_clk, NULL);
1818DEF_CLK(apbh_dma_clk, CCGR0, CG2, &ahb_clk, NULL);
1819DEF_CLK(asrc_clk, CCGR0, CG3, &pll4_audio, NULL);
1820DEF_CLK(can1_serial_clk, CCGR0, CG8, &pll3_usb_otg, NULL);
1821DEF_CLK(can1_clk, CCGR0, CG7, &pll3_usb_otg, &can1_serial_clk);
1822DEF_CLK(can2_serial_clk, CCGR0, CG10, &pll3_usb_otg, NULL);
1823DEF_CLK(can2_clk, CCGR0, CG9, &pll3_usb_otg, &can2_serial_clk);
1824DEF_CLK(ecspi1_clk, CCGR1, CG0, &pll3_60m, NULL);
1825DEF_CLK(ecspi2_clk, CCGR1, CG1, &pll3_60m, NULL);
1826DEF_CLK(ecspi3_clk, CCGR1, CG2, &pll3_60m, NULL);
1827DEF_CLK(ecspi4_clk, CCGR1, CG3, &pll3_60m, NULL);
1828DEF_CLK(ecspi5_clk, CCGR1, CG4, &pll3_60m, NULL);
1829DEF_CLK(enet_clk, CCGR1, CG5, &ipg_clk, NULL);
1830DEF_CLK(esai_clk, CCGR1, CG8, &pll3_usb_otg, NULL);
1831DEF_CLK(gpt_serial_clk, CCGR1, CG11, &ipg_perclk, NULL);
1832DEF_CLK(gpt_clk, CCGR1, CG10, &ipg_perclk, &gpt_serial_clk);
1833DEF_CLK(gpu2d_core_clk, CCGR1, CG12, &pll2_pfd_352m, &gpu2d_axi_clk);
1834DEF_CLK(gpu3d_core_clk, CCGR1, CG13, &pll2_pfd_594m, &gpu3d_axi_clk);
1835DEF_CLK(gpu3d_shader_clk, CCGR1, CG13, &pll3_pfd_720m, &gpu3d_axi_clk);
1836DEF_CLK(hdmi_iahb_clk, CCGR2, CG0, &ahb_clk, NULL);
1837DEF_CLK(hdmi_isfr_clk, CCGR2, CG2, &pll3_pfd_540m, &hdmi_iahb_clk);
1838DEF_CLK(i2c1_clk, CCGR2, CG3, &ipg_perclk, NULL);
1839DEF_CLK(i2c2_clk, CCGR2, CG4, &ipg_perclk, NULL);
1840DEF_CLK(i2c3_clk, CCGR2, CG5, &ipg_perclk, NULL);
1841DEF_CLK(iim_clk, CCGR2, CG6, &ipg_clk, NULL);
1842DEF_CLK(enfc_clk, CCGR2, CG7, &pll2_pfd_352m, NULL);
1843DEF_CLK(ipu1_clk, CCGR3, CG0, &mmdc_ch0_axi_clk, NULL);
1844DEF_CLK(ipu1_di0_clk, CCGR3, CG1, &ipu1_di0_pre_clk, NULL);
1845DEF_CLK(ipu1_di1_clk, CCGR3, CG2, &ipu1_di1_pre_clk, NULL);
1846DEF_CLK(ipu2_clk, CCGR3, CG3, &mmdc_ch0_axi_clk, NULL);
1847DEF_CLK(ipu2_di0_clk, CCGR3, CG4, &ipu2_di0_pre_clk, NULL);
1848DEF_CLK(ipu2_di1_clk, CCGR3, CG5, &ipu2_di1_pre_clk, NULL);
1849DEF_CLK(ldb_di0_clk, CCGR3, CG6, &pll3_pfd_540m, NULL);
1850DEF_CLK(ldb_di1_clk, CCGR3, CG7, &pll3_pfd_540m, NULL);
1851DEF_CLK(hsi_tx_clk, CCGR3, CG8, &pll2_pfd_400m, NULL);
1852DEF_CLK(mlb_clk, CCGR3, CG9, &pll6_mlb, NULL);
1853DEF_CLK(mmdc_ch0_ipg_clk, CCGR3, CG12, &ipg_clk, NULL);
1854DEF_CLK(mmdc_ch0_axi_clk, CCGR3, CG10, &periph_clk, &mmdc_ch0_ipg_clk);
1855DEF_CLK(mmdc_ch1_ipg_clk, CCGR3, CG13, &ipg_clk, NULL);
1856DEF_CLK(mmdc_ch1_axi_clk, CCGR3, CG11, &periph2_clk, &mmdc_ch1_ipg_clk);
1857DEF_CLK(openvg_axi_clk, CCGR3, CG13, &axi_clk, NULL);
1858DEF_CLK(pwm1_clk, CCGR4, CG8, &ipg_perclk, NULL);
1859DEF_CLK(pwm2_clk, CCGR4, CG9, &ipg_perclk, NULL);
1860DEF_CLK(pwm3_clk, CCGR4, CG10, &ipg_perclk, NULL);
1861DEF_CLK(pwm4_clk, CCGR4, CG11, &ipg_perclk, NULL);
1862DEF_CLK(gpmi_bch_apb_clk, CCGR4, CG12, &usdhc3_clk, NULL);
1863DEF_CLK(gpmi_bch_clk, CCGR4, CG13, &usdhc4_clk, &gpmi_bch_apb_clk);
1864DEF_CLK(gpmi_apb_clk, CCGR4, CG15, &usdhc3_clk, &gpmi_bch_clk);
1865DEF_CLK(gpmi_io_clk, CCGR4, CG14, &enfc_clk, &gpmi_apb_clk);
1866DEF_CLK(sdma_clk, CCGR5, CG3, &ahb_clk, NULL);
1867DEF_CLK(spba_clk, CCGR5, CG6, &ipg_clk, NULL);
1868DEF_CLK(spdif_clk, CCGR5, CG7, &pll3_usb_otg, &spba_clk);
1869DEF_CLK(ssi1_clk, CCGR5, CG9, &pll3_pfd_508m, NULL);
1870DEF_CLK(ssi2_clk, CCGR5, CG10, &pll3_pfd_508m, NULL);
1871DEF_CLK(ssi3_clk, CCGR5, CG11, &pll3_pfd_508m, NULL);
1872DEF_CLK(uart_serial_clk, CCGR5, CG13, &pll3_usb_otg, NULL);
1873DEF_CLK(uart_clk, CCGR5, CG12, &pll3_80m, &uart_serial_clk);
1874DEF_CLK(usboh3_clk, CCGR6, CG0, &ipg_clk, NULL);
1875DEF_CLK(usdhc1_clk, CCGR6, CG1, &pll2_pfd_400m, NULL);
1876DEF_CLK(usdhc2_clk, CCGR6, CG2, &pll2_pfd_400m, NULL);
1877DEF_CLK(usdhc3_clk, CCGR6, CG3, &pll2_pfd_400m, NULL);
1878DEF_CLK(usdhc4_clk, CCGR6, CG4, &pll2_pfd_400m, NULL);
1879DEF_CLK(emi_slow_clk, CCGR6, CG5, &axi_clk, NULL);
1880DEF_CLK(vdo_axi_clk, CCGR6, CG6, &axi_clk, NULL);
1881DEF_CLK(vpu_clk, CCGR6, CG7, &axi_clk, NULL);
1882DEF_CLK_1B(cko1_clk, CCOSR, BP_CCOSR_CKO1_EN, &pll2_bus, NULL);
1883
/*
 * Enable the PCIe clock: power on the PCIe output of the ENET PLL (PLL8)
 * first, then ungate the clock itself.
 */
static int pcie_clk_enable(struct clk *clk)
{
	u32 val;

	val = readl_relaxed(PLL8_ENET);
	val |= BM_PLL_ENET_EN_PCIE;
	writel_relaxed(val, PLL8_ENET);

	return _clk_enable(clk);
}
1894
1895static void pcie_clk_disable(struct clk *clk)
1896{
1897 u32 val;
1898
1899 _clk_disable(clk);
1900
1901 val = readl_relaxed(PLL8_ENET);
1902 val &= BM_PLL_ENET_EN_PCIE;
1903 writel_relaxed(val, PLL8_ENET);
1904}
1905
1906static struct clk pcie_clk = {
1907 .enable_reg = CCGR4,
1908 .enable_shift = CG0,
1909 .enable = pcie_clk_enable,
1910 .disable = pcie_clk_disable,
1911 .set_parent = _clk_set_parent,
1912 .parent = &axi_clk,
1913 .secondary = &pll8_enet,
1914};
1915
/*
 * Enable the SATA clock: power on the SATA output of the ENET PLL (PLL8)
 * first, then ungate the clock itself.
 */
static int sata_clk_enable(struct clk *clk)
{
	u32 val;

	val = readl_relaxed(PLL8_ENET);
	val |= BM_PLL_ENET_EN_SATA;
	writel_relaxed(val, PLL8_ENET);

	return _clk_enable(clk);
}
1926
1927static void sata_clk_disable(struct clk *clk)
1928{
1929 u32 val;
1930
1931 _clk_disable(clk);
1932
1933 val = readl_relaxed(PLL8_ENET);
1934 val &= BM_PLL_ENET_EN_SATA;
1935 writel_relaxed(val, PLL8_ENET);
1936}
1937
1938static struct clk sata_clk = {
1939 .enable_reg = CCGR5,
1940 .enable_shift = CG2,
1941 .enable = sata_clk_enable,
1942 .disable = sata_clk_disable,
1943 .parent = &ipg_clk,
1944 .secondary = &pll8_enet,
1945};
1946
/* Build a clk_lookup entry: match by device id @d and/or connection id @n. */
#define _REGISTER_CLOCK(d, n, c)	\
	{				\
		.dev_id = d,		\
		.con_id = n,		\
		.clk = &c,		\
	}
1953
1954static struct clk_lookup lookups[] = {
1955 _REGISTER_CLOCK("2020000.uart", NULL, uart_clk),
1956 _REGISTER_CLOCK("21e8000.uart", NULL, uart_clk),
1957 _REGISTER_CLOCK("21ec000.uart", NULL, uart_clk),
1958 _REGISTER_CLOCK("21f0000.uart", NULL, uart_clk),
1959 _REGISTER_CLOCK("21f4000.uart", NULL, uart_clk),
1960 _REGISTER_CLOCK("2188000.enet", NULL, enet_clk),
1961 _REGISTER_CLOCK("2190000.usdhc", NULL, usdhc1_clk),
1962 _REGISTER_CLOCK("2194000.usdhc", NULL, usdhc2_clk),
1963 _REGISTER_CLOCK("2198000.usdhc", NULL, usdhc3_clk),
1964 _REGISTER_CLOCK("219c000.usdhc", NULL, usdhc4_clk),
1965 _REGISTER_CLOCK("21a0000.i2c", NULL, i2c1_clk),
1966 _REGISTER_CLOCK("21a4000.i2c", NULL, i2c2_clk),
1967 _REGISTER_CLOCK("21a8000.i2c", NULL, i2c3_clk),
1968 _REGISTER_CLOCK("2008000.ecspi", NULL, ecspi1_clk),
1969 _REGISTER_CLOCK("200c000.ecspi", NULL, ecspi2_clk),
1970 _REGISTER_CLOCK("2010000.ecspi", NULL, ecspi3_clk),
1971 _REGISTER_CLOCK("2014000.ecspi", NULL, ecspi4_clk),
1972 _REGISTER_CLOCK("2018000.ecspi", NULL, ecspi5_clk),
1973 _REGISTER_CLOCK("20ec000.sdma", NULL, sdma_clk),
1974 _REGISTER_CLOCK("20bc000.wdog", NULL, dummy_clk),
1975 _REGISTER_CLOCK("20c0000.wdog", NULL, dummy_clk),
1976 _REGISTER_CLOCK("smp_twd", NULL, twd_clk),
1977 _REGISTER_CLOCK(NULL, "ckih", ckih_clk),
1978 _REGISTER_CLOCK(NULL, "ckil_clk", ckil_clk),
1979 _REGISTER_CLOCK(NULL, "aips_tz1_clk", aips_tz1_clk),
1980 _REGISTER_CLOCK(NULL, "aips_tz2_clk", aips_tz2_clk),
1981 _REGISTER_CLOCK(NULL, "asrc_clk", asrc_clk),
1982 _REGISTER_CLOCK(NULL, "can2_clk", can2_clk),
1983 _REGISTER_CLOCK(NULL, "hdmi_isfr_clk", hdmi_isfr_clk),
1984 _REGISTER_CLOCK(NULL, "iim_clk", iim_clk),
1985 _REGISTER_CLOCK(NULL, "mlb_clk", mlb_clk),
1986 _REGISTER_CLOCK(NULL, "openvg_axi_clk", openvg_axi_clk),
1987 _REGISTER_CLOCK(NULL, "pwm1_clk", pwm1_clk),
1988 _REGISTER_CLOCK(NULL, "pwm2_clk", pwm2_clk),
1989 _REGISTER_CLOCK(NULL, "pwm3_clk", pwm3_clk),
1990 _REGISTER_CLOCK(NULL, "pwm4_clk", pwm4_clk),
1991 _REGISTER_CLOCK(NULL, "gpmi_io_clk", gpmi_io_clk),
1992 _REGISTER_CLOCK(NULL, "usboh3_clk", usboh3_clk),
1993 _REGISTER_CLOCK(NULL, "sata_clk", sata_clk),
1994 _REGISTER_CLOCK(NULL, "cko1_clk", cko1_clk),
1995};
1996
/*
 * Program the CCM low-power control register (CLPCR) for the requested
 * CPU power mode.  Returns 0 on success, -EINVAL for an unknown mode.
 */
int imx6q_set_lpm(enum mxc_cpu_pwr_mode mode)
{
	u32 val = readl_relaxed(CLPCR);

	val &= ~BM_CLPCR_LPM;
	switch (mode) {
	case WAIT_CLOCKED:
		/* LPM = 0: stay clocked while waiting */
		break;
	case WAIT_UNCLOCKED:
		val |= 0x1 << BP_CLPCR_LPM;	/* WAIT mode */
		break;
	case STOP_POWER_ON:
		val |= 0x2 << BP_CLPCR_LPM;	/* STOP mode, power kept on */
		break;
	case WAIT_UNCLOCKED_POWER_OFF:
		val |= 0x1 << BP_CLPCR_LPM;
		/* no standby voltage / oscillator-off in WAIT */
		val &= ~BM_CLPCR_VSTBY;
		val &= ~BM_CLPCR_SBYOS;
		break;
	case STOP_POWER_OFF:
		val |= 0x2 << BP_CLPCR_LPM;
		val |= 0x3 << BP_CLPCR_STBY_COUNT;
		val |= BM_CLPCR_VSTBY;
		val |= BM_CLPCR_SBYOS;
		break;
	default:
		return -EINVAL;
	}
	writel_relaxed(val, CLPCR);

	return 0;
}
2029
2030static struct map_desc imx6q_clock_desc[] = {
2031 imx_map_entry(MX6Q, CCM, MT_DEVICE),
2032 imx_map_entry(MX6Q, ANATOP, MT_DEVICE),
2033};
2034
/* Create static mappings for the CCM and ANATOP register blocks. */
void __init imx6q_clock_map_io(void)
{
	iotable_init(imx6q_clock_desc, ARRAY_SIZE(imx6q_clock_desc));
}
2039
/*
 * One-shot clock tree setup for i.MX6Q: read fixed-clock rates from the
 * device tree, register all clk_lookup entries, gate off everything not
 * needed for boot, and set initial rates/parents.  Always returns 0.
 */
int __init mx6q_clocks_init(void)
{
	struct device_node *np;
	void __iomem *base;
	int i, irq;

	/* retrieve the frequency of fixed clocks from device tree */
	for_each_compatible_node(np, NULL, "fixed-clock") {
		u32 rate;
		if (of_property_read_u32(np, "clock-frequency", &rate))
			continue;

		if (of_device_is_compatible(np, "fsl,imx-ckil"))
			external_low_reference = rate;
		else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
			external_high_reference = rate;
		else if (of_device_is_compatible(np, "fsl,imx-osc"))
			oscillator_reference = rate;
	}

	for (i = 0; i < ARRAY_SIZE(lookups); i++)
		clkdev_add(&lookups[i]);

	/* only keep necessary clocks on */
	writel_relaxed(0x3 << CG0 | 0x3 << CG1 | 0x3 << CG2, CCGR0);
	writel_relaxed(0x3 << CG8 | 0x3 << CG9 | 0x3 << CG10, CCGR2);
	writel_relaxed(0x3 << CG10 | 0x3 << CG12, CCGR3);
	writel_relaxed(0x3 << CG4 | 0x3 << CG6 | 0x3 << CG7, CCGR4);
	writel_relaxed(0x3 << CG0, CCGR5);
	writel_relaxed(0, CCGR6);
	writel_relaxed(0, CCGR7);

	clk_enable(&uart_clk);
	clk_enable(&mmdc_ch0_axi_clk);

	/* initial rates and parents for display / GPU / audio paths */
	clk_set_rate(&pll4_audio, FREQ_650M);
	clk_set_rate(&pll5_video, FREQ_650M);
	clk_set_parent(&ipu1_di0_clk, &ipu1_di0_pre_clk);
	clk_set_parent(&ipu1_di0_pre_clk, &pll5_video);
	clk_set_parent(&gpu3d_shader_clk, &pll2_pfd_594m);
	clk_set_rate(&gpu3d_shader_clk, FREQ_594M);
	clk_set_parent(&gpu3d_core_clk, &mmdc_ch0_axi_clk);
	clk_set_rate(&gpu3d_core_clk, FREQ_528M);
	clk_set_parent(&asrc_serial_clk, &pll3_usb_otg);
	clk_set_rate(&asrc_serial_clk, 1500000);
	clk_set_rate(&enfc_clk, 11000000);

	/*
	 * Before pinctrl API is available, we have to rely on the pad
	 * configuration set up by bootloader. For usdhc example here,
	 * u-boot sets up the pads for 49.5 MHz case, and we have to lower
	 * the usdhc clock from 198 to 49.5 MHz to match the pad configuration.
	 *
	 * FIXME: This should be removed after pinctrl API is available.
	 * At that time, usdhc driver can call pinctrl API to change pad
	 * configuration dynamically per different usdhc clock settings.
	 */
	clk_set_rate(&usdhc1_clk, 49500000);
	clk_set_rate(&usdhc2_clk, 49500000);
	clk_set_rate(&usdhc3_clk, 49500000);
	clk_set_rate(&usdhc4_clk, 49500000);

	clk_set_parent(&cko1_clk, &ahb_clk);

	/* hook the GPT up as system timer */
	np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpt");
	base = of_iomap(np, 0);
	WARN_ON(!base);
	irq = irq_of_parse_and_map(np, 0);
	mxc_timer_init(&gpt_clk, base, irq);

	return 0;
}
diff --git a/arch/arm/mach-imx/clock-mx51-mx53.c b/arch/arm/mach-imx/clock-mx51-mx53.c
deleted file mode 100644
index 08470504a088..000000000000
--- a/arch/arm/mach-imx/clock-mx51-mx53.c
+++ /dev/null
@@ -1,1675 +0,0 @@
1/*
2 * Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved.
3 * Copyright (C) 2009-2010 Amit Kucheria <amit.kucheria@canonical.com>
4 *
5 * The code contained herein is licensed under the GNU General Public
6 * License. You may obtain a copy of the GNU General Public License
7 * Version 2 or later at the following locations:
8 *
9 * http://www.opensource.org/licenses/gpl-license.html
10 * http://www.gnu.org/copyleft/gpl.html
11 */
12
13#include <linux/mm.h>
14#include <linux/delay.h>
15#include <linux/clk.h>
16#include <linux/io.h>
17#include <linux/clkdev.h>
18#include <linux/of.h>
19
20#include <asm/div64.h>
21
22#include <mach/hardware.h>
23#include <mach/common.h>
24#include <mach/clock.h>
25
26#include "crm-regs-imx5.h"
27
28/* External clock values passed-in by the board code */
29static unsigned long external_high_reference, external_low_reference;
30static unsigned long oscillator_reference, ckih2_reference;
31
32static struct clk osc_clk;
33static struct clk pll1_main_clk;
34static struct clk pll1_sw_clk;
35static struct clk pll2_sw_clk;
36static struct clk pll3_sw_clk;
37static struct clk mx53_pll4_sw_clk;
38static struct clk lp_apm_clk;
39static struct clk periph_apm_clk;
40static struct clk ahb_clk;
41static struct clk ipg_clk;
42static struct clk usboh3_clk;
43static struct clk emi_fast_clk;
44static struct clk ipu_clk;
45static struct clk mipi_hsc1_clk;
46static struct clk esdhc1_clk;
47static struct clk esdhc2_clk;
48static struct clk esdhc3_mx53_clk;
49
50#define MAX_DPLL_WAIT_TRIES 1000 /* 1000 * udelay(1) = 1ms */
51
52/* calculate best pre and post dividers to get the required divider */
/* calculate best pre and post dividers to get the required divider */
/*
 * Split @div into a pre-divider (*pre <= @max_pre) and post-divider
 * (*post <= @max_post) whose product is >= @div and as close to it as
 * possible.  Three cases:
 *  - @div too large to represent: saturate to max_pre * max_post;
 *  - @div needs both stages: search pre for the smallest rounding error;
 *  - @div fits in the pre-divider alone: post = 1.
 */
static void __calc_pre_post_dividers(u32 div, u32 *pre, u32 *post,
	u32 max_pre, u32 max_post)
{
	if (div >= max_pre * max_post) {
		*pre = max_pre;
		*post = max_post;
	} else if (div >= max_pre) {
		u32 min_pre, temp_pre, old_err, err;
		/* smallest pre that can still reach div with post <= max_post */
		min_pre = DIV_ROUND_UP(div, max_post);
		old_err = max_pre;
		for (temp_pre = max_pre; temp_pre >= min_pre; temp_pre--) {
			/* err is how far temp_pre is from dividing div evenly */
			err = div % temp_pre;
			if (err == 0) {
				*pre = temp_pre;
				break;
			}
			err = temp_pre - err;
			if (err < old_err) {
				old_err = err;
				*pre = temp_pre;
			}
		}
		*post = DIV_ROUND_UP(div, *pre);
	} else {
		*pre = div;
		*post = 1;
	}
}
81
/* Write gating mode @mode into this clock's 2-bit CCGR field. */
static void _clk_ccgr_setclk(struct clk *clk, unsigned mode)
{
	u32 reg = __raw_readl(clk->enable_reg);

	reg &= ~(MXC_CCM_CCGRx_CG_MASK << clk->enable_shift);
	reg |= mode << clk->enable_shift;

	__raw_writel(reg, clk->enable_reg);
}
91
/* Gate on in all CPU modes. */
static int _clk_ccgr_enable(struct clk *clk)
{
	_clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_ON);
	return 0;
}
97
/* Gate off in all CPU modes. */
static void _clk_ccgr_disable(struct clk *clk)
{
	_clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_OFF);
}
102
/* Gate on in run mode only (off in WAIT/STOP). */
static int _clk_ccgr_enable_inrun(struct clk *clk)
{
	_clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_IDLE);
	return 0;
}
108
/* Keep the clock on in run mode but gated in WAIT/STOP (same CCGR
 * encoding as enable_inrun; the name reflects the caller's intent). */
static void _clk_ccgr_disable_inwait(struct clk *clk)
{
	_clk_ccgr_setclk(clk, MXC_CCM_CCGRx_MOD_IDLE);
}
113
/*
 * For the 4-to-1 muxed input clock: map @parent to its mux select value
 * 0..3.  Unused inputs may be passed as NULL; an unmatched parent is a
 * programming error and triggers BUG() (the trailing return is
 * unreachable and only silences the compiler).
 */
static inline u32 _get_mux(struct clk *parent, struct clk *m0,
			   struct clk *m1, struct clk *m2, struct clk *m3)
{
	if (parent == m0)
		return 0;
	else if (parent == m1)
		return 1;
	else if (parent == m2)
		return 2;
	else if (parent == m3)
		return 3;
	else
		BUG();

	return -EINVAL;
}
133
/* i.MX51: map a PLL clk to its DPLL register base; BUG() on unknown clk. */
static inline void __iomem *_mx51_get_pll_base(struct clk *pll)
{
	if (pll == &pll1_main_clk)
		return MX51_DPLL1_BASE;
	else if (pll == &pll2_sw_clk)
		return MX51_DPLL2_BASE;
	else if (pll == &pll3_sw_clk)
		return MX51_DPLL3_BASE;
	else
		BUG();

	return NULL;
}
147
/* i.MX53: map a PLL clk to its DPLL register base (adds PLL4 over MX51). */
static inline void __iomem *_mx53_get_pll_base(struct clk *pll)
{
	if (pll == &pll1_main_clk)
		return MX53_DPLL1_BASE;
	else if (pll == &pll2_sw_clk)
		return MX53_DPLL2_BASE;
	else if (pll == &pll3_sw_clk)
		return MX53_DPLL3_BASE;
	else if (pll == &mx53_pll4_sw_clk)
		return MX53_DPLL4_BASE;
	else
		BUG();

	return NULL;
}
163
/* Dispatch to the SoC-specific PLL base lookup at runtime. */
static inline void __iomem *_get_pll_base(struct clk *pll)
{
	if (cpu_is_mx51())
		return _mx51_get_pll_base(pll);
	else
		return _mx53_get_pll_base(pll);
}
171
/*
 * Read back a DPLL's output rate from its hardware registers.
 *
 * Reads either the normal or HFS (high-frequency support) register set,
 * depending on the HFSM bit, then evaluates
 *   rate = ref * (MFI + MFN/(MFD+1)) / (PDF+1)
 * where ref is 2 * parent rate (4x when the DPDCK0_2 doubler is on) and
 * MFN is a signed 27-bit field.
 */
static unsigned long clk_pll_get_rate(struct clk *clk)
{
	long mfi, mfn, mfd, pdf, ref_clk, mfn_abs;
	unsigned long dp_op, dp_mfd, dp_mfn, dp_ctl, pll_hfsm, dbl;
	void __iomem *pllbase;
	s64 temp;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	pllbase = _get_pll_base(clk);

	dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
	pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
	dbl = dp_ctl & MXC_PLL_DP_CTL_DPDCK0_2_EN;

	/* HFSM selects which bank of dividers is currently active */
	if (pll_hfsm == 0) {
		dp_op = __raw_readl(pllbase + MXC_PLL_DP_OP);
		dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_MFD);
		dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_MFN);
	} else {
		dp_op = __raw_readl(pllbase + MXC_PLL_DP_HFS_OP);
		dp_mfd = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFD);
		dp_mfn = __raw_readl(pllbase + MXC_PLL_DP_HFS_MFN);
	}
	pdf = dp_op & MXC_PLL_DP_OP_PDF_MASK;
	mfi = (dp_op & MXC_PLL_DP_OP_MFI_MASK) >> MXC_PLL_DP_OP_MFI_OFFSET;
	/* hardware treats MFI values below 5 as 5 */
	mfi = (mfi <= 5) ? 5 : mfi;
	mfd = dp_mfd & MXC_PLL_DP_MFD_MASK;
	mfn = mfn_abs = dp_mfn & MXC_PLL_DP_MFN_MASK;
	/* Sign extend MFN to 32-bits */
	if (mfn >= 0x04000000) {
		mfn |= 0xFC000000;
		mfn_abs = -mfn;
	}

	ref_clk = 2 * parent_rate;
	if (dbl != 0)
		ref_clk *= 2;

	ref_clk /= (pdf + 1);
	/* fractional part, computed in 64-bit to avoid overflow */
	temp = (u64) ref_clk * mfn_abs;
	do_div(temp, mfd + 1);
	if (mfn < 0)
		temp = -temp;
	temp = (ref_clk * mfi) + temp;

	return temp;
}
221
/*
 * Program a DPLL for @rate: derive PDF/MFI by search, compute MFN against
 * a fixed MFD of 999999, and write whichever register bank (normal or
 * HFS) the HFSM bit says is active.  Returns -EINVAL if no valid MFI
 * exists for the requested rate.
 */
static int _clk_pll_set_rate(struct clk *clk, unsigned long rate)
{
	u32 reg;
	void __iomem *pllbase;

	long mfi, pdf, mfn, mfd = 999999;
	s64 temp64;
	unsigned long quad_parent_rate;
	unsigned long pll_hfsm, dp_ctl;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	pllbase = _get_pll_base(clk);

	quad_parent_rate = 4 * parent_rate;
	/* find the smallest pdf that yields mfi >= 5 (hardware minimum) */
	pdf = mfi = -1;
	while (++pdf < 16 && mfi < 5)
		mfi = rate * (pdf+1) / quad_parent_rate;
	if (mfi > 15)
		return -EINVAL;
	pdf--;

	/* remaining fraction, scaled per MHz of the quadrupled reference */
	temp64 = rate * (pdf+1) - quad_parent_rate * mfi;
	do_div(temp64, quad_parent_rate/1000000);
	mfn = (long)temp64;

	dp_ctl = __raw_readl(pllbase + MXC_PLL_DP_CTL);
	/* use dpdck0_2 */
	__raw_writel(dp_ctl | 0x1000L, pllbase + MXC_PLL_DP_CTL);
	pll_hfsm = dp_ctl & MXC_PLL_DP_CTL_HFSM;
	if (pll_hfsm == 0) {
		reg = mfi << 4 | pdf;
		__raw_writel(reg, pllbase + MXC_PLL_DP_OP);
		__raw_writel(mfd, pllbase + MXC_PLL_DP_MFD);
		__raw_writel(mfn, pllbase + MXC_PLL_DP_MFN);
	} else {
		reg = mfi << 4 | pdf;
		__raw_writel(reg, pllbase + MXC_PLL_DP_HFS_OP);
		__raw_writel(mfd, pllbase + MXC_PLL_DP_HFS_MFD);
		__raw_writel(mfn, pllbase + MXC_PLL_DP_HFS_MFN);
	}

	return 0;
}
267
/*
 * Power up a DPLL and busy-wait (up to ~1ms) for lock.  A no-op if the
 * PLL is already enabled.  Returns -EINVAL if lock is never reached.
 */
static int _clk_pll_enable(struct clk *clk)
{
	u32 reg;
	void __iomem *pllbase;
	int i = 0;

	pllbase = _get_pll_base(clk);
	reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
	if (reg & MXC_PLL_DP_CTL_UPEN)
		return 0;

	reg |= MXC_PLL_DP_CTL_UPEN;
	__raw_writel(reg, pllbase + MXC_PLL_DP_CTL);

	/* Wait for lock */
	do {
		reg = __raw_readl(pllbase + MXC_PLL_DP_CTL);
		if (reg & MXC_PLL_DP_CTL_LRF)
			break;

		udelay(1);
	} while (++i < MAX_DPLL_WAIT_TRIES);

	if (i == MAX_DPLL_WAIT_TRIES) {
		pr_err("MX5: pll locking failed\n");
		return -EINVAL;
	}

	return 0;
}
298
/* Power down a DPLL by clearing its UPEN bit. */
static void _clk_pll_disable(struct clk *clk)
{
	u32 reg;
	void __iomem *pllbase;

	pllbase = _get_pll_base(clk);
	reg = __raw_readl(pllbase + MXC_PLL_DP_CTL) & ~MXC_PLL_DP_CTL_UPEN;
	__raw_writel(reg, pllbase + MXC_PLL_DP_CTL);
}
308
/*
 * Re-parent pll1_sw_clk between pll1_main_clk and a bypass source.
 * The two-register write sequence is ordering-critical (see comment
 * below); do not reorder.  Returns -EINVAL for an unsupported parent.
 */
static int _clk_pll1_sw_set_parent(struct clk *clk, struct clk *parent)
{
	u32 reg, step;

	reg = __raw_readl(MXC_CCM_CCSR);

	/* When switching from pll_main_clk to a bypass clock, first select a
	 * multiplexed clock in 'step_sel', then shift the glitchless mux
	 * 'pll1_sw_clk_sel'.
	 *
	 * When switching back, do it in reverse order
	 */
	if (parent == &pll1_main_clk) {
		/* Switch to pll1_main_clk */
		reg &= ~MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
		__raw_writel(reg, MXC_CCM_CCSR);
		/* step_clk mux switched to lp_apm, to save power. */
		reg = __raw_readl(MXC_CCM_CCSR);
		reg &= ~MXC_CCM_CCSR_STEP_SEL_MASK;
		reg |= (MXC_CCM_CCSR_STEP_SEL_LP_APM <<
				MXC_CCM_CCSR_STEP_SEL_OFFSET);
	} else {
		if (parent == &lp_apm_clk) {
			step = MXC_CCM_CCSR_STEP_SEL_LP_APM;
		} else  if (parent == &pll2_sw_clk) {
			step = MXC_CCM_CCSR_STEP_SEL_PLL2_DIVIDED;
		} else  if (parent == &pll3_sw_clk) {
			step = MXC_CCM_CCSR_STEP_SEL_PLL3_DIVIDED;
		} else
			return -EINVAL;

		reg &= ~MXC_CCM_CCSR_STEP_SEL_MASK;
		reg |= (step << MXC_CCM_CCSR_STEP_SEL_OFFSET);

		__raw_writel(reg, MXC_CCM_CCSR);
		/* Switch to step_clk */
		reg = __raw_readl(MXC_CCM_CCSR);
		reg |= MXC_CCM_CCSR_PLL1_SW_CLK_SEL;
	}
	__raw_writel(reg, MXC_CCM_CCSR);
	return 0;
}
351
/*
 * pll1_sw_clk rate: parent rate divided by the PLL2/PLL3 PODF when
 * bypassed onto one of those, otherwise the parent rate unchanged.
 */
static unsigned long clk_pll1_sw_get_rate(struct clk *clk)
{
	u32 reg, div;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	reg = __raw_readl(MXC_CCM_CCSR);

	if (clk->parent == &pll2_sw_clk) {
		div = ((reg & MXC_CCM_CCSR_PLL2_PODF_MASK) >>
		       MXC_CCM_CCSR_PLL2_PODF_OFFSET) + 1;
	} else if (clk->parent == &pll3_sw_clk) {
		div = ((reg & MXC_CCM_CCSR_PLL3_PODF_MASK) >>
		       MXC_CCM_CCSR_PLL3_PODF_OFFSET) + 1;
	} else
		div = 1;
	return parent_rate / div;
}
371
/* Select PLL2 direct output (parent == pll2_sw_clk) or its bypass. */
static int _clk_pll2_sw_set_parent(struct clk *clk, struct clk *parent)
{
	u32 reg;

	reg = __raw_readl(MXC_CCM_CCSR);

	if (parent == &pll2_sw_clk)
		reg &= ~MXC_CCM_CCSR_PLL2_SW_CLK_SEL;
	else
		reg |= MXC_CCM_CCSR_PLL2_SW_CLK_SEL;

	__raw_writel(reg, MXC_CCM_CCSR);
	return 0;
}
386
/* lp_apm only supports osc_clk as parent; any other choice is -EINVAL. */
static int _clk_lp_apm_set_parent(struct clk *clk, struct clk *parent)
{
	u32 reg;

	if (parent == &osc_clk)
		reg = __raw_readl(MXC_CCM_CCSR) & ~MXC_CCM_CCSR_LP_APM_SEL;
	else
		return -EINVAL;

	__raw_writel(reg, MXC_CCM_CCSR);

	return 0;
}
400
/* CPU rate = parent rate / (ARM_PODF + 1) from CACRR. */
static unsigned long clk_cpu_get_rate(struct clk *clk)
{
	u32 cacrr, div;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);
	cacrr = __raw_readl(MXC_CCM_CACRR);
	div = (cacrr & MXC_CCM_CACRR_ARM_PODF_MASK) + 1;

	return parent_rate / div;
}
412
/*
 * Scale the CPU clock via the ARM post-divider only (the PLL is left
 * alone).  NOTE(review): @rate is not validated against the parent rate
 * or the PODF field width -- callers appear expected to pass an exact
 * integer division of the parent rate; confirm before relying on this.
 */
static int clk_cpu_set_rate(struct clk *clk, unsigned long rate)
{
	u32 reg, cpu_podf;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);
	cpu_podf = parent_rate / rate - 1;
	/* use post divider to change freq */
	reg = __raw_readl(MXC_CCM_CACRR);
	reg &= ~MXC_CCM_CACRR_ARM_PODF_MASK;
	reg |= cpu_podf << MXC_CCM_CACRR_ARM_PODF_OFFSET;
	__raw_writel(reg, MXC_CCM_CACRR);

	return 0;
}
428
/*
 * Re-parent periph_apm among pll1_sw/pll3_sw/lp_apm, then busy-wait for
 * the CCM to acknowledge the mux switch.  Returns -EINVAL on timeout;
 * an invalid parent BUG()s inside _get_mux().
 */
static int _clk_periph_apm_set_parent(struct clk *clk, struct clk *parent)
{
	u32 reg, mux;
	int i = 0;

	mux = _get_mux(parent, &pll1_sw_clk, &pll3_sw_clk, &lp_apm_clk, NULL);

	reg = __raw_readl(MXC_CCM_CBCMR) & ~MXC_CCM_CBCMR_PERIPH_CLK_SEL_MASK;
	reg |= mux << MXC_CCM_CBCMR_PERIPH_CLK_SEL_OFFSET;
	__raw_writel(reg, MXC_CCM_CBCMR);

	/* Wait for lock */
	do {
		reg = __raw_readl(MXC_CCM_CDHIPR);
		if (!(reg & MXC_CCM_CDHIPR_PERIPH_CLK_SEL_BUSY))
			break;

		udelay(1);
	} while (++i < MAX_DPLL_WAIT_TRIES);

	if (i == MAX_DPLL_WAIT_TRIES) {
		pr_err("MX5: Set parent for periph_apm clock failed\n");
		return -EINVAL;
	}

	return 0;
}
456
/* Main bus source: pll2_sw (bit clear) or periph_apm (bit set). */
static int _clk_main_bus_set_parent(struct clk *clk, struct clk *parent)
{
	u32 reg;

	reg = __raw_readl(MXC_CCM_CBCDR);

	if (parent == &pll2_sw_clk)
		reg &= ~MXC_CCM_CBCDR_PERIPH_CLK_SEL;
	else if (parent == &periph_apm_clk)
		reg |= MXC_CCM_CBCDR_PERIPH_CLK_SEL;
	else
		return -EINVAL;

	__raw_writel(reg, MXC_CCM_CBCDR);

	return 0;
}
474
475static struct clk main_bus_clk = {
476 .parent = &pll2_sw_clk,
477 .set_parent = _clk_main_bus_set_parent,
478};
479
/* AHB rate = parent rate / (AHB_PODF + 1) from CBCDR. */
static unsigned long clk_ahb_get_rate(struct clk *clk)
{
	u32 reg, div;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	reg = __raw_readl(MXC_CCM_CBCDR);
	div = ((reg & MXC_CCM_CBCDR_AHB_PODF_MASK) >>
	       MXC_CCM_CBCDR_AHB_PODF_OFFSET) + 1;
	return parent_rate / div;
}
492
493
/*
 * Set the AHB divider (1..8).  Requires @rate to divide the parent rate
 * exactly; busy-waits for the CCM handshake to complete.  Returns
 * -EINVAL for an unreachable rate or on handshake timeout.
 */
static int _clk_ahb_set_rate(struct clk *clk, unsigned long rate)
{
	u32 reg, div;
	unsigned long parent_rate;
	int i = 0;

	parent_rate = clk_get_rate(clk->parent);

	div = parent_rate / rate;
	if (div > 8 || div < 1 || ((parent_rate / div) != rate))
		return -EINVAL;

	reg = __raw_readl(MXC_CCM_CBCDR);
	reg &= ~MXC_CCM_CBCDR_AHB_PODF_MASK;
	reg |= (div - 1) << MXC_CCM_CBCDR_AHB_PODF_OFFSET;
	__raw_writel(reg, MXC_CCM_CBCDR);

	/* Wait for lock */
	do {
		reg = __raw_readl(MXC_CCM_CDHIPR);
		if (!(reg & MXC_CCM_CDHIPR_AHB_PODF_BUSY))
			break;

		udelay(1);
	} while (++i < MAX_DPLL_WAIT_TRIES);

	if (i == MAX_DPLL_WAIT_TRIES) {
		pr_err("MX5: clk_ahb_set_rate failed\n");
		return -EINVAL;
	}

	return 0;
}
527
528static unsigned long _clk_ahb_round_rate(struct clk *clk,
529 unsigned long rate)
530{
531 u32 div;
532 unsigned long parent_rate;
533
534 parent_rate = clk_get_rate(clk->parent);
535
536 div = parent_rate / rate;
537 if (div > 8)
538 div = 8;
539 else if (div == 0)
540 div++;
541 return parent_rate / div;
542}
543
544
/*
 * Enable the MAX (crossbar) clock and clear the CLPCR bypass bit so the
 * MAX handshake takes place on low-power-mode entry.
 */
static int _clk_max_enable(struct clk *clk)
{
	u32 reg;

	_clk_ccgr_enable(clk);

	/* Handshake with MAX when LPM is entered. */
	reg = __raw_readl(MXC_CCM_CLPCR);
	if (cpu_is_mx51())
		reg &= ~MX51_CCM_CLPCR_BYPASS_MAX_LPM_HS;
	else if (cpu_is_mx53())
		reg &= ~MX53_CCM_CLPCR_BYPASS_MAX_LPM_HS;
	__raw_writel(reg, MXC_CCM_CLPCR);

	return 0;
}
561
562static void _clk_max_disable(struct clk *clk)
563{
564 u32 reg;
565
566 _clk_ccgr_disable_inwait(clk);
567
568 /* No Handshake with MAX when LPM is entered as its disabled. */
569 reg = __raw_readl(MXC_CCM_CLPCR);
570 if (cpu_is_mx51())
571 reg |= MX51_CCM_CLPCR_BYPASS_MAX_LPM_HS;
572 else if (cpu_is_mx53())
573 reg &= ~MX53_CCM_CLPCR_BYPASS_MAX_LPM_HS;
574 __raw_writel(reg, MXC_CCM_CLPCR);
575}
576
/* IPG rate = parent (AHB) rate / (IPG_PODF + 1) from CBCDR. */
static unsigned long clk_ipg_get_rate(struct clk *clk)
{
	u32 reg, div;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	reg = __raw_readl(MXC_CCM_CBCDR);
	div = ((reg & MXC_CCM_CBCDR_IPG_PODF_MASK) >>
	       MXC_CCM_CBCDR_IPG_PODF_OFFSET) + 1;

	return parent_rate / div;
}
590
/*
 * IPG peripheral rate: parent rate divided by the three-stage PERCLK
 * divider chain when sourced from main_bus/lp_apm, or the parent rate
 * unchanged when sourced from ipg_clk.  Any other parent BUG()s.
 */
static unsigned long clk_ipg_per_get_rate(struct clk *clk)
{
	u32 reg, prediv1, prediv2, podf;
	unsigned long parent_rate;

	parent_rate = clk_get_rate(clk->parent);

	if (clk->parent == &main_bus_clk || clk->parent == &lp_apm_clk) {
		/* the main_bus_clk is the one before the DVFS engine */
		reg = __raw_readl(MXC_CCM_CBCDR);
		prediv1 = ((reg & MXC_CCM_CBCDR_PERCLK_PRED1_MASK) >>
			   MXC_CCM_CBCDR_PERCLK_PRED1_OFFSET) + 1;
		prediv2 = ((reg & MXC_CCM_CBCDR_PERCLK_PRED2_MASK) >>
			   MXC_CCM_CBCDR_PERCLK_PRED2_OFFSET) + 1;
		podf = ((reg & MXC_CCM_CBCDR_PERCLK_PODF_MASK) >>
			MXC_CCM_CBCDR_PERCLK_PODF_OFFSET) + 1;
		return parent_rate / (prediv1 * prediv2 * podf);
	} else if (clk->parent == &ipg_clk)
		return parent_rate;
	else
		BUG();
}
613
/*
 * PERCLK source select: ipg_clk, lp_apm_clk, or main_bus_clk (the
 * default when both select bits are clear).  Returns -EINVAL otherwise.
 */
static int _clk_ipg_per_set_parent(struct clk *clk, struct clk *parent)
{
	u32 reg;

	reg = __raw_readl(MXC_CCM_CBCMR);

	reg &= ~MXC_CCM_CBCMR_PERCLK_LP_APM_CLK_SEL;
	reg &= ~MXC_CCM_CBCMR_PERCLK_IPG_CLK_SEL;

	if (parent == &ipg_clk)
		reg |= MXC_CCM_CBCMR_PERCLK_IPG_CLK_SEL;
	else if (parent == &lp_apm_clk)
		reg |= MXC_CCM_CBCMR_PERCLK_LP_APM_CLK_SEL;
	else if (parent != &main_bus_clk)
		return -EINVAL;

	__raw_writel(reg, MXC_CCM_CBCMR);

	return 0;
}
634
635#define clk_nfc_set_parent NULL
636
/* NFC rate = parent (EMI slow) rate / (NFC_PODF + 1) from CBCDR. */
static unsigned long clk_nfc_get_rate(struct clk *clk)
{
	unsigned long rate;
	u32 reg, div;

	reg = __raw_readl(MXC_CCM_CBCDR);
	div = ((reg & MXC_CCM_CBCDR_NFC_PODF_MASK) >>
	       MXC_CCM_CBCDR_NFC_PODF_OFFSET) + 1;
	rate = clk_get_rate(clk->parent) / div;
	/* a zero rate here means the parent chain is misconfigured */
	WARN_ON(rate == 0);
	return rate;
}
649
650static unsigned long clk_nfc_round_rate(struct clk *clk,
651 unsigned long rate)
652{
653 u32 div;
654 unsigned long parent_rate = clk_get_rate(clk->parent);
655
656 if (!rate)
657 return -EINVAL;
658
659 div = parent_rate / rate;
660
661 if (parent_rate % rate)
662 div++;
663
664 if (div > 8)
665 return -EINVAL;
666
667 return parent_rate / div;
668
669}
670
/*
 * Program the NFC divider for an exact @rate (divider 1..8), then spin
 * until the CCM reports the divider handshake finished.  Returns
 * -EINVAL for rates the divider cannot hit exactly.
 */
static int clk_nfc_set_rate(struct clk *clk, unsigned long rate)
{
	u32 reg, div;

	div = clk_get_rate(clk->parent) / rate;
	if (div == 0)
		div++;
	if (((clk_get_rate(clk->parent) / div) != rate) || (div > 8))
		return -EINVAL;

	reg = __raw_readl(MXC_CCM_CBCDR);
	reg &= ~MXC_CCM_CBCDR_NFC_PODF_MASK;
	reg |= (div - 1) << MXC_CCM_CBCDR_NFC_PODF_OFFSET;
	__raw_writel(reg, MXC_CCM_CBCDR);

	/* busy-wait, no timeout: mirrors the hardware handshake window */
	while (__raw_readl(MXC_CCM_CDHIPR) &
			MXC_CCM_CDHIPR_NFC_IPG_INT_MEM_PODF_BUSY){
	}

	return 0;
}
692
/* Rate of the external high-frequency reference (set by board code). */
static unsigned long get_high_reference_clock_rate(struct clk *clk)
{
	return external_high_reference;
}
697
/* Rate of the external low-frequency (32 kHz) reference. */
static unsigned long get_low_reference_clock_rate(struct clk *clk)
{
	return external_low_reference;
}
702
/* Rate of the on-board oscillator reference. */
static unsigned long get_oscillator_reference_clock_rate(struct clk *clk)
{
	return oscillator_reference;
}
707
/* Rate of the second external high-frequency reference (CKIH2). */
static unsigned long get_ckih2_reference_clock_rate(struct clk *clk)
{
	return ckih2_reference;
}
712
/* EMI slow rate = parent rate / (EMI_PODF + 1) from CBCDR. */
static unsigned long clk_emi_slow_get_rate(struct clk *clk)
{
	u32 reg, div;

	reg = __raw_readl(MXC_CCM_CBCDR);
	div = ((reg & MXC_CCM_CBCDR_EMI_PODF_MASK) >>
	       MXC_CCM_CBCDR_EMI_PODF_OFFSET) + 1;

	return clk_get_rate(clk->parent) / div;
}
723
/* DDR high-frequency rate = parent rate / (DDR_PODF + 1) from CBCDR. */
static unsigned long _clk_ddr_hf_get_rate(struct clk *clk)
{
	unsigned long rate;
	u32 reg, div;

	reg = __raw_readl(MXC_CCM_CBCDR);
	div = ((reg & MXC_CCM_CBCDR_DDR_PODF_MASK) >>
	       MXC_CCM_CBCDR_DDR_PODF_OFFSET) + 1;
	rate = clk_get_rate(clk->parent) / div;

	return rate;
}
736
737/* External high frequency clock */
738static struct clk ckih_clk = {
739 .get_rate = get_high_reference_clock_rate,
740};
741
742static struct clk ckih2_clk = {
743 .get_rate = get_ckih2_reference_clock_rate,
744};
745
746static struct clk osc_clk = {
747 .get_rate = get_oscillator_reference_clock_rate,
748};
749
750/* External low frequency (32kHz) clock */
751static struct clk ckil_clk = {
752 .get_rate = get_low_reference_clock_rate,
753};
754
755static struct clk pll1_main_clk = {
756 .parent = &osc_clk,
757 .get_rate = clk_pll_get_rate,
758 .enable = _clk_pll_enable,
759 .disable = _clk_pll_disable,
760};
761
762/* Clock tree block diagram (WIP):
763 * CCM: Clock Controller Module
764 *
765 * PLL output -> |
766 * | CCM Switcher -> CCM_CLK_ROOT_GEN ->
767 * PLL bypass -> |
768 *
769 */
770
/* PLL1 SW supplies to ARM core */
static struct clk pll1_sw_clk = {
	.parent = &pll1_main_clk,
	.set_parent = _clk_pll1_sw_set_parent,
	.get_rate = clk_pll1_sw_get_rate,
};

/* PLL2 SW supplies to AXI/AHB/IP buses */
static struct clk pll2_sw_clk = {
	.parent = &osc_clk,
	.get_rate = clk_pll_get_rate,
	.set_rate = _clk_pll_set_rate,
	.set_parent = _clk_pll2_sw_set_parent,
	.enable = _clk_pll_enable,
	.disable = _clk_pll_disable,
};

/* PLL3 SW supplies to serial clocks like USB, SSI, etc. */
static struct clk pll3_sw_clk = {
	.parent = &osc_clk,
	.set_rate = _clk_pll_set_rate,
	.get_rate = clk_pll_get_rate,
	.enable = _clk_pll_enable,
	.disable = _clk_pll_disable,
};

/* PLL4 SW supplies to LVDS Display Bridge(LDB); mx53 only */
static struct clk mx53_pll4_sw_clk = {
	.parent = &osc_clk,
	.set_rate = _clk_pll_set_rate,
	.enable = _clk_pll_enable,
	.disable = _clk_pll_disable,
};

/* Low-power Audio Playback Mode clock */
static struct clk lp_apm_clk = {
	.parent = &osc_clk,
	.set_parent = _clk_lp_apm_set_parent,
};
810
static struct clk periph_apm_clk = {
	.parent = &pll1_sw_clk,
	.set_parent = _clk_periph_apm_set_parent,
};

/* ARM core clock, fed from PLL1 SW */
static struct clk cpu_clk = {
	.parent = &pll1_sw_clk,
	.get_rate = clk_cpu_get_rate,
	.set_rate = clk_cpu_set_rate,
};

static struct clk ahb_clk = {
	.parent = &main_bus_clk,
	.get_rate = clk_ahb_get_rate,
	.set_rate = _clk_ahb_set_rate,
	.round_rate = _clk_ahb_round_rate,
};

/* IIM clock; enabled around the silicon revision read in clocks_init */
static struct clk iim_clk = {
	.parent = &ipg_clk,
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG15_OFFSET,
};

/* Main IP interface clock for access to registers */
static struct clk ipg_clk = {
	.parent = &ahb_clk,
	.get_rate = clk_ipg_get_rate,
};

/* Peripheral clock derived from lp_apm; dividers set in clk_tree_init() */
static struct clk ipg_perclk = {
	.parent = &lp_apm_clk,
	.get_rate = clk_ipg_per_get_rate,
	.set_parent = _clk_ipg_per_set_parent,
};

static struct clk ahb_max_clk = {
	.parent = &ahb_clk,
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
	.enable = _clk_max_enable,
	.disable = _clk_max_disable,
};
854
static struct clk aips_tz1_clk = {
	.parent = &ahb_clk,
	.secondary = &ahb_max_clk,
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
	.enable = _clk_ccgr_enable,
	.disable = _clk_ccgr_disable_inwait,
};

static struct clk aips_tz2_clk = {
	.parent = &ahb_clk,
	.secondary = &ahb_max_clk,
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
	.enable = _clk_ccgr_enable,
	.disable = _clk_ccgr_disable_inwait,
};

/* gpc_dvfs gate (CCGR5/CG12), registered under con_id "gpc_dvfs" */
static struct clk gpc_dvfs_clk = {
	.enable_reg = MXC_CCM_CCGR5,
	.enable_shift = MXC_CCM_CCGRx_CG12_OFFSET,
	.enable = _clk_ccgr_enable,
	.disable = _clk_ccgr_disable,
};

/* 32 kHz input to the GPT */
static struct clk gpt_32k_clk = {
	.id = 0,
	.parent = &ckil_clk,
};

/* Placeholder for devices that need a clk handle but no real gate */
static struct clk dummy_clk = {
	.id = 0,
};

static struct clk emi_slow_clk = {
	.parent = &pll2_sw_clk,
	.enable_reg = MXC_CCM_CCGR5,
	.enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
	.enable = _clk_ccgr_enable,
	.disable = _clk_ccgr_disable_inwait,
	.get_rate = clk_emi_slow_get_rate,
};
897
898static int clk_ipu_enable(struct clk *clk)
899{
900 u32 reg;
901
902 _clk_ccgr_enable(clk);
903
904 /* Enable handshake with IPU when certain clock rates are changed */
905 reg = __raw_readl(MXC_CCM_CCDR);
906 reg &= ~MXC_CCM_CCDR_IPU_HS_MASK;
907 __raw_writel(reg, MXC_CCM_CCDR);
908
909 /* Enable handshake with IPU when LPM is entered */
910 reg = __raw_readl(MXC_CCM_CLPCR);
911 reg &= ~MXC_CCM_CLPCR_BYPASS_IPU_LPM_HS;
912 __raw_writel(reg, MXC_CCM_CLPCR);
913
914 return 0;
915}
916
917static void clk_ipu_disable(struct clk *clk)
918{
919 u32 reg;
920
921 _clk_ccgr_disable(clk);
922
923 /* Disable handshake with IPU whe dividers are changed */
924 reg = __raw_readl(MXC_CCM_CCDR);
925 reg |= MXC_CCM_CCDR_IPU_HS_MASK;
926 __raw_writel(reg, MXC_CCM_CCDR);
927
928 /* Disable handshake with IPU when LPM is entered */
929 reg = __raw_readl(MXC_CCM_CLPCR);
930 reg |= MXC_CCM_CLPCR_BYPASS_IPU_LPM_HS;
931 __raw_writel(reg, MXC_CCM_CLPCR);
932}
933
static struct clk ahbmux1_clk = {
	.parent = &ahb_clk,
	.secondary = &ahb_max_clk,
	.enable_reg = MXC_CCM_CCGR0,
	.enable_shift = MXC_CCM_CCGRx_CG8_OFFSET,
	.enable = _clk_ccgr_enable,
	.disable = _clk_ccgr_disable_inwait,
};

/* Secondary clock chain for the IPU: fast EMI path plus AHB mux gate */
static struct clk ipu_sec_clk = {
	.parent = &emi_fast_clk,
	.secondary = &ahbmux1_clk,
};

/* DDR high-frequency clock, divided down from PLL1 SW (see get_rate) */
static struct clk ddr_hf_clk = {
	.parent = &pll1_sw_clk,
	.get_rate = _clk_ddr_hf_get_rate,
};

static struct clk ddr_clk = {
	.parent = &ddr_hf_clk,
};
956
957/* clock definitions for MIPI HSC unit which has been removed
958 * from documentation, but not from hardware
959 */
960static int _clk_hsc_enable(struct clk *clk)
961{
962 u32 reg;
963
964 _clk_ccgr_enable(clk);
965 /* Handshake with IPU when certain clock rates are changed. */
966 reg = __raw_readl(MXC_CCM_CCDR);
967 reg &= ~MXC_CCM_CCDR_HSC_HS_MASK;
968 __raw_writel(reg, MXC_CCM_CCDR);
969
970 reg = __raw_readl(MXC_CCM_CLPCR);
971 reg &= ~MXC_CCM_CLPCR_BYPASS_HSC_LPM_HS;
972 __raw_writel(reg, MXC_CCM_CLPCR);
973
974 return 0;
975}
976
977static void _clk_hsc_disable(struct clk *clk)
978{
979 u32 reg;
980
981 _clk_ccgr_disable(clk);
982 /* No handshake with HSC as its not enabled. */
983 reg = __raw_readl(MXC_CCM_CCDR);
984 reg |= MXC_CCM_CCDR_HSC_HS_MASK;
985 __raw_writel(reg, MXC_CCM_CCDR);
986
987 reg = __raw_readl(MXC_CCM_CLPCR);
988 reg |= MXC_CCM_CLPCR_BYPASS_HSC_LPM_HS;
989 __raw_writel(reg, MXC_CCM_CLPCR);
990}
991
/* MIPI HSP gate, parented on the IPU clock; uses the HSC handshake ops */
static struct clk mipi_hsp_clk = {
	.parent = &ipu_clk,
	.enable_reg = MXC_CCM_CCGR4,
	.enable_shift = MXC_CCM_CCGRx_CG6_OFFSET,
	.enable = _clk_hsc_enable,
	.disable = _clk_hsc_disable,
	.secondary = &mipi_hsc1_clk,
};
1000
/*
 * DEFINE_CLOCK_CCGR - declare a CCGR-gated clock whose rate/parent ops
 * are the functions named <pfx>_get_rate/_set_rate/_round_rate/_set_parent.
 */
#define DEFINE_CLOCK_CCGR(name, i, er, es, pfx, p, s) \
	static struct clk name = { \
		.id = i, \
		.enable_reg = er, \
		.enable_shift = es, \
		.get_rate = pfx##_get_rate, \
		.set_rate = pfx##_set_rate, \
		.round_rate = pfx##_round_rate, \
		.set_parent = pfx##_set_parent, \
		.enable = _clk_ccgr_enable, \
		.disable = _clk_ccgr_disable, \
		.parent = p, \
		.secondary = s, \
	}
1015
/*
 * DEFINE_CLOCK_MAX - like DEFINE_CLOCK_CCGR but gated through the MAX
 * enable/disable helpers and without a round_rate op.
 */
#define DEFINE_CLOCK_MAX(name, i, er, es, pfx, p, s) \
	static struct clk name = { \
		.id = i, \
		.enable_reg = er, \
		.enable_shift = es, \
		.get_rate = pfx##_get_rate, \
		.set_rate = pfx##_set_rate, \
		.set_parent = pfx##_set_parent, \
		.enable = _clk_max_enable, \
		.disable = _clk_max_disable, \
		.parent = p, \
		.secondary = s, \
	}
1029
/*
 * CLK_GET_RATE - generate clk_<name>_get_rate(): parent rate divided by
 * the pred/podf dividers read from CSCDR<nr> (field values + 1).
 */
#define CLK_GET_RATE(name, nr, bitsname) \
static unsigned long clk_##name##_get_rate(struct clk *clk) \
{ \
	u32 reg, pred, podf; \
	\
	reg = __raw_readl(MXC_CCM_CSCDR##nr); \
	pred = (reg & MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_MASK) \
		>> MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_OFFSET; \
	podf = (reg & MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_MASK) \
		>> MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_OFFSET; \
	\
	return DIV_ROUND_CLOSEST(clk_get_rate(clk->parent), \
			(pred + 1) * (podf + 1)); \
}
1044
/*
 * CLK_SET_PARENT - generate clk_<name>_set_parent(): program the mux in
 * CSCMR<nr> selecting between PLL1/PLL2/PLL3 SW and lp_apm.
 */
#define CLK_SET_PARENT(name, nr, bitsname) \
static int clk_##name##_set_parent(struct clk *clk, struct clk *parent) \
{ \
	u32 reg, mux; \
	\
	mux = _get_mux(parent, &pll1_sw_clk, &pll2_sw_clk, \
			&pll3_sw_clk, &lp_apm_clk); \
	reg = __raw_readl(MXC_CCM_CSCMR##nr) & \
		~MXC_CCM_CSCMR##nr##_##bitsname##_CLK_SEL_MASK; \
	reg |= mux << MXC_CCM_CSCMR##nr##_##bitsname##_CLK_SEL_OFFSET; \
	__raw_writel(reg, MXC_CCM_CSCMR##nr); \
	\
	return 0; \
}
1059
/*
 * CLK_SET_RATE - generate clk_<name>_set_rate(): program the CSCDR<nr>
 * pre/post dividers so that parent_rate / (pred * podf) == rate.  Only
 * exact divisions are accepted; anything else returns -EINVAL.
 *
 * Guard against rate == 0 (division by zero computing div) and
 * rate > parent_rate (div == 0, division by zero in the exactness
 * check) before dividing.
 */
#define CLK_SET_RATE(name, nr, bitsname) \
static int clk_##name##_set_rate(struct clk *clk, unsigned long rate) \
{ \
	u32 reg, div, parent_rate; \
	u32 pre = 0, post = 0; \
	\
	parent_rate = clk_get_rate(clk->parent); \
	if (rate == 0 || rate > parent_rate) \
		return -EINVAL; \
	\
	div = parent_rate / rate; \
	if ((parent_rate / div) != rate) \
		return -EINVAL; \
	\
	__calc_pre_post_dividers(div, &pre, &post, \
		(MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_MASK >> \
		MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_OFFSET) + 1, \
		(MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_MASK >> \
		MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_OFFSET) + 1);\
	\
	/* Program the pre/post clock dividers */ \
	reg = __raw_readl(MXC_CCM_CSCDR##nr) & \
		~(MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_MASK \
		| MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_MASK); \
	reg |= (post - 1) << \
		MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PODF_OFFSET; \
	reg |= (pre - 1) << \
		MXC_CCM_CSCDR##nr##_##bitsname##_CLK_PRED_OFFSET; \
	__raw_writel(reg, MXC_CCM_CSCDR##nr); \
	\
	return 0; \
}
1090
/* UART */
CLK_GET_RATE(uart, 1, UART)
CLK_SET_PARENT(uart, 1, UART)

/* Common root for all UART baud clocks */
static struct clk uart_root_clk = {
	.parent = &pll2_sw_clk,
	.get_rate = clk_uart_get_rate,
	.set_parent = clk_uart_set_parent,
};
1100
/* USBOH3 */
CLK_GET_RATE(usboh3, 1, USBOH3)
CLK_SET_PARENT(usboh3, 1, USBOH3)

/* USB OTG/host core clock, gated in CCGR2 */
static struct clk usboh3_clk = {
	.parent = &pll2_sw_clk,
	.get_rate = clk_usboh3_get_rate,
	.set_parent = clk_usboh3_set_parent,
	.enable = _clk_ccgr_enable,
	.disable = _clk_ccgr_disable,
	.enable_reg = MXC_CCM_CCGR2,
	.enable_shift = MXC_CCM_CCGRx_CG14_OFFSET,
};

/* USB AHB bus clock */
static struct clk usb_ahb_clk = {
	.parent = &ipg_clk,
	.enable = _clk_ccgr_enable,
	.disable = _clk_ccgr_disable,
	.enable_reg = MXC_CCM_CCGR2,
	.enable_shift = MXC_CCM_CCGRx_CG13_OFFSET,
};
1122
1123static int clk_usb_phy1_set_parent(struct clk *clk, struct clk *parent)
1124{
1125 u32 reg;
1126
1127 reg = __raw_readl(MXC_CCM_CSCMR1) & ~MXC_CCM_CSCMR1_USB_PHY_CLK_SEL;
1128
1129 if (parent == &pll3_sw_clk)
1130 reg |= 1 << MXC_CCM_CSCMR1_USB_PHY_CLK_SEL_OFFSET;
1131
1132 __raw_writel(reg, MXC_CCM_CSCMR1);
1133
1134 return 0;
1135}
1136
/* USB PHY1 clock; reparented to OSC (24 MHz) in mx51_clocks_init() */
static struct clk usb_phy1_clk = {
	.parent = &pll3_sw_clk,
	.set_parent = clk_usb_phy1_set_parent,
	.enable = _clk_ccgr_enable,
	.enable_reg = MXC_CCM_CCGR2,
	.enable_shift = MXC_CCM_CCGRx_CG0_OFFSET,
	.disable = _clk_ccgr_disable,
};

/* eCSPI */
CLK_GET_RATE(ecspi, 2, CSPI)
CLK_SET_PARENT(ecspi, 1, CSPI)

/* Common root for the eCSPI bit clocks */
static struct clk ecspi_main_clk = {
	.parent = &pll3_sw_clk,
	.get_rate = clk_ecspi_get_rate,
	.set_parent = clk_ecspi_set_parent,
};
1155
/* eSDHC: per-controller rate/mux helpers generated from the macros above */
CLK_GET_RATE(esdhc1, 1, ESDHC1_MSHC1)
CLK_SET_PARENT(esdhc1, 1, ESDHC1_MSHC1)
CLK_SET_RATE(esdhc1, 1, ESDHC1_MSHC1)

/* mx51 specific */
CLK_GET_RATE(esdhc2, 1, ESDHC2_MSHC2)
CLK_SET_PARENT(esdhc2, 1, ESDHC2_MSHC2)
CLK_SET_RATE(esdhc2, 1, ESDHC2_MSHC2)
1165
1166static int clk_esdhc3_set_parent(struct clk *clk, struct clk *parent)
1167{
1168 u32 reg;
1169
1170 reg = __raw_readl(MXC_CCM_CSCMR1);
1171 if (parent == &esdhc1_clk)
1172 reg &= ~MXC_CCM_CSCMR1_ESDHC3_CLK_SEL;
1173 else if (parent == &esdhc2_clk)
1174 reg |= MXC_CCM_CSCMR1_ESDHC3_CLK_SEL;
1175 else
1176 return -EINVAL;
1177 __raw_writel(reg, MXC_CCM_CSCMR1);
1178
1179 return 0;
1180}
1181
1182static int clk_esdhc4_set_parent(struct clk *clk, struct clk *parent)
1183{
1184 u32 reg;
1185
1186 reg = __raw_readl(MXC_CCM_CSCMR1);
1187 if (parent == &esdhc1_clk)
1188 reg &= ~MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
1189 else if (parent == &esdhc2_clk)
1190 reg |= MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
1191 else
1192 return -EINVAL;
1193 __raw_writel(reg, MXC_CCM_CSCMR1);
1194
1195 return 0;
1196}
1197
1198/* mx53 specific */
1199static int clk_esdhc2_mx53_set_parent(struct clk *clk, struct clk *parent)
1200{
1201 u32 reg;
1202
1203 reg = __raw_readl(MXC_CCM_CSCMR1);
1204 if (parent == &esdhc1_clk)
1205 reg &= ~MXC_CCM_CSCMR1_ESDHC2_MSHC2_MX53_CLK_SEL;
1206 else if (parent == &esdhc3_mx53_clk)
1207 reg |= MXC_CCM_CSCMR1_ESDHC2_MSHC2_MX53_CLK_SEL;
1208 else
1209 return -EINVAL;
1210 __raw_writel(reg, MXC_CCM_CSCMR1);
1211
1212 return 0;
1213}
1214
/* eSDHC3 (mx53) rate/mux/divider helpers */
CLK_GET_RATE(esdhc3_mx53, 1, ESDHC3_MX53)
CLK_SET_PARENT(esdhc3_mx53, 1, ESDHC3_MX53)
CLK_SET_RATE(esdhc3_mx53, 1, ESDHC3_MX53)
1218
1219static int clk_esdhc4_mx53_set_parent(struct clk *clk, struct clk *parent)
1220{
1221 u32 reg;
1222
1223 reg = __raw_readl(MXC_CCM_CSCMR1);
1224 if (parent == &esdhc1_clk)
1225 reg &= ~MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
1226 else if (parent == &esdhc3_mx53_clk)
1227 reg |= MXC_CCM_CSCMR1_ESDHC4_CLK_SEL;
1228 else
1229 return -EINVAL;
1230 __raw_writel(reg, MXC_CCM_CSCMR1);
1231
1232 return 0;
1233}
1234
/*
 * DEFINE_CLOCK_FULL - declare a gated clock with explicit rate and
 * enable/disable ops.  DEFINE_CLOCK is the common CCGR-gated variant.
 */
#define DEFINE_CLOCK_FULL(name, i, er, es, gr, sr, e, d, p, s) \
	static struct clk name = { \
		.id = i, \
		.enable_reg = er, \
		.enable_shift = es, \
		.get_rate = gr, \
		.set_rate = sr, \
		.enable = e, \
		.disable = d, \
		.parent = p, \
		.secondary = s, \
	}

#define DEFINE_CLOCK(name, i, er, es, gr, sr, p, s) \
	DEFINE_CLOCK_FULL(name, i, er, es, gr, sr, _clk_ccgr_enable, _clk_ccgr_disable, p, s)
1250
/* Shared peripheral bus arbiter */
DEFINE_CLOCK(spba_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG0_OFFSET,
	NULL, NULL, &ipg_clk, NULL);

/* UART: per-port ipg gates, plus baud clocks fed from uart_root_clk */
DEFINE_CLOCK(uart1_ipg_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG3_OFFSET,
	NULL, NULL, &ipg_clk, &aips_tz1_clk);
DEFINE_CLOCK(uart2_ipg_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG5_OFFSET,
	NULL, NULL, &ipg_clk, &aips_tz1_clk);
DEFINE_CLOCK(uart3_ipg_clk, 2, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG7_OFFSET,
	NULL, NULL, &ipg_clk, &spba_clk);
DEFINE_CLOCK(uart4_ipg_clk, 3, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG4_OFFSET,
	NULL, NULL, &ipg_clk, &spba_clk);
DEFINE_CLOCK(uart5_ipg_clk, 4, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG6_OFFSET,
	NULL, NULL, &ipg_clk, &spba_clk);
DEFINE_CLOCK(uart1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG4_OFFSET,
	NULL, NULL, &uart_root_clk, &uart1_ipg_clk);
DEFINE_CLOCK(uart2_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG6_OFFSET,
	NULL, NULL, &uart_root_clk, &uart2_ipg_clk);
DEFINE_CLOCK(uart3_clk, 2, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG8_OFFSET,
	NULL, NULL, &uart_root_clk, &uart3_ipg_clk);
DEFINE_CLOCK(uart4_clk, 3, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG5_OFFSET,
	NULL, NULL, &uart_root_clk, &uart4_ipg_clk);
DEFINE_CLOCK(uart5_clk, 4, MXC_CCM_CCGR7, MXC_CCM_CCGRx_CG7_OFFSET,
	NULL, NULL, &uart_root_clk, &uart5_ipg_clk);
1276
/* GPT */
DEFINE_CLOCK(gpt_ipg_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG10_OFFSET,
	NULL, NULL, &ipg_clk, NULL);
DEFINE_CLOCK(gpt_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG9_OFFSET,
	NULL, NULL, &ipg_clk, &gpt_ipg_clk);

/* PWM */
DEFINE_CLOCK(pwm1_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG6_OFFSET,
	NULL, NULL, &ipg_perclk, NULL);
DEFINE_CLOCK(pwm2_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG8_OFFSET,
	NULL, NULL, &ipg_perclk, NULL);

/* I2C */
DEFINE_CLOCK(i2c1_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG9_OFFSET,
	NULL, NULL, &ipg_perclk, NULL);
DEFINE_CLOCK(i2c2_clk, 1, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG10_OFFSET,
	NULL, NULL, &ipg_perclk, NULL);
DEFINE_CLOCK(hsi2c_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG11_OFFSET,
	NULL, NULL, &ipg_clk, NULL);
DEFINE_CLOCK(i2c3_mx53_clk, 0, MXC_CCM_CCGR1, MXC_CCM_CCGRx_CG11_OFFSET,
	NULL, NULL, &ipg_perclk, NULL);
1297
/* FEC */
DEFINE_CLOCK(fec_clk, 0, MXC_CCM_CCGR2, MXC_CCM_CCGRx_CG12_OFFSET,
	NULL, NULL, &ipg_clk, NULL);

/* NFC, off the slow EMI clock with its own divider ops (clk_nfc_*) */
DEFINE_CLOCK_CCGR(nfc_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG10_OFFSET,
	clk_nfc, &emi_slow_clk, NULL);

/* SSI: per-port ipg gates plus serial clocks off PLL3 SW */
DEFINE_CLOCK(ssi1_ipg_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG8_OFFSET,
	NULL, NULL, &ipg_clk, NULL);
DEFINE_CLOCK(ssi1_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG9_OFFSET,
	NULL, NULL, &pll3_sw_clk, &ssi1_ipg_clk);
DEFINE_CLOCK(ssi2_ipg_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG10_OFFSET,
	NULL, NULL, &ipg_clk, NULL);
DEFINE_CLOCK(ssi2_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG11_OFFSET,
	NULL, NULL, &pll3_sw_clk, &ssi2_ipg_clk);
DEFINE_CLOCK(ssi3_ipg_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG12_OFFSET,
	NULL, NULL, &ipg_clk, NULL);
DEFINE_CLOCK(ssi3_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG13_OFFSET,
	NULL, NULL, &pll3_sw_clk, &ssi3_ipg_clk);
1319
/* eCSPI */
DEFINE_CLOCK_FULL(ecspi1_ipg_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG9_OFFSET,
	NULL, NULL, _clk_ccgr_enable_inrun, _clk_ccgr_disable,
	&ipg_clk, &spba_clk);
DEFINE_CLOCK(ecspi1_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG10_OFFSET,
	NULL, NULL, &ecspi_main_clk, &ecspi1_ipg_clk);
DEFINE_CLOCK_FULL(ecspi2_ipg_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG11_OFFSET,
	NULL, NULL, _clk_ccgr_enable_inrun, _clk_ccgr_disable,
	&ipg_clk, &aips_tz2_clk);
DEFINE_CLOCK(ecspi2_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG12_OFFSET,
	NULL, NULL, &ecspi_main_clk, &ecspi2_ipg_clk);

/* CSPI */
DEFINE_CLOCK(cspi_ipg_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG9_OFFSET,
	NULL, NULL, &ipg_clk, &aips_tz2_clk);
DEFINE_CLOCK(cspi_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG13_OFFSET,
	NULL, NULL, &ipg_clk, &cspi_ipg_clk);

/* SDMA */
DEFINE_CLOCK(sdma_clk, 1, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG15_OFFSET,
	NULL, NULL, &ahb_clk, NULL);
1341
/* eSDHC: per-controller ipg gates; root clocks use the clk_esdhc* ops */
DEFINE_CLOCK_FULL(esdhc1_ipg_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG0_OFFSET,
	NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
DEFINE_CLOCK_MAX(esdhc1_clk, 0, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG1_OFFSET,
	clk_esdhc1, &pll2_sw_clk, &esdhc1_ipg_clk);
DEFINE_CLOCK_FULL(esdhc2_ipg_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG2_OFFSET,
	NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
DEFINE_CLOCK_FULL(esdhc3_ipg_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG4_OFFSET,
	NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);
DEFINE_CLOCK_FULL(esdhc4_ipg_clk, 3, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG6_OFFSET,
	NULL, NULL, _clk_max_enable, _clk_max_disable, &ipg_clk, NULL);

/* mx51 specific */
DEFINE_CLOCK_MAX(esdhc2_clk, 1, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG3_OFFSET,
	clk_esdhc2, &pll2_sw_clk, &esdhc2_ipg_clk);
1357
/* eSDHC3 (mx51): sourced from the eSDHC1 or eSDHC2 root via a mux */
static struct clk esdhc3_clk = {
	.id = 2,
	.parent = &esdhc1_clk,
	.set_parent = clk_esdhc3_set_parent,
	.enable_reg = MXC_CCM_CCGR3,
	.enable_shift = MXC_CCM_CCGRx_CG5_OFFSET,
	.enable = _clk_max_enable,
	.disable = _clk_max_disable,
	.secondary = &esdhc3_ipg_clk,
};

/* eSDHC4 (mx51): same mux arrangement as eSDHC3 */
static struct clk esdhc4_clk = {
	.id = 3,
	.parent = &esdhc1_clk,
	.set_parent = clk_esdhc4_set_parent,
	.enable_reg = MXC_CCM_CCGR3,
	.enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
	.enable = _clk_max_enable,
	.disable = _clk_max_disable,
	.secondary = &esdhc4_ipg_clk,
};
1378
/* mx53 specific */
/* eSDHC2 (mx53): sourced from the eSDHC1 or eSDHC3 root via a mux */
static struct clk esdhc2_mx53_clk = {
	.id = 2,
	.parent = &esdhc1_clk,
	.set_parent = clk_esdhc2_mx53_set_parent,
	.enable_reg = MXC_CCM_CCGR3,
	.enable_shift = MXC_CCM_CCGRx_CG3_OFFSET,
	.enable = _clk_max_enable,
	.disable = _clk_max_disable,
	.secondary = &esdhc3_ipg_clk,
};

DEFINE_CLOCK_MAX(esdhc3_mx53_clk, 2, MXC_CCM_CCGR3, MXC_CCM_CCGRx_CG5_OFFSET,
	clk_esdhc3_mx53, &pll2_sw_clk, &esdhc2_ipg_clk);

/* eSDHC4 (mx53): same mux arrangement as eSDHC2 (mx53) */
static struct clk esdhc4_mx53_clk = {
	.id = 3,
	.parent = &esdhc1_clk,
	.set_parent = clk_esdhc4_mx53_set_parent,
	.enable_reg = MXC_CCM_CCGR3,
	.enable_shift = MXC_CCM_CCGRx_CG7_OFFSET,
	.enable = _clk_max_enable,
	.disable = _clk_max_disable,
	.secondary = &esdhc4_ipg_clk,
};
1404
/* SATA controller clock (mx53) */
static struct clk sata_clk = {
	.parent = &ipg_clk,
	.enable = _clk_max_enable,
	.enable_reg = MXC_CCM_CCGR4,
	.enable_shift = MXC_CCM_CCGRx_CG1_OFFSET,
	.disable = _clk_max_disable,
};

/* AHCI PHY and DMA reference handles; no gates of their own */
static struct clk ahci_phy_clk = {
	.parent = &usb_phy1_clk,
};

static struct clk ahci_dma_clk = {
	.parent = &ahb_clk,
};

/*
 * MIPI gate chain.  NOTE(review): .parent is NULL / the previous mipi
 * clock while pll2_sw_clk sits in the .secondary slot — looks unusual;
 * verify against the CCM documentation before changing.
 */
DEFINE_CLOCK(mipi_esc_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG5_OFFSET, NULL, NULL, NULL, &pll2_sw_clk);
DEFINE_CLOCK(mipi_hsc2_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG4_OFFSET, NULL, NULL, &mipi_esc_clk, &pll2_sw_clk);
DEFINE_CLOCK(mipi_hsc1_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG3_OFFSET, NULL, NULL, &mipi_hsc2_clk, &pll2_sw_clk);
1424
/* IPU: uses the handshake-aware enable/disable helpers defined above */
DEFINE_CLOCK_FULL(ipu_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG5_OFFSET,
	NULL, NULL, clk_ipu_enable, clk_ipu_disable, &ahb_clk, &ipu_sec_clk);

DEFINE_CLOCK_FULL(emi_fast_clk, 0, MXC_CCM_CCGR5, MXC_CCM_CCGRx_CG7_OFFSET,
	NULL, NULL, _clk_ccgr_enable, _clk_ccgr_disable_inwait,
	&ddr_clk, NULL);

/* IPU display interfaces */
DEFINE_CLOCK(ipu_di0_clk, 0, MXC_CCM_CCGR6, MXC_CCM_CCGRx_CG5_OFFSET,
	NULL, NULL, &pll3_sw_clk, NULL);
DEFINE_CLOCK(ipu_di1_clk, 0, MXC_CCM_CCGR6, MXC_CCM_CCGRx_CG6_OFFSET,
	NULL, NULL, &pll3_sw_clk, NULL);

/* PATA */
DEFINE_CLOCK(pata_clk, 0, MXC_CCM_CCGR4, MXC_CCM_CCGRx_CG0_OFFSET,
	NULL, NULL, &ipg_clk, &spba_clk);
1441
/* Build one clk_lookup entry binding (dev_id, con_id) to a clock */
#define _REGISTER_CLOCK(d, n, c) \
	{ \
		.dev_id = d, \
		.con_id = n, \
		.clk = &c, \
	},
1448
/* clkdev bindings for i.MX51 peripherals */
static struct clk_lookup mx51_lookups[] = {
	/* i.mx51 has the i.mx21 type uart */
	_REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
	_REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
	_REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
	_REGISTER_CLOCK(NULL, "gpt", gpt_clk)
	/* i.mx51 has the i.mx27 type fec */
	_REGISTER_CLOCK("imx27-fec.0", NULL, fec_clk)
	_REGISTER_CLOCK("mxc_pwm.0", "pwm", pwm1_clk)
	_REGISTER_CLOCK("mxc_pwm.1", "pwm", pwm2_clk)
	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
	_REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
	_REGISTER_CLOCK("imx-i2c.2", NULL, hsi2c_clk)
	_REGISTER_CLOCK("mxc-ehci.0", "usb", usboh3_clk)
	_REGISTER_CLOCK("mxc-ehci.0", "usb_ahb", usb_ahb_clk)
	_REGISTER_CLOCK("mxc-ehci.0", "usb_phy1", usb_phy1_clk)
	_REGISTER_CLOCK("mxc-ehci.1", "usb", usboh3_clk)
	_REGISTER_CLOCK("mxc-ehci.1", "usb_ahb", usb_ahb_clk)
	_REGISTER_CLOCK("mxc-ehci.2", "usb", usboh3_clk)
	_REGISTER_CLOCK("mxc-ehci.2", "usb_ahb", usb_ahb_clk)
	_REGISTER_CLOCK("fsl-usb2-udc", "usb", usboh3_clk)
	_REGISTER_CLOCK("fsl-usb2-udc", "usb_ahb", ahb_clk)
	_REGISTER_CLOCK("imx-keypad", NULL, dummy_clk)
	_REGISTER_CLOCK("mxc_nand", NULL, nfc_clk)
	_REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
	_REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
	_REGISTER_CLOCK("imx-ssi.2", NULL, ssi3_clk)
	/* i.mx51 has the i.mx35 type sdma */
	_REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
	_REGISTER_CLOCK(NULL, "ckih", ckih_clk)
	_REGISTER_CLOCK(NULL, "ckih2", ckih2_clk)
	_REGISTER_CLOCK(NULL, "gpt_32k", gpt_32k_clk)
	_REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
	_REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
	/* i.mx51 has the i.mx35 type cspi */
	_REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
	_REGISTER_CLOCK("sdhci-esdhc-imx51.0", NULL, esdhc1_clk)
	_REGISTER_CLOCK("sdhci-esdhc-imx51.1", NULL, esdhc2_clk)
	_REGISTER_CLOCK("sdhci-esdhc-imx51.2", NULL, esdhc3_clk)
	_REGISTER_CLOCK("sdhci-esdhc-imx51.3", NULL, esdhc4_clk)
	_REGISTER_CLOCK(NULL, "cpu_clk", cpu_clk)
	_REGISTER_CLOCK(NULL, "iim_clk", iim_clk)
	_REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk)
	_REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk)
	_REGISTER_CLOCK(NULL, "mipi_hsp", mipi_hsp_clk)
	_REGISTER_CLOCK("imx-ipuv3", NULL, ipu_clk)
	_REGISTER_CLOCK("imx-ipuv3", "di0", ipu_di0_clk)
	_REGISTER_CLOCK("imx-ipuv3", "di1", ipu_di1_clk)
	_REGISTER_CLOCK(NULL, "gpc_dvfs", gpc_dvfs_clk)
	_REGISTER_CLOCK("pata_imx", NULL, pata_clk)
};
1500
/* clkdev bindings for i.MX53 peripherals */
static struct clk_lookup mx53_lookups[] = {
	/* i.mx53 has the i.mx21 type uart */
	_REGISTER_CLOCK("imx21-uart.0", NULL, uart1_clk)
	_REGISTER_CLOCK("imx21-uart.1", NULL, uart2_clk)
	_REGISTER_CLOCK("imx21-uart.2", NULL, uart3_clk)
	_REGISTER_CLOCK("imx21-uart.3", NULL, uart4_clk)
	_REGISTER_CLOCK("imx21-uart.4", NULL, uart5_clk)
	_REGISTER_CLOCK(NULL, "gpt", gpt_clk)
	/* i.mx53 has the i.mx25 type fec */
	_REGISTER_CLOCK("imx25-fec.0", NULL, fec_clk)
	_REGISTER_CLOCK(NULL, "iim_clk", iim_clk)
	_REGISTER_CLOCK("imx-i2c.0", NULL, i2c1_clk)
	_REGISTER_CLOCK("imx-i2c.1", NULL, i2c2_clk)
	_REGISTER_CLOCK("imx-i2c.2", NULL, i2c3_mx53_clk)
	/* i.mx53 has the i.mx51 type ecspi */
	_REGISTER_CLOCK("imx51-ecspi.0", NULL, ecspi1_clk)
	_REGISTER_CLOCK("imx51-ecspi.1", NULL, ecspi2_clk)
	/* i.mx53 has the i.mx35 type cspi */
	_REGISTER_CLOCK("imx35-cspi.0", NULL, cspi_clk)
	_REGISTER_CLOCK("sdhci-esdhc-imx53.0", NULL, esdhc1_clk)
	_REGISTER_CLOCK("sdhci-esdhc-imx53.1", NULL, esdhc2_mx53_clk)
	_REGISTER_CLOCK("sdhci-esdhc-imx53.2", NULL, esdhc3_mx53_clk)
	_REGISTER_CLOCK("sdhci-esdhc-imx53.3", NULL, esdhc4_mx53_clk)
	_REGISTER_CLOCK("imx2-wdt.0", NULL, dummy_clk)
	_REGISTER_CLOCK("imx2-wdt.1", NULL, dummy_clk)
	/* i.mx53 has the i.mx35 type sdma */
	_REGISTER_CLOCK("imx35-sdma", NULL, sdma_clk)
	_REGISTER_CLOCK("imx-ssi.0", NULL, ssi1_clk)
	_REGISTER_CLOCK("imx-ssi.1", NULL, ssi2_clk)
	_REGISTER_CLOCK("imx-ssi.2", NULL, ssi3_clk)
	_REGISTER_CLOCK("imx-keypad", NULL, dummy_clk)
	_REGISTER_CLOCK("pata_imx", NULL, pata_clk)
	_REGISTER_CLOCK("imx53-ahci.0", "ahci", sata_clk)
	_REGISTER_CLOCK("imx53-ahci.0", "ahci_phy", ahci_phy_clk)
	_REGISTER_CLOCK("imx53-ahci.0", "ahci_dma", ahci_dma_clk)
};
1537
/* One-time setup of the ipg_perclk branch: parent and CBCDR dividers. */
static void clk_tree_init(void)
{
	u32 reg;

	ipg_perclk.set_parent(&ipg_perclk, &lp_apm_clk);

	/*
	 * Initialise the IPG PER CLK dividers to 3. IPG_PER_CLK should be at
	 * 8MHz, it's derived from lp_apm.
	 *
	 * FIXME: Verify if true for all boards
	 */
	reg = __raw_readl(MXC_CCM_CBCDR);
	reg &= ~MXC_CCM_CBCDR_PERCLK_PRED1_MASK;
	reg &= ~MXC_CCM_CBCDR_PERCLK_PRED2_MASK;
	reg &= ~MXC_CCM_CBCDR_PERCLK_PODF_MASK;
	reg |= (2 << MXC_CCM_CBCDR_PERCLK_PRED1_OFFSET); /* field 2 => /3 */
	__raw_writel(reg, MXC_CCM_CBCDR);
}
1557
/*
 * mx51_clocks_init - register the i.MX51 clock tree
 * @ckil: external 32 kHz reference rate
 * @osc: main oscillator rate
 * @ckih1: first external high-frequency reference rate
 * @ckih2: second external high-frequency reference rate
 *
 * Records the reference rates, registers the clkdev lookups, sets up
 * default parents/rates for USB and SDHC, prints the silicon revision
 * and starts the system timer.  Always returns 0.
 */
int __init mx51_clocks_init(unsigned long ckil, unsigned long osc,
			unsigned long ckih1, unsigned long ckih2)
{
	int i;

	external_low_reference = ckil;
	external_high_reference = ckih1;
	ckih2_reference = ckih2;
	oscillator_reference = osc;

	for (i = 0; i < ARRAY_SIZE(mx51_lookups); i++)
		clkdev_add(&mx51_lookups[i]);

	clk_tree_init();

	clk_enable(&cpu_clk);
	clk_enable(&main_bus_clk);

	/* IIM is only needed while reading the silicon revision */
	clk_enable(&iim_clk);
	imx_print_silicon_rev("i.MX51", mx51_revision());
	clk_disable(&iim_clk);

	/* move usb_phy_clk to 24MHz */
	clk_set_parent(&usb_phy1_clk, &osc_clk);

	/* set the usboh3_clk parent to pll2_sw_clk */
	clk_set_parent(&usboh3_clk, &pll2_sw_clk);

	/* Set SDHC parents to be PLL2 */
	clk_set_parent(&esdhc1_clk, &pll2_sw_clk);
	clk_set_parent(&esdhc2_clk, &pll2_sw_clk);

	/* set SDHC root clock as 166.25MHZ*/
	clk_set_rate(&esdhc1_clk, 166250000);
	clk_set_rate(&esdhc2_clk, 166250000);

	/* System timer */
	mxc_timer_init(&gpt_clk, MX51_IO_ADDRESS(MX51_GPT1_BASE_ADDR),
		MX51_INT_GPT);
	return 0;
}
1599
/*
 * mx53_clocks_init - register the i.MX53 clock tree
 * @ckil: external 32 kHz reference rate
 * @osc: main oscillator rate
 * @ckih1: first external high-frequency reference rate
 * @ckih2: second external high-frequency reference rate
 *
 * Mirrors mx51_clocks_init() with mx53-specific lookups, a PLL3-fed
 * UART root and 200 MHz SDHC roots.  Always returns 0.
 */
int __init mx53_clocks_init(unsigned long ckil, unsigned long osc,
			unsigned long ckih1, unsigned long ckih2)
{
	int i;

	external_low_reference = ckil;
	external_high_reference = ckih1;
	ckih2_reference = ckih2;
	oscillator_reference = osc;

	for (i = 0; i < ARRAY_SIZE(mx53_lookups); i++)
		clkdev_add(&mx53_lookups[i]);

	clk_tree_init();

	clk_set_parent(&uart_root_clk, &pll3_sw_clk);
	clk_enable(&cpu_clk);
	clk_enable(&main_bus_clk);

	/* IIM is only needed while reading the silicon revision */
	clk_enable(&iim_clk);
	imx_print_silicon_rev("i.MX53", mx53_revision());
	clk_disable(&iim_clk);

	/* Set SDHC parents to be PLL2 */
	clk_set_parent(&esdhc1_clk, &pll2_sw_clk);
	clk_set_parent(&esdhc3_mx53_clk, &pll2_sw_clk);

	/* set SDHC root clock as 200MHZ*/
	clk_set_rate(&esdhc1_clk, 200000000);
	clk_set_rate(&esdhc3_mx53_clk, 200000000);

	/* System timer */
	mxc_timer_init(&gpt_clk, MX53_IO_ADDRESS(MX53_GPT1_BASE_ADDR),
		MX53_INT_GPT);
	return 0;
}
1636
1637#ifdef CONFIG_OF
/*
 * Read the rates of the fixed reference clocks from the device tree.
 * An output is written only when a matching "fixed-clock" node with a
 * "clock-frequency" property exists; callers must pre-initialize the
 * variables they pass in.
 */
static void __init clk_get_freq_dt(unsigned long *ckil, unsigned long *osc,
				unsigned long *ckih1, unsigned long *ckih2)
{
	struct device_node *np;

	/* retrieve the frequency of fixed clocks from device tree */
	for_each_compatible_node(np, NULL, "fixed-clock") {
		u32 rate;
		if (of_property_read_u32(np, "clock-frequency", &rate))
			continue;

		if (of_device_is_compatible(np, "fsl,imx-ckil"))
			*ckil = rate;
		else if (of_device_is_compatible(np, "fsl,imx-osc"))
			*osc = rate;
		else if (of_device_is_compatible(np, "fsl,imx-ckih1"))
			*ckih1 = rate;
		else if (of_device_is_compatible(np, "fsl,imx-ckih2"))
			*ckih2 = rate;
	}
}
1659
1660int __init mx51_clocks_init_dt(void)
1661{
1662 unsigned long ckil, osc, ckih1, ckih2;
1663
1664 clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
1665 return mx51_clocks_init(ckil, osc, ckih1, ckih2);
1666}
1667
1668int __init mx53_clocks_init_dt(void)
1669{
1670 unsigned long ckil, osc, ckih1, ckih2;
1671
1672 clk_get_freq_dt(&ckil, &osc, &ckih1, &ckih2);
1673 return mx53_clocks_init(ckil, osc, ckih1, ckih2);
1674}
1675#endif
diff --git a/arch/arm/mach-imx/cpu-imx5.c b/arch/arm/mach-imx/cpu-imx5.c
index aa15c517d06e..8eb15a2fcaf9 100644
--- a/arch/arm/mach-imx/cpu-imx5.c
+++ b/arch/arm/mach-imx/cpu-imx5.c
@@ -62,11 +62,8 @@ EXPORT_SYMBOL(mx51_revision);
62 * Dependent on link order - so the assumption is that vfp_init is called 62 * Dependent on link order - so the assumption is that vfp_init is called
63 * before us. 63 * before us.
64 */ 64 */
65static int __init mx51_neon_fixup(void) 65int __init mx51_neon_fixup(void)
66{ 66{
67 if (!cpu_is_mx51())
68 return 0;
69
70 if (mx51_revision() < IMX_CHIP_REVISION_3_0 && 67 if (mx51_revision() < IMX_CHIP_REVISION_3_0 &&
71 (elf_hwcap & HWCAP_NEON)) { 68 (elf_hwcap & HWCAP_NEON)) {
72 elf_hwcap &= ~HWCAP_NEON; 69 elf_hwcap &= ~HWCAP_NEON;
@@ -75,7 +72,6 @@ static int __init mx51_neon_fixup(void)
75 return 0; 72 return 0;
76} 73}
77 74
78late_initcall(mx51_neon_fixup);
79#endif 75#endif
80 76
81static int get_mx53_srev(void) 77static int get_mx53_srev(void)
diff --git a/arch/arm/mach-imx/crmregs-imx3.h b/arch/arm/mach-imx/crmregs-imx3.h
index 53141273df45..a1dfde53e335 100644
--- a/arch/arm/mach-imx/crmregs-imx3.h
+++ b/arch/arm/mach-imx/crmregs-imx3.h
@@ -24,48 +24,47 @@
24#define CKIH_CLK_FREQ_27MHZ 27000000 24#define CKIH_CLK_FREQ_27MHZ 27000000
25#define CKIL_CLK_FREQ 32768 25#define CKIL_CLK_FREQ 32768
26 26
27#define MXC_CCM_BASE (cpu_is_mx31() ? \ 27extern void __iomem *mx3_ccm_base;
28MX31_IO_ADDRESS(MX31_CCM_BASE_ADDR) : MX35_IO_ADDRESS(MX35_CCM_BASE_ADDR))
29 28
30/* Register addresses */ 29/* Register addresses */
31#define MXC_CCM_CCMR (MXC_CCM_BASE + 0x00) 30#define MXC_CCM_CCMR 0x00
32#define MXC_CCM_PDR0 (MXC_CCM_BASE + 0x04) 31#define MXC_CCM_PDR0 0x04
33#define MXC_CCM_PDR1 (MXC_CCM_BASE + 0x08) 32#define MXC_CCM_PDR1 0x08
34#define MX35_CCM_PDR2 (MXC_CCM_BASE + 0x0C) 33#define MX35_CCM_PDR2 0x0C
35#define MXC_CCM_RCSR (MXC_CCM_BASE + 0x0C) 34#define MXC_CCM_RCSR 0x0C
36#define MX35_CCM_PDR3 (MXC_CCM_BASE + 0x10) 35#define MX35_CCM_PDR3 0x10
37#define MXC_CCM_MPCTL (MXC_CCM_BASE + 0x10) 36#define MXC_CCM_MPCTL 0x10
38#define MX35_CCM_PDR4 (MXC_CCM_BASE + 0x14) 37#define MX35_CCM_PDR4 0x14
39#define MXC_CCM_UPCTL (MXC_CCM_BASE + 0x14) 38#define MXC_CCM_UPCTL 0x14
40#define MX35_CCM_RCSR (MXC_CCM_BASE + 0x18) 39#define MX35_CCM_RCSR 0x18
41#define MXC_CCM_SRPCTL (MXC_CCM_BASE + 0x18) 40#define MXC_CCM_SRPCTL 0x18
42#define MX35_CCM_MPCTL (MXC_CCM_BASE + 0x1C) 41#define MX35_CCM_MPCTL 0x1C
43#define MXC_CCM_COSR (MXC_CCM_BASE + 0x1C) 42#define MXC_CCM_COSR 0x1C
44#define MX35_CCM_PPCTL (MXC_CCM_BASE + 0x20) 43#define MX35_CCM_PPCTL 0x20
45#define MXC_CCM_CGR0 (MXC_CCM_BASE + 0x20) 44#define MXC_CCM_CGR0 0x20
46#define MX35_CCM_ACMR (MXC_CCM_BASE + 0x24) 45#define MX35_CCM_ACMR 0x24
47#define MXC_CCM_CGR1 (MXC_CCM_BASE + 0x24) 46#define MXC_CCM_CGR1 0x24
48#define MX35_CCM_COSR (MXC_CCM_BASE + 0x28) 47#define MX35_CCM_COSR 0x28
49#define MXC_CCM_CGR2 (MXC_CCM_BASE + 0x28) 48#define MXC_CCM_CGR2 0x28
50#define MX35_CCM_CGR0 (MXC_CCM_BASE + 0x2C) 49#define MX35_CCM_CGR0 0x2C
51#define MXC_CCM_WIMR (MXC_CCM_BASE + 0x2C) 50#define MXC_CCM_WIMR 0x2C
52#define MX35_CCM_CGR1 (MXC_CCM_BASE + 0x30) 51#define MX35_CCM_CGR1 0x30
53#define MXC_CCM_LDC (MXC_CCM_BASE + 0x30) 52#define MXC_CCM_LDC 0x30
54#define MX35_CCM_CGR2 (MXC_CCM_BASE + 0x34) 53#define MX35_CCM_CGR2 0x34
55#define MXC_CCM_DCVR0 (MXC_CCM_BASE + 0x34) 54#define MXC_CCM_DCVR0 0x34
56#define MX35_CCM_CGR3 (MXC_CCM_BASE + 0x38) 55#define MX35_CCM_CGR3 0x38
57#define MXC_CCM_DCVR1 (MXC_CCM_BASE + 0x38) 56#define MXC_CCM_DCVR1 0x38
58#define MXC_CCM_DCVR2 (MXC_CCM_BASE + 0x3C) 57#define MXC_CCM_DCVR2 0x3C
59#define MXC_CCM_DCVR3 (MXC_CCM_BASE + 0x40) 58#define MXC_CCM_DCVR3 0x40
60#define MXC_CCM_LTR0 (MXC_CCM_BASE + 0x44) 59#define MXC_CCM_LTR0 0x44
61#define MXC_CCM_LTR1 (MXC_CCM_BASE + 0x48) 60#define MXC_CCM_LTR1 0x48
62#define MXC_CCM_LTR2 (MXC_CCM_BASE + 0x4C) 61#define MXC_CCM_LTR2 0x4C
63#define MXC_CCM_LTR3 (MXC_CCM_BASE + 0x50) 62#define MXC_CCM_LTR3 0x50
64#define MXC_CCM_LTBR0 (MXC_CCM_BASE + 0x54) 63#define MXC_CCM_LTBR0 0x54
65#define MXC_CCM_LTBR1 (MXC_CCM_BASE + 0x58) 64#define MXC_CCM_LTBR1 0x58
66#define MXC_CCM_PMCR0 (MXC_CCM_BASE + 0x5C) 65#define MXC_CCM_PMCR0 0x5C
67#define MXC_CCM_PMCR1 (MXC_CCM_BASE + 0x60) 66#define MXC_CCM_PMCR1 0x60
68#define MXC_CCM_PDR2 (MXC_CCM_BASE + 0x64) 67#define MXC_CCM_PDR2 0x64
69 68
70/* Register bit definitions */ 69/* Register bit definitions */
71#define MXC_CCM_CCMR_WBEN (1 << 27) 70#define MXC_CCM_CCMR_WBEN (1 << 27)
diff --git a/arch/arm/mach-imx/imx51-dt.c b/arch/arm/mach-imx/imx51-dt.c
index 5f577fbda2c8..18e78dba4298 100644
--- a/arch/arm/mach-imx/imx51-dt.c
+++ b/arch/arm/mach-imx/imx51-dt.c
@@ -118,6 +118,7 @@ DT_MACHINE_START(IMX51_DT, "Freescale i.MX51 (Device Tree Support)")
118 .handle_irq = imx51_handle_irq, 118 .handle_irq = imx51_handle_irq,
119 .timer = &imx51_timer, 119 .timer = &imx51_timer,
120 .init_machine = imx51_dt_init, 120 .init_machine = imx51_dt_init,
121 .init_late = imx51_init_late,
121 .dt_compat = imx51_dt_board_compat, 122 .dt_compat = imx51_dt_board_compat,
122 .restart = mxc_restart, 123 .restart = mxc_restart,
123MACHINE_END 124MACHINE_END
diff --git a/arch/arm/mach-imx/imx53-dt.c b/arch/arm/mach-imx/imx53-dt.c
index 574eca4b89a5..eb04b6248e48 100644
--- a/arch/arm/mach-imx/imx53-dt.c
+++ b/arch/arm/mach-imx/imx53-dt.c
@@ -10,6 +10,9 @@
10 * http://www.gnu.org/copyleft/gpl.html 10 * http://www.gnu.org/copyleft/gpl.html
11 */ 11 */
12 12
13#include <linux/clk.h>
14#include <linux/clkdev.h>
15#include <linux/err.h>
13#include <linux/io.h> 16#include <linux/io.h>
14#include <linux/irq.h> 17#include <linux/irq.h>
15#include <linux/irqdomain.h> 18#include <linux/irqdomain.h>
@@ -81,6 +84,19 @@ static const struct of_device_id imx53_iomuxc_of_match[] __initconst = {
81 { /* sentinel */ } 84 { /* sentinel */ }
82}; 85};
83 86
87static void __init imx53_qsb_init(void)
88{
89 struct clk *clk;
90
91 clk = clk_get_sys(NULL, "ssi_ext1");
92 if (IS_ERR(clk)) {
93 pr_err("failed to get clk ssi_ext1\n");
94 return;
95 }
96
97 clk_register_clkdev(clk, NULL, "0-000a");
98}
99
84static void __init imx53_dt_init(void) 100static void __init imx53_dt_init(void)
85{ 101{
86 struct device_node *node; 102 struct device_node *node;
@@ -99,6 +115,9 @@ static void __init imx53_dt_init(void)
99 of_node_put(node); 115 of_node_put(node);
100 } 116 }
101 117
118 if (of_machine_is_compatible("fsl,imx53-qsb"))
119 imx53_qsb_init();
120
102 of_platform_populate(NULL, of_default_bus_match_table, 121 of_platform_populate(NULL, of_default_bus_match_table,
103 imx53_auxdata_lookup, NULL); 122 imx53_auxdata_lookup, NULL);
104} 123}
diff --git a/arch/arm/mach-imx/lluart.c b/arch/arm/mach-imx/lluart.c
index 0213f8dcee81..c40a34c00489 100644
--- a/arch/arm/mach-imx/lluart.c
+++ b/arch/arm/mach-imx/lluart.c
@@ -17,6 +17,12 @@
17#include <mach/hardware.h> 17#include <mach/hardware.h>
18 18
19static struct map_desc imx_lluart_desc = { 19static struct map_desc imx_lluart_desc = {
20#ifdef CONFIG_DEBUG_IMX6Q_UART2
21 .virtual = MX6Q_IO_P2V(MX6Q_UART2_BASE_ADDR),
22 .pfn = __phys_to_pfn(MX6Q_UART2_BASE_ADDR),
23 .length = MX6Q_UART2_SIZE,
24 .type = MT_DEVICE,
25#endif
20#ifdef CONFIG_DEBUG_IMX6Q_UART4 26#ifdef CONFIG_DEBUG_IMX6Q_UART4
21 .virtual = MX6Q_IO_P2V(MX6Q_UART4_BASE_ADDR), 27 .virtual = MX6Q_IO_P2V(MX6Q_UART4_BASE_ADDR),
22 .pfn = __phys_to_pfn(MX6Q_UART4_BASE_ADDR), 28 .pfn = __phys_to_pfn(MX6Q_UART4_BASE_ADDR),
diff --git a/arch/arm/mach-imx/mach-cpuimx51sd.c b/arch/arm/mach-imx/mach-cpuimx51sd.c
index ce341a6874fc..ac50f1671e38 100644
--- a/arch/arm/mach-imx/mach-cpuimx51sd.c
+++ b/arch/arm/mach-imx/mach-cpuimx51sd.c
@@ -369,5 +369,6 @@ MACHINE_START(EUKREA_CPUIMX51SD, "Eukrea CPUIMX51SD")
369 .handle_irq = imx51_handle_irq, 369 .handle_irq = imx51_handle_irq,
370 .timer = &mxc_timer, 370 .timer = &mxc_timer,
371 .init_machine = eukrea_cpuimx51sd_init, 371 .init_machine = eukrea_cpuimx51sd_init,
372 .init_late = imx51_init_late,
372 .restart = mxc_restart, 373 .restart = mxc_restart,
373MACHINE_END 374MACHINE_END
diff --git a/arch/arm/mach-imx/mach-imx6q.c b/arch/arm/mach-imx/mach-imx6q.c
index 3df360a52c17..b47e98b7d539 100644
--- a/arch/arm/mach-imx/mach-imx6q.c
+++ b/arch/arm/mach-imx/mach-imx6q.c
@@ -10,6 +10,8 @@
10 * http://www.gnu.org/copyleft/gpl.html 10 * http://www.gnu.org/copyleft/gpl.html
11 */ 11 */
12 12
13#include <linux/clk.h>
14#include <linux/clkdev.h>
13#include <linux/delay.h> 15#include <linux/delay.h>
14#include <linux/init.h> 16#include <linux/init.h>
15#include <linux/io.h> 17#include <linux/io.h>
@@ -64,18 +66,53 @@ soft:
64/* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */ 66/* For imx6q sabrelite board: set KSZ9021RN RGMII pad skew */
65static int ksz9021rn_phy_fixup(struct phy_device *phydev) 67static int ksz9021rn_phy_fixup(struct phy_device *phydev)
66{ 68{
67 /* min rx data delay */ 69 if (IS_ENABLED(CONFIG_PHYLIB)) {
68 phy_write(phydev, 0x0b, 0x8105); 70 /* min rx data delay */
69 phy_write(phydev, 0x0c, 0x0000); 71 phy_write(phydev, 0x0b, 0x8105);
72 phy_write(phydev, 0x0c, 0x0000);
70 73
71 /* max rx/tx clock delay, min rx/tx control delay */ 74 /* max rx/tx clock delay, min rx/tx control delay */
72 phy_write(phydev, 0x0b, 0x8104); 75 phy_write(phydev, 0x0b, 0x8104);
73 phy_write(phydev, 0x0c, 0xf0f0); 76 phy_write(phydev, 0x0c, 0xf0f0);
74 phy_write(phydev, 0x0b, 0x104); 77 phy_write(phydev, 0x0b, 0x104);
78 }
75 79
76 return 0; 80 return 0;
77} 81}
78 82
83static void __init imx6q_sabrelite_cko1_setup(void)
84{
85 struct clk *cko1_sel, *ahb, *cko1;
86 unsigned long rate;
87
88 cko1_sel = clk_get_sys(NULL, "cko1_sel");
89 ahb = clk_get_sys(NULL, "ahb");
90 cko1 = clk_get_sys(NULL, "cko1");
91 if (IS_ERR(cko1_sel) || IS_ERR(ahb) || IS_ERR(cko1)) {
92 pr_err("cko1 setup failed!\n");
93 goto put_clk;
94 }
95 clk_set_parent(cko1_sel, ahb);
96 rate = clk_round_rate(cko1, 16000000);
97 clk_set_rate(cko1, rate);
98 clk_register_clkdev(cko1, NULL, "0-000a");
99put_clk:
100 if (!IS_ERR(cko1_sel))
101 clk_put(cko1_sel);
102 if (!IS_ERR(ahb))
103 clk_put(ahb);
104 if (!IS_ERR(cko1))
105 clk_put(cko1);
106}
107
108static void __init imx6q_sabrelite_init(void)
109{
110 if (IS_ENABLED(CONFIG_PHYLIB))
111 phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK,
112 ksz9021rn_phy_fixup);
113 imx6q_sabrelite_cko1_setup();
114}
115
79static void __init imx6q_init_machine(void) 116static void __init imx6q_init_machine(void)
80{ 117{
81 /* 118 /*
@@ -85,8 +122,7 @@ static void __init imx6q_init_machine(void)
85 pinctrl_provide_dummies(); 122 pinctrl_provide_dummies();
86 123
87 if (of_machine_is_compatible("fsl,imx6q-sabrelite")) 124 if (of_machine_is_compatible("fsl,imx6q-sabrelite"))
88 phy_register_fixup_for_uid(PHY_ID_KSZ9021, MICREL_PHY_ID_MASK, 125 imx6q_sabrelite_init();
89 ksz9021rn_phy_fixup);
90 126
91 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); 127 of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
92 128
@@ -139,6 +175,7 @@ static struct sys_timer imx6q_timer = {
139static const char *imx6q_dt_compat[] __initdata = { 175static const char *imx6q_dt_compat[] __initdata = {
140 "fsl,imx6q-arm2", 176 "fsl,imx6q-arm2",
141 "fsl,imx6q-sabrelite", 177 "fsl,imx6q-sabrelite",
178 "fsl,imx6q-sabresd",
142 "fsl,imx6q", 179 "fsl,imx6q",
143 NULL, 180 NULL,
144}; 181};
diff --git a/arch/arm/mach-imx/mach-mx51_3ds.c b/arch/arm/mach-imx/mach-mx51_3ds.c
index 83eab4176ca4..3c5b163923f6 100644
--- a/arch/arm/mach-imx/mach-mx51_3ds.c
+++ b/arch/arm/mach-imx/mach-mx51_3ds.c
@@ -175,5 +175,6 @@ MACHINE_START(MX51_3DS, "Freescale MX51 3-Stack Board")
175 .handle_irq = imx51_handle_irq, 175 .handle_irq = imx51_handle_irq,
176 .timer = &mx51_3ds_timer, 176 .timer = &mx51_3ds_timer,
177 .init_machine = mx51_3ds_init, 177 .init_machine = mx51_3ds_init,
178 .init_late = imx51_init_late,
178 .restart = mxc_restart, 179 .restart = mxc_restart,
179MACHINE_END 180MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx51_babbage.c b/arch/arm/mach-imx/mach-mx51_babbage.c
index e4b822e9f719..dde397014d4b 100644
--- a/arch/arm/mach-imx/mach-mx51_babbage.c
+++ b/arch/arm/mach-imx/mach-mx51_babbage.c
@@ -163,6 +163,12 @@ static iomux_v3_cfg_t mx51babbage_pads[] = {
163 MX51_PAD_CSPI1_SCLK__ECSPI1_SCLK, 163 MX51_PAD_CSPI1_SCLK__ECSPI1_SCLK,
164 MX51_PAD_CSPI1_SS0__GPIO4_24, 164 MX51_PAD_CSPI1_SS0__GPIO4_24,
165 MX51_PAD_CSPI1_SS1__GPIO4_25, 165 MX51_PAD_CSPI1_SS1__GPIO4_25,
166
167 /* Audio */
168 MX51_PAD_AUD3_BB_TXD__AUD3_TXD,
169 MX51_PAD_AUD3_BB_RXD__AUD3_RXD,
170 MX51_PAD_AUD3_BB_CK__AUD3_TXC,
171 MX51_PAD_AUD3_BB_FS__AUD3_TXFS,
166}; 172};
167 173
168/* Serial ports */ 174/* Serial ports */
@@ -426,5 +432,6 @@ MACHINE_START(MX51_BABBAGE, "Freescale MX51 Babbage Board")
426 .handle_irq = imx51_handle_irq, 432 .handle_irq = imx51_handle_irq,
427 .timer = &mx51_babbage_timer, 433 .timer = &mx51_babbage_timer,
428 .init_machine = mx51_babbage_init, 434 .init_machine = mx51_babbage_init,
435 .init_late = imx51_init_late,
429 .restart = mxc_restart, 436 .restart = mxc_restart,
430MACHINE_END 437MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx51_efikamx.c b/arch/arm/mach-imx/mach-mx51_efikamx.c
index 86e96ef11f9d..8d09c0126cab 100644
--- a/arch/arm/mach-imx/mach-mx51_efikamx.c
+++ b/arch/arm/mach-imx/mach-mx51_efikamx.c
@@ -207,29 +207,32 @@ static void mx51_efikamx_power_off(void)
207 207
208static int __init mx51_efikamx_power_init(void) 208static int __init mx51_efikamx_power_init(void)
209{ 209{
210 if (machine_is_mx51_efikamx()) { 210 pwgt1 = regulator_get(NULL, "pwgt1");
211 pwgt1 = regulator_get(NULL, "pwgt1"); 211 pwgt2 = regulator_get(NULL, "pwgt2");
212 pwgt2 = regulator_get(NULL, "pwgt2"); 212 if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
213 if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) { 213 regulator_enable(pwgt1);
214 regulator_enable(pwgt1); 214 regulator_enable(pwgt2);
215 regulator_enable(pwgt2); 215 }
216 } 216 gpio_request(EFIKAMX_POWEROFF, "poweroff");
217 gpio_request(EFIKAMX_POWEROFF, "poweroff"); 217 pm_power_off = mx51_efikamx_power_off;
218 pm_power_off = mx51_efikamx_power_off; 218
219 219 /* enable coincell charger. maybe need a small power driver ? */
220 /* enable coincell charger. maybe need a small power driver ? */ 220 coincell = regulator_get(NULL, "coincell");
221 coincell = regulator_get(NULL, "coincell"); 221 if (!IS_ERR(coincell)) {
222 if (!IS_ERR(coincell)) { 222 regulator_set_voltage(coincell, 3000000, 3000000);
223 regulator_set_voltage(coincell, 3000000, 3000000); 223 regulator_enable(coincell);
224 regulator_enable(coincell);
225 }
226
227 regulator_has_full_constraints();
228 } 224 }
229 225
226 regulator_has_full_constraints();
227
230 return 0; 228 return 0;
231} 229}
232late_initcall(mx51_efikamx_power_init); 230
231static void __init mx51_efikamx_init_late(void)
232{
233 imx51_init_late();
234 mx51_efikamx_power_init();
235}
233 236
234static void __init mx51_efikamx_init(void) 237static void __init mx51_efikamx_init(void)
235{ 238{
@@ -292,5 +295,6 @@ MACHINE_START(MX51_EFIKAMX, "Genesi Efika MX (Smarttop)")
292 .handle_irq = imx51_handle_irq, 295 .handle_irq = imx51_handle_irq,
293 .timer = &mx51_efikamx_timer, 296 .timer = &mx51_efikamx_timer,
294 .init_machine = mx51_efikamx_init, 297 .init_machine = mx51_efikamx_init,
298 .init_late = mx51_efikamx_init_late,
295 .restart = mx51_efikamx_restart, 299 .restart = mx51_efikamx_restart,
296MACHINE_END 300MACHINE_END
diff --git a/arch/arm/mach-imx/mach-mx51_efikasb.c b/arch/arm/mach-imx/mach-mx51_efikasb.c
index 88f837a6cc76..fdbd181b97ef 100644
--- a/arch/arm/mach-imx/mach-mx51_efikasb.c
+++ b/arch/arm/mach-imx/mach-mx51_efikasb.c
@@ -211,22 +211,25 @@ static void mx51_efikasb_power_off(void)
211 211
212static int __init mx51_efikasb_power_init(void) 212static int __init mx51_efikasb_power_init(void)
213{ 213{
214 if (machine_is_mx51_efikasb()) { 214 pwgt1 = regulator_get(NULL, "pwgt1");
215 pwgt1 = regulator_get(NULL, "pwgt1"); 215 pwgt2 = regulator_get(NULL, "pwgt2");
216 pwgt2 = regulator_get(NULL, "pwgt2"); 216 if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) {
217 if (!IS_ERR(pwgt1) && !IS_ERR(pwgt2)) { 217 regulator_enable(pwgt1);
218 regulator_enable(pwgt1); 218 regulator_enable(pwgt2);
219 regulator_enable(pwgt2);
220 }
221 gpio_request(EFIKASB_POWEROFF, "poweroff");
222 pm_power_off = mx51_efikasb_power_off;
223
224 regulator_has_full_constraints();
225 } 219 }
220 gpio_request(EFIKASB_POWEROFF, "poweroff");
221 pm_power_off = mx51_efikasb_power_off;
222
223 regulator_has_full_constraints();
226 224
227 return 0; 225 return 0;
228} 226}
229late_initcall(mx51_efikasb_power_init); 227
228static void __init mx51_efikasb_init_late(void)
229{
230 imx51_init_late();
231 mx51_efikasb_power_init();
232}
230 233
231/* 01 R1.3 board 234/* 01 R1.3 board
232 10 R2.0 board */ 235 10 R2.0 board */
@@ -287,6 +290,7 @@ MACHINE_START(MX51_EFIKASB, "Genesi Efika MX (Smartbook)")
287 .init_irq = mx51_init_irq, 290 .init_irq = mx51_init_irq,
288 .handle_irq = imx51_handle_irq, 291 .handle_irq = imx51_handle_irq,
289 .init_machine = efikasb_board_init, 292 .init_machine = efikasb_board_init,
293 .init_late = mx51_efikasb_init_late,
290 .timer = &mx51_efikasb_timer, 294 .timer = &mx51_efikasb_timer,
291 .restart = mxc_restart, 295 .restart = mxc_restart,
292MACHINE_END 296MACHINE_END
diff --git a/arch/arm/mach-imx/mach-pcm037.c b/arch/arm/mach-imx/mach-pcm037.c
index 10c9795934a3..0a40004154f2 100644
--- a/arch/arm/mach-imx/mach-pcm037.c
+++ b/arch/arm/mach-imx/mach-pcm037.c
@@ -694,6 +694,11 @@ static void __init pcm037_reserve(void)
694 MX3_CAMERA_BUF_SIZE); 694 MX3_CAMERA_BUF_SIZE);
695} 695}
696 696
697static void __init pcm037_init_late(void)
698{
699 pcm037_eet_init_devices();
700}
701
697MACHINE_START(PCM037, "Phytec Phycore pcm037") 702MACHINE_START(PCM037, "Phytec Phycore pcm037")
698 /* Maintainer: Pengutronix */ 703 /* Maintainer: Pengutronix */
699 .atag_offset = 0x100, 704 .atag_offset = 0x100,
@@ -704,5 +709,6 @@ MACHINE_START(PCM037, "Phytec Phycore pcm037")
704 .handle_irq = imx31_handle_irq, 709 .handle_irq = imx31_handle_irq,
705 .timer = &pcm037_timer, 710 .timer = &pcm037_timer,
706 .init_machine = pcm037_init, 711 .init_machine = pcm037_init,
712 .init_late = pcm037_init_late,
707 .restart = mxc_restart, 713 .restart = mxc_restart,
708MACHINE_END 714MACHINE_END
diff --git a/arch/arm/mach-imx/mach-pcm037_eet.c b/arch/arm/mach-imx/mach-pcm037_eet.c
index 1b7606bef8f4..11ffa81ad17d 100644
--- a/arch/arm/mach-imx/mach-pcm037_eet.c
+++ b/arch/arm/mach-imx/mach-pcm037_eet.c
@@ -160,9 +160,9 @@ static const struct gpio_keys_platform_data
160 .rep = 0, /* No auto-repeat */ 160 .rep = 0, /* No auto-repeat */
161}; 161};
162 162
163static int __init eet_init_devices(void) 163int __init pcm037_eet_init_devices(void)
164{ 164{
165 if (!machine_is_pcm037() || pcm037_variant() != PCM037_EET) 165 if (pcm037_variant() != PCM037_EET)
166 return 0; 166 return 0;
167 167
168 mxc_iomux_setup_multiple_pins(pcm037_eet_pins, 168 mxc_iomux_setup_multiple_pins(pcm037_eet_pins,
@@ -176,4 +176,3 @@ static int __init eet_init_devices(void)
176 176
177 return 0; 177 return 0;
178} 178}
179late_initcall(eet_init_devices);
diff --git a/arch/arm/mach-imx/mm-imx3.c b/arch/arm/mach-imx/mm-imx3.c
index 9128d15b1eb7..967ed5b35a45 100644
--- a/arch/arm/mach-imx/mm-imx3.c
+++ b/arch/arm/mach-imx/mm-imx3.c
@@ -32,6 +32,10 @@
32#include <mach/iomux-v3.h> 32#include <mach/iomux-v3.h>
33#include <mach/irqs.h> 33#include <mach/irqs.h>
34 34
35#include "crmregs-imx3.h"
36
37void __iomem *mx3_ccm_base;
38
35static void imx3_idle(void) 39static void imx3_idle(void)
36{ 40{
37 unsigned long reg = 0; 41 unsigned long reg = 0;
@@ -138,6 +142,7 @@ void __init imx31_init_early(void)
138 mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR)); 142 mxc_arch_reset_init(MX31_IO_ADDRESS(MX31_WDOG_BASE_ADDR));
139 arch_ioremap_caller = imx3_ioremap_caller; 143 arch_ioremap_caller = imx3_ioremap_caller;
140 arm_pm_idle = imx3_idle; 144 arm_pm_idle = imx3_idle;
145 mx3_ccm_base = MX31_IO_ADDRESS(MX31_CCM_BASE_ADDR);
141} 146}
142 147
143void __init mx31_init_irq(void) 148void __init mx31_init_irq(void)
@@ -211,6 +216,7 @@ void __init imx35_init_early(void)
211 mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR)); 216 mxc_arch_reset_init(MX35_IO_ADDRESS(MX35_WDOG_BASE_ADDR));
212 arm_pm_idle = imx3_idle; 217 arm_pm_idle = imx3_idle;
213 arch_ioremap_caller = imx3_ioremap_caller; 218 arch_ioremap_caller = imx3_ioremap_caller;
219 mx3_ccm_base = MX35_IO_ADDRESS(MX35_CCM_BASE_ADDR);
214} 220}
215 221
216void __init mx35_init_irq(void) 222void __init mx35_init_irq(void)
diff --git a/arch/arm/mach-imx/mm-imx5.c b/arch/arm/mach-imx/mm-imx5.c
index ba91e6b31cf4..feeee17da96b 100644
--- a/arch/arm/mach-imx/mm-imx5.c
+++ b/arch/arm/mach-imx/mm-imx5.c
@@ -33,6 +33,7 @@ static void imx5_idle(void)
33 gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs"); 33 gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs");
34 if (IS_ERR(gpc_dvfs_clk)) 34 if (IS_ERR(gpc_dvfs_clk))
35 return; 35 return;
36 clk_prepare(gpc_dvfs_clk);
36 } 37 }
37 clk_enable(gpc_dvfs_clk); 38 clk_enable(gpc_dvfs_clk);
38 mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF); 39 mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
@@ -236,3 +237,8 @@ void __init imx53_soc_init(void)
236 platform_device_register_simple("imx31-audmux", 0, imx53_audmux_res, 237 platform_device_register_simple("imx31-audmux", 0, imx53_audmux_res,
237 ARRAY_SIZE(imx53_audmux_res)); 238 ARRAY_SIZE(imx53_audmux_res));
238} 239}
240
241void __init imx51_init_late(void)
242{
243 mx51_neon_fixup();
244}
diff --git a/arch/arm/mach-imx/pcm037.h b/arch/arm/mach-imx/pcm037.h
index d6929721a5fd..7d167690e17d 100644
--- a/arch/arm/mach-imx/pcm037.h
+++ b/arch/arm/mach-imx/pcm037.h
@@ -8,4 +8,10 @@ enum pcm037_board_variant {
8 8
9extern enum pcm037_board_variant pcm037_variant(void); 9extern enum pcm037_board_variant pcm037_variant(void);
10 10
11#ifdef CONFIG_MACH_PCM037_EET
12int pcm037_eet_init_devices(void);
13#else
14static inline int pcm037_eet_init_devices(void) { return 0; }
15#endif
16
11#endif 17#endif
diff --git a/arch/arm/mach-imx/pm-imx3.c b/arch/arm/mach-imx/pm-imx3.c
index b3752439632e..822103bdb709 100644
--- a/arch/arm/mach-imx/pm-imx3.c
+++ b/arch/arm/mach-imx/pm-imx3.c
@@ -21,14 +21,14 @@
21 */ 21 */
22void mx3_cpu_lp_set(enum mx3_cpu_pwr_mode mode) 22void mx3_cpu_lp_set(enum mx3_cpu_pwr_mode mode)
23{ 23{
24 int reg = __raw_readl(MXC_CCM_CCMR); 24 int reg = __raw_readl(mx3_ccm_base + MXC_CCM_CCMR);
25 reg &= ~MXC_CCM_CCMR_LPM_MASK; 25 reg &= ~MXC_CCM_CCMR_LPM_MASK;
26 26
27 switch (mode) { 27 switch (mode) {
28 case MX3_WAIT: 28 case MX3_WAIT:
29 if (cpu_is_mx35()) 29 if (cpu_is_mx35())
30 reg |= MXC_CCM_CCMR_LPM_WAIT_MX35; 30 reg |= MXC_CCM_CCMR_LPM_WAIT_MX35;
31 __raw_writel(reg, MXC_CCM_CCMR); 31 __raw_writel(reg, mx3_ccm_base + MXC_CCM_CCMR);
32 break; 32 break;
33 default: 33 default:
34 pr_err("Unknown cpu power mode: %d\n", mode); 34 pr_err("Unknown cpu power mode: %d\n", mode);
diff --git a/arch/arm/mach-kirkwood/board-dreamplug.c b/arch/arm/mach-kirkwood/board-dreamplug.c
index 985453994dd3..55e357ab2923 100644
--- a/arch/arm/mach-kirkwood/board-dreamplug.c
+++ b/arch/arm/mach-kirkwood/board-dreamplug.c
@@ -27,7 +27,6 @@
27#include <linux/mtd/physmap.h> 27#include <linux/mtd/physmap.h>
28#include <linux/spi/flash.h> 28#include <linux/spi/flash.h>
29#include <linux/spi/spi.h> 29#include <linux/spi/spi.h>
30#include <linux/spi/orion_spi.h>
31#include <asm/mach-types.h> 30#include <asm/mach-types.h>
32#include <asm/mach/arch.h> 31#include <asm/mach/arch.h>
33#include <asm/mach/map.h> 32#include <asm/mach/map.h>
diff --git a/arch/arm/mach-kirkwood/board-dt.c b/arch/arm/mach-kirkwood/board-dt.c
index 10d1969b9e3a..edc3f8a9d45e 100644
--- a/arch/arm/mach-kirkwood/board-dt.c
+++ b/arch/arm/mach-kirkwood/board-dt.c
@@ -43,6 +43,9 @@ static void __init kirkwood_dt_init(void)
43 kirkwood_l2_init(); 43 kirkwood_l2_init();
44#endif 44#endif
45 45
46 /* Setup root of clk tree */
47 kirkwood_clk_init();
48
46 /* internal devices that every board has */ 49 /* internal devices that every board has */
47 kirkwood_wdt_init(); 50 kirkwood_wdt_init();
48 kirkwood_xor0_init(); 51 kirkwood_xor0_init();
diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
index 3ad037385a5e..25fb3fd418ef 100644
--- a/arch/arm/mach-kirkwood/common.c
+++ b/arch/arm/mach-kirkwood/common.c
@@ -15,7 +15,8 @@
15#include <linux/ata_platform.h> 15#include <linux/ata_platform.h>
16#include <linux/mtd/nand.h> 16#include <linux/mtd/nand.h>
17#include <linux/dma-mapping.h> 17#include <linux/dma-mapping.h>
18#include <linux/of.h> 18#include <linux/clk-provider.h>
19#include <linux/spinlock.h>
19#include <net/dsa.h> 20#include <net/dsa.h>
20#include <asm/page.h> 21#include <asm/page.h>
21#include <asm/timex.h> 22#include <asm/timex.h>
@@ -32,6 +33,7 @@
32#include <plat/common.h> 33#include <plat/common.h>
33#include <plat/time.h> 34#include <plat/time.h>
34#include <plat/addr-map.h> 35#include <plat/addr-map.h>
36#include <plat/mv_xor.h>
35#include "common.h" 37#include "common.h"
36 38
37/***************************************************************************** 39/*****************************************************************************
@@ -61,20 +63,188 @@ void __init kirkwood_map_io(void)
61 iotable_init(kirkwood_io_desc, ARRAY_SIZE(kirkwood_io_desc)); 63 iotable_init(kirkwood_io_desc, ARRAY_SIZE(kirkwood_io_desc));
62} 64}
63 65
64/* 66/*****************************************************************************
65 * Default clock control bits. Any bit _not_ set in this variable 67 * CLK tree
66 * will be cleared from the hardware after platform devices have been 68 ****************************************************************************/
67 * registered. Some reserved bits must be set to 1. 69
68 */ 70static void disable_sata0(void)
69unsigned int kirkwood_clk_ctrl = CGC_DUNIT | CGC_RESERVED; 71{
72 /* Disable PLL and IVREF */
73 writel(readl(SATA0_PHY_MODE_2) & ~0xf, SATA0_PHY_MODE_2);
74 /* Disable PHY */
75 writel(readl(SATA0_IF_CTRL) | 0x200, SATA0_IF_CTRL);
76}
77
78static void disable_sata1(void)
79{
80 /* Disable PLL and IVREF */
81 writel(readl(SATA1_PHY_MODE_2) & ~0xf, SATA1_PHY_MODE_2);
82 /* Disable PHY */
83 writel(readl(SATA1_IF_CTRL) | 0x200, SATA1_IF_CTRL);
84}
85
86static void disable_pcie0(void)
87{
88 writel(readl(PCIE_LINK_CTRL) | 0x10, PCIE_LINK_CTRL);
89 while (1)
90 if (readl(PCIE_STATUS) & 0x1)
91 break;
92 writel(readl(PCIE_LINK_CTRL) & ~0x10, PCIE_LINK_CTRL);
93}
94
95static void disable_pcie1(void)
96{
97 u32 dev, rev;
98
99 kirkwood_pcie_id(&dev, &rev);
100
101 if (dev == MV88F6282_DEV_ID) {
102 writel(readl(PCIE1_LINK_CTRL) | 0x10, PCIE1_LINK_CTRL);
103 while (1)
104 if (readl(PCIE1_STATUS) & 0x1)
105 break;
106 writel(readl(PCIE1_LINK_CTRL) & ~0x10, PCIE1_LINK_CTRL);
107 }
108}
109
110/* An extended version of the gated clk. This calls fn() before
111 * disabling the clock. We use this to turn off PHYs etc. */
112struct clk_gate_fn {
113 struct clk_gate gate;
114 void (*fn)(void);
115};
116
117#define to_clk_gate_fn(_gate) container_of(_gate, struct clk_gate_fn, gate)
118#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
119
120static void clk_gate_fn_disable(struct clk_hw *hw)
121{
122 struct clk_gate *gate = to_clk_gate(hw);
123 struct clk_gate_fn *gate_fn = to_clk_gate_fn(gate);
124
125 if (gate_fn->fn)
126 gate_fn->fn();
127
128 clk_gate_ops.disable(hw);
129}
130
131static struct clk_ops clk_gate_fn_ops;
132
133static struct clk __init *clk_register_gate_fn(struct device *dev,
134 const char *name,
135 const char *parent_name, unsigned long flags,
136 void __iomem *reg, u8 bit_idx,
137 u8 clk_gate_flags, spinlock_t *lock,
138 void (*fn)(void))
139{
140 struct clk_gate_fn *gate_fn;
141 struct clk *clk;
142 struct clk_init_data init;
143
144 gate_fn = kzalloc(sizeof(struct clk_gate_fn), GFP_KERNEL);
145 if (!gate_fn) {
146 pr_err("%s: could not allocate gated clk\n", __func__);
147 return ERR_PTR(-ENOMEM);
148 }
149
150 init.name = name;
151 init.ops = &clk_gate_fn_ops;
152 init.flags = flags;
153 init.parent_names = (parent_name ? &parent_name : NULL);
154 init.num_parents = (parent_name ? 1 : 0);
155
156 /* struct clk_gate assignments */
157 gate_fn->gate.reg = reg;
158 gate_fn->gate.bit_idx = bit_idx;
159 gate_fn->gate.flags = clk_gate_flags;
160 gate_fn->gate.lock = lock;
161 gate_fn->gate.hw.init = &init;
162
163 /* ops is the gate ops, but with our disable function */
164 if (clk_gate_fn_ops.disable != clk_gate_fn_disable) {
165 clk_gate_fn_ops = clk_gate_ops;
166 clk_gate_fn_ops.disable = clk_gate_fn_disable;
167 }
70 168
169 clk = clk_register(dev, &gate_fn->gate.hw);
170
171 if (IS_ERR(clk))
172 kfree(gate_fn);
173
174 return clk;
175}
176
177static DEFINE_SPINLOCK(gating_lock);
178static struct clk *tclk;
179
180static struct clk __init *kirkwood_register_gate(const char *name, u8 bit_idx)
181{
182 return clk_register_gate(NULL, name, "tclk", 0,
183 (void __iomem *)CLOCK_GATING_CTRL,
184 bit_idx, 0, &gating_lock);
185}
186
187static struct clk __init *kirkwood_register_gate_fn(const char *name,
188 u8 bit_idx,
189 void (*fn)(void))
190{
191 return clk_register_gate_fn(NULL, name, "tclk", 0,
192 (void __iomem *)CLOCK_GATING_CTRL,
193 bit_idx, 0, &gating_lock, fn);
194}
195
196void __init kirkwood_clk_init(void)
197{
198 struct clk *runit, *ge0, *ge1, *sata0, *sata1, *usb0, *sdio;
199 struct clk *crypto, *xor0, *xor1, *pex0, *pex1, *audio;
200
201 tclk = clk_register_fixed_rate(NULL, "tclk", NULL,
202 CLK_IS_ROOT, kirkwood_tclk);
203
204 runit = kirkwood_register_gate("runit", CGC_BIT_RUNIT);
205 ge0 = kirkwood_register_gate("ge0", CGC_BIT_GE0);
206 ge1 = kirkwood_register_gate("ge1", CGC_BIT_GE1);
207 sata0 = kirkwood_register_gate_fn("sata0", CGC_BIT_SATA0,
208 disable_sata0);
209 sata1 = kirkwood_register_gate_fn("sata1", CGC_BIT_SATA1,
210 disable_sata1);
211 usb0 = kirkwood_register_gate("usb0", CGC_BIT_USB0);
212 sdio = kirkwood_register_gate("sdio", CGC_BIT_SDIO);
213 crypto = kirkwood_register_gate("crypto", CGC_BIT_CRYPTO);
214 xor0 = kirkwood_register_gate("xor0", CGC_BIT_XOR0);
215 xor1 = kirkwood_register_gate("xor1", CGC_BIT_XOR1);
216 pex0 = kirkwood_register_gate_fn("pex0", CGC_BIT_PEX0,
217 disable_pcie0);
218 pex1 = kirkwood_register_gate_fn("pex1", CGC_BIT_PEX1,
219 disable_pcie1);
220 audio = kirkwood_register_gate("audio", CGC_BIT_AUDIO);
221 kirkwood_register_gate("tdm", CGC_BIT_TDM);
222 kirkwood_register_gate("tsu", CGC_BIT_TSU);
223
224 /* clkdev entries, mapping clks to devices */
225 orion_clkdev_add(NULL, "orion_spi.0", runit);
226 orion_clkdev_add(NULL, "orion_spi.1", runit);
227 orion_clkdev_add(NULL, MV643XX_ETH_NAME ".0", ge0);
228 orion_clkdev_add(NULL, MV643XX_ETH_NAME ".1", ge1);
229 orion_clkdev_add(NULL, "orion_wdt", tclk);
230 orion_clkdev_add("0", "sata_mv.0", sata0);
231 orion_clkdev_add("1", "sata_mv.0", sata1);
232 orion_clkdev_add(NULL, "orion-ehci.0", usb0);
233 orion_clkdev_add(NULL, "orion_nand", runit);
234 orion_clkdev_add(NULL, "mvsdio", sdio);
235 orion_clkdev_add(NULL, "mv_crypto", crypto);
236 orion_clkdev_add(NULL, MV_XOR_SHARED_NAME ".0", xor0);
237 orion_clkdev_add(NULL, MV_XOR_SHARED_NAME ".1", xor1);
238 orion_clkdev_add("0", "pcie", pex0);
239 orion_clkdev_add("1", "pcie", pex1);
240 orion_clkdev_add(NULL, "kirkwood-i2s", audio);
241}
71 242
72/***************************************************************************** 243/*****************************************************************************
73 * EHCI0 244 * EHCI0
74 ****************************************************************************/ 245 ****************************************************************************/
75void __init kirkwood_ehci_init(void) 246void __init kirkwood_ehci_init(void)
76{ 247{
77 kirkwood_clk_ctrl |= CGC_USB0;
78 orion_ehci_init(USB_PHYS_BASE, IRQ_KIRKWOOD_USB, EHCI_PHY_NA); 248 orion_ehci_init(USB_PHYS_BASE, IRQ_KIRKWOOD_USB, EHCI_PHY_NA);
79} 249}
80 250
@@ -84,11 +254,9 @@ void __init kirkwood_ehci_init(void)
84 ****************************************************************************/ 254 ****************************************************************************/
85void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data) 255void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
86{ 256{
87 kirkwood_clk_ctrl |= CGC_GE0;
88
89 orion_ge00_init(eth_data, 257 orion_ge00_init(eth_data,
90 GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM, 258 GE00_PHYS_BASE, IRQ_KIRKWOOD_GE00_SUM,
91 IRQ_KIRKWOOD_GE00_ERR, kirkwood_tclk); 259 IRQ_KIRKWOOD_GE00_ERR);
92} 260}
93 261
94 262
@@ -97,12 +265,9 @@ void __init kirkwood_ge00_init(struct mv643xx_eth_platform_data *eth_data)
97 ****************************************************************************/ 265 ****************************************************************************/
98void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data) 266void __init kirkwood_ge01_init(struct mv643xx_eth_platform_data *eth_data)
99{ 267{
100
101 kirkwood_clk_ctrl |= CGC_GE1;
102
103 orion_ge01_init(eth_data, 268 orion_ge01_init(eth_data,
104 GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM, 269 GE01_PHYS_BASE, IRQ_KIRKWOOD_GE01_SUM,
105 IRQ_KIRKWOOD_GE01_ERR, kirkwood_tclk); 270 IRQ_KIRKWOOD_GE01_ERR);
106} 271}
107 272
108 273
@@ -144,7 +309,6 @@ static struct platform_device kirkwood_nand_flash = {
144void __init kirkwood_nand_init(struct mtd_partition *parts, int nr_parts, 309void __init kirkwood_nand_init(struct mtd_partition *parts, int nr_parts,
145 int chip_delay) 310 int chip_delay)
146{ 311{
147 kirkwood_clk_ctrl |= CGC_RUNIT;
148 kirkwood_nand_data.parts = parts; 312 kirkwood_nand_data.parts = parts;
149 kirkwood_nand_data.nr_parts = nr_parts; 313 kirkwood_nand_data.nr_parts = nr_parts;
150 kirkwood_nand_data.chip_delay = chip_delay; 314 kirkwood_nand_data.chip_delay = chip_delay;
@@ -154,7 +318,6 @@ void __init kirkwood_nand_init(struct mtd_partition *parts, int nr_parts,
154void __init kirkwood_nand_init_rnb(struct mtd_partition *parts, int nr_parts, 318void __init kirkwood_nand_init_rnb(struct mtd_partition *parts, int nr_parts,
155 int (*dev_ready)(struct mtd_info *)) 319 int (*dev_ready)(struct mtd_info *))
156{ 320{
157 kirkwood_clk_ctrl |= CGC_RUNIT;
158 kirkwood_nand_data.parts = parts; 321 kirkwood_nand_data.parts = parts;
159 kirkwood_nand_data.nr_parts = nr_parts; 322 kirkwood_nand_data.nr_parts = nr_parts;
160 kirkwood_nand_data.dev_ready = dev_ready; 323 kirkwood_nand_data.dev_ready = dev_ready;
@@ -175,10 +338,6 @@ static void __init kirkwood_rtc_init(void)
175 ****************************************************************************/ 338 ****************************************************************************/
176void __init kirkwood_sata_init(struct mv_sata_platform_data *sata_data) 339void __init kirkwood_sata_init(struct mv_sata_platform_data *sata_data)
177{ 340{
178 kirkwood_clk_ctrl |= CGC_SATA0;
179 if (sata_data->n_ports > 1)
180 kirkwood_clk_ctrl |= CGC_SATA1;
181
182 orion_sata_init(sata_data, SATA_PHYS_BASE, IRQ_KIRKWOOD_SATA); 341 orion_sata_init(sata_data, SATA_PHYS_BASE, IRQ_KIRKWOOD_SATA);
183} 342}
184 343
@@ -221,7 +380,6 @@ void __init kirkwood_sdio_init(struct mvsdio_platform_data *mvsdio_data)
221 mvsdio_data->clock = 100000000; 380 mvsdio_data->clock = 100000000;
222 else 381 else
223 mvsdio_data->clock = 200000000; 382 mvsdio_data->clock = 200000000;
224 kirkwood_clk_ctrl |= CGC_SDIO;
225 kirkwood_sdio.dev.platform_data = mvsdio_data; 383 kirkwood_sdio.dev.platform_data = mvsdio_data;
226 platform_device_register(&kirkwood_sdio); 384 platform_device_register(&kirkwood_sdio);
227} 385}
@@ -232,8 +390,7 @@ void __init kirkwood_sdio_init(struct mvsdio_platform_data *mvsdio_data)
232 ****************************************************************************/ 390 ****************************************************************************/
233void __init kirkwood_spi_init() 391void __init kirkwood_spi_init()
234{ 392{
235 kirkwood_clk_ctrl |= CGC_RUNIT; 393 orion_spi_init(SPI_PHYS_BASE);
236 orion_spi_init(SPI_PHYS_BASE, kirkwood_tclk);
237} 394}
238 395
239 396
@@ -253,7 +410,7 @@ void __init kirkwood_i2c_init(void)
253void __init kirkwood_uart0_init(void) 410void __init kirkwood_uart0_init(void)
254{ 411{
255 orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE, 412 orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE,
256 IRQ_KIRKWOOD_UART_0, kirkwood_tclk); 413 IRQ_KIRKWOOD_UART_0, tclk);
257} 414}
258 415
259 416
@@ -263,7 +420,7 @@ void __init kirkwood_uart0_init(void)
263void __init kirkwood_uart1_init(void) 420void __init kirkwood_uart1_init(void)
264{ 421{
265 orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE, 422 orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE,
266 IRQ_KIRKWOOD_UART_1, kirkwood_tclk); 423 IRQ_KIRKWOOD_UART_1, tclk);
267} 424}
268 425
269/***************************************************************************** 426/*****************************************************************************
@@ -271,7 +428,6 @@ void __init kirkwood_uart1_init(void)
271 ****************************************************************************/ 428 ****************************************************************************/
272void __init kirkwood_crypto_init(void) 429void __init kirkwood_crypto_init(void)
273{ 430{
274 kirkwood_clk_ctrl |= CGC_CRYPTO;
275 orion_crypto_init(CRYPTO_PHYS_BASE, KIRKWOOD_SRAM_PHYS_BASE, 431 orion_crypto_init(CRYPTO_PHYS_BASE, KIRKWOOD_SRAM_PHYS_BASE,
276 KIRKWOOD_SRAM_SIZE, IRQ_KIRKWOOD_CRYPTO); 432 KIRKWOOD_SRAM_SIZE, IRQ_KIRKWOOD_CRYPTO);
277} 433}
@@ -282,8 +438,6 @@ void __init kirkwood_crypto_init(void)
282 ****************************************************************************/ 438 ****************************************************************************/
283void __init kirkwood_xor0_init(void) 439void __init kirkwood_xor0_init(void)
284{ 440{
285 kirkwood_clk_ctrl |= CGC_XOR0;
286
287 orion_xor0_init(XOR0_PHYS_BASE, XOR0_HIGH_PHYS_BASE, 441 orion_xor0_init(XOR0_PHYS_BASE, XOR0_HIGH_PHYS_BASE,
288 IRQ_KIRKWOOD_XOR_00, IRQ_KIRKWOOD_XOR_01); 442 IRQ_KIRKWOOD_XOR_00, IRQ_KIRKWOOD_XOR_01);
289} 443}
@@ -294,8 +448,6 @@ void __init kirkwood_xor0_init(void)
294 ****************************************************************************/ 448 ****************************************************************************/
295void __init kirkwood_xor1_init(void) 449void __init kirkwood_xor1_init(void)
296{ 450{
297 kirkwood_clk_ctrl |= CGC_XOR1;
298
299 orion_xor1_init(XOR1_PHYS_BASE, XOR1_HIGH_PHYS_BASE, 451 orion_xor1_init(XOR1_PHYS_BASE, XOR1_HIGH_PHYS_BASE,
300 IRQ_KIRKWOOD_XOR_10, IRQ_KIRKWOOD_XOR_11); 452 IRQ_KIRKWOOD_XOR_10, IRQ_KIRKWOOD_XOR_11);
301} 453}
@@ -306,7 +458,7 @@ void __init kirkwood_xor1_init(void)
306 ****************************************************************************/ 458 ****************************************************************************/
307void __init kirkwood_wdt_init(void) 459void __init kirkwood_wdt_init(void)
308{ 460{
309 orion_wdt_init(kirkwood_tclk); 461 orion_wdt_init();
310} 462}
311 463
312 464
@@ -382,7 +534,6 @@ static struct platform_device kirkwood_pcm_device = {
382 534
383void __init kirkwood_audio_init(void) 535void __init kirkwood_audio_init(void)
384{ 536{
385 kirkwood_clk_ctrl |= CGC_AUDIO;
386 platform_device_register(&kirkwood_i2s_device); 537 platform_device_register(&kirkwood_i2s_device);
387 platform_device_register(&kirkwood_pcm_device); 538 platform_device_register(&kirkwood_pcm_device);
388} 539}
@@ -466,6 +617,9 @@ void __init kirkwood_init(void)
466 kirkwood_l2_init(); 617 kirkwood_l2_init();
467#endif 618#endif
468 619
620 /* Setup root of clk tree */
621 kirkwood_clk_init();
622
469 /* internal devices that every board has */ 623 /* internal devices that every board has */
470 kirkwood_rtc_init(); 624 kirkwood_rtc_init();
471 kirkwood_wdt_init(); 625 kirkwood_wdt_init();
@@ -478,72 +632,6 @@ void __init kirkwood_init(void)
478#endif 632#endif
479} 633}
480 634
481static int __init kirkwood_clock_gate(void)
482{
483 unsigned int curr = readl(CLOCK_GATING_CTRL);
484 u32 dev, rev;
485
486#ifdef CONFIG_OF
487 struct device_node *np;
488#endif
489 kirkwood_pcie_id(&dev, &rev);
490 printk(KERN_DEBUG "Gating clock of unused units\n");
491 printk(KERN_DEBUG "before: 0x%08x\n", curr);
492
493 /* Make sure those units are accessible */
494 writel(curr | CGC_SATA0 | CGC_SATA1 | CGC_PEX0 | CGC_PEX1, CLOCK_GATING_CTRL);
495
496#ifdef CONFIG_OF
497 np = of_find_compatible_node(NULL, NULL, "mrvl,orion-nand");
498 if (np && of_device_is_available(np)) {
499 kirkwood_clk_ctrl |= CGC_RUNIT;
500 of_node_put(np);
501 }
502#endif
503
504 /* For SATA: first shutdown the phy */
505 if (!(kirkwood_clk_ctrl & CGC_SATA0)) {
506 /* Disable PLL and IVREF */
507 writel(readl(SATA0_PHY_MODE_2) & ~0xf, SATA0_PHY_MODE_2);
508 /* Disable PHY */
509 writel(readl(SATA0_IF_CTRL) | 0x200, SATA0_IF_CTRL);
510 }
511 if (!(kirkwood_clk_ctrl & CGC_SATA1)) {
512 /* Disable PLL and IVREF */
513 writel(readl(SATA1_PHY_MODE_2) & ~0xf, SATA1_PHY_MODE_2);
514 /* Disable PHY */
515 writel(readl(SATA1_IF_CTRL) | 0x200, SATA1_IF_CTRL);
516 }
517
518 /* For PCIe: first shutdown the phy */
519 if (!(kirkwood_clk_ctrl & CGC_PEX0)) {
520 writel(readl(PCIE_LINK_CTRL) | 0x10, PCIE_LINK_CTRL);
521 while (1)
522 if (readl(PCIE_STATUS) & 0x1)
523 break;
524 writel(readl(PCIE_LINK_CTRL) & ~0x10, PCIE_LINK_CTRL);
525 }
526
527 /* For PCIe 1: first shutdown the phy */
528 if (dev == MV88F6282_DEV_ID) {
529 if (!(kirkwood_clk_ctrl & CGC_PEX1)) {
530 writel(readl(PCIE1_LINK_CTRL) | 0x10, PCIE1_LINK_CTRL);
531 while (1)
532 if (readl(PCIE1_STATUS) & 0x1)
533 break;
534 writel(readl(PCIE1_LINK_CTRL) & ~0x10, PCIE1_LINK_CTRL);
535 }
536 } else /* keep this bit set for devices that don't have PCIe1 */
537 kirkwood_clk_ctrl |= CGC_PEX1;
538
539 /* Now gate clock the required units */
540 writel(kirkwood_clk_ctrl, CLOCK_GATING_CTRL);
541 printk(KERN_DEBUG " after: 0x%08x\n", readl(CLOCK_GATING_CTRL));
542
543 return 0;
544}
545late_initcall(kirkwood_clock_gate);
546
547void kirkwood_restart(char mode, const char *cmd) 635void kirkwood_restart(char mode, const char *cmd)
548{ 636{
549 /* 637 /*
diff --git a/arch/arm/mach-kirkwood/common.h b/arch/arm/mach-kirkwood/common.h
index a34c41a5172e..9248fa2c165b 100644
--- a/arch/arm/mach-kirkwood/common.h
+++ b/arch/arm/mach-kirkwood/common.h
@@ -50,6 +50,7 @@ void kirkwood_nand_init(struct mtd_partition *parts, int nr_parts, int delay);
50void kirkwood_nand_init_rnb(struct mtd_partition *parts, int nr_parts, int (*dev_ready)(struct mtd_info *)); 50void kirkwood_nand_init_rnb(struct mtd_partition *parts, int nr_parts, int (*dev_ready)(struct mtd_info *));
51void kirkwood_audio_init(void); 51void kirkwood_audio_init(void);
52void kirkwood_restart(char, const char *); 52void kirkwood_restart(char, const char *);
53void kirkwood_clk_init(void);
53 54
54/* board init functions for boards not fully converted to fdt */ 55/* board init functions for boards not fully converted to fdt */
55#ifdef CONFIG_MACH_DREAMPLUG_DT 56#ifdef CONFIG_MACH_DREAMPLUG_DT
diff --git a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
index 957bd7997d7e..3eee37a3b501 100644
--- a/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
+++ b/arch/arm/mach-kirkwood/include/mach/bridge-regs.h
@@ -43,6 +43,22 @@
43#define L2_WRITETHROUGH 0x00000010 43#define L2_WRITETHROUGH 0x00000010
44 44
45#define CLOCK_GATING_CTRL (BRIDGE_VIRT_BASE | 0x11c) 45#define CLOCK_GATING_CTRL (BRIDGE_VIRT_BASE | 0x11c)
46#define CGC_BIT_GE0 (0)
47#define CGC_BIT_PEX0 (2)
48#define CGC_BIT_USB0 (3)
49#define CGC_BIT_SDIO (4)
50#define CGC_BIT_TSU (5)
51#define CGC_BIT_DUNIT (6)
52#define CGC_BIT_RUNIT (7)
53#define CGC_BIT_XOR0 (8)
54#define CGC_BIT_AUDIO (9)
55#define CGC_BIT_SATA0 (14)
56#define CGC_BIT_SATA1 (15)
57#define CGC_BIT_XOR1 (16)
58#define CGC_BIT_CRYPTO (17)
59#define CGC_BIT_PEX1 (18)
60#define CGC_BIT_GE1 (19)
61#define CGC_BIT_TDM (20)
46#define CGC_GE0 (1 << 0) 62#define CGC_GE0 (1 << 0)
47#define CGC_PEX0 (1 << 2) 63#define CGC_PEX0 (1 << 2)
48#define CGC_USB0 (1 << 3) 64#define CGC_USB0 (1 << 3)
diff --git a/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c b/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
index 85f6169c2484..6d8364a97810 100644
--- a/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
+++ b/arch/arm/mach-kirkwood/mv88f6281gtw_ge-setup.c
@@ -23,7 +23,6 @@
23#include <linux/gpio_keys.h> 23#include <linux/gpio_keys.h>
24#include <linux/spi/flash.h> 24#include <linux/spi/flash.h>
25#include <linux/spi/spi.h> 25#include <linux/spi/spi.h>
26#include <linux/spi/orion_spi.h>
27#include <net/dsa.h> 26#include <net/dsa.h>
28#include <asm/mach-types.h> 27#include <asm/mach-types.h>
29#include <asm/mach/arch.h> 28#include <asm/mach/arch.h>
diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c
index de373176ee67..6e8b2efa3c35 100644
--- a/arch/arm/mach-kirkwood/pcie.c
+++ b/arch/arm/mach-kirkwood/pcie.c
@@ -11,6 +11,7 @@
11#include <linux/kernel.h> 11#include <linux/kernel.h>
12#include <linux/pci.h> 12#include <linux/pci.h>
13#include <linux/slab.h> 13#include <linux/slab.h>
14#include <linux/clk.h>
14#include <video/vga.h> 15#include <video/vga.h>
15#include <asm/irq.h> 16#include <asm/irq.h>
16#include <asm/mach/pci.h> 17#include <asm/mach/pci.h>
@@ -19,6 +20,23 @@
19#include <plat/addr-map.h> 20#include <plat/addr-map.h>
20#include "common.h" 21#include "common.h"
21 22
23static void kirkwood_enable_pcie_clk(const char *port)
24{
25 struct clk *clk;
26
27 clk = clk_get_sys("pcie", port);
28 if (IS_ERR(clk)) {
29 printk(KERN_ERR "PCIE clock %s missing\n", port);
30 return;
31 }
32 clk_prepare_enable(clk);
33 clk_put(clk);
34}
35
36/* This function is called very early in the boot when probing the
37 hardware to determine what we actually are, and what rate tclk is
38 ticking at. Hence calling kirkwood_enable_pcie_clk() is not
39 possible since the clk tree has not been created yet. */
22void kirkwood_enable_pcie(void) 40void kirkwood_enable_pcie(void)
23{ 41{
24 u32 curr = readl(CLOCK_GATING_CTRL); 42 u32 curr = readl(CLOCK_GATING_CTRL);
@@ -26,7 +44,7 @@ void kirkwood_enable_pcie(void)
26 writel(curr | CGC_PEX0, CLOCK_GATING_CTRL); 44 writel(curr | CGC_PEX0, CLOCK_GATING_CTRL);
27} 45}
28 46
29void __init kirkwood_pcie_id(u32 *dev, u32 *rev) 47void kirkwood_pcie_id(u32 *dev, u32 *rev)
30{ 48{
31 kirkwood_enable_pcie(); 49 kirkwood_enable_pcie();
32 *dev = orion_pcie_dev_id((void __iomem *)PCIE_VIRT_BASE); 50 *dev = orion_pcie_dev_id((void __iomem *)PCIE_VIRT_BASE);
@@ -159,7 +177,6 @@ static void __init pcie1_ioresources_init(struct pcie_port *pp)
159 177
160static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys) 178static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys)
161{ 179{
162 extern unsigned int kirkwood_clk_ctrl;
163 struct pcie_port *pp; 180 struct pcie_port *pp;
164 int index; 181 int index;
165 182
@@ -178,11 +195,11 @@ static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys)
178 195
179 switch (index) { 196 switch (index) {
180 case 0: 197 case 0:
181 kirkwood_clk_ctrl |= CGC_PEX0; 198 kirkwood_enable_pcie_clk("0");
182 pcie0_ioresources_init(pp); 199 pcie0_ioresources_init(pp);
183 break; 200 break;
184 case 1: 201 case 1:
185 kirkwood_clk_ctrl |= CGC_PEX1; 202 kirkwood_enable_pcie_clk("1");
186 pcie1_ioresources_init(pp); 203 pcie1_ioresources_init(pp);
187 break; 204 break;
188 default: 205 default:
diff --git a/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c b/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
index fd2c9c8b6831..f742a66a7045 100644
--- a/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
+++ b/arch/arm/mach-kirkwood/rd88f6192-nas-setup.c
@@ -16,7 +16,6 @@
16#include <linux/gpio.h> 16#include <linux/gpio.h>
17#include <linux/spi/flash.h> 17#include <linux/spi/flash.h>
18#include <linux/spi/spi.h> 18#include <linux/spi/spi.h>
19#include <linux/spi/orion_spi.h>
20#include <asm/mach-types.h> 19#include <asm/mach-types.h>
21#include <asm/mach/arch.h> 20#include <asm/mach/arch.h>
22#include <mach/kirkwood.h> 21#include <mach/kirkwood.h>
diff --git a/arch/arm/mach-kirkwood/t5325-setup.c b/arch/arm/mach-kirkwood/t5325-setup.c
index f9d2a11b7f96..bad738e44044 100644
--- a/arch/arm/mach-kirkwood/t5325-setup.c
+++ b/arch/arm/mach-kirkwood/t5325-setup.c
@@ -16,7 +16,6 @@
16#include <linux/mtd/physmap.h> 16#include <linux/mtd/physmap.h>
17#include <linux/spi/flash.h> 17#include <linux/spi/flash.h>
18#include <linux/spi/spi.h> 18#include <linux/spi/spi.h>
19#include <linux/spi/orion_spi.h>
20#include <linux/i2c.h> 19#include <linux/i2c.h>
21#include <linux/mv643xx_eth.h> 20#include <linux/mv643xx_eth.h>
22#include <linux/ata_platform.h> 21#include <linux/ata_platform.h>
diff --git a/arch/arm/mach-kirkwood/tsx1x-common.c b/arch/arm/mach-kirkwood/tsx1x-common.c
index 24294b2bc469..8943ede29b44 100644
--- a/arch/arm/mach-kirkwood/tsx1x-common.c
+++ b/arch/arm/mach-kirkwood/tsx1x-common.c
@@ -4,7 +4,6 @@
4#include <linux/mtd/physmap.h> 4#include <linux/mtd/physmap.h>
5#include <linux/spi/flash.h> 5#include <linux/spi/flash.h>
6#include <linux/spi/spi.h> 6#include <linux/spi/spi.h>
7#include <linux/spi/orion_spi.h>
8#include <linux/serial_reg.h> 7#include <linux/serial_reg.h>
9#include <mach/kirkwood.h> 8#include <mach/kirkwood.h>
10#include "common.h" 9#include "common.h"
diff --git a/arch/arm/mach-msm/board-halibut.c b/arch/arm/mach-msm/board-halibut.c
index 26aac363a064..4fa3e99d9a62 100644
--- a/arch/arm/mach-msm/board-halibut.c
+++ b/arch/arm/mach-msm/board-halibut.c
@@ -94,6 +94,11 @@ static void __init halibut_map_io(void)
94 msm_clock_init(msm_clocks_7x01a, msm_num_clocks_7x01a); 94 msm_clock_init(msm_clocks_7x01a, msm_num_clocks_7x01a);
95} 95}
96 96
97static void __init halibut_init_late(void)
98{
99 smd_debugfs_init();
100}
101
97MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)") 102MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)")
98 .atag_offset = 0x100, 103 .atag_offset = 0x100,
99 .fixup = halibut_fixup, 104 .fixup = halibut_fixup,
@@ -101,5 +106,6 @@ MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)")
101 .init_early = halibut_init_early, 106 .init_early = halibut_init_early,
102 .init_irq = halibut_init_irq, 107 .init_irq = halibut_init_irq,
103 .init_machine = halibut_init, 108 .init_machine = halibut_init,
109 .init_late = halibut_init_late,
104 .timer = &msm_timer, 110 .timer = &msm_timer,
105MACHINE_END 111MACHINE_END
diff --git a/arch/arm/mach-msm/board-mahimahi.c b/arch/arm/mach-msm/board-mahimahi.c
index 5a4882fc6f7a..cf1f89a5dc62 100644
--- a/arch/arm/mach-msm/board-mahimahi.c
+++ b/arch/arm/mach-msm/board-mahimahi.c
@@ -71,6 +71,11 @@ static void __init mahimahi_map_io(void)
71 msm_clock_init(); 71 msm_clock_init();
72} 72}
73 73
74static void __init mahimahi_init_late(void)
75{
76 smd_debugfs_init();
77}
78
74extern struct sys_timer msm_timer; 79extern struct sys_timer msm_timer;
75 80
76MACHINE_START(MAHIMAHI, "mahimahi") 81MACHINE_START(MAHIMAHI, "mahimahi")
@@ -79,5 +84,6 @@ MACHINE_START(MAHIMAHI, "mahimahi")
79 .map_io = mahimahi_map_io, 84 .map_io = mahimahi_map_io,
80 .init_irq = msm_init_irq, 85 .init_irq = msm_init_irq,
81 .init_machine = mahimahi_init, 86 .init_machine = mahimahi_init,
87 .init_late = mahimahi_init_late,
82 .timer = &msm_timer, 88 .timer = &msm_timer,
83MACHINE_END 89MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm7x27.c b/arch/arm/mach-msm/board-msm7x27.c
index 6d84ee740df4..451ab1d43c92 100644
--- a/arch/arm/mach-msm/board-msm7x27.c
+++ b/arch/arm/mach-msm/board-msm7x27.c
@@ -128,11 +128,17 @@ static void __init msm7x2x_map_io(void)
128#endif 128#endif
129} 129}
130 130
131static void __init msm7x2x_init_late(void)
132{
133 smd_debugfs_init();
134}
135
131MACHINE_START(MSM7X27_SURF, "QCT MSM7x27 SURF") 136MACHINE_START(MSM7X27_SURF, "QCT MSM7x27 SURF")
132 .atag_offset = 0x100, 137 .atag_offset = 0x100,
133 .map_io = msm7x2x_map_io, 138 .map_io = msm7x2x_map_io,
134 .init_irq = msm7x2x_init_irq, 139 .init_irq = msm7x2x_init_irq,
135 .init_machine = msm7x2x_init, 140 .init_machine = msm7x2x_init,
141 .init_late = msm7x2x_init_late,
136 .timer = &msm_timer, 142 .timer = &msm_timer,
137MACHINE_END 143MACHINE_END
138 144
@@ -141,6 +147,7 @@ MACHINE_START(MSM7X27_FFA, "QCT MSM7x27 FFA")
141 .map_io = msm7x2x_map_io, 147 .map_io = msm7x2x_map_io,
142 .init_irq = msm7x2x_init_irq, 148 .init_irq = msm7x2x_init_irq,
143 .init_machine = msm7x2x_init, 149 .init_machine = msm7x2x_init,
150 .init_late = msm7x2x_init_late,
144 .timer = &msm_timer, 151 .timer = &msm_timer,
145MACHINE_END 152MACHINE_END
146 153
@@ -149,6 +156,7 @@ MACHINE_START(MSM7X25_SURF, "QCT MSM7x25 SURF")
149 .map_io = msm7x2x_map_io, 156 .map_io = msm7x2x_map_io,
150 .init_irq = msm7x2x_init_irq, 157 .init_irq = msm7x2x_init_irq,
151 .init_machine = msm7x2x_init, 158 .init_machine = msm7x2x_init,
159 .init_late = msm7x2x_init_late,
152 .timer = &msm_timer, 160 .timer = &msm_timer,
153MACHINE_END 161MACHINE_END
154 162
@@ -157,5 +165,6 @@ MACHINE_START(MSM7X25_FFA, "QCT MSM7x25 FFA")
157 .map_io = msm7x2x_map_io, 165 .map_io = msm7x2x_map_io,
158 .init_irq = msm7x2x_init_irq, 166 .init_irq = msm7x2x_init_irq,
159 .init_machine = msm7x2x_init, 167 .init_machine = msm7x2x_init,
168 .init_late = msm7x2x_init_late,
160 .timer = &msm_timer, 169 .timer = &msm_timer,
161MACHINE_END 170MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm7x30.c b/arch/arm/mach-msm/board-msm7x30.c
index 75b3cfcada6d..a5001378135d 100644
--- a/arch/arm/mach-msm/board-msm7x30.c
+++ b/arch/arm/mach-msm/board-msm7x30.c
@@ -119,6 +119,11 @@ static void __init msm7x30_map_io(void)
119 msm_clock_init(msm_clocks_7x30, msm_num_clocks_7x30); 119 msm_clock_init(msm_clocks_7x30, msm_num_clocks_7x30);
120} 120}
121 121
122static void __init msm7x30_init_late(void)
123{
124 smd_debugfs_init();
125}
126
122MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF") 127MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF")
123 .atag_offset = 0x100, 128 .atag_offset = 0x100,
124 .fixup = msm7x30_fixup, 129 .fixup = msm7x30_fixup,
@@ -126,6 +131,7 @@ MACHINE_START(MSM7X30_SURF, "QCT MSM7X30 SURF")
126 .map_io = msm7x30_map_io, 131 .map_io = msm7x30_map_io,
127 .init_irq = msm7x30_init_irq, 132 .init_irq = msm7x30_init_irq,
128 .init_machine = msm7x30_init, 133 .init_machine = msm7x30_init,
134 .init_late = msm7x30_init_late,
129 .timer = &msm_timer, 135 .timer = &msm_timer,
130MACHINE_END 136MACHINE_END
131 137
@@ -136,6 +142,7 @@ MACHINE_START(MSM7X30_FFA, "QCT MSM7X30 FFA")
136 .map_io = msm7x30_map_io, 142 .map_io = msm7x30_map_io,
137 .init_irq = msm7x30_init_irq, 143 .init_irq = msm7x30_init_irq,
138 .init_machine = msm7x30_init, 144 .init_machine = msm7x30_init,
145 .init_late = msm7x30_init_late,
139 .timer = &msm_timer, 146 .timer = &msm_timer,
140MACHINE_END 147MACHINE_END
141 148
@@ -146,5 +153,6 @@ MACHINE_START(MSM7X30_FLUID, "QCT MSM7X30 FLUID")
146 .map_io = msm7x30_map_io, 153 .map_io = msm7x30_map_io,
147 .init_irq = msm7x30_init_irq, 154 .init_irq = msm7x30_init_irq,
148 .init_machine = msm7x30_init, 155 .init_machine = msm7x30_init,
156 .init_late = msm7x30_init_late,
149 .timer = &msm_timer, 157 .timer = &msm_timer,
150MACHINE_END 158MACHINE_END
diff --git a/arch/arm/mach-msm/board-msm8960.c b/arch/arm/mach-msm/board-msm8960.c
index ed3598128530..65f4a1daa2e5 100644
--- a/arch/arm/mach-msm/board-msm8960.c
+++ b/arch/arm/mach-msm/board-msm8960.c
@@ -93,6 +93,11 @@ static void __init msm8960_rumi3_init(void)
93 platform_add_devices(rumi3_devices, ARRAY_SIZE(rumi3_devices)); 93 platform_add_devices(rumi3_devices, ARRAY_SIZE(rumi3_devices));
94} 94}
95 95
96static void __init msm8960_init_late(void)
97{
98 smd_debugfs_init();
99}
100
96MACHINE_START(MSM8960_SIM, "QCT MSM8960 SIMULATOR") 101MACHINE_START(MSM8960_SIM, "QCT MSM8960 SIMULATOR")
97 .fixup = msm8960_fixup, 102 .fixup = msm8960_fixup,
98 .reserve = msm8960_reserve, 103 .reserve = msm8960_reserve,
@@ -101,6 +106,7 @@ MACHINE_START(MSM8960_SIM, "QCT MSM8960 SIMULATOR")
101 .timer = &msm_timer, 106 .timer = &msm_timer,
102 .handle_irq = gic_handle_irq, 107 .handle_irq = gic_handle_irq,
103 .init_machine = msm8960_sim_init, 108 .init_machine = msm8960_sim_init,
109 .init_late = msm8960_init_late,
104MACHINE_END 110MACHINE_END
105 111
106MACHINE_START(MSM8960_RUMI3, "QCT MSM8960 RUMI3") 112MACHINE_START(MSM8960_RUMI3, "QCT MSM8960 RUMI3")
@@ -111,5 +117,6 @@ MACHINE_START(MSM8960_RUMI3, "QCT MSM8960 RUMI3")
111 .timer = &msm_timer, 117 .timer = &msm_timer,
112 .handle_irq = gic_handle_irq, 118 .handle_irq = gic_handle_irq,
113 .init_machine = msm8960_rumi3_init, 119 .init_machine = msm8960_rumi3_init,
120 .init_late = msm8960_init_late,
114MACHINE_END 121MACHINE_END
115 122
diff --git a/arch/arm/mach-msm/board-msm8x60.c b/arch/arm/mach-msm/board-msm8x60.c
index fb3496a52ef4..e37a724cd1eb 100644
--- a/arch/arm/mach-msm/board-msm8x60.c
+++ b/arch/arm/mach-msm/board-msm8x60.c
@@ -81,6 +81,11 @@ static void __init msm8x60_init(void)
81{ 81{
82} 82}
83 83
84static void __init msm8x60_init_late(void)
85{
86 smd_debugfs_init();
87}
88
84#ifdef CONFIG_OF 89#ifdef CONFIG_OF
85static struct of_dev_auxdata msm_auxdata_lookup[] __initdata = { 90static struct of_dev_auxdata msm_auxdata_lookup[] __initdata = {
86 {} 91 {}
@@ -111,6 +116,7 @@ MACHINE_START(MSM8X60_RUMI3, "QCT MSM8X60 RUMI3")
111 .init_irq = msm8x60_init_irq, 116 .init_irq = msm8x60_init_irq,
112 .handle_irq = gic_handle_irq, 117 .handle_irq = gic_handle_irq,
113 .init_machine = msm8x60_init, 118 .init_machine = msm8x60_init,
119 .init_late = msm8x60_init_late,
114 .timer = &msm_timer, 120 .timer = &msm_timer,
115MACHINE_END 121MACHINE_END
116 122
@@ -121,6 +127,7 @@ MACHINE_START(MSM8X60_SURF, "QCT MSM8X60 SURF")
121 .init_irq = msm8x60_init_irq, 127 .init_irq = msm8x60_init_irq,
122 .handle_irq = gic_handle_irq, 128 .handle_irq = gic_handle_irq,
123 .init_machine = msm8x60_init, 129 .init_machine = msm8x60_init,
130 .init_late = msm8x60_init_late,
124 .timer = &msm_timer, 131 .timer = &msm_timer,
125MACHINE_END 132MACHINE_END
126 133
@@ -131,6 +138,7 @@ MACHINE_START(MSM8X60_SIM, "QCT MSM8X60 SIMULATOR")
131 .init_irq = msm8x60_init_irq, 138 .init_irq = msm8x60_init_irq,
132 .handle_irq = gic_handle_irq, 139 .handle_irq = gic_handle_irq,
133 .init_machine = msm8x60_init, 140 .init_machine = msm8x60_init,
141 .init_late = msm8x60_init_late,
134 .timer = &msm_timer, 142 .timer = &msm_timer,
135MACHINE_END 143MACHINE_END
136 144
@@ -141,6 +149,7 @@ MACHINE_START(MSM8X60_FFA, "QCT MSM8X60 FFA")
141 .init_irq = msm8x60_init_irq, 149 .init_irq = msm8x60_init_irq,
142 .handle_irq = gic_handle_irq, 150 .handle_irq = gic_handle_irq,
143 .init_machine = msm8x60_init, 151 .init_machine = msm8x60_init,
152 .init_late = msm8x60_init_late,
144 .timer = &msm_timer, 153 .timer = &msm_timer,
145MACHINE_END 154MACHINE_END
146 155
@@ -150,6 +159,7 @@ DT_MACHINE_START(MSM_DT, "Qualcomm MSM (Flattened Device Tree)")
150 .map_io = msm8x60_map_io, 159 .map_io = msm8x60_map_io,
151 .init_irq = msm8x60_init_irq, 160 .init_irq = msm8x60_init_irq,
152 .init_machine = msm8x60_dt_init, 161 .init_machine = msm8x60_dt_init,
162 .init_late = msm8x60_init_late,
153 .timer = &msm_timer, 163 .timer = &msm_timer,
154 .dt_compat = msm8x60_fluid_match, 164 .dt_compat = msm8x60_fluid_match,
155MACHINE_END 165MACHINE_END
diff --git a/arch/arm/mach-msm/board-qsd8x50.c b/arch/arm/mach-msm/board-qsd8x50.c
index fbaa4ed95a3c..c8fe0edb9761 100644
--- a/arch/arm/mach-msm/board-qsd8x50.c
+++ b/arch/arm/mach-msm/board-qsd8x50.c
@@ -190,11 +190,17 @@ static void __init qsd8x50_init(void)
190 qsd8x50_init_mmc(); 190 qsd8x50_init_mmc();
191} 191}
192 192
193static void __init qsd8x50_init_late(void)
194{
195 smd_debugfs_init();
196}
197
193MACHINE_START(QSD8X50_SURF, "QCT QSD8X50 SURF") 198MACHINE_START(QSD8X50_SURF, "QCT QSD8X50 SURF")
194 .atag_offset = 0x100, 199 .atag_offset = 0x100,
195 .map_io = qsd8x50_map_io, 200 .map_io = qsd8x50_map_io,
196 .init_irq = qsd8x50_init_irq, 201 .init_irq = qsd8x50_init_irq,
197 .init_machine = qsd8x50_init, 202 .init_machine = qsd8x50_init,
203 .init_late = qsd8x50_init_late,
198 .timer = &msm_timer, 204 .timer = &msm_timer,
199MACHINE_END 205MACHINE_END
200 206
@@ -203,5 +209,6 @@ MACHINE_START(QSD8X50A_ST1_5, "QCT QSD8X50A ST1.5")
203 .map_io = qsd8x50_map_io, 209 .map_io = qsd8x50_map_io,
204 .init_irq = qsd8x50_init_irq, 210 .init_irq = qsd8x50_init_irq,
205 .init_machine = qsd8x50_init, 211 .init_machine = qsd8x50_init,
212 .init_late = qsd8x50_init_late,
206 .timer = &msm_timer, 213 .timer = &msm_timer,
207MACHINE_END 214MACHINE_END
diff --git a/arch/arm/mach-msm/board-sapphire.c b/arch/arm/mach-msm/board-sapphire.c
index 4a8ea0d40b6f..2e569ab10eef 100644
--- a/arch/arm/mach-msm/board-sapphire.c
+++ b/arch/arm/mach-msm/board-sapphire.c
@@ -101,6 +101,11 @@ static void __init sapphire_map_io(void)
101 msm_clock_init(); 101 msm_clock_init();
102} 102}
103 103
104static void __init sapphire_init_late(void)
105{
106 smd_debugfs_init();
107}
108
104MACHINE_START(SAPPHIRE, "sapphire") 109MACHINE_START(SAPPHIRE, "sapphire")
105/* Maintainer: Brian Swetland <swetland@google.com> */ 110/* Maintainer: Brian Swetland <swetland@google.com> */
106 .atag_offset = 0x100, 111 .atag_offset = 0x100,
@@ -108,5 +113,6 @@ MACHINE_START(SAPPHIRE, "sapphire")
108 .map_io = sapphire_map_io, 113 .map_io = sapphire_map_io,
109 .init_irq = sapphire_init_irq, 114 .init_irq = sapphire_init_irq,
110 .init_machine = sapphire_init, 115 .init_machine = sapphire_init,
116 .init_late = sapphire_init_late,
111 .timer = &msm_timer, 117 .timer = &msm_timer,
112MACHINE_END 118MACHINE_END
diff --git a/arch/arm/mach-msm/board-trout.c b/arch/arm/mach-msm/board-trout.c
index d4060a37e23d..bbe13f12fa01 100644
--- a/arch/arm/mach-msm/board-trout.c
+++ b/arch/arm/mach-msm/board-trout.c
@@ -98,6 +98,11 @@ static void __init trout_map_io(void)
98 msm_clock_init(msm_clocks_7x01a, msm_num_clocks_7x01a); 98 msm_clock_init(msm_clocks_7x01a, msm_num_clocks_7x01a);
99} 99}
100 100
101static void __init trout_init_late(void)
102{
103 smd_debugfs_init();
104}
105
101MACHINE_START(TROUT, "HTC Dream") 106MACHINE_START(TROUT, "HTC Dream")
102 .atag_offset = 0x100, 107 .atag_offset = 0x100,
103 .fixup = trout_fixup, 108 .fixup = trout_fixup,
@@ -105,5 +110,6 @@ MACHINE_START(TROUT, "HTC Dream")
105 .init_early = trout_init_early, 110 .init_early = trout_init_early,
106 .init_irq = trout_init_irq, 111 .init_irq = trout_init_irq,
107 .init_machine = trout_init, 112 .init_machine = trout_init,
113 .init_late = trout_init_late,
108 .timer = &msm_timer, 114 .timer = &msm_timer,
109MACHINE_END 115MACHINE_END
diff --git a/arch/arm/mach-msm/include/mach/board.h b/arch/arm/mach-msm/include/mach/board.h
index 2ce8f1f2fc4d..435f8edfafd1 100644
--- a/arch/arm/mach-msm/include/mach/board.h
+++ b/arch/arm/mach-msm/include/mach/board.h
@@ -47,4 +47,10 @@ int __init msm_add_sdcc(unsigned int controller,
47 struct msm_mmc_platform_data *plat, 47 struct msm_mmc_platform_data *plat,
48 unsigned int stat_irq, unsigned long stat_irq_flags); 48 unsigned int stat_irq, unsigned long stat_irq_flags);
49 49
50#if defined(CONFIG_MSM_SMD) && defined(CONFIG_DEBUG_FS)
51int smd_debugfs_init(void);
52#else
53static inline int smd_debugfs_init(void) { return 0; }
54#endif
55
50#endif 56#endif
diff --git a/arch/arm/mach-msm/smd_debug.c b/arch/arm/mach-msm/smd_debug.c
index c56df9e932ae..8056b3e5590f 100644
--- a/arch/arm/mach-msm/smd_debug.c
+++ b/arch/arm/mach-msm/smd_debug.c
@@ -216,7 +216,7 @@ static void debug_create(const char *name, umode_t mode,
216 debugfs_create_file(name, mode, dent, fill, &debug_ops); 216 debugfs_create_file(name, mode, dent, fill, &debug_ops);
217} 217}
218 218
219static int smd_debugfs_init(void) 219int __init smd_debugfs_init(void)
220{ 220{
221 struct dentry *dent; 221 struct dentry *dent;
222 222
@@ -234,7 +234,6 @@ static int smd_debugfs_init(void)
234 return 0; 234 return 0;
235} 235}
236 236
237late_initcall(smd_debugfs_init);
238#endif 237#endif
239 238
240 239
diff --git a/arch/arm/mach-mv78xx0/common.c b/arch/arm/mach-mv78xx0/common.c
index a5dcf766a3f9..b4c53b846c9c 100644
--- a/arch/arm/mach-mv78xx0/common.c
+++ b/arch/arm/mach-mv78xx0/common.c
@@ -13,6 +13,7 @@
13#include <linux/platform_device.h> 13#include <linux/platform_device.h>
14#include <linux/serial_8250.h> 14#include <linux/serial_8250.h>
15#include <linux/ata_platform.h> 15#include <linux/ata_platform.h>
16#include <linux/clk-provider.h>
16#include <linux/ethtool.h> 17#include <linux/ethtool.h>
17#include <asm/mach/map.h> 18#include <asm/mach/map.h>
18#include <asm/mach/time.h> 19#include <asm/mach/time.h>
@@ -103,24 +104,24 @@ static void get_pclk_l2clk(int hclk, int core_index, int *pclk, int *l2clk)
103 104
104static int get_tclk(void) 105static int get_tclk(void)
105{ 106{
106 int tclk; 107 int tclk_freq;
107 108
108 /* 109 /*
109 * TCLK tick rate is configured by DEV_A[2:0] strap pins. 110 * TCLK tick rate is configured by DEV_A[2:0] strap pins.
110 */ 111 */
111 switch ((readl(SAMPLE_AT_RESET_HIGH) >> 6) & 7) { 112 switch ((readl(SAMPLE_AT_RESET_HIGH) >> 6) & 7) {
112 case 1: 113 case 1:
113 tclk = 166666667; 114 tclk_freq = 166666667;
114 break; 115 break;
115 case 3: 116 case 3:
116 tclk = 200000000; 117 tclk_freq = 200000000;
117 break; 118 break;
118 default: 119 default:
119 panic("unknown TCLK PLL setting: %.8x\n", 120 panic("unknown TCLK PLL setting: %.8x\n",
120 readl(SAMPLE_AT_RESET_HIGH)); 121 readl(SAMPLE_AT_RESET_HIGH));
121 } 122 }
122 123
123 return tclk; 124 return tclk_freq;
124} 125}
125 126
126 127
@@ -166,6 +167,19 @@ void __init mv78xx0_map_io(void)
166 167
167 168
168/***************************************************************************** 169/*****************************************************************************
170 * CLK tree
171 ****************************************************************************/
172static struct clk *tclk;
173
174static void __init clk_init(void)
175{
176 tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
177 get_tclk());
178
179 orion_clkdev_init(tclk);
180}
181
182/*****************************************************************************
169 * EHCI 183 * EHCI
170 ****************************************************************************/ 184 ****************************************************************************/
171void __init mv78xx0_ehci0_init(void) 185void __init mv78xx0_ehci0_init(void)
@@ -199,7 +213,7 @@ void __init mv78xx0_ge00_init(struct mv643xx_eth_platform_data *eth_data)
199{ 213{
200 orion_ge00_init(eth_data, 214 orion_ge00_init(eth_data,
201 GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM, 215 GE00_PHYS_BASE, IRQ_MV78XX0_GE00_SUM,
202 IRQ_MV78XX0_GE_ERR, get_tclk()); 216 IRQ_MV78XX0_GE_ERR);
203} 217}
204 218
205 219
@@ -210,7 +224,7 @@ void __init mv78xx0_ge01_init(struct mv643xx_eth_platform_data *eth_data)
210{ 224{
211 orion_ge01_init(eth_data, 225 orion_ge01_init(eth_data,
212 GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM, 226 GE01_PHYS_BASE, IRQ_MV78XX0_GE01_SUM,
213 NO_IRQ, get_tclk()); 227 NO_IRQ);
214} 228}
215 229
216 230
@@ -234,7 +248,7 @@ void __init mv78xx0_ge10_init(struct mv643xx_eth_platform_data *eth_data)
234 248
235 orion_ge10_init(eth_data, 249 orion_ge10_init(eth_data,
236 GE10_PHYS_BASE, IRQ_MV78XX0_GE10_SUM, 250 GE10_PHYS_BASE, IRQ_MV78XX0_GE10_SUM,
237 NO_IRQ, get_tclk()); 251 NO_IRQ);
238} 252}
239 253
240 254
@@ -258,7 +272,7 @@ void __init mv78xx0_ge11_init(struct mv643xx_eth_platform_data *eth_data)
258 272
259 orion_ge11_init(eth_data, 273 orion_ge11_init(eth_data,
260 GE11_PHYS_BASE, IRQ_MV78XX0_GE11_SUM, 274 GE11_PHYS_BASE, IRQ_MV78XX0_GE11_SUM,
261 NO_IRQ, get_tclk()); 275 NO_IRQ);
262} 276}
263 277
264/***************************************************************************** 278/*****************************************************************************
@@ -285,7 +299,7 @@ void __init mv78xx0_sata_init(struct mv_sata_platform_data *sata_data)
285void __init mv78xx0_uart0_init(void) 299void __init mv78xx0_uart0_init(void)
286{ 300{
287 orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE, 301 orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE,
288 IRQ_MV78XX0_UART_0, get_tclk()); 302 IRQ_MV78XX0_UART_0, tclk);
289} 303}
290 304
291 305
@@ -295,7 +309,7 @@ void __init mv78xx0_uart0_init(void)
295void __init mv78xx0_uart1_init(void) 309void __init mv78xx0_uart1_init(void)
296{ 310{
297 orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE, 311 orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE,
298 IRQ_MV78XX0_UART_1, get_tclk()); 312 IRQ_MV78XX0_UART_1, tclk);
299} 313}
300 314
301 315
@@ -305,7 +319,7 @@ void __init mv78xx0_uart1_init(void)
305void __init mv78xx0_uart2_init(void) 319void __init mv78xx0_uart2_init(void)
306{ 320{
307 orion_uart2_init(UART2_VIRT_BASE, UART2_PHYS_BASE, 321 orion_uart2_init(UART2_VIRT_BASE, UART2_PHYS_BASE,
308 IRQ_MV78XX0_UART_2, get_tclk()); 322 IRQ_MV78XX0_UART_2, tclk);
309} 323}
310 324
311/***************************************************************************** 325/*****************************************************************************
@@ -314,7 +328,7 @@ void __init mv78xx0_uart2_init(void)
314void __init mv78xx0_uart3_init(void) 328void __init mv78xx0_uart3_init(void)
315{ 329{
316 orion_uart3_init(UART3_VIRT_BASE, UART3_PHYS_BASE, 330 orion_uart3_init(UART3_VIRT_BASE, UART3_PHYS_BASE,
317 IRQ_MV78XX0_UART_3, get_tclk()); 331 IRQ_MV78XX0_UART_3, tclk);
318} 332}
319 333
320/***************************************************************************** 334/*****************************************************************************
@@ -378,25 +392,26 @@ void __init mv78xx0_init(void)
378 int hclk; 392 int hclk;
379 int pclk; 393 int pclk;
380 int l2clk; 394 int l2clk;
381 int tclk;
382 395
383 core_index = mv78xx0_core_index(); 396 core_index = mv78xx0_core_index();
384 hclk = get_hclk(); 397 hclk = get_hclk();
385 get_pclk_l2clk(hclk, core_index, &pclk, &l2clk); 398 get_pclk_l2clk(hclk, core_index, &pclk, &l2clk);
386 tclk = get_tclk();
387 399
388 printk(KERN_INFO "%s ", mv78xx0_id()); 400 printk(KERN_INFO "%s ", mv78xx0_id());
389 printk("core #%d, ", core_index); 401 printk("core #%d, ", core_index);
390 printk("PCLK = %dMHz, ", (pclk + 499999) / 1000000); 402 printk("PCLK = %dMHz, ", (pclk + 499999) / 1000000);
391 printk("L2 = %dMHz, ", (l2clk + 499999) / 1000000); 403 printk("L2 = %dMHz, ", (l2clk + 499999) / 1000000);
392 printk("HCLK = %dMHz, ", (hclk + 499999) / 1000000); 404 printk("HCLK = %dMHz, ", (hclk + 499999) / 1000000);
393 printk("TCLK = %dMHz\n", (tclk + 499999) / 1000000); 405 printk("TCLK = %dMHz\n", (get_tclk() + 499999) / 1000000);
394 406
395 mv78xx0_setup_cpu_mbus(); 407 mv78xx0_setup_cpu_mbus();
396 408
397#ifdef CONFIG_CACHE_FEROCEON_L2 409#ifdef CONFIG_CACHE_FEROCEON_L2
398 feroceon_l2_init(is_l2_writethrough()); 410 feroceon_l2_init(is_l2_writethrough());
399#endif 411#endif
412
413 /* Setup root of clk tree */
414 clk_init();
400} 415}
401 416
402void mv78xx0_restart(char mode, const char *cmd) 417void mv78xx0_restart(char mode, const char *cmd)
diff --git a/arch/arm/mach-mxs/Kconfig b/arch/arm/mach-mxs/Kconfig
index 07d5383d68ee..91cf0625819c 100644
--- a/arch/arm/mach-mxs/Kconfig
+++ b/arch/arm/mach-mxs/Kconfig
@@ -7,18 +7,28 @@ config MXS_OCOTP
7 7
8config SOC_IMX23 8config SOC_IMX23
9 bool 9 bool
10 select ARM_AMBA
10 select CPU_ARM926T 11 select CPU_ARM926T
11 select HAVE_PWM 12 select HAVE_PWM
12 select PINCTRL_IMX23 13 select PINCTRL_IMX23
13 14
14config SOC_IMX28 15config SOC_IMX28
15 bool 16 bool
17 select ARM_AMBA
16 select CPU_ARM926T 18 select CPU_ARM926T
17 select HAVE_PWM 19 select HAVE_PWM
18 select PINCTRL_IMX28 20 select PINCTRL_IMX28
19 21
20comment "MXS platforms:" 22comment "MXS platforms:"
21 23
24config MACH_MXS_DT
25 bool "Support MXS platforms from device tree"
26 select SOC_IMX23
27 select SOC_IMX28
28 help
29 Include support for Freescale MXS platforms(i.MX23 and i.MX28)
30 using the device tree for discovery
31
22config MACH_STMP378X_DEVB 32config MACH_STMP378X_DEVB
23 bool "Support STMP378x_devb Platform" 33 bool "Support STMP378x_devb Platform"
24 select SOC_IMX23 34 select SOC_IMX23
diff --git a/arch/arm/mach-mxs/Makefile b/arch/arm/mach-mxs/Makefile
index 908bf9a567f1..e41590ccb437 100644
--- a/arch/arm/mach-mxs/Makefile
+++ b/arch/arm/mach-mxs/Makefile
@@ -1,12 +1,10 @@
1# Common support 1# Common support
2obj-y := clock.o devices.o icoll.o iomux.o system.o timer.o mm.o 2obj-y := devices.o icoll.o iomux.o system.o timer.o mm.o
3 3
4obj-$(CONFIG_MXS_OCOTP) += ocotp.o 4obj-$(CONFIG_MXS_OCOTP) += ocotp.o
5obj-$(CONFIG_PM) += pm.o 5obj-$(CONFIG_PM) += pm.o
6 6
7obj-$(CONFIG_SOC_IMX23) += clock-mx23.o 7obj-$(CONFIG_MACH_MXS_DT) += mach-mxs.o
8obj-$(CONFIG_SOC_IMX28) += clock-mx28.o
9
10obj-$(CONFIG_MACH_STMP378X_DEVB) += mach-stmp378x_devb.o 8obj-$(CONFIG_MACH_STMP378X_DEVB) += mach-stmp378x_devb.o
11obj-$(CONFIG_MACH_MX23EVK) += mach-mx23evk.o 9obj-$(CONFIG_MACH_MX23EVK) += mach-mx23evk.o
12obj-$(CONFIG_MACH_MX28EVK) += mach-mx28evk.o 10obj-$(CONFIG_MACH_MX28EVK) += mach-mx28evk.o
diff --git a/arch/arm/mach-mxs/clock-mx23.c b/arch/arm/mach-mxs/clock-mx23.c
deleted file mode 100644
index e3ac52c34019..000000000000
--- a/arch/arm/mach-mxs/clock-mx23.c
+++ /dev/null
@@ -1,536 +0,0 @@
1/*
2 * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19#include <linux/mm.h>
20#include <linux/delay.h>
21#include <linux/clk.h>
22#include <linux/io.h>
23#include <linux/jiffies.h>
24#include <linux/clkdev.h>
25
26#include <asm/clkdev.h>
27#include <asm/div64.h>
28
29#include <mach/mx23.h>
30#include <mach/common.h>
31#include <mach/clock.h>
32
33#include "regs-clkctrl-mx23.h"
34
35#define CLKCTRL_BASE_ADDR MX23_IO_ADDRESS(MX23_CLKCTRL_BASE_ADDR)
36#define DIGCTRL_BASE_ADDR MX23_IO_ADDRESS(MX23_DIGCTL_BASE_ADDR)
37
38#define PARENT_RATE_SHIFT 8
39
40static int _raw_clk_enable(struct clk *clk)
41{
42 u32 reg;
43
44 if (clk->enable_reg) {
45 reg = __raw_readl(clk->enable_reg);
46 reg &= ~(1 << clk->enable_shift);
47 __raw_writel(reg, clk->enable_reg);
48 }
49
50 return 0;
51}
52
53static void _raw_clk_disable(struct clk *clk)
54{
55 u32 reg;
56
57 if (clk->enable_reg) {
58 reg = __raw_readl(clk->enable_reg);
59 reg |= 1 << clk->enable_shift;
60 __raw_writel(reg, clk->enable_reg);
61 }
62}
63
64/*
65 * ref_xtal_clk
66 */
67static unsigned long ref_xtal_clk_get_rate(struct clk *clk)
68{
69 return 24000000;
70}
71
72static struct clk ref_xtal_clk = {
73 .get_rate = ref_xtal_clk_get_rate,
74};
75
76/*
77 * pll_clk
78 */
79static unsigned long pll_clk_get_rate(struct clk *clk)
80{
81 return 480000000;
82}
83
84static int pll_clk_enable(struct clk *clk)
85{
86 __raw_writel(BM_CLKCTRL_PLLCTRL0_POWER |
87 BM_CLKCTRL_PLLCTRL0_EN_USB_CLKS,
88 CLKCTRL_BASE_ADDR + HW_CLKCTRL_PLLCTRL0_SET);
89
90 /* Only a 10us delay is need. PLLCTRL1 LOCK bitfied is only a timer
91 * and is incorrect (excessive). Per definition of the PLLCTRL0
92 * POWER field, waiting at least 10us.
93 */
94 udelay(10);
95
96 return 0;
97}
98
99static void pll_clk_disable(struct clk *clk)
100{
101 __raw_writel(BM_CLKCTRL_PLLCTRL0_POWER |
102 BM_CLKCTRL_PLLCTRL0_EN_USB_CLKS,
103 CLKCTRL_BASE_ADDR + HW_CLKCTRL_PLLCTRL0_CLR);
104}
105
106static struct clk pll_clk = {
107 .get_rate = pll_clk_get_rate,
108 .enable = pll_clk_enable,
109 .disable = pll_clk_disable,
110 .parent = &ref_xtal_clk,
111};
112
113/*
114 * ref_clk
115 */
116#define _CLK_GET_RATE_REF(name, sr, ss) \
117static unsigned long name##_get_rate(struct clk *clk) \
118{ \
119 unsigned long parent_rate; \
120 u32 reg, div; \
121 \
122 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##sr); \
123 div = (reg >> BP_CLKCTRL_##sr##_##ss##FRAC) & 0x3f; \
124 parent_rate = clk_get_rate(clk->parent); \
125 \
126 return SH_DIV((parent_rate >> PARENT_RATE_SHIFT) * 18, \
127 div, PARENT_RATE_SHIFT); \
128}
129
130_CLK_GET_RATE_REF(ref_cpu_clk, FRAC, CPU)
131_CLK_GET_RATE_REF(ref_emi_clk, FRAC, EMI)
132_CLK_GET_RATE_REF(ref_pix_clk, FRAC, PIX)
133_CLK_GET_RATE_REF(ref_io_clk, FRAC, IO)
134
135#define _DEFINE_CLOCK_REF(name, er, es) \
136 static struct clk name = { \
137 .enable_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_##er, \
138 .enable_shift = BP_CLKCTRL_##er##_CLKGATE##es, \
139 .get_rate = name##_get_rate, \
140 .enable = _raw_clk_enable, \
141 .disable = _raw_clk_disable, \
142 .parent = &pll_clk, \
143 }
144
145_DEFINE_CLOCK_REF(ref_cpu_clk, FRAC, CPU);
146_DEFINE_CLOCK_REF(ref_emi_clk, FRAC, EMI);
147_DEFINE_CLOCK_REF(ref_pix_clk, FRAC, PIX);
148_DEFINE_CLOCK_REF(ref_io_clk, FRAC, IO);
149
150/*
151 * General clocks
152 *
153 * clk_get_rate
154 */
155static unsigned long rtc_clk_get_rate(struct clk *clk)
156{
157 /* ref_xtal_clk is implemented as the only parent */
158 return clk_get_rate(clk->parent) / 768;
159}
160
161static unsigned long clk32k_clk_get_rate(struct clk *clk)
162{
163 return clk->parent->get_rate(clk->parent) / 750;
164}
165
166#define _CLK_GET_RATE(name, rs) \
167static unsigned long name##_get_rate(struct clk *clk) \
168{ \
169 u32 reg, div; \
170 \
171 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs); \
172 \
173 if (clk->parent == &ref_xtal_clk) \
174 div = (reg & BM_CLKCTRL_##rs##_DIV_XTAL) >> \
175 BP_CLKCTRL_##rs##_DIV_XTAL; \
176 else \
177 div = (reg & BM_CLKCTRL_##rs##_DIV_##rs) >> \
178 BP_CLKCTRL_##rs##_DIV_##rs; \
179 \
180 if (!div) \
181 return -EINVAL; \
182 \
183 return clk_get_rate(clk->parent) / div; \
184}
185
186_CLK_GET_RATE(cpu_clk, CPU)
187_CLK_GET_RATE(emi_clk, EMI)
188
189#define _CLK_GET_RATE1(name, rs) \
190static unsigned long name##_get_rate(struct clk *clk) \
191{ \
192 u32 reg, div; \
193 \
194 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs); \
195 div = (reg & BM_CLKCTRL_##rs##_DIV) >> BP_CLKCTRL_##rs##_DIV; \
196 \
197 if (!div) \
198 return -EINVAL; \
199 \
200 return clk_get_rate(clk->parent) / div; \
201}
202
203_CLK_GET_RATE1(hbus_clk, HBUS)
204_CLK_GET_RATE1(xbus_clk, XBUS)
205_CLK_GET_RATE1(ssp_clk, SSP)
206_CLK_GET_RATE1(gpmi_clk, GPMI)
207_CLK_GET_RATE1(lcdif_clk, PIX)
208
209#define _CLK_GET_RATE_STUB(name) \
210static unsigned long name##_get_rate(struct clk *clk) \
211{ \
212 return clk_get_rate(clk->parent); \
213}
214
215_CLK_GET_RATE_STUB(uart_clk)
216_CLK_GET_RATE_STUB(audio_clk)
217_CLK_GET_RATE_STUB(pwm_clk)
218
219/*
220 * clk_set_rate
221 */
222static int cpu_clk_set_rate(struct clk *clk, unsigned long rate)
223{
224 u32 reg, bm_busy, div_max, d, f, div, frac;
225 unsigned long diff, parent_rate, calc_rate;
226
227 parent_rate = clk_get_rate(clk->parent);
228
229 if (clk->parent == &ref_xtal_clk) {
230 div_max = BM_CLKCTRL_CPU_DIV_XTAL >> BP_CLKCTRL_CPU_DIV_XTAL;
231 bm_busy = BM_CLKCTRL_CPU_BUSY_REF_XTAL;
232 div = DIV_ROUND_UP(parent_rate, rate);
233 if (div == 0 || div > div_max)
234 return -EINVAL;
235 } else {
236 div_max = BM_CLKCTRL_CPU_DIV_CPU >> BP_CLKCTRL_CPU_DIV_CPU;
237 bm_busy = BM_CLKCTRL_CPU_BUSY_REF_CPU;
238 rate >>= PARENT_RATE_SHIFT;
239 parent_rate >>= PARENT_RATE_SHIFT;
240 diff = parent_rate;
241 div = frac = 1;
242 for (d = 1; d <= div_max; d++) {
243 f = parent_rate * 18 / d / rate;
244 if ((parent_rate * 18 / d) % rate)
245 f++;
246 if (f < 18 || f > 35)
247 continue;
248
249 calc_rate = parent_rate * 18 / f / d;
250 if (calc_rate > rate)
251 continue;
252
253 if (rate - calc_rate < diff) {
254 frac = f;
255 div = d;
256 diff = rate - calc_rate;
257 }
258
259 if (diff == 0)
260 break;
261 }
262
263 if (diff == parent_rate)
264 return -EINVAL;
265
266 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
267 reg &= ~BM_CLKCTRL_FRAC_CPUFRAC;
268 reg |= frac;
269 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
270 }
271
272 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU);
273 reg &= ~BM_CLKCTRL_CPU_DIV_CPU;
274 reg |= div << BP_CLKCTRL_CPU_DIV_CPU;
275 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU);
276
277 mxs_clkctrl_timeout(HW_CLKCTRL_CPU, bm_busy);
278
279 return 0;
280}
281
282#define _CLK_SET_RATE(name, dr) \
283static int name##_set_rate(struct clk *clk, unsigned long rate) \
284{ \
285 u32 reg, div_max, div; \
286 unsigned long parent_rate; \
287 \
288 parent_rate = clk_get_rate(clk->parent); \
289 div_max = BM_CLKCTRL_##dr##_DIV >> BP_CLKCTRL_##dr##_DIV; \
290 \
291 div = DIV_ROUND_UP(parent_rate, rate); \
292 if (div == 0 || div > div_max) \
293 return -EINVAL; \
294 \
295 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
296 reg &= ~BM_CLKCTRL_##dr##_DIV; \
297 reg |= div << BP_CLKCTRL_##dr##_DIV; \
298 if (reg & (1 << clk->enable_shift)) { \
299 pr_err("%s: clock is gated\n", __func__); \
300 return -EINVAL; \
301 } \
302 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
303 \
304 mxs_clkctrl_timeout(HW_CLKCTRL_##dr, BM_CLKCTRL_##dr##_BUSY); \
305 return 0; \
306}
307
308_CLK_SET_RATE(xbus_clk, XBUS)
309_CLK_SET_RATE(ssp_clk, SSP)
310_CLK_SET_RATE(gpmi_clk, GPMI)
311_CLK_SET_RATE(lcdif_clk, PIX)
312
313#define _CLK_SET_RATE_STUB(name) \
314static int name##_set_rate(struct clk *clk, unsigned long rate) \
315{ \
316 return -EINVAL; \
317}
318
319_CLK_SET_RATE_STUB(emi_clk)
320_CLK_SET_RATE_STUB(uart_clk)
321_CLK_SET_RATE_STUB(audio_clk)
322_CLK_SET_RATE_STUB(pwm_clk)
323_CLK_SET_RATE_STUB(clk32k_clk)
324
325/*
326 * clk_set_parent
327 */
328#define _CLK_SET_PARENT(name, bit) \
329static int name##_set_parent(struct clk *clk, struct clk *parent) \
330{ \
331 if (parent != clk->parent) { \
332 __raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit, \
333 CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG); \
334 clk->parent = parent; \
335 } \
336 \
337 return 0; \
338}
339
340_CLK_SET_PARENT(cpu_clk, CPU)
341_CLK_SET_PARENT(emi_clk, EMI)
342_CLK_SET_PARENT(ssp_clk, SSP)
343_CLK_SET_PARENT(gpmi_clk, GPMI)
344_CLK_SET_PARENT(lcdif_clk, PIX)
345
346#define _CLK_SET_PARENT_STUB(name) \
347static int name##_set_parent(struct clk *clk, struct clk *parent) \
348{ \
349 if (parent != clk->parent) \
350 return -EINVAL; \
351 else \
352 return 0; \
353}
354
355_CLK_SET_PARENT_STUB(uart_clk)
356_CLK_SET_PARENT_STUB(audio_clk)
357_CLK_SET_PARENT_STUB(pwm_clk)
358_CLK_SET_PARENT_STUB(clk32k_clk)
359
360/*
361 * clk definition
362 */
363static struct clk cpu_clk = {
364 .get_rate = cpu_clk_get_rate,
365 .set_rate = cpu_clk_set_rate,
366 .set_parent = cpu_clk_set_parent,
367 .parent = &ref_cpu_clk,
368};
369
370static struct clk hbus_clk = {
371 .get_rate = hbus_clk_get_rate,
372 .parent = &cpu_clk,
373};
374
375static struct clk xbus_clk = {
376 .get_rate = xbus_clk_get_rate,
377 .set_rate = xbus_clk_set_rate,
378 .parent = &ref_xtal_clk,
379};
380
381static struct clk rtc_clk = {
382 .get_rate = rtc_clk_get_rate,
383 .parent = &ref_xtal_clk,
384};
385
386/* usb_clk gate is controlled in DIGCTRL other than CLKCTRL */
387static struct clk usb_clk = {
388 .enable_reg = DIGCTRL_BASE_ADDR,
389 .enable_shift = 2,
390 .enable = _raw_clk_enable,
391 .disable = _raw_clk_disable,
392 .parent = &pll_clk,
393};
394
395#define _DEFINE_CLOCK(name, er, es, p) \
396 static struct clk name = { \
397 .enable_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_##er, \
398 .enable_shift = BP_CLKCTRL_##er##_##es, \
399 .get_rate = name##_get_rate, \
400 .set_rate = name##_set_rate, \
401 .set_parent = name##_set_parent, \
402 .enable = _raw_clk_enable, \
403 .disable = _raw_clk_disable, \
404 .parent = p, \
405 }
406
407_DEFINE_CLOCK(emi_clk, EMI, CLKGATE, &ref_xtal_clk);
408_DEFINE_CLOCK(ssp_clk, SSP, CLKGATE, &ref_xtal_clk);
409_DEFINE_CLOCK(gpmi_clk, GPMI, CLKGATE, &ref_xtal_clk);
410_DEFINE_CLOCK(lcdif_clk, PIX, CLKGATE, &ref_xtal_clk);
411_DEFINE_CLOCK(uart_clk, XTAL, UART_CLK_GATE, &ref_xtal_clk);
412_DEFINE_CLOCK(audio_clk, XTAL, FILT_CLK24M_GATE, &ref_xtal_clk);
413_DEFINE_CLOCK(pwm_clk, XTAL, PWM_CLK24M_GATE, &ref_xtal_clk);
414_DEFINE_CLOCK(clk32k_clk, XTAL, TIMROT_CLK32K_GATE, &ref_xtal_clk);
415
416#define _REGISTER_CLOCK(d, n, c) \
417 { \
418 .dev_id = d, \
419 .con_id = n, \
420 .clk = &c, \
421 },
422
423static struct clk_lookup lookups[] = {
424 /* for amba bus driver */
425 _REGISTER_CLOCK("duart", "apb_pclk", xbus_clk)
426 /* for amba-pl011 driver */
427 _REGISTER_CLOCK("duart", NULL, uart_clk)
428 _REGISTER_CLOCK("mxs-auart.0", NULL, uart_clk)
429 _REGISTER_CLOCK("rtc", NULL, rtc_clk)
430 _REGISTER_CLOCK("mxs-dma-apbh", NULL, hbus_clk)
431 _REGISTER_CLOCK("mxs-dma-apbx", NULL, xbus_clk)
432 _REGISTER_CLOCK("mxs-mmc.0", NULL, ssp_clk)
433 _REGISTER_CLOCK("mxs-mmc.1", NULL, ssp_clk)
434 _REGISTER_CLOCK(NULL, "usb", usb_clk)
435 _REGISTER_CLOCK(NULL, "audio", audio_clk)
436 _REGISTER_CLOCK("mxs-pwm.0", NULL, pwm_clk)
437 _REGISTER_CLOCK("mxs-pwm.1", NULL, pwm_clk)
438 _REGISTER_CLOCK("mxs-pwm.2", NULL, pwm_clk)
439 _REGISTER_CLOCK("mxs-pwm.3", NULL, pwm_clk)
440 _REGISTER_CLOCK("mxs-pwm.4", NULL, pwm_clk)
441 _REGISTER_CLOCK("imx23-fb", NULL, lcdif_clk)
442 _REGISTER_CLOCK("imx23-gpmi-nand", NULL, gpmi_clk)
443};
444
445static int clk_misc_init(void)
446{
447 u32 reg;
448 int ret;
449
450 /* Fix up parent per register setting */
451 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ);
452 cpu_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_CPU) ?
453 &ref_xtal_clk : &ref_cpu_clk;
454 emi_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_EMI) ?
455 &ref_xtal_clk : &ref_emi_clk;
456 ssp_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP) ?
457 &ref_xtal_clk : &ref_io_clk;
458 gpmi_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_GPMI) ?
459 &ref_xtal_clk : &ref_io_clk;
460 lcdif_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_PIX) ?
461 &ref_xtal_clk : &ref_pix_clk;
462
463 /* Use int div over frac when both are available */
464 __raw_writel(BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN,
465 CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_CLR);
466 __raw_writel(BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN,
467 CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_CLR);
468 __raw_writel(BM_CLKCTRL_HBUS_DIV_FRAC_EN,
469 CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS_CLR);
470
471 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
472 reg &= ~BM_CLKCTRL_XBUS_DIV_FRAC_EN;
473 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
474
475 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP);
476 reg &= ~BM_CLKCTRL_SSP_DIV_FRAC_EN;
477 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP);
478
479 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
480 reg &= ~BM_CLKCTRL_GPMI_DIV_FRAC_EN;
481 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
482
483 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_PIX);
484 reg &= ~BM_CLKCTRL_PIX_DIV_FRAC_EN;
485 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_PIX);
486
487 /*
488 * Set safe hbus clock divider. A divider of 3 ensure that
489 * the Vddd voltage required for the cpu clock is sufficiently
490 * high for the hbus clock.
491 */
492 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
493 reg &= BM_CLKCTRL_HBUS_DIV;
494 reg |= 3 << BP_CLKCTRL_HBUS_DIV;
495 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
496
497 ret = mxs_clkctrl_timeout(HW_CLKCTRL_HBUS, BM_CLKCTRL_HBUS_BUSY);
498
499 /* Gate off cpu clock in WFI for power saving */
500 __raw_writel(BM_CLKCTRL_CPU_INTERRUPT_WAIT,
501 CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_SET);
502
503 /*
504 * 480 MHz seems too high to be ssp clock source directly,
505 * so set frac to get a 288 MHz ref_io.
506 */
507 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
508 reg &= ~BM_CLKCTRL_FRAC_IOFRAC;
509 reg |= 30 << BP_CLKCTRL_FRAC_IOFRAC;
510 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC);
511
512 return ret;
513}
514
515int __init mx23_clocks_init(void)
516{
517 clk_misc_init();
518
519 /*
520 * source ssp clock from ref_io than ref_xtal,
521 * as ref_xtal only provides 24 MHz as maximum.
522 */
523 clk_set_parent(&ssp_clk, &ref_io_clk);
524
525 clk_prepare_enable(&cpu_clk);
526 clk_prepare_enable(&hbus_clk);
527 clk_prepare_enable(&xbus_clk);
528 clk_prepare_enable(&emi_clk);
529 clk_prepare_enable(&uart_clk);
530
531 clkdev_add_table(lookups, ARRAY_SIZE(lookups));
532
533 mxs_timer_init(&clk32k_clk, MX23_INT_TIMER0);
534
535 return 0;
536}
diff --git a/arch/arm/mach-mxs/clock-mx28.c b/arch/arm/mach-mxs/clock-mx28.c
deleted file mode 100644
index cea29c99e214..000000000000
--- a/arch/arm/mach-mxs/clock-mx28.c
+++ /dev/null
@@ -1,803 +0,0 @@
1/*
2 * Copyright (C) 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License along
15 * with this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
17 */
18
19#include <linux/mm.h>
20#include <linux/delay.h>
21#include <linux/clk.h>
22#include <linux/io.h>
23#include <linux/jiffies.h>
24#include <linux/clkdev.h>
25#include <linux/spinlock.h>
26
27#include <asm/clkdev.h>
28#include <asm/div64.h>
29
30#include <mach/mx28.h>
31#include <mach/common.h>
32#include <mach/clock.h>
33#include <mach/digctl.h>
34
35#include "regs-clkctrl-mx28.h"
36
37#define CLKCTRL_BASE_ADDR MX28_IO_ADDRESS(MX28_CLKCTRL_BASE_ADDR)
38#define DIGCTRL_BASE_ADDR MX28_IO_ADDRESS(MX28_DIGCTL_BASE_ADDR)
39
40#define PARENT_RATE_SHIFT 8
41
42static struct clk pll2_clk;
43static struct clk cpu_clk;
44static struct clk emi_clk;
45static struct clk saif0_clk;
46static struct clk saif1_clk;
47static struct clk clk32k_clk;
48static DEFINE_SPINLOCK(clkmux_lock);
49
50/*
51 * HW_SAIF_CLKMUX_SEL:
52 * DIRECT(0x0): SAIF0 clock pins selected for SAIF0 input clocks, and SAIF1
53 * clock pins selected for SAIF1 input clocks.
54 * CROSSINPUT(0x1): SAIF1 clock inputs selected for SAIF0 input clocks, and
55 * SAIF0 clock inputs selected for SAIF1 input clocks.
56 * EXTMSTR0(0x2): SAIF0 clock pin selected for both SAIF0 and SAIF1 input
57 * clocks.
58 * EXTMSTR1(0x3): SAIF1 clock pin selected for both SAIF0 and SAIF1 input
59 * clocks.
60 */
61int mxs_saif_clkmux_select(unsigned int clkmux)
62{
63 if (clkmux > 0x3)
64 return -EINVAL;
65
66 spin_lock(&clkmux_lock);
67 __raw_writel(BM_DIGCTL_CTRL_SAIF_CLKMUX,
68 DIGCTRL_BASE_ADDR + HW_DIGCTL_CTRL + MXS_CLR_ADDR);
69 __raw_writel(clkmux << BP_DIGCTL_CTRL_SAIF_CLKMUX,
70 DIGCTRL_BASE_ADDR + HW_DIGCTL_CTRL + MXS_SET_ADDR);
71 spin_unlock(&clkmux_lock);
72
73 return 0;
74}
75
76static int _raw_clk_enable(struct clk *clk)
77{
78 u32 reg;
79
80 if (clk->enable_reg) {
81 reg = __raw_readl(clk->enable_reg);
82 reg &= ~(1 << clk->enable_shift);
83 __raw_writel(reg, clk->enable_reg);
84 }
85
86 return 0;
87}
88
89static void _raw_clk_disable(struct clk *clk)
90{
91 u32 reg;
92
93 if (clk->enable_reg) {
94 reg = __raw_readl(clk->enable_reg);
95 reg |= 1 << clk->enable_shift;
96 __raw_writel(reg, clk->enable_reg);
97 }
98}
99
100/*
101 * ref_xtal_clk
102 */
103static unsigned long ref_xtal_clk_get_rate(struct clk *clk)
104{
105 return 24000000;
106}
107
108static struct clk ref_xtal_clk = {
109 .get_rate = ref_xtal_clk_get_rate,
110};
111
112/*
113 * pll_clk
114 */
115static unsigned long pll0_clk_get_rate(struct clk *clk)
116{
117 return 480000000;
118}
119
120static unsigned long pll1_clk_get_rate(struct clk *clk)
121{
122 return 480000000;
123}
124
125static unsigned long pll2_clk_get_rate(struct clk *clk)
126{
127 return 50000000;
128}
129
130#define _CLK_ENABLE_PLL(name, r, g) \
131static int name##_enable(struct clk *clk) \
132{ \
133 __raw_writel(BM_CLKCTRL_##r##CTRL0_POWER, \
134 CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_SET); \
135 udelay(10); \
136 \
137 if (clk == &pll2_clk) \
138 __raw_writel(BM_CLKCTRL_##r##CTRL0_##g, \
139 CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_CLR); \
140 else \
141 __raw_writel(BM_CLKCTRL_##r##CTRL0_##g, \
142 CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_SET); \
143 \
144 return 0; \
145}
146
147_CLK_ENABLE_PLL(pll0_clk, PLL0, EN_USB_CLKS)
148_CLK_ENABLE_PLL(pll1_clk, PLL1, EN_USB_CLKS)
149_CLK_ENABLE_PLL(pll2_clk, PLL2, CLKGATE)
150
151#define _CLK_DISABLE_PLL(name, r, g) \
152static void name##_disable(struct clk *clk) \
153{ \
154 __raw_writel(BM_CLKCTRL_##r##CTRL0_POWER, \
155 CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_CLR); \
156 \
157 if (clk == &pll2_clk) \
158 __raw_writel(BM_CLKCTRL_##r##CTRL0_##g, \
159 CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_SET); \
160 else \
161 __raw_writel(BM_CLKCTRL_##r##CTRL0_##g, \
162 CLKCTRL_BASE_ADDR + HW_CLKCTRL_##r##CTRL0_CLR); \
163 \
164}
165
166_CLK_DISABLE_PLL(pll0_clk, PLL0, EN_USB_CLKS)
167_CLK_DISABLE_PLL(pll1_clk, PLL1, EN_USB_CLKS)
168_CLK_DISABLE_PLL(pll2_clk, PLL2, CLKGATE)
169
170#define _DEFINE_CLOCK_PLL(name) \
171 static struct clk name = { \
172 .get_rate = name##_get_rate, \
173 .enable = name##_enable, \
174 .disable = name##_disable, \
175 .parent = &ref_xtal_clk, \
176 }
177
178_DEFINE_CLOCK_PLL(pll0_clk);
179_DEFINE_CLOCK_PLL(pll1_clk);
180_DEFINE_CLOCK_PLL(pll2_clk);
181
182/*
183 * ref_clk
184 */
185#define _CLK_GET_RATE_REF(name, sr, ss) \
186static unsigned long name##_get_rate(struct clk *clk) \
187{ \
188 unsigned long parent_rate; \
189 u32 reg, div; \
190 \
191 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##sr); \
192 div = (reg >> BP_CLKCTRL_##sr##_##ss##FRAC) & 0x3f; \
193 parent_rate = clk_get_rate(clk->parent); \
194 \
195 return SH_DIV((parent_rate >> PARENT_RATE_SHIFT) * 18, \
196 div, PARENT_RATE_SHIFT); \
197}
198
199_CLK_GET_RATE_REF(ref_cpu_clk, FRAC0, CPU)
200_CLK_GET_RATE_REF(ref_emi_clk, FRAC0, EMI)
201_CLK_GET_RATE_REF(ref_io0_clk, FRAC0, IO0)
202_CLK_GET_RATE_REF(ref_io1_clk, FRAC0, IO1)
203_CLK_GET_RATE_REF(ref_pix_clk, FRAC1, PIX)
204_CLK_GET_RATE_REF(ref_gpmi_clk, FRAC1, GPMI)
205
206#define _DEFINE_CLOCK_REF(name, er, es) \
207 static struct clk name = { \
208 .enable_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_##er, \
209 .enable_shift = BP_CLKCTRL_##er##_CLKGATE##es, \
210 .get_rate = name##_get_rate, \
211 .enable = _raw_clk_enable, \
212 .disable = _raw_clk_disable, \
213 .parent = &pll0_clk, \
214 }
215
216_DEFINE_CLOCK_REF(ref_cpu_clk, FRAC0, CPU);
217_DEFINE_CLOCK_REF(ref_emi_clk, FRAC0, EMI);
218_DEFINE_CLOCK_REF(ref_io0_clk, FRAC0, IO0);
219_DEFINE_CLOCK_REF(ref_io1_clk, FRAC0, IO1);
220_DEFINE_CLOCK_REF(ref_pix_clk, FRAC1, PIX);
221_DEFINE_CLOCK_REF(ref_gpmi_clk, FRAC1, GPMI);
222
223/*
224 * General clocks
225 *
226 * clk_get_rate
227 */
228static unsigned long lradc_clk_get_rate(struct clk *clk)
229{
230 return clk_get_rate(clk->parent) / 16;
231}
232
233static unsigned long rtc_clk_get_rate(struct clk *clk)
234{
235 /* ref_xtal_clk is implemented as the only parent */
236 return clk_get_rate(clk->parent) / 768;
237}
238
239static unsigned long clk32k_clk_get_rate(struct clk *clk)
240{
241 return clk->parent->get_rate(clk->parent) / 750;
242}
243
244static unsigned long spdif_clk_get_rate(struct clk *clk)
245{
246 return clk_get_rate(clk->parent) / 4;
247}
248
/*
 * get_rate for CPU and EMI, which carry two integer dividers in the
 * same register: DIV_XTAL is in effect when running from ref_xtal,
 * DIV_<rs> when running from the fractional reference.  Pick the field
 * matching the current parent.
 *
 * NOTE(review): returning -EINVAL from an unsigned long getter yields a
 * huge bogus rate, not an error, at the caller — pre-existing quirk
 * shared with _CLK_GET_RATE1 below.
 */
#define _CLK_GET_RATE(name, rs) \
static unsigned long name##_get_rate(struct clk *clk) \
{ \
	u32 reg, div; \
 \
	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs); \
 \
	if (clk->parent == &ref_xtal_clk) \
		div = (reg & BM_CLKCTRL_##rs##_DIV_XTAL) >> \
			BP_CLKCTRL_##rs##_DIV_XTAL; \
	else \
		div = (reg & BM_CLKCTRL_##rs##_DIV_##rs) >> \
			BP_CLKCTRL_##rs##_DIV_##rs; \
 \
	if (!div) \
		return -EINVAL; \
 \
	return clk_get_rate(clk->parent) / div; \
}

_CLK_GET_RATE(cpu_clk, CPU)
_CLK_GET_RATE(emi_clk, EMI)
271
272#define _CLK_GET_RATE1(name, rs) \
273static unsigned long name##_get_rate(struct clk *clk) \
274{ \
275 u32 reg, div; \
276 \
277 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs); \
278 div = (reg & BM_CLKCTRL_##rs##_DIV) >> BP_CLKCTRL_##rs##_DIV; \
279 \
280 if (!div) \
281 return -EINVAL; \
282 \
283 if (clk == &saif0_clk || clk == &saif1_clk) \
284 return clk_get_rate(clk->parent) >> 16 * div; \
285 else \
286 return clk_get_rate(clk->parent) / div; \
287}
288
289_CLK_GET_RATE1(hbus_clk, HBUS)
290_CLK_GET_RATE1(xbus_clk, XBUS)
291_CLK_GET_RATE1(ssp0_clk, SSP0)
292_CLK_GET_RATE1(ssp1_clk, SSP1)
293_CLK_GET_RATE1(ssp2_clk, SSP2)
294_CLK_GET_RATE1(ssp3_clk, SSP3)
295_CLK_GET_RATE1(gpmi_clk, GPMI)
296_CLK_GET_RATE1(lcdif_clk, DIS_LCDIF)
297_CLK_GET_RATE1(saif0_clk, SAIF0)
298_CLK_GET_RATE1(saif1_clk, SAIF1)
299
/*
 * Pass-through get_rate for clocks that run at their parent's rate
 * (they only add a gate, no divider).
 */
#define _CLK_GET_RATE_STUB(name) \
static unsigned long name##_get_rate(struct clk *clk) \
{ \
	return clk_get_rate(clk->parent); \
}

_CLK_GET_RATE_STUB(uart_clk)
_CLK_GET_RATE_STUB(pwm_clk)
_CLK_GET_RATE_STUB(can0_clk)
_CLK_GET_RATE_STUB(can1_clk)
_CLK_GET_RATE_STUB(fec_clk)
311
/*
 * clk_set_rate
 */
/*
 * "fool compiler": dummy zero definitions so the generic macro body
 * below compiles for cpu_clk; the "clk == &cpu_clk" branches replace
 * them at run time with the real CPU_DIV_CPU / CPU_DIV_XTAL fields.
 */
#define BM_CLKCTRL_CPU_DIV	0
#define BP_CLKCTRL_CPU_DIV	0
#define BM_CLKCTRL_CPU_BUSY	0

/*
 * Set-rate for clocks fed either directly by ref_xtal (plain integer
 * divider DIV in register dr) or by a PLL fractional reference.  In the
 * fractional case both the frac field (fsFRAC in register fr, valid
 * range 18..35, rate contribution parent * 18 / frac) and the integer
 * post-divider are searched jointly for the closest achievable rate
 * not above the request; -EINVAL if nothing fits.  On success, waits
 * for the divider-busy bit to clear via mxs_clkctrl_timeout().
 */
#define _CLK_SET_RATE(name, dr, fr, fs) \
static int name##_set_rate(struct clk *clk, unsigned long rate) \
{ \
	u32 reg, bm_busy, div_max, d, f, div, frac; \
	unsigned long diff, parent_rate, calc_rate; \
 \
	div_max = BM_CLKCTRL_##dr##_DIV >> BP_CLKCTRL_##dr##_DIV; \
	bm_busy = BM_CLKCTRL_##dr##_BUSY; \
 \
	if (clk->parent == &ref_xtal_clk) { \
		parent_rate = clk_get_rate(clk->parent); \
		div = DIV_ROUND_UP(parent_rate, rate); \
		if (clk == &cpu_clk) { \
			div_max = BM_CLKCTRL_CPU_DIV_XTAL >> \
				BP_CLKCTRL_CPU_DIV_XTAL; \
			bm_busy = BM_CLKCTRL_CPU_BUSY_REF_XTAL; \
		} \
		if (div == 0 || div > div_max) \
			return -EINVAL; \
	} else { \
		/* \
		 * hack alert: this block modifies the frac setting of \
		 * clk->parent, too, so the rate base to use is the \
		 * grand parent. \
		 */ \
		parent_rate = clk_get_rate(clk->parent->parent); \
		rate >>= PARENT_RATE_SHIFT; \
		parent_rate >>= PARENT_RATE_SHIFT; \
		diff = parent_rate; \
		div = frac = 1; \
		if (clk == &cpu_clk) { \
			div_max = BM_CLKCTRL_CPU_DIV_CPU >> \
				BP_CLKCTRL_CPU_DIV_CPU; \
			bm_busy = BM_CLKCTRL_CPU_BUSY_REF_CPU; \
		} \
		for (d = 1; d <= div_max; d++) { \
			f = parent_rate * 18 / d / rate; \
			if ((parent_rate * 18 / d) % rate) \
				f++; \
			if (f < 18 || f > 35) \
				continue; \
 \
			calc_rate = parent_rate * 18 / f / d; \
			if (calc_rate > rate) \
				continue; \
 \
			if (rate - calc_rate < diff) { \
				frac = f; \
				div = d; \
				diff = rate - calc_rate; \
			} \
 \
			if (diff == 0) \
				break; \
		} \
 \
		if (diff == parent_rate) \
			return -EINVAL; \
 \
		reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##fr); \
		reg &= ~BM_CLKCTRL_##fr##_##fs##FRAC; \
		reg |= frac << BP_CLKCTRL_##fr##_##fs##FRAC; \
		__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##fr); \
	} \
 \
	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
	if (clk == &cpu_clk) { \
		reg &= ~BM_CLKCTRL_CPU_DIV_CPU; \
		reg |= div << BP_CLKCTRL_CPU_DIV_CPU; \
	} else { \
		reg &= ~BM_CLKCTRL_##dr##_DIV; \
		reg |= div << BP_CLKCTRL_##dr##_DIV; \
		if (reg & (1 << clk->enable_shift)) { \
			pr_err("%s: clock is gated\n", __func__); \
			return -EINVAL; \
		} \
	} \
	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
 \
	return mxs_clkctrl_timeout(HW_CLKCTRL_##dr, bm_busy); \
}

_CLK_SET_RATE(cpu_clk, CPU, FRAC0, CPU)
_CLK_SET_RATE(ssp0_clk, SSP0, FRAC0, IO0)
_CLK_SET_RATE(ssp1_clk, SSP1, FRAC0, IO0)
_CLK_SET_RATE(ssp2_clk, SSP2, FRAC0, IO1)
_CLK_SET_RATE(ssp3_clk, SSP3, FRAC0, IO1)
_CLK_SET_RATE(lcdif_clk, DIS_LCDIF, FRAC1, PIX)
_CLK_SET_RATE(gpmi_clk, GPMI, FRAC1, GPMI)
408
/*
 * Simple set-rate for clocks with only an integer divider and a fixed
 * parent: round the divider up (so the resulting rate never exceeds
 * the request), refuse a rate the divider range cannot reach, refuse
 * to touch a gated clock, then wait for the busy bit to clear.
 */
#define _CLK_SET_RATE1(name, dr) \
static int name##_set_rate(struct clk *clk, unsigned long rate) \
{ \
	u32 reg, div_max, div; \
	unsigned long parent_rate; \
 \
	parent_rate = clk_get_rate(clk->parent); \
	div_max = BM_CLKCTRL_##dr##_DIV >> BP_CLKCTRL_##dr##_DIV; \
 \
	div = DIV_ROUND_UP(parent_rate, rate); \
	if (div == 0 || div > div_max) \
		return -EINVAL; \
 \
	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
	reg &= ~BM_CLKCTRL_##dr##_DIV; \
	reg |= div << BP_CLKCTRL_##dr##_DIV; \
	if (reg & (1 << clk->enable_shift)) { \
		pr_err("%s: clock is gated\n", __func__); \
		return -EINVAL; \
	} \
	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##dr); \
 \
	return mxs_clkctrl_timeout(HW_CLKCTRL_##dr, BM_CLKCTRL_##dr##_BUSY);\
}

_CLK_SET_RATE1(xbus_clk, XBUS)
435
/*
 * SAIF set-rate.  Unlike the other clocks, the SAIF DIV field is a
 * 16-bit *fractional* divider: div = rate * 2^16 / parent_rate, so the
 * resulting rate is parent_rate * div / 2^16.  Requires rate <=
 * parent_rate; rejects a zero divider and refuses to reprogram a gated
 * clock.  do_div() performs the 64-by-32 division in place.
 */
#define _CLK_SET_RATE_SAIF(name, rs) \
static int name##_set_rate(struct clk *clk, unsigned long rate) \
{ \
	u16 div; \
	u32 reg; \
	u64 lrate; \
	unsigned long parent_rate; \
 \
	parent_rate = clk_get_rate(clk->parent); \
	if (rate > parent_rate) \
		return -EINVAL; \
 \
	lrate = (u64)rate << 16; \
	do_div(lrate, parent_rate); \
	div = (u16)lrate; \
 \
	if (!div) \
		return -EINVAL; \
 \
	reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs); \
	reg &= ~BM_CLKCTRL_##rs##_DIV; \
	reg |= div << BP_CLKCTRL_##rs##_DIV; \
	if (reg & (1 << clk->enable_shift)) { \
		pr_err("%s: clock is gated\n", __func__); \
		return -EINVAL; \
	} \
	__raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_##rs); \
 \
	return mxs_clkctrl_timeout(HW_CLKCTRL_##rs, BM_CLKCTRL_##rs##_BUSY);\
}

_CLK_SET_RATE_SAIF(saif0_clk, SAIF0)
_CLK_SET_RATE_SAIF(saif1_clk, SAIF1)
470
/* Set-rate stub for clocks whose rate is not programmable. */
#define _CLK_SET_RATE_STUB(name) \
static int name##_set_rate(struct clk *clk, unsigned long rate) \
{ \
	return -EINVAL; \
}

_CLK_SET_RATE_STUB(emi_clk)
_CLK_SET_RATE_STUB(uart_clk)
_CLK_SET_RATE_STUB(pwm_clk)
_CLK_SET_RATE_STUB(spdif_clk)
_CLK_SET_RATE_STUB(clk32k_clk)
_CLK_SET_RATE_STUB(can0_clk)
_CLK_SET_RATE_STUB(can1_clk)
_CLK_SET_RATE_STUB(fec_clk)
485
/*
 * clk_set_parent
 *
 * Writing the bypass bit to HW_CLKCTRL_CLKSEQ_TOG *toggles* it, which
 * flips between the clock's two possible parents (ref_xtal vs its
 * fractional reference).  Only correct because each of these clocks has
 * exactly two selectable parents and the toggle is skipped when the
 * requested parent is already current.
 */
#define _CLK_SET_PARENT(name, bit) \
static int name##_set_parent(struct clk *clk, struct clk *parent) \
{ \
	if (parent != clk->parent) { \
		__raw_writel(BM_CLKCTRL_CLKSEQ_BYPASS_##bit, \
			CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ_TOG); \
		clk->parent = parent; \
	} \
 \
	return 0; \
}

_CLK_SET_PARENT(cpu_clk, CPU)
_CLK_SET_PARENT(emi_clk, EMI)
_CLK_SET_PARENT(ssp0_clk, SSP0)
_CLK_SET_PARENT(ssp1_clk, SSP1)
_CLK_SET_PARENT(ssp2_clk, SSP2)
_CLK_SET_PARENT(ssp3_clk, SSP3)
_CLK_SET_PARENT(lcdif_clk, DIS_LCDIF)
_CLK_SET_PARENT(gpmi_clk, GPMI)
_CLK_SET_PARENT(saif0_clk, SAIF0)
_CLK_SET_PARENT(saif1_clk, SAIF1)
511
/*
 * Set-parent stub for fixed-parent clocks: succeed only when asked for
 * the parent the clock already has.
 */
#define _CLK_SET_PARENT_STUB(name) \
static int name##_set_parent(struct clk *clk, struct clk *parent) \
{ \
	if (parent != clk->parent) \
		return -EINVAL; \
	else \
		return 0; \
}

_CLK_SET_PARENT_STUB(pwm_clk)
_CLK_SET_PARENT_STUB(uart_clk)
_CLK_SET_PARENT_STUB(clk32k_clk)
_CLK_SET_PARENT_STUB(spdif_clk)
_CLK_SET_PARENT_STUB(fec_clk)
_CLK_SET_PARENT_STUB(can0_clk)
_CLK_SET_PARENT_STUB(can1_clk)
528
/*
 * clk definition
 */
/* CPU core clock; parent may be rewired to ref_xtal in clk_misc_init(). */
static struct clk cpu_clk = {
	.get_rate = cpu_clk_get_rate,
	.set_rate = cpu_clk_set_rate,
	.set_parent = cpu_clk_set_parent,
	.parent = &ref_cpu_clk,
};

/* AHB bus clock, derived from the CPU clock; rate is not settable here. */
static struct clk hbus_clk = {
	.get_rate = hbus_clk_get_rate,
	.parent = &cpu_clk,
};

/* APBX bus clock, integer divider off ref_xtal. */
static struct clk xbus_clk = {
	.get_rate = xbus_clk_get_rate,
	.set_rate = xbus_clk_set_rate,
	.parent = &ref_xtal_clk,
};

static struct clk lradc_clk = {
	.get_rate = lradc_clk_get_rate,
	.parent = &clk32k_clk,
};

static struct clk rtc_clk = {
	.get_rate = rtc_clk_get_rate,
	.parent = &ref_xtal_clk,
};

/* usb_clk gate is controlled in DIGCTRL other than CLKCTRL */
static struct clk usb0_clk = {
	.enable_reg = DIGCTRL_BASE_ADDR,
	.enable_shift = 2,
	.enable = _raw_clk_enable,
	.disable = _raw_clk_disable,
	.parent = &pll0_clk,
};

static struct clk usb1_clk = {
	.enable_reg = DIGCTRL_BASE_ADDR,
	.enable_shift = 16,
	.enable = _raw_clk_enable,
	.disable = _raw_clk_disable,
	.parent = &pll1_clk,
};
576
/*
 * Define a fully-featured gated clock: er/es locate the gate bit in
 * CLKCTRL register HW_CLKCTRL_<er> (field BP_CLKCTRL_<er>_<es>), and
 * the rate/parent ops are the name##_* helpers generated above.  p is
 * the default parent; clk_misc_init() fixes several of these up to
 * match the hardware CLKSEQ state at boot.
 */
#define _DEFINE_CLOCK(name, er, es, p) \
	static struct clk name = { \
		.enable_reg = CLKCTRL_BASE_ADDR + HW_CLKCTRL_##er, \
		.enable_shift = BP_CLKCTRL_##er##_##es, \
		.get_rate = name##_get_rate, \
		.set_rate = name##_set_rate, \
		.set_parent = name##_set_parent, \
		.enable = _raw_clk_enable, \
		.disable = _raw_clk_disable, \
		.parent = p, \
	}

_DEFINE_CLOCK(emi_clk, EMI, CLKGATE, &ref_xtal_clk);
_DEFINE_CLOCK(ssp0_clk, SSP0, CLKGATE, &ref_xtal_clk);
_DEFINE_CLOCK(ssp1_clk, SSP1, CLKGATE, &ref_xtal_clk);
_DEFINE_CLOCK(ssp2_clk, SSP2, CLKGATE, &ref_xtal_clk);
_DEFINE_CLOCK(ssp3_clk, SSP3, CLKGATE, &ref_xtal_clk);
_DEFINE_CLOCK(lcdif_clk, DIS_LCDIF, CLKGATE, &ref_xtal_clk);
_DEFINE_CLOCK(gpmi_clk, GPMI, CLKGATE, &ref_xtal_clk);
_DEFINE_CLOCK(saif0_clk, SAIF0, CLKGATE, &ref_xtal_clk);
_DEFINE_CLOCK(saif1_clk, SAIF1, CLKGATE, &ref_xtal_clk);
_DEFINE_CLOCK(can0_clk, FLEXCAN, STOP_CAN0, &ref_xtal_clk);
_DEFINE_CLOCK(can1_clk, FLEXCAN, STOP_CAN1, &ref_xtal_clk);
_DEFINE_CLOCK(pwm_clk, XTAL, PWM_CLK24M_GATE, &ref_xtal_clk);
_DEFINE_CLOCK(uart_clk, XTAL, UART_CLK_GATE, &ref_xtal_clk);
_DEFINE_CLOCK(clk32k_clk, XTAL, TIMROT_CLK32K_GATE, &ref_xtal_clk);
_DEFINE_CLOCK(spdif_clk, SPDIF, CLKGATE, &pll0_clk);
_DEFINE_CLOCK(fec_clk, ENET, DISABLE, &hbus_clk);
605
/*
 * clkdev lookup table: map (dev_id, con_id) pairs used by drivers to
 * the static struct clk instances above.  Registered at the end of
 * mx28_clocks_init() via clkdev_add_table().
 */
#define _REGISTER_CLOCK(d, n, c) \
	{ \
		.dev_id = d, \
		.con_id = n, \
		.clk = &c, \
	},

static struct clk_lookup lookups[] = {
	/* for amba bus driver */
	_REGISTER_CLOCK("duart", "apb_pclk", xbus_clk)
	/* for amba-pl011 driver */
	_REGISTER_CLOCK("duart", NULL, uart_clk)
	_REGISTER_CLOCK("imx28-fec.0", NULL, fec_clk)
	_REGISTER_CLOCK("imx28-fec.1", NULL, fec_clk)
	_REGISTER_CLOCK("imx28-gpmi-nand", NULL, gpmi_clk)
	_REGISTER_CLOCK("mxs-auart.0", NULL, uart_clk)
	_REGISTER_CLOCK("mxs-auart.1", NULL, uart_clk)
	_REGISTER_CLOCK("mxs-auart.2", NULL, uart_clk)
	_REGISTER_CLOCK("mxs-auart.3", NULL, uart_clk)
	_REGISTER_CLOCK("mxs-auart.4", NULL, uart_clk)
	_REGISTER_CLOCK("rtc", NULL, rtc_clk)
	_REGISTER_CLOCK("pll2", NULL, pll2_clk)
	_REGISTER_CLOCK("mxs-dma-apbh", NULL, hbus_clk)
	_REGISTER_CLOCK("mxs-dma-apbx", NULL, xbus_clk)
	_REGISTER_CLOCK("mxs-mmc.0", NULL, ssp0_clk)
	_REGISTER_CLOCK("mxs-mmc.1", NULL, ssp1_clk)
	_REGISTER_CLOCK("mxs-mmc.2", NULL, ssp2_clk)
	_REGISTER_CLOCK("mxs-mmc.3", NULL, ssp3_clk)
	_REGISTER_CLOCK("flexcan.0", NULL, can0_clk)
	_REGISTER_CLOCK("flexcan.1", NULL, can1_clk)
	_REGISTER_CLOCK(NULL, "usb0", usb0_clk)
	_REGISTER_CLOCK(NULL, "usb1", usb1_clk)
	_REGISTER_CLOCK("mxs-pwm.0", NULL, pwm_clk)
	_REGISTER_CLOCK("mxs-pwm.1", NULL, pwm_clk)
	_REGISTER_CLOCK("mxs-pwm.2", NULL, pwm_clk)
	_REGISTER_CLOCK("mxs-pwm.3", NULL, pwm_clk)
	_REGISTER_CLOCK("mxs-pwm.4", NULL, pwm_clk)
	_REGISTER_CLOCK("mxs-pwm.5", NULL, pwm_clk)
	_REGISTER_CLOCK("mxs-pwm.6", NULL, pwm_clk)
	_REGISTER_CLOCK("mxs-pwm.7", NULL, pwm_clk)
	_REGISTER_CLOCK(NULL, "lradc", lradc_clk)
	_REGISTER_CLOCK(NULL, "spdif", spdif_clk)
	_REGISTER_CLOCK("imx28-fb", NULL, lcdif_clk)
	_REGISTER_CLOCK("mxs-saif.0", NULL, saif0_clk)
	_REGISTER_CLOCK("mxs-saif.1", NULL, saif1_clk)
};
652
653static int clk_misc_init(void)
654{
655 u32 reg;
656 int ret;
657
658 /* Fix up parent per register setting */
659 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_CLKSEQ);
660 cpu_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_CPU) ?
661 &ref_xtal_clk : &ref_cpu_clk;
662 emi_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_EMI) ?
663 &ref_xtal_clk : &ref_emi_clk;
664 ssp0_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP0) ?
665 &ref_xtal_clk : &ref_io0_clk;
666 ssp1_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP1) ?
667 &ref_xtal_clk : &ref_io0_clk;
668 ssp2_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP2) ?
669 &ref_xtal_clk : &ref_io1_clk;
670 ssp3_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SSP3) ?
671 &ref_xtal_clk : &ref_io1_clk;
672 lcdif_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF) ?
673 &ref_xtal_clk : &ref_pix_clk;
674 gpmi_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_GPMI) ?
675 &ref_xtal_clk : &ref_gpmi_clk;
676 saif0_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SAIF0) ?
677 &ref_xtal_clk : &pll0_clk;
678 saif1_clk.parent = (reg & BM_CLKCTRL_CLKSEQ_BYPASS_SAIF1) ?
679 &ref_xtal_clk : &pll0_clk;
680
681 /* Use int div over frac when both are available */
682 __raw_writel(BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN,
683 CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_CLR);
684 __raw_writel(BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN,
685 CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_CLR);
686 __raw_writel(BM_CLKCTRL_HBUS_DIV_FRAC_EN,
687 CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS_CLR);
688
689 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
690 reg &= ~BM_CLKCTRL_XBUS_DIV_FRAC_EN;
691 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_XBUS);
692
693 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP0);
694 reg &= ~BM_CLKCTRL_SSP0_DIV_FRAC_EN;
695 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP0);
696
697 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP1);
698 reg &= ~BM_CLKCTRL_SSP1_DIV_FRAC_EN;
699 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP1);
700
701 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP2);
702 reg &= ~BM_CLKCTRL_SSP2_DIV_FRAC_EN;
703 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP2);
704
705 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP3);
706 reg &= ~BM_CLKCTRL_SSP3_DIV_FRAC_EN;
707 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SSP3);
708
709 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
710 reg &= ~BM_CLKCTRL_GPMI_DIV_FRAC_EN;
711 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_GPMI);
712
713 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_DIS_LCDIF);
714 reg &= ~BM_CLKCTRL_DIS_LCDIF_DIV_FRAC_EN;
715 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_DIS_LCDIF);
716
717 /* SAIF has to use frac div for functional operation */
718 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SAIF0);
719 reg |= BM_CLKCTRL_SAIF0_DIV_FRAC_EN;
720 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SAIF0);
721
722 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_SAIF1);
723 reg |= BM_CLKCTRL_SAIF1_DIV_FRAC_EN;
724 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_SAIF1);
725
726 /*
727 * Set safe hbus clock divider. A divider of 3 ensure that
728 * the Vddd voltage required for the cpu clock is sufficiently
729 * high for the hbus clock.
730 */
731 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
732 reg &= BM_CLKCTRL_HBUS_DIV;
733 reg |= 3 << BP_CLKCTRL_HBUS_DIV;
734 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_HBUS);
735
736 ret = mxs_clkctrl_timeout(HW_CLKCTRL_HBUS, BM_CLKCTRL_HBUS_ASM_BUSY);
737
738 /* Gate off cpu clock in WFI for power saving */
739 __raw_writel(BM_CLKCTRL_CPU_INTERRUPT_WAIT,
740 CLKCTRL_BASE_ADDR + HW_CLKCTRL_CPU_SET);
741
742 /*
743 * Extra fec clock setting
744 * The DENX M28 uses an external clock source
745 * and the clock output must not be enabled
746 */
747 if (!machine_is_m28evk()) {
748 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_ENET);
749 reg &= ~BM_CLKCTRL_ENET_SLEEP;
750 reg |= BM_CLKCTRL_ENET_CLK_OUT_EN;
751 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_ENET);
752 }
753
754 /*
755 * 480 MHz seems too high to be ssp clock source directly,
756 * so set frac0 to get a 288 MHz ref_io0.
757 */
758 reg = __raw_readl(CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC0);
759 reg &= ~BM_CLKCTRL_FRAC0_IO0FRAC;
760 reg |= 30 << BP_CLKCTRL_FRAC0_IO0FRAC;
761 __raw_writel(reg, CLKCTRL_BASE_ADDR + HW_CLKCTRL_FRAC0);
762
763 return ret;
764}
765
/*
 * Board-independent clock setup for i.MX28: run the CLKCTRL fixups,
 * re-parent the peripheral clocks onto their fast PLL-derived
 * references, enable the always-needed core clocks, register the
 * clkdev lookup table and start the system timer.  Ordering matters:
 * parents are switched before rates are programmed.
 */
int __init mx28_clocks_init(void)
{
	clk_misc_init();

	/*
	 * Source the ssp clocks from ref_io0/ref_io1 rather than
	 * ref_xtal, as ref_xtal only provides 24 MHz as maximum.
	 */
	clk_set_parent(&ssp0_clk, &ref_io0_clk);
	clk_set_parent(&ssp1_clk, &ref_io0_clk);
	clk_set_parent(&ssp2_clk, &ref_io1_clk);
	clk_set_parent(&ssp3_clk, &ref_io1_clk);

	/* Core clocks that must stay on for the system to run. */
	clk_prepare_enable(&cpu_clk);
	clk_prepare_enable(&hbus_clk);
	clk_prepare_enable(&xbus_clk);
	clk_prepare_enable(&emi_clk);
	clk_prepare_enable(&uart_clk);

	clk_set_parent(&lcdif_clk, &ref_pix_clk);
	clk_set_parent(&saif0_clk, &pll0_clk);
	clk_set_parent(&saif1_clk, &pll0_clk);

	/*
	 * Set an initial clock rate for the saif internal logic to work
	 * properly. This is important when working in EXTMASTER mode that
	 * uses the other saif's BITCLK&LRCLK but it still needs a basic
	 * clock which should be fast enough for the internal logic.
	 */
	clk_set_rate(&saif0_clk, 24000000);
	clk_set_rate(&saif1_clk, 24000000);

	clkdev_add_table(lookups, ARRAY_SIZE(lookups));

	mxs_timer_init(&clk32k_clk, MX28_INT_TIMER0);

	return 0;
}
diff --git a/arch/arm/mach-mxs/clock.c b/arch/arm/mach-mxs/clock.c
deleted file mode 100644
index 97a6f4acc6cc..000000000000
--- a/arch/arm/mach-mxs/clock.c
+++ /dev/null
@@ -1,211 +0,0 @@
1/*
2 * Based on arch/arm/plat-omap/clock.c
3 *
4 * Copyright (C) 2004 - 2005 Nokia corporation
5 * Written by Tuukka Tikkanen <tuukka.tikkanen@elektrobit.com>
6 * Modified for omap shared clock framework by Tony Lindgren <tony@atomide.com>
7 * Copyright 2007 Freescale Semiconductor, Inc. All Rights Reserved.
8 * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * as published by the Free Software Foundation; either version 2
13 * of the License, or (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
22 * MA 02110-1301, USA.
23 */
24
25/* #define DEBUG */
26
27#include <linux/clk.h>
28#include <linux/err.h>
29#include <linux/errno.h>
30#include <linux/init.h>
31#include <linux/io.h>
32#include <linux/kernel.h>
33#include <linux/list.h>
34#include <linux/module.h>
35#include <linux/mutex.h>
36#include <linux/platform_device.h>
37#include <linux/proc_fs.h>
38#include <linux/semaphore.h>
39#include <linux/string.h>
40
41#include <mach/clock.h>
42
43static LIST_HEAD(clocks);
44static DEFINE_MUTEX(clocks_mutex);
45
46/*-------------------------------------------------------------------------
47 * Standard clock functions defined in include/linux/clk.h
48 *-------------------------------------------------------------------------*/
49
50static void __clk_disable(struct clk *clk)
51{
52 if (clk == NULL || IS_ERR(clk))
53 return;
54 WARN_ON(!clk->usecount);
55
56 if (!(--clk->usecount)) {
57 if (clk->disable)
58 clk->disable(clk);
59 __clk_disable(clk->parent);
60 }
61}
62
63static int __clk_enable(struct clk *clk)
64{
65 if (clk == NULL || IS_ERR(clk))
66 return -EINVAL;
67
68 if (clk->usecount++ == 0) {
69 __clk_enable(clk->parent);
70
71 if (clk->enable)
72 clk->enable(clk);
73 }
74 return 0;
75}
76
77/*
78 * The clk_enable/clk_disable could be called by drivers in atomic context,
79 * so they should not really hold mutex. Instead, clk_prepare/clk_unprepare
80 * can hold a mutex, as the pair will only be called in non-atomic context.
81 * Before migrating to common clk framework, we can have __clk_enable and
82 * __clk_disable called in clk_prepare/clk_unprepare with mutex held and
83 * leave clk_enable/clk_disable as the dummy functions.
84 */
85int clk_prepare(struct clk *clk)
86{
87 int ret = 0;
88
89 if (clk == NULL || IS_ERR(clk))
90 return -EINVAL;
91
92 mutex_lock(&clocks_mutex);
93 ret = __clk_enable(clk);
94 mutex_unlock(&clocks_mutex);
95
96 return ret;
97}
98EXPORT_SYMBOL(clk_prepare);
99
100void clk_unprepare(struct clk *clk)
101{
102 if (clk == NULL || IS_ERR(clk))
103 return;
104
105 mutex_lock(&clocks_mutex);
106 __clk_disable(clk);
107 mutex_unlock(&clocks_mutex);
108}
109EXPORT_SYMBOL(clk_unprepare);
110
111int clk_enable(struct clk *clk)
112{
113 return 0;
114}
115EXPORT_SYMBOL(clk_enable);
116
117void clk_disable(struct clk *clk)
118{
119 /* nothing to do */
120}
121EXPORT_SYMBOL(clk_disable);
122
123/* Retrieve the *current* clock rate. If the clock itself
124 * does not provide a special calculation routine, ask
125 * its parent and so on, until one is able to return
126 * a valid clock rate
127 */
128unsigned long clk_get_rate(struct clk *clk)
129{
130 if (clk == NULL || IS_ERR(clk))
131 return 0UL;
132
133 if (clk->get_rate)
134 return clk->get_rate(clk);
135
136 return clk_get_rate(clk->parent);
137}
138EXPORT_SYMBOL(clk_get_rate);
139
140/* Round the requested clock rate to the nearest supported
141 * rate that is less than or equal to the requested rate.
142 * This is dependent on the clock's current parent.
143 */
144long clk_round_rate(struct clk *clk, unsigned long rate)
145{
146 if (clk == NULL || IS_ERR(clk) || !clk->round_rate)
147 return 0;
148
149 return clk->round_rate(clk, rate);
150}
151EXPORT_SYMBOL(clk_round_rate);
152
153/* Set the clock to the requested clock rate. The rate must
154 * match a supported rate exactly based on what clk_round_rate returns
155 */
156int clk_set_rate(struct clk *clk, unsigned long rate)
157{
158 int ret = -EINVAL;
159
160 if (clk == NULL || IS_ERR(clk) || clk->set_rate == NULL || rate == 0)
161 return ret;
162
163 mutex_lock(&clocks_mutex);
164 ret = clk->set_rate(clk, rate);
165 mutex_unlock(&clocks_mutex);
166
167 return ret;
168}
169EXPORT_SYMBOL(clk_set_rate);
170
171/* Set the clock's parent to another clock source */
172int clk_set_parent(struct clk *clk, struct clk *parent)
173{
174 int ret = -EINVAL;
175 struct clk *old;
176
177 if (clk == NULL || IS_ERR(clk) || parent == NULL ||
178 IS_ERR(parent) || clk->set_parent == NULL)
179 return ret;
180
181 if (clk->usecount)
182 clk_prepare_enable(parent);
183
184 mutex_lock(&clocks_mutex);
185 ret = clk->set_parent(clk, parent);
186 if (ret == 0) {
187 old = clk->parent;
188 clk->parent = parent;
189 } else {
190 old = parent;
191 }
192 mutex_unlock(&clocks_mutex);
193
194 if (clk->usecount)
195 clk_disable(old);
196
197 return ret;
198}
199EXPORT_SYMBOL(clk_set_parent);
200
201/* Retrieve the clock's parent clock source */
202struct clk *clk_get_parent(struct clk *clk)
203{
204 struct clk *ret = NULL;
205
206 if (clk == NULL || IS_ERR(clk))
207 return ret;
208
209 return clk->parent;
210}
211EXPORT_SYMBOL(clk_get_parent);
diff --git a/arch/arm/mach-mxs/devices/Kconfig b/arch/arm/mach-mxs/devices/Kconfig
index b8913df4cfa2..19659de1c4e8 100644
--- a/arch/arm/mach-mxs/devices/Kconfig
+++ b/arch/arm/mach-mxs/devices/Kconfig
@@ -1,6 +1,5 @@
1config MXS_HAVE_AMBA_DUART 1config MXS_HAVE_AMBA_DUART
2 bool 2 bool
3 select ARM_AMBA
4 3
5config MXS_HAVE_PLATFORM_AUART 4config MXS_HAVE_PLATFORM_AUART
6 bool 5 bool
diff --git a/arch/arm/mach-mxs/devices/platform-dma.c b/arch/arm/mach-mxs/devices/platform-dma.c
index 6a0202b1016c..46824501de00 100644
--- a/arch/arm/mach-mxs/devices/platform-dma.c
+++ b/arch/arm/mach-mxs/devices/platform-dma.c
@@ -14,7 +14,7 @@
14#include <mach/mx28.h> 14#include <mach/mx28.h>
15#include <mach/devices-common.h> 15#include <mach/devices-common.h>
16 16
17static struct platform_device *__init mxs_add_dma(const char *devid, 17struct platform_device *__init mxs_add_dma(const char *devid,
18 resource_size_t base) 18 resource_size_t base)
19{ 19{
20 struct resource res[] = { 20 struct resource res[] = {
@@ -29,22 +29,3 @@ static struct platform_device *__init mxs_add_dma(const char *devid,
29 res, ARRAY_SIZE(res), NULL, 0, 29 res, ARRAY_SIZE(res), NULL, 0,
30 DMA_BIT_MASK(32)); 30 DMA_BIT_MASK(32));
31} 31}
32
33static int __init mxs_add_mxs_dma(void)
34{
35 char *apbh = "mxs-dma-apbh";
36 char *apbx = "mxs-dma-apbx";
37
38 if (cpu_is_mx23()) {
39 mxs_add_dma(apbh, MX23_APBH_DMA_BASE_ADDR);
40 mxs_add_dma(apbx, MX23_APBX_DMA_BASE_ADDR);
41 }
42
43 if (cpu_is_mx28()) {
44 mxs_add_dma(apbh, MX28_APBH_DMA_BASE_ADDR);
45 mxs_add_dma(apbx, MX28_APBX_DMA_BASE_ADDR);
46 }
47
48 return 0;
49}
50arch_initcall(mxs_add_mxs_dma);
diff --git a/arch/arm/mach-mxs/devices/platform-gpio-mxs.c b/arch/arm/mach-mxs/devices/platform-gpio-mxs.c
index ed0885e414e0..cd99f19ec637 100644
--- a/arch/arm/mach-mxs/devices/platform-gpio-mxs.c
+++ b/arch/arm/mach-mxs/devices/platform-gpio-mxs.c
@@ -14,7 +14,7 @@
14#include <mach/devices-common.h> 14#include <mach/devices-common.h>
15 15
16struct platform_device *__init mxs_add_gpio( 16struct platform_device *__init mxs_add_gpio(
17 int id, resource_size_t iobase, int irq) 17 char *name, int id, resource_size_t iobase, int irq)
18{ 18{
19 struct resource res[] = { 19 struct resource res[] = {
20 { 20 {
@@ -29,25 +29,5 @@ struct platform_device *__init mxs_add_gpio(
29 }; 29 };
30 30
31 return platform_device_register_resndata(&mxs_apbh_bus, 31 return platform_device_register_resndata(&mxs_apbh_bus,
32 "gpio-mxs", id, res, ARRAY_SIZE(res), NULL, 0); 32 name, id, res, ARRAY_SIZE(res), NULL, 0);
33} 33}
34
35static int __init mxs_add_mxs_gpio(void)
36{
37 if (cpu_is_mx23()) {
38 mxs_add_gpio(0, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO0);
39 mxs_add_gpio(1, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO1);
40 mxs_add_gpio(2, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO2);
41 }
42
43 if (cpu_is_mx28()) {
44 mxs_add_gpio(0, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO0);
45 mxs_add_gpio(1, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO1);
46 mxs_add_gpio(2, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO2);
47 mxs_add_gpio(3, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO3);
48 mxs_add_gpio(4, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO4);
49 }
50
51 return 0;
52}
53postcore_initcall(mxs_add_mxs_gpio);
diff --git a/arch/arm/mach-mxs/devices/platform-mxs-mmc.c b/arch/arm/mach-mxs/devices/platform-mxs-mmc.c
index bef9d923f54e..b33c9d05c552 100644
--- a/arch/arm/mach-mxs/devices/platform-mxs-mmc.c
+++ b/arch/arm/mach-mxs/devices/platform-mxs-mmc.c
@@ -17,8 +17,9 @@
17#include <mach/mx28.h> 17#include <mach/mx28.h>
18#include <mach/devices-common.h> 18#include <mach/devices-common.h>
19 19
20#define mxs_mxs_mmc_data_entry_single(soc, _id, hwid) \ 20#define mxs_mxs_mmc_data_entry_single(soc, _devid, _id, hwid) \
21 { \ 21 { \
22 .devid = _devid, \
22 .id = _id, \ 23 .id = _id, \
23 .iobase = soc ## _SSP ## hwid ## _BASE_ADDR, \ 24 .iobase = soc ## _SSP ## hwid ## _BASE_ADDR, \
24 .dma = soc ## _DMA_SSP ## hwid, \ 25 .dma = soc ## _DMA_SSP ## hwid, \
@@ -26,23 +27,23 @@
26 .irq_dma = soc ## _INT_SSP ## hwid ## _DMA, \ 27 .irq_dma = soc ## _INT_SSP ## hwid ## _DMA, \
27 } 28 }
28 29
29#define mxs_mxs_mmc_data_entry(soc, _id, hwid) \ 30#define mxs_mxs_mmc_data_entry(soc, _devid, _id, hwid) \
30 [_id] = mxs_mxs_mmc_data_entry_single(soc, _id, hwid) 31 [_id] = mxs_mxs_mmc_data_entry_single(soc, _devid, _id, hwid)
31 32
32 33
33#ifdef CONFIG_SOC_IMX23 34#ifdef CONFIG_SOC_IMX23
34const struct mxs_mxs_mmc_data mx23_mxs_mmc_data[] __initconst = { 35const struct mxs_mxs_mmc_data mx23_mxs_mmc_data[] __initconst = {
35 mxs_mxs_mmc_data_entry(MX23, 0, 1), 36 mxs_mxs_mmc_data_entry(MX23, "imx23-mmc", 0, 1),
36 mxs_mxs_mmc_data_entry(MX23, 1, 2), 37 mxs_mxs_mmc_data_entry(MX23, "imx23-mmc", 1, 2),
37}; 38};
38#endif 39#endif
39 40
40#ifdef CONFIG_SOC_IMX28 41#ifdef CONFIG_SOC_IMX28
41const struct mxs_mxs_mmc_data mx28_mxs_mmc_data[] __initconst = { 42const struct mxs_mxs_mmc_data mx28_mxs_mmc_data[] __initconst = {
42 mxs_mxs_mmc_data_entry(MX28, 0, 0), 43 mxs_mxs_mmc_data_entry(MX28, "imx28-mmc", 0, 0),
43 mxs_mxs_mmc_data_entry(MX28, 1, 1), 44 mxs_mxs_mmc_data_entry(MX28, "imx28-mmc", 1, 1),
44 mxs_mxs_mmc_data_entry(MX28, 2, 2), 45 mxs_mxs_mmc_data_entry(MX28, "imx28-mmc", 2, 2),
45 mxs_mxs_mmc_data_entry(MX28, 3, 3), 46 mxs_mxs_mmc_data_entry(MX28, "imx28-mmc", 3, 3),
46}; 47};
47#endif 48#endif
48 49
@@ -70,6 +71,6 @@ struct platform_device *__init mxs_add_mxs_mmc(
70 }, 71 },
71 }; 72 };
72 73
73 return mxs_add_platform_device("mxs-mmc", data->id, 74 return mxs_add_platform_device(data->devid, data->id,
74 res, ARRAY_SIZE(res), pdata, sizeof(*pdata)); 75 res, ARRAY_SIZE(res), pdata, sizeof(*pdata));
75} 76}
diff --git a/arch/arm/mach-mxs/include/mach/clock.h b/arch/arm/mach-mxs/include/mach/clock.h
deleted file mode 100644
index 592c9ab5d760..000000000000
--- a/arch/arm/mach-mxs/include/mach/clock.h
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * Copyright 2005-2007 Freescale Semiconductor, Inc. All Rights Reserved.
3 * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
4 *
5 * This program is free software; you can redistribute it and/or
6 * modify it under the terms of the GNU General Public License
7 * as published by the Free Software Foundation; either version 2
8 * of the License, or (at your option) any later version.
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston,
17 * MA 02110-1301, USA.
18 */
19
20#ifndef __MACH_MXS_CLOCK_H__
21#define __MACH_MXS_CLOCK_H__
22
23#ifndef __ASSEMBLY__
24#include <linux/list.h>
25
26struct module;
27
28struct clk {
29 int id;
30 /* Source clock this clk depends on */
31 struct clk *parent;
32 /* Reference count of clock enable/disable */
33 __s8 usecount;
34 /* Register bit position for clock's enable/disable control. */
35 u8 enable_shift;
36 /* Register address for clock's enable/disable control. */
37 void __iomem *enable_reg;
38 u32 flags;
39 /* get the current clock rate (always a fresh value) */
40 unsigned long (*get_rate) (struct clk *);
41 /* Function ptr to set the clock to a new rate. The rate must match a
42 supported rate returned from round_rate. Leave blank if clock is not
43 programmable */
44 int (*set_rate) (struct clk *, unsigned long);
45 /* Function ptr to round the requested clock rate to the nearest
46 supported rate that is less than or equal to the requested rate. */
47 unsigned long (*round_rate) (struct clk *, unsigned long);
48 /* Function ptr to enable the clock. Leave blank if clock can not
49 be gated. */
50 int (*enable) (struct clk *);
51 /* Function ptr to disable the clock. Leave blank if clock can not
52 be gated. */
53 void (*disable) (struct clk *);
54 /* Function ptr to set the parent clock of the clock. */
55 int (*set_parent) (struct clk *, struct clk *);
56};
57
58int clk_register(struct clk *clk);
59void clk_unregister(struct clk *clk);
60
61#endif /* __ASSEMBLY__ */
62#endif /* __MACH_MXS_CLOCK_H__ */
diff --git a/arch/arm/mach-mxs/include/mach/common.h b/arch/arm/mach-mxs/include/mach/common.h
index 8d88399b73ef..de6c7ba42544 100644
--- a/arch/arm/mach-mxs/include/mach/common.h
+++ b/arch/arm/mach-mxs/include/mach/common.h
@@ -11,28 +11,27 @@
11#ifndef __MACH_MXS_COMMON_H__ 11#ifndef __MACH_MXS_COMMON_H__
12#define __MACH_MXS_COMMON_H__ 12#define __MACH_MXS_COMMON_H__
13 13
14struct clk;
15
16extern const u32 *mxs_get_ocotp(void); 14extern const u32 *mxs_get_ocotp(void);
17extern int mxs_reset_block(void __iomem *); 15extern int mxs_reset_block(void __iomem *);
18extern void mxs_timer_init(struct clk *, int); 16extern void mxs_timer_init(int);
19extern void mxs_restart(char, const char *); 17extern void mxs_restart(char, const char *);
20extern int mxs_saif_clkmux_select(unsigned int clkmux); 18extern int mxs_saif_clkmux_select(unsigned int clkmux);
21 19
22extern void mx23_soc_init(void); 20extern void mx23_soc_init(void);
23extern int mx23_register_gpios(void);
24extern int mx23_clocks_init(void); 21extern int mx23_clocks_init(void);
25extern void mx23_map_io(void); 22extern void mx23_map_io(void);
26extern void mx23_init_irq(void); 23extern void mx23_init_irq(void);
27 24
28extern void mx28_soc_init(void); 25extern void mx28_soc_init(void);
29extern int mx28_register_gpios(void);
30extern int mx28_clocks_init(void); 26extern int mx28_clocks_init(void);
31extern void mx28_map_io(void); 27extern void mx28_map_io(void);
32extern void mx28_init_irq(void); 28extern void mx28_init_irq(void);
33 29
34extern void icoll_init_irq(void); 30extern void icoll_init_irq(void);
35 31
36extern int mxs_clkctrl_timeout(unsigned int reg_offset, unsigned int mask); 32extern struct platform_device *mxs_add_dma(const char *devid,
33 resource_size_t base);
34extern struct platform_device *mxs_add_gpio(char *name, int id,
35 resource_size_t iobase, int irq);
37 36
38#endif /* __MACH_MXS_COMMON_H__ */ 37#endif /* __MACH_MXS_COMMON_H__ */
diff --git a/arch/arm/mach-mxs/include/mach/devices-common.h b/arch/arm/mach-mxs/include/mach/devices-common.h
index 21e45a70d344..e8b1d958240b 100644
--- a/arch/arm/mach-mxs/include/mach/devices-common.h
+++ b/arch/arm/mach-mxs/include/mach/devices-common.h
@@ -82,8 +82,9 @@ struct platform_device * __init mxs_add_mxs_i2c(
82 const struct mxs_mxs_i2c_data *data); 82 const struct mxs_mxs_i2c_data *data);
83 83
84/* mmc */ 84/* mmc */
85#include <mach/mmc.h> 85#include <linux/mmc/mxs-mmc.h>
86struct mxs_mxs_mmc_data { 86struct mxs_mxs_mmc_data {
87 const char *devid;
87 int id; 88 int id;
88 resource_size_t iobase; 89 resource_size_t iobase;
89 resource_size_t dma; 90 resource_size_t dma;
diff --git a/arch/arm/mach-mxs/mach-mx28evk.c b/arch/arm/mach-mxs/mach-mx28evk.c
index da4610ebe9e6..dafd48e86c8c 100644
--- a/arch/arm/mach-mxs/mach-mx28evk.c
+++ b/arch/arm/mach-mxs/mach-mx28evk.c
@@ -226,7 +226,7 @@ static void __init mx28evk_fec_reset(void)
226 struct clk *clk; 226 struct clk *clk;
227 227
228 /* Enable fec phy clock */ 228 /* Enable fec phy clock */
229 clk = clk_get_sys("pll2", NULL); 229 clk = clk_get_sys("enet_out", NULL);
230 if (!IS_ERR(clk)) 230 if (!IS_ERR(clk))
231 clk_prepare_enable(clk); 231 clk_prepare_enable(clk);
232 232
diff --git a/arch/arm/mach-mxs/mach-mxs.c b/arch/arm/mach-mxs/mach-mxs.c
new file mode 100644
index 000000000000..8cac94b33020
--- /dev/null
+++ b/arch/arm/mach-mxs/mach-mxs.c
@@ -0,0 +1,121 @@
1/*
2 * Copyright 2012 Freescale Semiconductor, Inc.
3 * Copyright 2012 Linaro Ltd.
4 *
5 * The code contained herein is licensed under the GNU General Public
6 * License. You may obtain a copy of the GNU General Public License
7 * Version 2 or later at the following locations:
8 *
9 * http://www.opensource.org/licenses/gpl-license.html
10 * http://www.gnu.org/copyleft/gpl.html
11 */
12
13#include <linux/clk.h>
14#include <linux/clkdev.h>
15#include <linux/err.h>
16#include <linux/init.h>
17#include <linux/init.h>
18#include <linux/irqdomain.h>
19#include <linux/of_irq.h>
20#include <linux/of_platform.h>
21#include <asm/mach/arch.h>
22#include <asm/mach/time.h>
23#include <mach/common.h>
24
25static int __init mxs_icoll_add_irq_domain(struct device_node *np,
26 struct device_node *interrupt_parent)
27{
28 irq_domain_add_legacy(np, 128, 0, 0, &irq_domain_simple_ops, NULL);
29
30 return 0;
31}
32
33static int __init mxs_gpio_add_irq_domain(struct device_node *np,
34 struct device_node *interrupt_parent)
35{
36 static int gpio_irq_base = MXS_GPIO_IRQ_START;
37
38 irq_domain_add_legacy(np, 32, gpio_irq_base, 0, &irq_domain_simple_ops, NULL);
39 gpio_irq_base += 32;
40
41 return 0;
42}
43
44static const struct of_device_id mxs_irq_match[] __initconst = {
45 { .compatible = "fsl,mxs-icoll", .data = mxs_icoll_add_irq_domain, },
46 { .compatible = "fsl,mxs-gpio", .data = mxs_gpio_add_irq_domain, },
47 { /* sentinel */ }
48};
49
50static void __init mxs_dt_init_irq(void)
51{
52 icoll_init_irq();
53 of_irq_init(mxs_irq_match);
54}
55
56static void __init imx23_timer_init(void)
57{
58 mx23_clocks_init();
59}
60
61static struct sys_timer imx23_timer = {
62 .init = imx23_timer_init,
63};
64
65static void __init imx28_timer_init(void)
66{
67 mx28_clocks_init();
68}
69
70static struct sys_timer imx28_timer = {
71 .init = imx28_timer_init,
72};
73
74static void __init imx28_evk_init(void)
75{
76 struct clk *clk;
77
78 /* Enable fec phy clock */
79 clk = clk_get_sys("enet_out", NULL);
80 if (!IS_ERR(clk))
81 clk_prepare_enable(clk);
82}
83
84static void __init mxs_machine_init(void)
85{
86 if (of_machine_is_compatible("fsl,imx28-evk"))
87 imx28_evk_init();
88
89 of_platform_populate(NULL, of_default_bus_match_table,
90 NULL, NULL);
91}
92
93static const char *imx23_dt_compat[] __initdata = {
94 "fsl,imx23-evk",
95 "fsl,imx23",
96 NULL,
97};
98
99static const char *imx28_dt_compat[] __initdata = {
100 "fsl,imx28-evk",
101 "fsl,imx28",
102 NULL,
103};
104
105DT_MACHINE_START(IMX23, "Freescale i.MX23 (Device Tree)")
106 .map_io = mx23_map_io,
107 .init_irq = mxs_dt_init_irq,
108 .timer = &imx23_timer,
109 .init_machine = mxs_machine_init,
110 .dt_compat = imx23_dt_compat,
111 .restart = mxs_restart,
112MACHINE_END
113
114DT_MACHINE_START(IMX28, "Freescale i.MX28 (Device Tree)")
115 .map_io = mx28_map_io,
116 .init_irq = mxs_dt_init_irq,
117 .timer = &imx28_timer,
118 .init_machine = mxs_machine_init,
119 .dt_compat = imx28_dt_compat,
120 .restart = mxs_restart,
121MACHINE_END
diff --git a/arch/arm/mach-mxs/mm.c b/arch/arm/mach-mxs/mm.c
index 67a384edcf5b..dccb67a9e7c4 100644
--- a/arch/arm/mach-mxs/mm.c
+++ b/arch/arm/mach-mxs/mm.c
@@ -66,9 +66,25 @@ void __init mx28_init_irq(void)
66void __init mx23_soc_init(void) 66void __init mx23_soc_init(void)
67{ 67{
68 pinctrl_provide_dummies(); 68 pinctrl_provide_dummies();
69
70 mxs_add_dma("imx23-dma-apbh", MX23_APBH_DMA_BASE_ADDR);
71 mxs_add_dma("imx23-dma-apbx", MX23_APBX_DMA_BASE_ADDR);
72
73 mxs_add_gpio("imx23-gpio", 0, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO0);
74 mxs_add_gpio("imx23-gpio", 1, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO1);
75 mxs_add_gpio("imx23-gpio", 2, MX23_PINCTRL_BASE_ADDR, MX23_INT_GPIO2);
69} 76}
70 77
71void __init mx28_soc_init(void) 78void __init mx28_soc_init(void)
72{ 79{
73 pinctrl_provide_dummies(); 80 pinctrl_provide_dummies();
81
82 mxs_add_dma("imx28-dma-apbh", MX23_APBH_DMA_BASE_ADDR);
83 mxs_add_dma("imx28-dma-apbx", MX23_APBX_DMA_BASE_ADDR);
84
85 mxs_add_gpio("imx28-gpio", 0, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO0);
86 mxs_add_gpio("imx28-gpio", 1, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO1);
87 mxs_add_gpio("imx28-gpio", 2, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO2);
88 mxs_add_gpio("imx28-gpio", 3, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO3);
89 mxs_add_gpio("imx28-gpio", 4, MX28_PINCTRL_BASE_ADDR, MX28_INT_GPIO4);
74} 90}
diff --git a/arch/arm/mach-mxs/regs-clkctrl-mx23.h b/arch/arm/mach-mxs/regs-clkctrl-mx23.h
deleted file mode 100644
index 0ea5c9d0e2b2..000000000000
--- a/arch/arm/mach-mxs/regs-clkctrl-mx23.h
+++ /dev/null
@@ -1,331 +0,0 @@
1/*
2 * Freescale CLKCTRL Register Definitions
3 *
4 * Copyright 2008 Embedded Alley Solutions, Inc All Rights Reserved.
5 * Copyright 2008-2010 Freescale Semiconductor, Inc.
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; either version 2 of the License, or
10 * (at your option) any later version.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 * This file is created by xml file. Don't Edit it.
22 *
23 * Xml Revision: 1.48
24 * Template revision: 26195
25 */
26
27#ifndef __REGS_CLKCTRL_MX23_H__
28#define __REGS_CLKCTRL_MX23_H__
29
30
31#define HW_CLKCTRL_PLLCTRL0 (0x00000000)
32#define HW_CLKCTRL_PLLCTRL0_SET (0x00000004)
33#define HW_CLKCTRL_PLLCTRL0_CLR (0x00000008)
34#define HW_CLKCTRL_PLLCTRL0_TOG (0x0000000c)
35
36#define BP_CLKCTRL_PLLCTRL0_LFR_SEL 28
37#define BM_CLKCTRL_PLLCTRL0_LFR_SEL 0x30000000
38#define BF_CLKCTRL_PLLCTRL0_LFR_SEL(v) \
39 (((v) << 28) & BM_CLKCTRL_PLLCTRL0_LFR_SEL)
40#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__DEFAULT 0x0
41#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__TIMES_2 0x1
42#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__TIMES_05 0x2
43#define BV_CLKCTRL_PLLCTRL0_LFR_SEL__UNDEFINED 0x3
44#define BP_CLKCTRL_PLLCTRL0_CP_SEL 24
45#define BM_CLKCTRL_PLLCTRL0_CP_SEL 0x03000000
46#define BF_CLKCTRL_PLLCTRL0_CP_SEL(v) \
47 (((v) << 24) & BM_CLKCTRL_PLLCTRL0_CP_SEL)
48#define BV_CLKCTRL_PLLCTRL0_CP_SEL__DEFAULT 0x0
49#define BV_CLKCTRL_PLLCTRL0_CP_SEL__TIMES_2 0x1
50#define BV_CLKCTRL_PLLCTRL0_CP_SEL__TIMES_05 0x2
51#define BV_CLKCTRL_PLLCTRL0_CP_SEL__UNDEFINED 0x3
52#define BP_CLKCTRL_PLLCTRL0_DIV_SEL 20
53#define BM_CLKCTRL_PLLCTRL0_DIV_SEL 0x00300000
54#define BF_CLKCTRL_PLLCTRL0_DIV_SEL(v) \
55 (((v) << 20) & BM_CLKCTRL_PLLCTRL0_DIV_SEL)
56#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__DEFAULT 0x0
57#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__LOWER 0x1
58#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__LOWEST 0x2
59#define BV_CLKCTRL_PLLCTRL0_DIV_SEL__UNDEFINED 0x3
60#define BM_CLKCTRL_PLLCTRL0_EN_USB_CLKS 0x00040000
61#define BM_CLKCTRL_PLLCTRL0_POWER 0x00010000
62
63#define HW_CLKCTRL_PLLCTRL1 (0x00000010)
64
65#define BM_CLKCTRL_PLLCTRL1_LOCK 0x80000000
66#define BM_CLKCTRL_PLLCTRL1_FORCE_LOCK 0x40000000
67#define BP_CLKCTRL_PLLCTRL1_LOCK_COUNT 0
68#define BM_CLKCTRL_PLLCTRL1_LOCK_COUNT 0x0000FFFF
69#define BF_CLKCTRL_PLLCTRL1_LOCK_COUNT(v) \
70 (((v) << 0) & BM_CLKCTRL_PLLCTRL1_LOCK_COUNT)
71
72#define HW_CLKCTRL_CPU (0x00000020)
73#define HW_CLKCTRL_CPU_SET (0x00000024)
74#define HW_CLKCTRL_CPU_CLR (0x00000028)
75#define HW_CLKCTRL_CPU_TOG (0x0000002c)
76
77#define BM_CLKCTRL_CPU_BUSY_REF_XTAL 0x20000000
78#define BM_CLKCTRL_CPU_BUSY_REF_CPU 0x10000000
79#define BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN 0x04000000
80#define BP_CLKCTRL_CPU_DIV_XTAL 16
81#define BM_CLKCTRL_CPU_DIV_XTAL 0x03FF0000
82#define BF_CLKCTRL_CPU_DIV_XTAL(v) \
83 (((v) << 16) & BM_CLKCTRL_CPU_DIV_XTAL)
84#define BM_CLKCTRL_CPU_INTERRUPT_WAIT 0x00001000
85#define BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN 0x00000400
86#define BP_CLKCTRL_CPU_DIV_CPU 0
87#define BM_CLKCTRL_CPU_DIV_CPU 0x0000003F
88#define BF_CLKCTRL_CPU_DIV_CPU(v) \
89 (((v) << 0) & BM_CLKCTRL_CPU_DIV_CPU)
90
91#define HW_CLKCTRL_HBUS (0x00000030)
92#define HW_CLKCTRL_HBUS_SET (0x00000034)
93#define HW_CLKCTRL_HBUS_CLR (0x00000038)
94#define HW_CLKCTRL_HBUS_TOG (0x0000003c)
95
96#define BM_CLKCTRL_HBUS_BUSY 0x20000000
97#define BM_CLKCTRL_HBUS_DCP_AS_ENABLE 0x10000000
98#define BM_CLKCTRL_HBUS_PXP_AS_ENABLE 0x08000000
99#define BM_CLKCTRL_HBUS_APBHDMA_AS_ENABLE 0x04000000
100#define BM_CLKCTRL_HBUS_APBXDMA_AS_ENABLE 0x02000000
101#define BM_CLKCTRL_HBUS_TRAFFIC_JAM_AS_ENABLE 0x01000000
102#define BM_CLKCTRL_HBUS_TRAFFIC_AS_ENABLE 0x00800000
103#define BM_CLKCTRL_HBUS_CPU_DATA_AS_ENABLE 0x00400000
104#define BM_CLKCTRL_HBUS_CPU_INSTR_AS_ENABLE 0x00200000
105#define BM_CLKCTRL_HBUS_AUTO_SLOW_MODE 0x00100000
106#define BP_CLKCTRL_HBUS_SLOW_DIV 16
107#define BM_CLKCTRL_HBUS_SLOW_DIV 0x00070000
108#define BF_CLKCTRL_HBUS_SLOW_DIV(v) \
109 (((v) << 16) & BM_CLKCTRL_HBUS_SLOW_DIV)
110#define BV_CLKCTRL_HBUS_SLOW_DIV__BY1 0x0
111#define BV_CLKCTRL_HBUS_SLOW_DIV__BY2 0x1
112#define BV_CLKCTRL_HBUS_SLOW_DIV__BY4 0x2
113#define BV_CLKCTRL_HBUS_SLOW_DIV__BY8 0x3
114#define BV_CLKCTRL_HBUS_SLOW_DIV__BY16 0x4
115#define BV_CLKCTRL_HBUS_SLOW_DIV__BY32 0x5
116#define BM_CLKCTRL_HBUS_DIV_FRAC_EN 0x00000020
117#define BP_CLKCTRL_HBUS_DIV 0
118#define BM_CLKCTRL_HBUS_DIV 0x0000001F
119#define BF_CLKCTRL_HBUS_DIV(v) \
120 (((v) << 0) & BM_CLKCTRL_HBUS_DIV)
121
122#define HW_CLKCTRL_XBUS (0x00000040)
123
124#define BM_CLKCTRL_XBUS_BUSY 0x80000000
125#define BM_CLKCTRL_XBUS_DIV_FRAC_EN 0x00000400
126#define BP_CLKCTRL_XBUS_DIV 0
127#define BM_CLKCTRL_XBUS_DIV 0x000003FF
128#define BF_CLKCTRL_XBUS_DIV(v) \
129 (((v) << 0) & BM_CLKCTRL_XBUS_DIV)
130
131#define HW_CLKCTRL_XTAL (0x00000050)
132#define HW_CLKCTRL_XTAL_SET (0x00000054)
133#define HW_CLKCTRL_XTAL_CLR (0x00000058)
134#define HW_CLKCTRL_XTAL_TOG (0x0000005c)
135
136#define BP_CLKCTRL_XTAL_UART_CLK_GATE 31
137#define BM_CLKCTRL_XTAL_UART_CLK_GATE 0x80000000
138#define BP_CLKCTRL_XTAL_FILT_CLK24M_GATE 30
139#define BM_CLKCTRL_XTAL_FILT_CLK24M_GATE 0x40000000
140#define BP_CLKCTRL_XTAL_PWM_CLK24M_GATE 29
141#define BM_CLKCTRL_XTAL_PWM_CLK24M_GATE 0x20000000
142#define BM_CLKCTRL_XTAL_DRI_CLK24M_GATE 0x10000000
143#define BM_CLKCTRL_XTAL_DIGCTRL_CLK1M_GATE 0x08000000
144#define BP_CLKCTRL_XTAL_TIMROT_CLK32K_GATE 26
145#define BM_CLKCTRL_XTAL_TIMROT_CLK32K_GATE 0x04000000
146#define BP_CLKCTRL_XTAL_DIV_UART 0
147#define BM_CLKCTRL_XTAL_DIV_UART 0x00000003
148#define BF_CLKCTRL_XTAL_DIV_UART(v) \
149 (((v) << 0) & BM_CLKCTRL_XTAL_DIV_UART)
150
151#define HW_CLKCTRL_PIX (0x00000060)
152
153#define BP_CLKCTRL_PIX_CLKGATE 31
154#define BM_CLKCTRL_PIX_CLKGATE 0x80000000
155#define BM_CLKCTRL_PIX_BUSY 0x20000000
156#define BM_CLKCTRL_PIX_DIV_FRAC_EN 0x00001000
157#define BP_CLKCTRL_PIX_DIV 0
158#define BM_CLKCTRL_PIX_DIV 0x00000FFF
159#define BF_CLKCTRL_PIX_DIV(v) \
160 (((v) << 0) & BM_CLKCTRL_PIX_DIV)
161
162#define HW_CLKCTRL_SSP (0x00000070)
163
164#define BP_CLKCTRL_SSP_CLKGATE 31
165#define BM_CLKCTRL_SSP_CLKGATE 0x80000000
166#define BM_CLKCTRL_SSP_BUSY 0x20000000
167#define BM_CLKCTRL_SSP_DIV_FRAC_EN 0x00000200
168#define BP_CLKCTRL_SSP_DIV 0
169#define BM_CLKCTRL_SSP_DIV 0x000001FF
170#define BF_CLKCTRL_SSP_DIV(v) \
171 (((v) << 0) & BM_CLKCTRL_SSP_DIV)
172
173#define HW_CLKCTRL_GPMI (0x00000080)
174
175#define BP_CLKCTRL_GPMI_CLKGATE 31
176#define BM_CLKCTRL_GPMI_CLKGATE 0x80000000
177#define BM_CLKCTRL_GPMI_BUSY 0x20000000
178#define BM_CLKCTRL_GPMI_DIV_FRAC_EN 0x00000400
179#define BP_CLKCTRL_GPMI_DIV 0
180#define BM_CLKCTRL_GPMI_DIV 0x000003FF
181#define BF_CLKCTRL_GPMI_DIV(v) \
182 (((v) << 0) & BM_CLKCTRL_GPMI_DIV)
183
184#define HW_CLKCTRL_SPDIF (0x00000090)
185
186#define BM_CLKCTRL_SPDIF_CLKGATE 0x80000000
187
188#define HW_CLKCTRL_EMI (0x000000a0)
189
190#define BP_CLKCTRL_EMI_CLKGATE 31
191#define BM_CLKCTRL_EMI_CLKGATE 0x80000000
192#define BM_CLKCTRL_EMI_SYNC_MODE_EN 0x40000000
193#define BM_CLKCTRL_EMI_BUSY_REF_XTAL 0x20000000
194#define BM_CLKCTRL_EMI_BUSY_REF_EMI 0x10000000
195#define BM_CLKCTRL_EMI_BUSY_REF_CPU 0x08000000
196#define BM_CLKCTRL_EMI_BUSY_SYNC_MODE 0x04000000
197#define BM_CLKCTRL_EMI_BUSY_DCC_RESYNC 0x00020000
198#define BM_CLKCTRL_EMI_DCC_RESYNC_ENABLE 0x00010000
199#define BP_CLKCTRL_EMI_DIV_XTAL 8
200#define BM_CLKCTRL_EMI_DIV_XTAL 0x00000F00
201#define BF_CLKCTRL_EMI_DIV_XTAL(v) \
202 (((v) << 8) & BM_CLKCTRL_EMI_DIV_XTAL)
203#define BP_CLKCTRL_EMI_DIV_EMI 0
204#define BM_CLKCTRL_EMI_DIV_EMI 0x0000003F
205#define BF_CLKCTRL_EMI_DIV_EMI(v) \
206 (((v) << 0) & BM_CLKCTRL_EMI_DIV_EMI)
207
208#define HW_CLKCTRL_IR (0x000000b0)
209
210#define BM_CLKCTRL_IR_CLKGATE 0x80000000
211#define BM_CLKCTRL_IR_AUTO_DIV 0x20000000
212#define BM_CLKCTRL_IR_IR_BUSY 0x10000000
213#define BM_CLKCTRL_IR_IROV_BUSY 0x08000000
214#define BP_CLKCTRL_IR_IROV_DIV 16
215#define BM_CLKCTRL_IR_IROV_DIV 0x01FF0000
216#define BF_CLKCTRL_IR_IROV_DIV(v) \
217 (((v) << 16) & BM_CLKCTRL_IR_IROV_DIV)
218#define BP_CLKCTRL_IR_IR_DIV 0
219#define BM_CLKCTRL_IR_IR_DIV 0x000003FF
220#define BF_CLKCTRL_IR_IR_DIV(v) \
221 (((v) << 0) & BM_CLKCTRL_IR_IR_DIV)
222
223#define HW_CLKCTRL_SAIF (0x000000c0)
224
225#define BM_CLKCTRL_SAIF_CLKGATE 0x80000000
226#define BM_CLKCTRL_SAIF_BUSY 0x20000000
227#define BM_CLKCTRL_SAIF_DIV_FRAC_EN 0x00010000
228#define BP_CLKCTRL_SAIF_DIV 0
229#define BM_CLKCTRL_SAIF_DIV 0x0000FFFF
230#define BF_CLKCTRL_SAIF_DIV(v) \
231 (((v) << 0) & BM_CLKCTRL_SAIF_DIV)
232
233#define HW_CLKCTRL_TV (0x000000d0)
234
235#define BM_CLKCTRL_TV_CLK_TV108M_GATE 0x80000000
236#define BM_CLKCTRL_TV_CLK_TV_GATE 0x40000000
237
238#define HW_CLKCTRL_ETM (0x000000e0)
239
240#define BM_CLKCTRL_ETM_CLKGATE 0x80000000
241#define BM_CLKCTRL_ETM_BUSY 0x20000000
242#define BM_CLKCTRL_ETM_DIV_FRAC_EN 0x00000040
243#define BP_CLKCTRL_ETM_DIV 0
244#define BM_CLKCTRL_ETM_DIV 0x0000003F
245#define BF_CLKCTRL_ETM_DIV(v) \
246 (((v) << 0) & BM_CLKCTRL_ETM_DIV)
247
248#define HW_CLKCTRL_FRAC (0x000000f0)
249#define HW_CLKCTRL_FRAC_SET (0x000000f4)
250#define HW_CLKCTRL_FRAC_CLR (0x000000f8)
251#define HW_CLKCTRL_FRAC_TOG (0x000000fc)
252
253#define BP_CLKCTRL_FRAC_CLKGATEIO 31
254#define BM_CLKCTRL_FRAC_CLKGATEIO 0x80000000
255#define BM_CLKCTRL_FRAC_IO_STABLE 0x40000000
256#define BP_CLKCTRL_FRAC_IOFRAC 24
257#define BM_CLKCTRL_FRAC_IOFRAC 0x3F000000
258#define BF_CLKCTRL_FRAC_IOFRAC(v) \
259 (((v) << 24) & BM_CLKCTRL_FRAC_IOFRAC)
260#define BP_CLKCTRL_FRAC_CLKGATEPIX 23
261#define BM_CLKCTRL_FRAC_CLKGATEPIX 0x00800000
262#define BM_CLKCTRL_FRAC_PIX_STABLE 0x00400000
263#define BP_CLKCTRL_FRAC_PIXFRAC 16
264#define BM_CLKCTRL_FRAC_PIXFRAC 0x003F0000
265#define BF_CLKCTRL_FRAC_PIXFRAC(v) \
266 (((v) << 16) & BM_CLKCTRL_FRAC_PIXFRAC)
267#define BP_CLKCTRL_FRAC_CLKGATEEMI 15
268#define BM_CLKCTRL_FRAC_CLKGATEEMI 0x00008000
269#define BM_CLKCTRL_FRAC_EMI_STABLE 0x00004000
270#define BP_CLKCTRL_FRAC_EMIFRAC 8
271#define BM_CLKCTRL_FRAC_EMIFRAC 0x00003F00
272#define BF_CLKCTRL_FRAC_EMIFRAC(v) \
273 (((v) << 8) & BM_CLKCTRL_FRAC_EMIFRAC)
274#define BP_CLKCTRL_FRAC_CLKGATECPU 7
275#define BM_CLKCTRL_FRAC_CLKGATECPU 0x00000080
276#define BM_CLKCTRL_FRAC_CPU_STABLE 0x00000040
277#define BP_CLKCTRL_FRAC_CPUFRAC 0
278#define BM_CLKCTRL_FRAC_CPUFRAC 0x0000003F
279#define BF_CLKCTRL_FRAC_CPUFRAC(v) \
280 (((v) << 0) & BM_CLKCTRL_FRAC_CPUFRAC)
281
282#define HW_CLKCTRL_FRAC1 (0x00000100)
283#define HW_CLKCTRL_FRAC1_SET (0x00000104)
284#define HW_CLKCTRL_FRAC1_CLR (0x00000108)
285#define HW_CLKCTRL_FRAC1_TOG (0x0000010c)
286
287#define BM_CLKCTRL_FRAC1_CLKGATEVID 0x80000000
288#define BM_CLKCTRL_FRAC1_VID_STABLE 0x40000000
289
290#define HW_CLKCTRL_CLKSEQ (0x00000110)
291#define HW_CLKCTRL_CLKSEQ_SET (0x00000114)
292#define HW_CLKCTRL_CLKSEQ_CLR (0x00000118)
293#define HW_CLKCTRL_CLKSEQ_TOG (0x0000011c)
294
295#define BM_CLKCTRL_CLKSEQ_BYPASS_ETM 0x00000100
296#define BM_CLKCTRL_CLKSEQ_BYPASS_CPU 0x00000080
297#define BM_CLKCTRL_CLKSEQ_BYPASS_EMI 0x00000040
298#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP 0x00000020
299#define BM_CLKCTRL_CLKSEQ_BYPASS_GPMI 0x00000010
300#define BM_CLKCTRL_CLKSEQ_BYPASS_IR 0x00000008
301#define BM_CLKCTRL_CLKSEQ_BYPASS_PIX 0x00000002
302#define BM_CLKCTRL_CLKSEQ_BYPASS_SAIF 0x00000001
303
304#define HW_CLKCTRL_RESET (0x00000120)
305
306#define BM_CLKCTRL_RESET_CHIP 0x00000002
307#define BM_CLKCTRL_RESET_DIG 0x00000001
308
309#define HW_CLKCTRL_STATUS (0x00000130)
310
311#define BP_CLKCTRL_STATUS_CPU_LIMIT 30
312#define BM_CLKCTRL_STATUS_CPU_LIMIT 0xC0000000
313#define BF_CLKCTRL_STATUS_CPU_LIMIT(v) \
314 (((v) << 30) & BM_CLKCTRL_STATUS_CPU_LIMIT)
315
316#define HW_CLKCTRL_VERSION (0x00000140)
317
318#define BP_CLKCTRL_VERSION_MAJOR 24
319#define BM_CLKCTRL_VERSION_MAJOR 0xFF000000
320#define BF_CLKCTRL_VERSION_MAJOR(v) \
321 (((v) << 24) & BM_CLKCTRL_VERSION_MAJOR)
322#define BP_CLKCTRL_VERSION_MINOR 16
323#define BM_CLKCTRL_VERSION_MINOR 0x00FF0000
324#define BF_CLKCTRL_VERSION_MINOR(v) \
325 (((v) << 16) & BM_CLKCTRL_VERSION_MINOR)
326#define BP_CLKCTRL_VERSION_STEP 0
327#define BM_CLKCTRL_VERSION_STEP 0x0000FFFF
328#define BF_CLKCTRL_VERSION_STEP(v) \
329 (((v) << 0) & BM_CLKCTRL_VERSION_STEP)
330
331#endif /* __REGS_CLKCTRL_MX23_H__ */
diff --git a/arch/arm/mach-mxs/regs-clkctrl-mx28.h b/arch/arm/mach-mxs/regs-clkctrl-mx28.h
deleted file mode 100644
index 7d1b061d7943..000000000000
--- a/arch/arm/mach-mxs/regs-clkctrl-mx28.h
+++ /dev/null
@@ -1,486 +0,0 @@
1/*
2 * Freescale CLKCTRL Register Definitions
3 *
4 * Copyright 2009-2010 Freescale Semiconductor, Inc. All Rights Reserved.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 *
20 * This file is created by xml file. Don't Edit it.
21 *
22 * Xml Revision: 1.48
23 * Template revision: 26195
24 */
25
26#ifndef __REGS_CLKCTRL_MX28_H__
27#define __REGS_CLKCTRL_MX28_H__
28
29#define HW_CLKCTRL_PLL0CTRL0 (0x00000000)
30#define HW_CLKCTRL_PLL0CTRL0_SET (0x00000004)
31#define HW_CLKCTRL_PLL0CTRL0_CLR (0x00000008)
32#define HW_CLKCTRL_PLL0CTRL0_TOG (0x0000000c)
33
34#define BP_CLKCTRL_PLL0CTRL0_LFR_SEL 28
35#define BM_CLKCTRL_PLL0CTRL0_LFR_SEL 0x30000000
36#define BF_CLKCTRL_PLL0CTRL0_LFR_SEL(v) \
37 (((v) << 28) & BM_CLKCTRL_PLL0CTRL0_LFR_SEL)
38#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__DEFAULT 0x0
39#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__TIMES_2 0x1
40#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__TIMES_05 0x2
41#define BV_CLKCTRL_PLL0CTRL0_LFR_SEL__UNDEFINED 0x3
42#define BP_CLKCTRL_PLL0CTRL0_CP_SEL 24
43#define BM_CLKCTRL_PLL0CTRL0_CP_SEL 0x03000000
44#define BF_CLKCTRL_PLL0CTRL0_CP_SEL(v) \
45 (((v) << 24) & BM_CLKCTRL_PLL0CTRL0_CP_SEL)
46#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__DEFAULT 0x0
47#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__TIMES_2 0x1
48#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__TIMES_05 0x2
49#define BV_CLKCTRL_PLL0CTRL0_CP_SEL__UNDEFINED 0x3
50#define BP_CLKCTRL_PLL0CTRL0_DIV_SEL 20
51#define BM_CLKCTRL_PLL0CTRL0_DIV_SEL 0x00300000
52#define BF_CLKCTRL_PLL0CTRL0_DIV_SEL(v) \
53 (((v) << 20) & BM_CLKCTRL_PLL0CTRL0_DIV_SEL)
54#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__DEFAULT 0x0
55#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__LOWER 0x1
56#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__LOWEST 0x2
57#define BV_CLKCTRL_PLL0CTRL0_DIV_SEL__UNDEFINED 0x3
58#define BM_CLKCTRL_PLL0CTRL0_EN_USB_CLKS 0x00040000
59#define BM_CLKCTRL_PLL0CTRL0_POWER 0x00020000
60
61#define HW_CLKCTRL_PLL0CTRL1 (0x00000010)
62
63#define BM_CLKCTRL_PLL0CTRL1_LOCK 0x80000000
64#define BM_CLKCTRL_PLL0CTRL1_FORCE_LOCK 0x40000000
65#define BP_CLKCTRL_PLL0CTRL1_LOCK_COUNT 0
66#define BM_CLKCTRL_PLL0CTRL1_LOCK_COUNT 0x0000FFFF
67#define BF_CLKCTRL_PLL0CTRL1_LOCK_COUNT(v) \
68 (((v) << 0) & BM_CLKCTRL_PLL0CTRL1_LOCK_COUNT)
69
70#define HW_CLKCTRL_PLL1CTRL0 (0x00000020)
71#define HW_CLKCTRL_PLL1CTRL0_SET (0x00000024)
72#define HW_CLKCTRL_PLL1CTRL0_CLR (0x00000028)
73#define HW_CLKCTRL_PLL1CTRL0_TOG (0x0000002c)
74
75#define BM_CLKCTRL_PLL1CTRL0_CLKGATEEMI 0x80000000
76#define BP_CLKCTRL_PLL1CTRL0_LFR_SEL 28
77#define BM_CLKCTRL_PLL1CTRL0_LFR_SEL 0x30000000
78#define BF_CLKCTRL_PLL1CTRL0_LFR_SEL(v) \
79 (((v) << 28) & BM_CLKCTRL_PLL1CTRL0_LFR_SEL)
80#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__DEFAULT 0x0
81#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__TIMES_2 0x1
82#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__TIMES_05 0x2
83#define BV_CLKCTRL_PLL1CTRL0_LFR_SEL__UNDEFINED 0x3
84#define BP_CLKCTRL_PLL1CTRL0_CP_SEL 24
85#define BM_CLKCTRL_PLL1CTRL0_CP_SEL 0x03000000
86#define BF_CLKCTRL_PLL1CTRL0_CP_SEL(v) \
87 (((v) << 24) & BM_CLKCTRL_PLL1CTRL0_CP_SEL)
88#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__DEFAULT 0x0
89#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__TIMES_2 0x1
90#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__TIMES_05 0x2
91#define BV_CLKCTRL_PLL1CTRL0_CP_SEL__UNDEFINED 0x3
92#define BP_CLKCTRL_PLL1CTRL0_DIV_SEL 20
93#define BM_CLKCTRL_PLL1CTRL0_DIV_SEL 0x00300000
94#define BF_CLKCTRL_PLL1CTRL0_DIV_SEL(v) \
95 (((v) << 20) & BM_CLKCTRL_PLL1CTRL0_DIV_SEL)
96#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__DEFAULT 0x0
97#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__LOWER 0x1
98#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__LOWEST 0x2
99#define BV_CLKCTRL_PLL1CTRL0_DIV_SEL__UNDEFINED 0x3
100#define BM_CLKCTRL_PLL1CTRL0_EN_USB_CLKS 0x00040000
101#define BM_CLKCTRL_PLL1CTRL0_POWER 0x00020000
102
103#define HW_CLKCTRL_PLL1CTRL1 (0x00000030)
104
105#define BM_CLKCTRL_PLL1CTRL1_LOCK 0x80000000
106#define BM_CLKCTRL_PLL1CTRL1_FORCE_LOCK 0x40000000
107#define BP_CLKCTRL_PLL1CTRL1_LOCK_COUNT 0
108#define BM_CLKCTRL_PLL1CTRL1_LOCK_COUNT 0x0000FFFF
109#define BF_CLKCTRL_PLL1CTRL1_LOCK_COUNT(v) \
110 (((v) << 0) & BM_CLKCTRL_PLL1CTRL1_LOCK_COUNT)
111
112#define HW_CLKCTRL_PLL2CTRL0 (0x00000040)
113#define HW_CLKCTRL_PLL2CTRL0_SET (0x00000044)
114#define HW_CLKCTRL_PLL2CTRL0_CLR (0x00000048)
115#define HW_CLKCTRL_PLL2CTRL0_TOG (0x0000004c)
116
117#define BM_CLKCTRL_PLL2CTRL0_CLKGATE 0x80000000
118#define BP_CLKCTRL_PLL2CTRL0_LFR_SEL 28
119#define BM_CLKCTRL_PLL2CTRL0_LFR_SEL 0x30000000
120#define BF_CLKCTRL_PLL2CTRL0_LFR_SEL(v) \
121 (((v) << 28) & BM_CLKCTRL_PLL2CTRL0_LFR_SEL)
122#define BM_CLKCTRL_PLL2CTRL0_HOLD_RING_OFF_B 0x04000000
123#define BP_CLKCTRL_PLL2CTRL0_CP_SEL 24
124#define BM_CLKCTRL_PLL2CTRL0_CP_SEL 0x03000000
125#define BF_CLKCTRL_PLL2CTRL0_CP_SEL(v) \
126 (((v) << 24) & BM_CLKCTRL_PLL2CTRL0_CP_SEL)
127#define BM_CLKCTRL_PLL2CTRL0_POWER 0x00800000
128
129#define HW_CLKCTRL_CPU (0x00000050)
130#define HW_CLKCTRL_CPU_SET (0x00000054)
131#define HW_CLKCTRL_CPU_CLR (0x00000058)
132#define HW_CLKCTRL_CPU_TOG (0x0000005c)
133
134#define BM_CLKCTRL_CPU_BUSY_REF_XTAL 0x20000000
135#define BM_CLKCTRL_CPU_BUSY_REF_CPU 0x10000000
136#define BM_CLKCTRL_CPU_DIV_XTAL_FRAC_EN 0x04000000
137#define BP_CLKCTRL_CPU_DIV_XTAL 16
138#define BM_CLKCTRL_CPU_DIV_XTAL 0x03FF0000
139#define BF_CLKCTRL_CPU_DIV_XTAL(v) \
140 (((v) << 16) & BM_CLKCTRL_CPU_DIV_XTAL)
141#define BM_CLKCTRL_CPU_INTERRUPT_WAIT 0x00001000
142#define BM_CLKCTRL_CPU_DIV_CPU_FRAC_EN 0x00000400
143#define BP_CLKCTRL_CPU_DIV_CPU 0
144#define BM_CLKCTRL_CPU_DIV_CPU 0x0000003F
145#define BF_CLKCTRL_CPU_DIV_CPU(v) \
146 (((v) << 0) & BM_CLKCTRL_CPU_DIV_CPU)
147
148#define HW_CLKCTRL_HBUS (0x00000060)
149#define HW_CLKCTRL_HBUS_SET (0x00000064)
150#define HW_CLKCTRL_HBUS_CLR (0x00000068)
151#define HW_CLKCTRL_HBUS_TOG (0x0000006c)
152
153#define BM_CLKCTRL_HBUS_ASM_BUSY 0x80000000
154#define BM_CLKCTRL_HBUS_DCP_AS_ENABLE 0x40000000
155#define BM_CLKCTRL_HBUS_PXP_AS_ENABLE 0x20000000
156#define BM_CLKCTRL_HBUS_ASM_EMIPORT_AS_ENABLE 0x08000000
157#define BM_CLKCTRL_HBUS_APBHDMA_AS_ENABLE 0x04000000
158#define BM_CLKCTRL_HBUS_APBXDMA_AS_ENABLE 0x02000000
159#define BM_CLKCTRL_HBUS_TRAFFIC_JAM_AS_ENABLE 0x01000000
160#define BM_CLKCTRL_HBUS_TRAFFIC_AS_ENABLE 0x00800000
161#define BM_CLKCTRL_HBUS_CPU_DATA_AS_ENABLE 0x00400000
162#define BM_CLKCTRL_HBUS_CPU_INSTR_AS_ENABLE 0x00200000
163#define BM_CLKCTRL_HBUS_ASM_ENABLE 0x00100000
164#define BM_CLKCTRL_HBUS_AUTO_CLEAR_DIV_ENABLE 0x00080000
165#define BP_CLKCTRL_HBUS_SLOW_DIV 16
166#define BM_CLKCTRL_HBUS_SLOW_DIV 0x00070000
167#define BF_CLKCTRL_HBUS_SLOW_DIV(v) \
168 (((v) << 16) & BM_CLKCTRL_HBUS_SLOW_DIV)
169#define BV_CLKCTRL_HBUS_SLOW_DIV__BY1 0x0
170#define BV_CLKCTRL_HBUS_SLOW_DIV__BY2 0x1
171#define BV_CLKCTRL_HBUS_SLOW_DIV__BY4 0x2
172#define BV_CLKCTRL_HBUS_SLOW_DIV__BY8 0x3
173#define BV_CLKCTRL_HBUS_SLOW_DIV__BY16 0x4
174#define BV_CLKCTRL_HBUS_SLOW_DIV__BY32 0x5
175#define BM_CLKCTRL_HBUS_DIV_FRAC_EN 0x00000020
176#define BP_CLKCTRL_HBUS_DIV 0
177#define BM_CLKCTRL_HBUS_DIV 0x0000001F
178#define BF_CLKCTRL_HBUS_DIV(v) \
179 (((v) << 0) & BM_CLKCTRL_HBUS_DIV)
180
181#define HW_CLKCTRL_XBUS (0x00000070)
182
183#define BM_CLKCTRL_XBUS_BUSY 0x80000000
184#define BM_CLKCTRL_XBUS_AUTO_CLEAR_DIV_ENABLE 0x00000800
185#define BM_CLKCTRL_XBUS_DIV_FRAC_EN 0x00000400
186#define BP_CLKCTRL_XBUS_DIV 0
187#define BM_CLKCTRL_XBUS_DIV 0x000003FF
188#define BF_CLKCTRL_XBUS_DIV(v) \
189 (((v) << 0) & BM_CLKCTRL_XBUS_DIV)
190
191#define HW_CLKCTRL_XTAL (0x00000080)
192#define HW_CLKCTRL_XTAL_SET (0x00000084)
193#define HW_CLKCTRL_XTAL_CLR (0x00000088)
194#define HW_CLKCTRL_XTAL_TOG (0x0000008c)
195
196#define BP_CLKCTRL_XTAL_UART_CLK_GATE 31
197#define BM_CLKCTRL_XTAL_UART_CLK_GATE 0x80000000
198#define BP_CLKCTRL_XTAL_PWM_CLK24M_GATE 29
199#define BM_CLKCTRL_XTAL_PWM_CLK24M_GATE 0x20000000
200#define BP_CLKCTRL_XTAL_TIMROT_CLK32K_GATE 26
201#define BM_CLKCTRL_XTAL_TIMROT_CLK32K_GATE 0x04000000
202#define BP_CLKCTRL_XTAL_DIV_UART 0
203#define BM_CLKCTRL_XTAL_DIV_UART 0x00000003
204#define BF_CLKCTRL_XTAL_DIV_UART(v) \
205 (((v) << 0) & BM_CLKCTRL_XTAL_DIV_UART)
206
207#define HW_CLKCTRL_SSP0 (0x00000090)
208
209#define BP_CLKCTRL_SSP0_CLKGATE 31
210#define BM_CLKCTRL_SSP0_CLKGATE 0x80000000
211#define BM_CLKCTRL_SSP0_BUSY 0x20000000
212#define BM_CLKCTRL_SSP0_DIV_FRAC_EN 0x00000200
213#define BP_CLKCTRL_SSP0_DIV 0
214#define BM_CLKCTRL_SSP0_DIV 0x000001FF
215#define BF_CLKCTRL_SSP0_DIV(v) \
216 (((v) << 0) & BM_CLKCTRL_SSP0_DIV)
217
218#define HW_CLKCTRL_SSP1 (0x000000a0)
219
220#define BP_CLKCTRL_SSP1_CLKGATE 31
221#define BM_CLKCTRL_SSP1_CLKGATE 0x80000000
222#define BM_CLKCTRL_SSP1_BUSY 0x20000000
223#define BM_CLKCTRL_SSP1_DIV_FRAC_EN 0x00000200
224#define BP_CLKCTRL_SSP1_DIV 0
225#define BM_CLKCTRL_SSP1_DIV 0x000001FF
226#define BF_CLKCTRL_SSP1_DIV(v) \
227 (((v) << 0) & BM_CLKCTRL_SSP1_DIV)
228
229#define HW_CLKCTRL_SSP2 (0x000000b0)
230
231#define BP_CLKCTRL_SSP2_CLKGATE 31
232#define BM_CLKCTRL_SSP2_CLKGATE 0x80000000
233#define BM_CLKCTRL_SSP2_BUSY 0x20000000
234#define BM_CLKCTRL_SSP2_DIV_FRAC_EN 0x00000200
235#define BP_CLKCTRL_SSP2_DIV 0
236#define BM_CLKCTRL_SSP2_DIV 0x000001FF
237#define BF_CLKCTRL_SSP2_DIV(v) \
238 (((v) << 0) & BM_CLKCTRL_SSP2_DIV)
239
240#define HW_CLKCTRL_SSP3 (0x000000c0)
241
242#define BP_CLKCTRL_SSP3_CLKGATE 31
243#define BM_CLKCTRL_SSP3_CLKGATE 0x80000000
244#define BM_CLKCTRL_SSP3_BUSY 0x20000000
245#define BM_CLKCTRL_SSP3_DIV_FRAC_EN 0x00000200
246#define BP_CLKCTRL_SSP3_DIV 0
247#define BM_CLKCTRL_SSP3_DIV 0x000001FF
248#define BF_CLKCTRL_SSP3_DIV(v) \
249 (((v) << 0) & BM_CLKCTRL_SSP3_DIV)
250
251#define HW_CLKCTRL_GPMI (0x000000d0)
252
253#define BP_CLKCTRL_GPMI_CLKGATE 31
254#define BM_CLKCTRL_GPMI_CLKGATE 0x80000000
255#define BM_CLKCTRL_GPMI_BUSY 0x20000000
256#define BM_CLKCTRL_GPMI_DIV_FRAC_EN 0x00000400
257#define BP_CLKCTRL_GPMI_DIV 0
258#define BM_CLKCTRL_GPMI_DIV 0x000003FF
259#define BF_CLKCTRL_GPMI_DIV(v) \
260 (((v) << 0) & BM_CLKCTRL_GPMI_DIV)
261
262#define HW_CLKCTRL_SPDIF (0x000000e0)
263
264#define BP_CLKCTRL_SPDIF_CLKGATE 31
265#define BM_CLKCTRL_SPDIF_CLKGATE 0x80000000
266
267#define HW_CLKCTRL_EMI (0x000000f0)
268
269#define BP_CLKCTRL_EMI_CLKGATE 31
270#define BM_CLKCTRL_EMI_CLKGATE 0x80000000
271#define BM_CLKCTRL_EMI_SYNC_MODE_EN 0x40000000
272#define BM_CLKCTRL_EMI_BUSY_REF_XTAL 0x20000000
273#define BM_CLKCTRL_EMI_BUSY_REF_EMI 0x10000000
274#define BM_CLKCTRL_EMI_BUSY_REF_CPU 0x08000000
275#define BM_CLKCTRL_EMI_BUSY_SYNC_MODE 0x04000000
276#define BM_CLKCTRL_EMI_BUSY_DCC_RESYNC 0x00020000
277#define BM_CLKCTRL_EMI_DCC_RESYNC_ENABLE 0x00010000
278#define BP_CLKCTRL_EMI_DIV_XTAL 8
279#define BM_CLKCTRL_EMI_DIV_XTAL 0x00000F00
280#define BF_CLKCTRL_EMI_DIV_XTAL(v) \
281 (((v) << 8) & BM_CLKCTRL_EMI_DIV_XTAL)
282#define BP_CLKCTRL_EMI_DIV_EMI 0
283#define BM_CLKCTRL_EMI_DIV_EMI 0x0000003F
284#define BF_CLKCTRL_EMI_DIV_EMI(v) \
285 (((v) << 0) & BM_CLKCTRL_EMI_DIV_EMI)
286
287#define HW_CLKCTRL_SAIF0 (0x00000100)
288
289#define BP_CLKCTRL_SAIF0_CLKGATE 31
290#define BM_CLKCTRL_SAIF0_CLKGATE 0x80000000
291#define BM_CLKCTRL_SAIF0_BUSY 0x20000000
292#define BM_CLKCTRL_SAIF0_DIV_FRAC_EN 0x00010000
293#define BP_CLKCTRL_SAIF0_DIV 0
294#define BM_CLKCTRL_SAIF0_DIV 0x0000FFFF
295#define BF_CLKCTRL_SAIF0_DIV(v) \
296 (((v) << 0) & BM_CLKCTRL_SAIF0_DIV)
297
298#define HW_CLKCTRL_SAIF1 (0x00000110)
299
300#define BP_CLKCTRL_SAIF1_CLKGATE 31
301#define BM_CLKCTRL_SAIF1_CLKGATE 0x80000000
302#define BM_CLKCTRL_SAIF1_BUSY 0x20000000
303#define BM_CLKCTRL_SAIF1_DIV_FRAC_EN 0x00010000
304#define BP_CLKCTRL_SAIF1_DIV 0
305#define BM_CLKCTRL_SAIF1_DIV 0x0000FFFF
306#define BF_CLKCTRL_SAIF1_DIV(v) \
307 (((v) << 0) & BM_CLKCTRL_SAIF1_DIV)
308
309#define HW_CLKCTRL_DIS_LCDIF (0x00000120)
310
311#define BP_CLKCTRL_DIS_LCDIF_CLKGATE 31
312#define BM_CLKCTRL_DIS_LCDIF_CLKGATE 0x80000000
313#define BM_CLKCTRL_DIS_LCDIF_BUSY 0x20000000
314#define BM_CLKCTRL_DIS_LCDIF_DIV_FRAC_EN 0x00002000
315#define BP_CLKCTRL_DIS_LCDIF_DIV 0
316#define BM_CLKCTRL_DIS_LCDIF_DIV 0x00001FFF
317#define BF_CLKCTRL_DIS_LCDIF_DIV(v) \
318 (((v) << 0) & BM_CLKCTRL_DIS_LCDIF_DIV)
319
320#define HW_CLKCTRL_ETM (0x00000130)
321
322#define BM_CLKCTRL_ETM_CLKGATE 0x80000000
323#define BM_CLKCTRL_ETM_BUSY 0x20000000
324#define BM_CLKCTRL_ETM_DIV_FRAC_EN 0x00000080
325#define BP_CLKCTRL_ETM_DIV 0
326#define BM_CLKCTRL_ETM_DIV 0x0000007F
327#define BF_CLKCTRL_ETM_DIV(v) \
328 (((v) << 0) & BM_CLKCTRL_ETM_DIV)
329
330#define HW_CLKCTRL_ENET (0x00000140)
331
332#define BM_CLKCTRL_ENET_SLEEP 0x80000000
333#define BP_CLKCTRL_ENET_DISABLE 30
334#define BM_CLKCTRL_ENET_DISABLE 0x40000000
335#define BM_CLKCTRL_ENET_STATUS 0x20000000
336#define BM_CLKCTRL_ENET_BUSY_TIME 0x08000000
337#define BP_CLKCTRL_ENET_DIV_TIME 21
338#define BM_CLKCTRL_ENET_DIV_TIME 0x07E00000
339#define BF_CLKCTRL_ENET_DIV_TIME(v) \
340 (((v) << 21) & BM_CLKCTRL_ENET_DIV_TIME)
341#define BM_CLKCTRL_ENET_BUSY 0x08000000
342#define BP_CLKCTRL_ENET_DIV 21
343#define BM_CLKCTRL_ENET_DIV 0x07E00000
344#define BF_CLKCTRL_ENET_DIV(v) \
345 (((v) << 21) & BM_CLKCTRL_ENET_DIV)
346#define BP_CLKCTRL_ENET_TIME_SEL 19
347#define BM_CLKCTRL_ENET_TIME_SEL 0x00180000
348#define BF_CLKCTRL_ENET_TIME_SEL(v) \
349 (((v) << 19) & BM_CLKCTRL_ENET_TIME_SEL)
350#define BV_CLKCTRL_ENET_TIME_SEL__XTAL 0x0
351#define BV_CLKCTRL_ENET_TIME_SEL__PLL 0x1
352#define BV_CLKCTRL_ENET_TIME_SEL__RMII_CLK 0x2
353#define BV_CLKCTRL_ENET_TIME_SEL__UNDEFINED 0x3
354#define BM_CLKCTRL_ENET_CLK_OUT_EN 0x00040000
355#define BM_CLKCTRL_ENET_RESET_BY_SW_CHIP 0x00020000
356#define BM_CLKCTRL_ENET_RESET_BY_SW 0x00010000
357
358#define HW_CLKCTRL_HSADC (0x00000150)
359
360#define BM_CLKCTRL_HSADC_RESETB 0x40000000
361#define BP_CLKCTRL_HSADC_FREQDIV 28
362#define BM_CLKCTRL_HSADC_FREQDIV 0x30000000
363#define BF_CLKCTRL_HSADC_FREQDIV(v) \
364 (((v) << 28) & BM_CLKCTRL_HSADC_FREQDIV)
365
366#define HW_CLKCTRL_FLEXCAN (0x00000160)
367
368#define BP_CLKCTRL_FLEXCAN_STOP_CAN0 30
369#define BM_CLKCTRL_FLEXCAN_STOP_CAN0 0x40000000
370#define BM_CLKCTRL_FLEXCAN_CAN0_STATUS 0x20000000
371#define BP_CLKCTRL_FLEXCAN_STOP_CAN1 28
372#define BM_CLKCTRL_FLEXCAN_STOP_CAN1 0x10000000
373#define BM_CLKCTRL_FLEXCAN_CAN1_STATUS 0x08000000
374
375#define HW_CLKCTRL_FRAC0 (0x000001b0)
376#define HW_CLKCTRL_FRAC0_SET (0x000001b4)
377#define HW_CLKCTRL_FRAC0_CLR (0x000001b8)
378#define HW_CLKCTRL_FRAC0_TOG (0x000001bc)
379
380#define BP_CLKCTRL_FRAC0_CLKGATEIO0 31
381#define BM_CLKCTRL_FRAC0_CLKGATEIO0 0x80000000
382#define BM_CLKCTRL_FRAC0_IO0_STABLE 0x40000000
383#define BP_CLKCTRL_FRAC0_IO0FRAC 24
384#define BM_CLKCTRL_FRAC0_IO0FRAC 0x3F000000
385#define BF_CLKCTRL_FRAC0_IO0FRAC(v) \
386 (((v) << 24) & BM_CLKCTRL_FRAC0_IO0FRAC)
387#define BP_CLKCTRL_FRAC0_CLKGATEIO1 23
388#define BM_CLKCTRL_FRAC0_CLKGATEIO1 0x00800000
389#define BM_CLKCTRL_FRAC0_IO1_STABLE 0x00400000
390#define BP_CLKCTRL_FRAC0_IO1FRAC 16
391#define BM_CLKCTRL_FRAC0_IO1FRAC 0x003F0000
392#define BF_CLKCTRL_FRAC0_IO1FRAC(v) \
393 (((v) << 16) & BM_CLKCTRL_FRAC0_IO1FRAC)
394#define BP_CLKCTRL_FRAC0_CLKGATEEMI 15
395#define BM_CLKCTRL_FRAC0_CLKGATEEMI 0x00008000
396#define BM_CLKCTRL_FRAC0_EMI_STABLE 0x00004000
397#define BP_CLKCTRL_FRAC0_EMIFRAC 8
398#define BM_CLKCTRL_FRAC0_EMIFRAC 0x00003F00
399#define BF_CLKCTRL_FRAC0_EMIFRAC(v) \
400 (((v) << 8) & BM_CLKCTRL_FRAC0_EMIFRAC)
401#define BP_CLKCTRL_FRAC0_CLKGATECPU 7
402#define BM_CLKCTRL_FRAC0_CLKGATECPU 0x00000080
403#define BM_CLKCTRL_FRAC0_CPU_STABLE 0x00000040
404#define BP_CLKCTRL_FRAC0_CPUFRAC 0
405#define BM_CLKCTRL_FRAC0_CPUFRAC 0x0000003F
406#define BF_CLKCTRL_FRAC0_CPUFRAC(v) \
407 (((v) << 0) & BM_CLKCTRL_FRAC0_CPUFRAC)
408
409#define HW_CLKCTRL_FRAC1 (0x000001c0)
410#define HW_CLKCTRL_FRAC1_SET (0x000001c4)
411#define HW_CLKCTRL_FRAC1_CLR (0x000001c8)
412#define HW_CLKCTRL_FRAC1_TOG (0x000001cc)
413
414#define BP_CLKCTRL_FRAC1_CLKGATEGPMI 23
415#define BM_CLKCTRL_FRAC1_CLKGATEGPMI 0x00800000
416#define BM_CLKCTRL_FRAC1_GPMI_STABLE 0x00400000
417#define BP_CLKCTRL_FRAC1_GPMIFRAC 16
418#define BM_CLKCTRL_FRAC1_GPMIFRAC 0x003F0000
419#define BF_CLKCTRL_FRAC1_GPMIFRAC(v) \
420 (((v) << 16) & BM_CLKCTRL_FRAC1_GPMIFRAC)
421#define BP_CLKCTRL_FRAC1_CLKGATEHSADC 15
422#define BM_CLKCTRL_FRAC1_CLKGATEHSADC 0x00008000
423#define BM_CLKCTRL_FRAC1_HSADC_STABLE 0x00004000
424#define BP_CLKCTRL_FRAC1_HSADCFRAC 8
425#define BM_CLKCTRL_FRAC1_HSADCFRAC 0x00003F00
426#define BF_CLKCTRL_FRAC1_HSADCFRAC(v) \
427 (((v) << 8) & BM_CLKCTRL_FRAC1_HSADCFRAC)
428#define BP_CLKCTRL_FRAC1_CLKGATEPIX 7
429#define BM_CLKCTRL_FRAC1_CLKGATEPIX 0x00000080
430#define BM_CLKCTRL_FRAC1_PIX_STABLE 0x00000040
431#define BP_CLKCTRL_FRAC1_PIXFRAC 0
432#define BM_CLKCTRL_FRAC1_PIXFRAC 0x0000003F
433#define BF_CLKCTRL_FRAC1_PIXFRAC(v) \
434 (((v) << 0) & BM_CLKCTRL_FRAC1_PIXFRAC)
435
436#define HW_CLKCTRL_CLKSEQ (0x000001d0)
437#define HW_CLKCTRL_CLKSEQ_SET (0x000001d4)
438#define HW_CLKCTRL_CLKSEQ_CLR (0x000001d8)
439#define HW_CLKCTRL_CLKSEQ_TOG (0x000001dc)
440
441#define BM_CLKCTRL_CLKSEQ_BYPASS_CPU 0x00040000
442#define BM_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF 0x00004000
443#define BV_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF__BYPASS 0x1
444#define BV_CLKCTRL_CLKSEQ_BYPASS_DIS_LCDIF__PFD 0x0
445#define BM_CLKCTRL_CLKSEQ_BYPASS_ETM 0x00000100
446#define BM_CLKCTRL_CLKSEQ_BYPASS_EMI 0x00000080
447#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP3 0x00000040
448#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP2 0x00000020
449#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP1 0x00000010
450#define BM_CLKCTRL_CLKSEQ_BYPASS_SSP0 0x00000008
451#define BM_CLKCTRL_CLKSEQ_BYPASS_GPMI 0x00000004
452#define BM_CLKCTRL_CLKSEQ_BYPASS_SAIF1 0x00000002
453#define BM_CLKCTRL_CLKSEQ_BYPASS_SAIF0 0x00000001
454
455#define HW_CLKCTRL_RESET (0x000001e0)
456
457#define BM_CLKCTRL_RESET_WDOG_POR_DISABLE 0x00000020
458#define BM_CLKCTRL_RESET_EXTERNAL_RESET_ENABLE 0x00000010
459#define BM_CLKCTRL_RESET_THERMAL_RESET_ENABLE 0x00000008
460#define BM_CLKCTRL_RESET_THERMAL_RESET_DEFAULT 0x00000004
461#define BM_CLKCTRL_RESET_CHIP 0x00000002
462#define BM_CLKCTRL_RESET_DIG 0x00000001
463
464#define HW_CLKCTRL_STATUS (0x000001f0)
465
466#define BP_CLKCTRL_STATUS_CPU_LIMIT 30
467#define BM_CLKCTRL_STATUS_CPU_LIMIT 0xC0000000
468#define BF_CLKCTRL_STATUS_CPU_LIMIT(v) \
469 (((v) << 30) & BM_CLKCTRL_STATUS_CPU_LIMIT)
470
471#define HW_CLKCTRL_VERSION (0x00000200)
472
473#define BP_CLKCTRL_VERSION_MAJOR 24
474#define BM_CLKCTRL_VERSION_MAJOR 0xFF000000
475#define BF_CLKCTRL_VERSION_MAJOR(v) \
476 (((v) << 24) & BM_CLKCTRL_VERSION_MAJOR)
477#define BP_CLKCTRL_VERSION_MINOR 16
478#define BM_CLKCTRL_VERSION_MINOR 0x00FF0000
479#define BF_CLKCTRL_VERSION_MINOR(v) \
480 (((v) << 16) & BM_CLKCTRL_VERSION_MINOR)
481#define BP_CLKCTRL_VERSION_STEP 0
482#define BM_CLKCTRL_VERSION_STEP 0x0000FFFF
483#define BF_CLKCTRL_VERSION_STEP(v) \
484 (((v) << 0) & BM_CLKCTRL_VERSION_STEP)
485
486#endif /* __REGS_CLKCTRL_MX28_H__ */
diff --git a/arch/arm/mach-mxs/system.c b/arch/arm/mach-mxs/system.c
index 80ac1fca8a00..30042e23bfa7 100644
--- a/arch/arm/mach-mxs/system.c
+++ b/arch/arm/mach-mxs/system.c
@@ -37,8 +37,6 @@
37#define MXS_MODULE_CLKGATE (1 << 30) 37#define MXS_MODULE_CLKGATE (1 << 30)
38#define MXS_MODULE_SFTRST (1 << 31) 38#define MXS_MODULE_SFTRST (1 << 31)
39 39
40#define CLKCTRL_TIMEOUT 10 /* 10 ms */
41
42static void __iomem *mxs_clkctrl_reset_addr; 40static void __iomem *mxs_clkctrl_reset_addr;
43 41
44/* 42/*
@@ -139,17 +137,3 @@ error:
139 return -ETIMEDOUT; 137 return -ETIMEDOUT;
140} 138}
141EXPORT_SYMBOL(mxs_reset_block); 139EXPORT_SYMBOL(mxs_reset_block);
142
143int mxs_clkctrl_timeout(unsigned int reg_offset, unsigned int mask)
144{
145 unsigned long timeout = jiffies + msecs_to_jiffies(CLKCTRL_TIMEOUT);
146 while (readl_relaxed(MXS_IO_ADDRESS(MXS_CLKCTRL_BASE_ADDR)
147 + reg_offset) & mask) {
148 if (time_after(jiffies, timeout)) {
149 pr_err("Timeout at CLKCTRL + 0x%x\n", reg_offset);
150 return -ETIMEDOUT;
151 }
152 }
153
154 return 0;
155}
diff --git a/arch/arm/mach-mxs/timer.c b/arch/arm/mach-mxs/timer.c
index 564a63279f18..02d36de9c4e8 100644
--- a/arch/arm/mach-mxs/timer.c
+++ b/arch/arm/mach-mxs/timer.c
@@ -20,6 +20,7 @@
20 * MA 02110-1301, USA. 20 * MA 02110-1301, USA.
21 */ 21 */
22 22
23#include <linux/err.h>
23#include <linux/interrupt.h> 24#include <linux/interrupt.h>
24#include <linux/irq.h> 25#include <linux/irq.h>
25#include <linux/clockchips.h> 26#include <linux/clockchips.h>
@@ -243,8 +244,16 @@ static int __init mxs_clocksource_init(struct clk *timer_clk)
243 return 0; 244 return 0;
244} 245}
245 246
246void __init mxs_timer_init(struct clk *timer_clk, int irq) 247void __init mxs_timer_init(int irq)
247{ 248{
249 struct clk *timer_clk;
250
251 timer_clk = clk_get_sys("timrot", NULL);
252 if (IS_ERR(timer_clk)) {
253 pr_err("%s: failed to get clk\n", __func__);
254 return;
255 }
256
248 clk_prepare_enable(timer_clk); 257 clk_prepare_enable(timer_clk);
249 258
250 /* 259 /*
diff --git a/arch/arm/mach-omap1/board-ams-delta.c b/arch/arm/mach-omap1/board-ams-delta.c
index c1b681ef4cba..f2f8a5847018 100644
--- a/arch/arm/mach-omap1/board-ams-delta.c
+++ b/arch/arm/mach-omap1/board-ams-delta.c
@@ -595,7 +595,12 @@ gpio_free:
595 gpio_free(AMS_DELTA_GPIO_PIN_MODEM_IRQ); 595 gpio_free(AMS_DELTA_GPIO_PIN_MODEM_IRQ);
596 return err; 596 return err;
597} 597}
598late_initcall(late_init); 598
599static void __init ams_delta_init_late(void)
600{
601 omap1_init_late();
602 late_init();
603}
599 604
600static void __init ams_delta_map_io(void) 605static void __init ams_delta_map_io(void)
601{ 606{
@@ -611,6 +616,7 @@ MACHINE_START(AMS_DELTA, "Amstrad E3 (Delta)")
611 .reserve = omap_reserve, 616 .reserve = omap_reserve,
612 .init_irq = omap1_init_irq, 617 .init_irq = omap1_init_irq,
613 .init_machine = ams_delta_init, 618 .init_machine = ams_delta_init,
619 .init_late = ams_delta_init_late,
614 .timer = &omap1_timer, 620 .timer = &omap1_timer,
615 .restart = omap1_restart, 621 .restart = omap1_restart,
616MACHINE_END 622MACHINE_END
diff --git a/arch/arm/mach-omap1/board-fsample.c b/arch/arm/mach-omap1/board-fsample.c
index 4a4afb371022..c7364fdbda05 100644
--- a/arch/arm/mach-omap1/board-fsample.c
+++ b/arch/arm/mach-omap1/board-fsample.c
@@ -369,6 +369,7 @@ MACHINE_START(OMAP_FSAMPLE, "OMAP730 F-Sample")
369 .reserve = omap_reserve, 369 .reserve = omap_reserve,
370 .init_irq = omap1_init_irq, 370 .init_irq = omap1_init_irq,
371 .init_machine = omap_fsample_init, 371 .init_machine = omap_fsample_init,
372 .init_late = omap1_init_late,
372 .timer = &omap1_timer, 373 .timer = &omap1_timer,
373 .restart = omap1_restart, 374 .restart = omap1_restart,
374MACHINE_END 375MACHINE_END
diff --git a/arch/arm/mach-omap1/board-generic.c b/arch/arm/mach-omap1/board-generic.c
index 9a5fe581bc1c..e75e2d55a2d7 100644
--- a/arch/arm/mach-omap1/board-generic.c
+++ b/arch/arm/mach-omap1/board-generic.c
@@ -88,6 +88,7 @@ MACHINE_START(OMAP_GENERIC, "Generic OMAP1510/1610/1710")
88 .reserve = omap_reserve, 88 .reserve = omap_reserve,
89 .init_irq = omap1_init_irq, 89 .init_irq = omap1_init_irq,
90 .init_machine = omap_generic_init, 90 .init_machine = omap_generic_init,
91 .init_late = omap1_init_late,
91 .timer = &omap1_timer, 92 .timer = &omap1_timer,
92 .restart = omap1_restart, 93 .restart = omap1_restart,
93MACHINE_END 94MACHINE_END
diff --git a/arch/arm/mach-omap1/board-h2.c b/arch/arm/mach-omap1/board-h2.c
index 057ec13f0649..7e503686f7af 100644
--- a/arch/arm/mach-omap1/board-h2.c
+++ b/arch/arm/mach-omap1/board-h2.c
@@ -431,6 +431,7 @@ MACHINE_START(OMAP_H2, "TI-H2")
431 .reserve = omap_reserve, 431 .reserve = omap_reserve,
432 .init_irq = omap1_init_irq, 432 .init_irq = omap1_init_irq,
433 .init_machine = h2_init, 433 .init_machine = h2_init,
434 .init_late = omap1_init_late,
434 .timer = &omap1_timer, 435 .timer = &omap1_timer,
435 .restart = omap1_restart, 436 .restart = omap1_restart,
436MACHINE_END 437MACHINE_END
diff --git a/arch/arm/mach-omap1/board-h3.c b/arch/arm/mach-omap1/board-h3.c
index f6ddf8759657..9fb03f189d93 100644
--- a/arch/arm/mach-omap1/board-h3.c
+++ b/arch/arm/mach-omap1/board-h3.c
@@ -425,6 +425,7 @@ MACHINE_START(OMAP_H3, "TI OMAP1710 H3 board")
425 .reserve = omap_reserve, 425 .reserve = omap_reserve,
426 .init_irq = omap1_init_irq, 426 .init_irq = omap1_init_irq,
427 .init_machine = h3_init, 427 .init_machine = h3_init,
428 .init_late = omap1_init_late,
428 .timer = &omap1_timer, 429 .timer = &omap1_timer,
429 .restart = omap1_restart, 430 .restart = omap1_restart,
430MACHINE_END 431MACHINE_END
diff --git a/arch/arm/mach-omap1/board-htcherald.c b/arch/arm/mach-omap1/board-htcherald.c
index 60c06ee23855..118a9d4a4c54 100644
--- a/arch/arm/mach-omap1/board-htcherald.c
+++ b/arch/arm/mach-omap1/board-htcherald.c
@@ -605,6 +605,7 @@ MACHINE_START(HERALD, "HTC Herald")
605 .reserve = omap_reserve, 605 .reserve = omap_reserve,
606 .init_irq = omap1_init_irq, 606 .init_irq = omap1_init_irq,
607 .init_machine = htcherald_init, 607 .init_machine = htcherald_init,
608 .init_late = omap1_init_late,
608 .timer = &omap1_timer, 609 .timer = &omap1_timer,
609 .restart = omap1_restart, 610 .restart = omap1_restart,
610MACHINE_END 611MACHINE_END
diff --git a/arch/arm/mach-omap1/board-innovator.c b/arch/arm/mach-omap1/board-innovator.c
index 67d7fd57a692..7970223a559d 100644
--- a/arch/arm/mach-omap1/board-innovator.c
+++ b/arch/arm/mach-omap1/board-innovator.c
@@ -457,6 +457,7 @@ MACHINE_START(OMAP_INNOVATOR, "TI-Innovator")
457 .reserve = omap_reserve, 457 .reserve = omap_reserve,
458 .init_irq = omap1_init_irq, 458 .init_irq = omap1_init_irq,
459 .init_machine = innovator_init, 459 .init_machine = innovator_init,
460 .init_late = omap1_init_late,
460 .timer = &omap1_timer, 461 .timer = &omap1_timer,
461 .restart = omap1_restart, 462 .restart = omap1_restart,
462MACHINE_END 463MACHINE_END
diff --git a/arch/arm/mach-omap1/board-nokia770.c b/arch/arm/mach-omap1/board-nokia770.c
index d21dcc2fbc5a..7212ae97f44a 100644
--- a/arch/arm/mach-omap1/board-nokia770.c
+++ b/arch/arm/mach-omap1/board-nokia770.c
@@ -255,6 +255,7 @@ MACHINE_START(NOKIA770, "Nokia 770")
255 .reserve = omap_reserve, 255 .reserve = omap_reserve,
256 .init_irq = omap1_init_irq, 256 .init_irq = omap1_init_irq,
257 .init_machine = omap_nokia770_init, 257 .init_machine = omap_nokia770_init,
258 .init_late = omap1_init_late,
258 .timer = &omap1_timer, 259 .timer = &omap1_timer,
259 .restart = omap1_restart, 260 .restart = omap1_restart,
260MACHINE_END 261MACHINE_END
diff --git a/arch/arm/mach-omap1/board-osk.c b/arch/arm/mach-omap1/board-osk.c
index a5f85dda3f69..da8d872d3d1c 100644
--- a/arch/arm/mach-omap1/board-osk.c
+++ b/arch/arm/mach-omap1/board-osk.c
@@ -574,6 +574,7 @@ MACHINE_START(OMAP_OSK, "TI-OSK")
574 .reserve = omap_reserve, 574 .reserve = omap_reserve,
575 .init_irq = omap1_init_irq, 575 .init_irq = omap1_init_irq,
576 .init_machine = osk_init, 576 .init_machine = osk_init,
577 .init_late = omap1_init_late,
577 .timer = &omap1_timer, 578 .timer = &omap1_timer,
578 .restart = omap1_restart, 579 .restart = omap1_restart,
579MACHINE_END 580MACHINE_END
diff --git a/arch/arm/mach-omap1/board-palmte.c b/arch/arm/mach-omap1/board-palmte.c
index a60e6c22f816..949b62a73693 100644
--- a/arch/arm/mach-omap1/board-palmte.c
+++ b/arch/arm/mach-omap1/board-palmte.c
@@ -267,6 +267,7 @@ MACHINE_START(OMAP_PALMTE, "OMAP310 based Palm Tungsten E")
267 .reserve = omap_reserve, 267 .reserve = omap_reserve,
268 .init_irq = omap1_init_irq, 268 .init_irq = omap1_init_irq,
269 .init_machine = omap_palmte_init, 269 .init_machine = omap_palmte_init,
270 .init_late = omap1_init_late,
270 .timer = &omap1_timer, 271 .timer = &omap1_timer,
271 .restart = omap1_restart, 272 .restart = omap1_restart,
272MACHINE_END 273MACHINE_END
diff --git a/arch/arm/mach-omap1/board-palmtt.c b/arch/arm/mach-omap1/board-palmtt.c
index 8d854878547b..7f1e1cf2bf46 100644
--- a/arch/arm/mach-omap1/board-palmtt.c
+++ b/arch/arm/mach-omap1/board-palmtt.c
@@ -313,6 +313,7 @@ MACHINE_START(OMAP_PALMTT, "OMAP1510 based Palm Tungsten|T")
313 .reserve = omap_reserve, 313 .reserve = omap_reserve,
314 .init_irq = omap1_init_irq, 314 .init_irq = omap1_init_irq,
315 .init_machine = omap_palmtt_init, 315 .init_machine = omap_palmtt_init,
316 .init_late = omap1_init_late,
316 .timer = &omap1_timer, 317 .timer = &omap1_timer,
317 .restart = omap1_restart, 318 .restart = omap1_restart,
318MACHINE_END 319MACHINE_END
diff --git a/arch/arm/mach-omap1/board-palmz71.c b/arch/arm/mach-omap1/board-palmz71.c
index 61ed4f0247ce..3c71c6bace2c 100644
--- a/arch/arm/mach-omap1/board-palmz71.c
+++ b/arch/arm/mach-omap1/board-palmz71.c
@@ -330,6 +330,7 @@ MACHINE_START(OMAP_PALMZ71, "OMAP310 based Palm Zire71")
330 .reserve = omap_reserve, 330 .reserve = omap_reserve,
331 .init_irq = omap1_init_irq, 331 .init_irq = omap1_init_irq,
332 .init_machine = omap_palmz71_init, 332 .init_machine = omap_palmz71_init,
333 .init_late = omap1_init_late,
333 .timer = &omap1_timer, 334 .timer = &omap1_timer,
334 .restart = omap1_restart, 335 .restart = omap1_restart,
335MACHINE_END 336MACHINE_END
diff --git a/arch/arm/mach-omap1/board-perseus2.c b/arch/arm/mach-omap1/board-perseus2.c
index a2c88890e767..f2cb24387c22 100644
--- a/arch/arm/mach-omap1/board-perseus2.c
+++ b/arch/arm/mach-omap1/board-perseus2.c
@@ -331,6 +331,7 @@ MACHINE_START(OMAP_PERSEUS2, "OMAP730 Perseus2")
331 .reserve = omap_reserve, 331 .reserve = omap_reserve,
332 .init_irq = omap1_init_irq, 332 .init_irq = omap1_init_irq,
333 .init_machine = omap_perseus2_init, 333 .init_machine = omap_perseus2_init,
334 .init_late = omap1_init_late,
334 .timer = &omap1_timer, 335 .timer = &omap1_timer,
335 .restart = omap1_restart, 336 .restart = omap1_restart,
336MACHINE_END 337MACHINE_END
diff --git a/arch/arm/mach-omap1/board-sx1.c b/arch/arm/mach-omap1/board-sx1.c
index f34cb74a9f41..3b7b82b13684 100644
--- a/arch/arm/mach-omap1/board-sx1.c
+++ b/arch/arm/mach-omap1/board-sx1.c
@@ -407,6 +407,7 @@ MACHINE_START(SX1, "OMAP310 based Siemens SX1")
407 .reserve = omap_reserve, 407 .reserve = omap_reserve,
408 .init_irq = omap1_init_irq, 408 .init_irq = omap1_init_irq,
409 .init_machine = omap_sx1_init, 409 .init_machine = omap_sx1_init,
410 .init_late = omap1_init_late,
410 .timer = &omap1_timer, 411 .timer = &omap1_timer,
411 .restart = omap1_restart, 412 .restart = omap1_restart,
412MACHINE_END 413MACHINE_END
diff --git a/arch/arm/mach-omap1/board-voiceblue.c b/arch/arm/mach-omap1/board-voiceblue.c
index 37232d04233f..afd67f0ec495 100644
--- a/arch/arm/mach-omap1/board-voiceblue.c
+++ b/arch/arm/mach-omap1/board-voiceblue.c
@@ -294,6 +294,7 @@ MACHINE_START(VOICEBLUE, "VoiceBlue OMAP5910")
294 .reserve = omap_reserve, 294 .reserve = omap_reserve,
295 .init_irq = omap1_init_irq, 295 .init_irq = omap1_init_irq,
296 .init_machine = voiceblue_init, 296 .init_machine = voiceblue_init,
297 .init_late = omap1_init_late,
297 .timer = &omap1_timer, 298 .timer = &omap1_timer,
298 .restart = voiceblue_restart, 299 .restart = voiceblue_restart,
299MACHINE_END 300MACHINE_END
diff --git a/arch/arm/mach-omap1/common.h b/arch/arm/mach-omap1/common.h
index bb7779b57795..c2552b24f9f2 100644
--- a/arch/arm/mach-omap1/common.h
+++ b/arch/arm/mach-omap1/common.h
@@ -53,8 +53,18 @@ static inline void omap16xx_map_io(void)
53} 53}
54#endif 54#endif
55 55
56#ifdef CONFIG_OMAP_SERIAL_WAKE
57int omap_serial_wakeup_init(void);
58#else
59static inline int omap_serial_wakeup_init(void)
60{
61 return 0;
62}
63#endif
64
56void omap1_init_early(void); 65void omap1_init_early(void);
57void omap1_init_irq(void); 66void omap1_init_irq(void);
67void omap1_init_late(void);
58void omap1_restart(char, const char *); 68void omap1_restart(char, const char *);
59 69
60extern void __init omap_check_revision(void); 70extern void __init omap_check_revision(void);
@@ -63,7 +73,14 @@ extern void omap1_nand_cmd_ctl(struct mtd_info *mtd, int cmd,
63 unsigned int ctrl); 73 unsigned int ctrl);
64 74
65extern struct sys_timer omap1_timer; 75extern struct sys_timer omap1_timer;
66extern bool omap_32k_timer_init(void); 76#ifdef CONFIG_OMAP_32K_TIMER
77extern int omap_32k_timer_init(void);
78#else
79static inline int __init omap_32k_timer_init(void)
80{
81 return -ENODEV;
82}
83#endif
67 84
68extern u32 omap_irq_flags; 85extern u32 omap_irq_flags;
69 86
diff --git a/arch/arm/mach-omap1/devices.c b/arch/arm/mach-omap1/devices.c
index dcd8ddbec2bb..fa1fa4deb6aa 100644
--- a/arch/arm/mach-omap1/devices.c
+++ b/arch/arm/mach-omap1/devices.c
@@ -22,6 +22,7 @@
22#include <plat/tc.h> 22#include <plat/tc.h>
23#include <plat/board.h> 23#include <plat/board.h>
24#include <plat/mux.h> 24#include <plat/mux.h>
25#include <plat/dma.h>
25#include <plat/mmc.h> 26#include <plat/mmc.h>
26#include <plat/omap7xx.h> 27#include <plat/omap7xx.h>
27 28
@@ -31,6 +32,22 @@
31#include "common.h" 32#include "common.h"
32#include "clock.h" 33#include "clock.h"
33 34
35#if defined(CONFIG_SND_SOC) || defined(CONFIG_SND_SOC_MODULE)
36
37static struct platform_device omap_pcm = {
38 .name = "omap-pcm-audio",
39 .id = -1,
40};
41
42static void omap_init_audio(void)
43{
44 platform_device_register(&omap_pcm);
45}
46
47#else
48static inline void omap_init_audio(void) {}
49#endif
50
34/*-------------------------------------------------------------------------*/ 51/*-------------------------------------------------------------------------*/
35 52
36#if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE) 53#if defined(CONFIG_RTC_DRV_OMAP) || defined(CONFIG_RTC_DRV_OMAP_MODULE)
@@ -128,6 +145,56 @@ static inline void omap1_mmc_mux(struct omap_mmc_platform_data *mmc_controller,
128 } 145 }
129} 146}
130 147
148#define OMAP_MMC_NR_RES 4
149
150/*
151 * Register MMC devices.
152 */
153static int __init omap_mmc_add(const char *name, int id, unsigned long base,
154 unsigned long size, unsigned int irq,
155 unsigned rx_req, unsigned tx_req,
156 struct omap_mmc_platform_data *data)
157{
158 struct platform_device *pdev;
159 struct resource res[OMAP_MMC_NR_RES];
160 int ret;
161
162 pdev = platform_device_alloc(name, id);
163 if (!pdev)
164 return -ENOMEM;
165
166 memset(res, 0, OMAP_MMC_NR_RES * sizeof(struct resource));
167 res[0].start = base;
168 res[0].end = base + size - 1;
169 res[0].flags = IORESOURCE_MEM;
170 res[1].start = res[1].end = irq;
171 res[1].flags = IORESOURCE_IRQ;
172 res[2].start = rx_req;
173 res[2].name = "rx";
174 res[2].flags = IORESOURCE_DMA;
175 res[3].start = tx_req;
176 res[3].name = "tx";
177 res[3].flags = IORESOURCE_DMA;
178
179 ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
180 if (ret == 0)
181 ret = platform_device_add_data(pdev, data, sizeof(*data));
182 if (ret)
183 goto fail;
184
185 ret = platform_device_add(pdev);
186 if (ret)
187 goto fail;
188
189 /* return device handle to board setup code */
190 data->dev = &pdev->dev;
191 return 0;
192
193fail:
194 platform_device_put(pdev);
195 return ret;
196}
197
131void __init omap1_init_mmc(struct omap_mmc_platform_data **mmc_data, 198void __init omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
132 int nr_controllers) 199 int nr_controllers)
133{ 200{
@@ -135,6 +202,7 @@ void __init omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
135 202
136 for (i = 0; i < nr_controllers; i++) { 203 for (i = 0; i < nr_controllers; i++) {
137 unsigned long base, size; 204 unsigned long base, size;
205 unsigned rx_req, tx_req;
138 unsigned int irq = 0; 206 unsigned int irq = 0;
139 207
140 if (!mmc_data[i]) 208 if (!mmc_data[i])
@@ -146,19 +214,24 @@ void __init omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
146 case 0: 214 case 0:
147 base = OMAP1_MMC1_BASE; 215 base = OMAP1_MMC1_BASE;
148 irq = INT_MMC; 216 irq = INT_MMC;
217 rx_req = OMAP_DMA_MMC_RX;
218 tx_req = OMAP_DMA_MMC_TX;
149 break; 219 break;
150 case 1: 220 case 1:
151 if (!cpu_is_omap16xx()) 221 if (!cpu_is_omap16xx())
152 return; 222 return;
153 base = OMAP1_MMC2_BASE; 223 base = OMAP1_MMC2_BASE;
154 irq = INT_1610_MMC2; 224 irq = INT_1610_MMC2;
225 rx_req = OMAP_DMA_MMC2_RX;
226 tx_req = OMAP_DMA_MMC2_TX;
155 break; 227 break;
156 default: 228 default:
157 continue; 229 continue;
158 } 230 }
159 size = OMAP1_MMC_SIZE; 231 size = OMAP1_MMC_SIZE;
160 232
161 omap_mmc_add("mmci-omap", i, base, size, irq, mmc_data[i]); 233 omap_mmc_add("mmci-omap", i, base, size, irq,
234 rx_req, tx_req, mmc_data[i]);
162 }; 235 };
163} 236}
164 237
@@ -242,23 +315,48 @@ void __init omap1_camera_init(void *info)
242 315
243static inline void omap_init_sti(void) {} 316static inline void omap_init_sti(void) {}
244 317
245#if defined(CONFIG_SND_SOC) || defined(CONFIG_SND_SOC_MODULE) 318/* Numbering for the SPI-capable controllers when used for SPI:
319 * spi = 1
320 * uwire = 2
321 * mmc1..2 = 3..4
322 * mcbsp1..3 = 5..7
323 */
246 324
247static struct platform_device omap_pcm = { 325#if defined(CONFIG_SPI_OMAP_UWIRE) || defined(CONFIG_SPI_OMAP_UWIRE_MODULE)
248 .name = "omap-pcm-audio", 326
249 .id = -1, 327#define OMAP_UWIRE_BASE 0xfffb3000
328
329static struct resource uwire_resources[] = {
330 {
331 .start = OMAP_UWIRE_BASE,
332 .end = OMAP_UWIRE_BASE + 0x20,
333 .flags = IORESOURCE_MEM,
334 },
250}; 335};
251 336
252static void omap_init_audio(void) 337static struct platform_device omap_uwire_device = {
338 .name = "omap_uwire",
339 .id = -1,
340 .num_resources = ARRAY_SIZE(uwire_resources),
341 .resource = uwire_resources,
342};
343
344static void omap_init_uwire(void)
253{ 345{
254 platform_device_register(&omap_pcm); 346 /* FIXME define and use a boot tag; not all boards will be hooking
255} 347 * up devices to the microwire controller, and multi-board configs
348 * mean that CONFIG_SPI_OMAP_UWIRE may be configured anyway...
349 */
256 350
351 /* board-specific code must configure chipselects (only a few
352 * are normally used) and SCLK/SDI/SDO (each has two choices).
353 */
354 (void) platform_device_register(&omap_uwire_device);
355}
257#else 356#else
258static inline void omap_init_audio(void) {} 357static inline void omap_init_uwire(void) {}
259#endif 358#endif
260 359
261/*-------------------------------------------------------------------------*/
262 360
263/* 361/*
264 * This gets called after board-specific INIT_MACHINE, and initializes most 362 * This gets called after board-specific INIT_MACHINE, and initializes most
@@ -292,11 +390,12 @@ static int __init omap1_init_devices(void)
292 * in alphabetical order so they're easier to sort through. 390 * in alphabetical order so they're easier to sort through.
293 */ 391 */
294 392
393 omap_init_audio();
295 omap_init_mbox(); 394 omap_init_mbox();
296 omap_init_rtc(); 395 omap_init_rtc();
297 omap_init_spi100k(); 396 omap_init_spi100k();
298 omap_init_sti(); 397 omap_init_sti();
299 omap_init_audio(); 398 omap_init_uwire();
300 399
301 return 0; 400 return 0;
302} 401}
diff --git a/arch/arm/mach-omap1/io.c b/arch/arm/mach-omap1/io.c
index 71ce017bf5d8..6c95a59f0f16 100644
--- a/arch/arm/mach-omap1/io.c
+++ b/arch/arm/mach-omap1/io.c
@@ -137,6 +137,11 @@ void __init omap1_init_early(void)
137 omap_init_consistent_dma_size(); 137 omap_init_consistent_dma_size();
138} 138}
139 139
140void __init omap1_init_late(void)
141{
142 omap_serial_wakeup_init();
143}
144
140/* 145/*
141 * NOTE: Please use ioremap + __raw_read/write where possible instead of these 146 * NOTE: Please use ioremap + __raw_read/write where possible instead of these
142 */ 147 */
diff --git a/arch/arm/mach-omap1/serial.c b/arch/arm/mach-omap1/serial.c
index 93ae8f29727e..6809c9e56c93 100644
--- a/arch/arm/mach-omap1/serial.c
+++ b/arch/arm/mach-omap1/serial.c
@@ -237,7 +237,7 @@ static void __init omap_serial_set_port_wakeup(int gpio_nr)
237 enable_irq_wake(gpio_to_irq(gpio_nr)); 237 enable_irq_wake(gpio_to_irq(gpio_nr));
238} 238}
239 239
240static int __init omap_serial_wakeup_init(void) 240int __init omap_serial_wakeup_init(void)
241{ 241{
242 if (!cpu_is_omap16xx()) 242 if (!cpu_is_omap16xx())
243 return 0; 243 return 0;
@@ -251,7 +251,6 @@ static int __init omap_serial_wakeup_init(void)
251 251
252 return 0; 252 return 0;
253} 253}
254late_initcall(omap_serial_wakeup_init);
255 254
256#endif /* CONFIG_OMAP_SERIAL_WAKE */ 255#endif /* CONFIG_OMAP_SERIAL_WAKE */
257 256
diff --git a/arch/arm/mach-omap1/time.c b/arch/arm/mach-omap1/time.c
index 4d8dd9a1b04c..4062480bfec7 100644
--- a/arch/arm/mach-omap1/time.c
+++ b/arch/arm/mach-omap1/time.c
@@ -232,20 +232,6 @@ static inline void omap_mpu_timer_init(void)
232} 232}
233#endif /* CONFIG_OMAP_MPU_TIMER */ 233#endif /* CONFIG_OMAP_MPU_TIMER */
234 234
235static inline int omap_32k_timer_usable(void)
236{
237 int res = false;
238
239 if (cpu_is_omap730() || cpu_is_omap15xx())
240 return res;
241
242#ifdef CONFIG_OMAP_32K_TIMER
243 res = omap_32k_timer_init();
244#endif
245
246 return res;
247}
248
249/* 235/*
250 * --------------------------------------------------------------------------- 236 * ---------------------------------------------------------------------------
251 * Timer initialization 237 * Timer initialization
@@ -253,7 +239,7 @@ static inline int omap_32k_timer_usable(void)
253 */ 239 */
254static void __init omap1_timer_init(void) 240static void __init omap1_timer_init(void)
255{ 241{
256 if (!omap_32k_timer_usable()) 242 if (omap_32k_timer_init() != 0)
257 omap_mpu_timer_init(); 243 omap_mpu_timer_init();
258} 244}
259 245
diff --git a/arch/arm/mach-omap1/timer32k.c b/arch/arm/mach-omap1/timer32k.c
index 325b9a0aa4a0..eae49c3980c9 100644
--- a/arch/arm/mach-omap1/timer32k.c
+++ b/arch/arm/mach-omap1/timer32k.c
@@ -71,6 +71,7 @@
71 71
72/* 16xx specific defines */ 72/* 16xx specific defines */
73#define OMAP1_32K_TIMER_BASE 0xfffb9000 73#define OMAP1_32K_TIMER_BASE 0xfffb9000
74#define OMAP1_32KSYNC_TIMER_BASE 0xfffbc400
74#define OMAP1_32K_TIMER_CR 0x08 75#define OMAP1_32K_TIMER_CR 0x08
75#define OMAP1_32K_TIMER_TVR 0x00 76#define OMAP1_32K_TIMER_TVR 0x00
76#define OMAP1_32K_TIMER_TCR 0x04 77#define OMAP1_32K_TIMER_TCR 0x04
@@ -182,10 +183,29 @@ static __init void omap_init_32k_timer(void)
182 * Timer initialization 183 * Timer initialization
183 * --------------------------------------------------------------------------- 184 * ---------------------------------------------------------------------------
184 */ 185 */
185bool __init omap_32k_timer_init(void) 186int __init omap_32k_timer_init(void)
186{ 187{
187 omap_init_clocksource_32k(); 188 int ret = -ENODEV;
188 omap_init_32k_timer();
189 189
190 return true; 190 if (cpu_is_omap16xx()) {
191 void __iomem *base;
192 struct clk *sync32k_ick;
193
194 base = ioremap(OMAP1_32KSYNC_TIMER_BASE, SZ_1K);
195 if (!base) {
196 pr_err("32k_counter: failed to map base addr\n");
197 return -ENODEV;
198 }
199
200 sync32k_ick = clk_get(NULL, "omap_32ksync_ick");
201 if (!IS_ERR(sync32k_ick))
202 clk_enable(sync32k_ick);
203
204 ret = omap_init_clocksource_32k(base);
205 }
206
207 if (!ret)
208 omap_init_32k_timer();
209
210 return ret;
191} 211}
diff --git a/arch/arm/mach-omap2/Kconfig b/arch/arm/mach-omap2/Kconfig
index 964ee67a3b77..4cf5142f22cc 100644
--- a/arch/arm/mach-omap2/Kconfig
+++ b/arch/arm/mach-omap2/Kconfig
@@ -78,12 +78,12 @@ config SOC_OMAP3430
78 default y 78 default y
79 select ARCH_OMAP_OTG 79 select ARCH_OMAP_OTG
80 80
81config SOC_OMAPTI81XX 81config SOC_TI81XX
82 bool "TI81XX support" 82 bool "TI81XX support"
83 depends on ARCH_OMAP3 83 depends on ARCH_OMAP3
84 default y 84 default y
85 85
86config SOC_OMAPAM33XX 86config SOC_AM33XX
87 bool "AM33XX support" 87 bool "AM33XX support"
88 depends on ARCH_OMAP3 88 depends on ARCH_OMAP3
89 default y 89 default y
@@ -320,12 +320,12 @@ config MACH_OMAP_3630SDP
320 320
321config MACH_TI8168EVM 321config MACH_TI8168EVM
322 bool "TI8168 Evaluation Module" 322 bool "TI8168 Evaluation Module"
323 depends on SOC_OMAPTI81XX 323 depends on SOC_TI81XX
324 default y 324 default y
325 325
326config MACH_TI8148EVM 326config MACH_TI8148EVM
327 bool "TI8148 Evaluation Module" 327 bool "TI8148 Evaluation Module"
328 depends on SOC_OMAPTI81XX 328 depends on SOC_TI81XX
329 default y 329 default y
330 330
331config MACH_OMAP_4430SDP 331config MACH_OMAP_4430SDP
diff --git a/arch/arm/mach-omap2/Makefile b/arch/arm/mach-omap2/Makefile
index 385c083d24b2..fa742f3c2629 100644
--- a/arch/arm/mach-omap2/Makefile
+++ b/arch/arm/mach-omap2/Makefile
@@ -24,10 +24,11 @@ endif
24obj-$(CONFIG_TWL4030_CORE) += omap_twl.o 24obj-$(CONFIG_TWL4030_CORE) += omap_twl.o
25 25
26# SMP support ONLY available for OMAP4 26# SMP support ONLY available for OMAP4
27
27obj-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o 28obj-$(CONFIG_SMP) += omap-smp.o omap-headsmp.o
28obj-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o 29obj-$(CONFIG_HOTPLUG_CPU) += omap-hotplug.o
29obj-$(CONFIG_ARCH_OMAP4) += omap4-common.o omap-wakeupgen.o \ 30obj-$(CONFIG_ARCH_OMAP4) += omap4-common.o omap-wakeupgen.o
30 sleep44xx.o 31obj-$(CONFIG_ARCH_OMAP4) += sleep44xx.o
31 32
32plus_sec := $(call as-instr,.arch_extension sec,+sec) 33plus_sec := $(call as-instr,.arch_extension sec,+sec)
33AFLAGS_omap-headsmp.o :=-Wa,-march=armv7-a$(plus_sec) 34AFLAGS_omap-headsmp.o :=-Wa,-march=armv7-a$(plus_sec)
@@ -64,10 +65,10 @@ endif
64ifeq ($(CONFIG_PM),y) 65ifeq ($(CONFIG_PM),y)
65obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o 66obj-$(CONFIG_ARCH_OMAP2) += pm24xx.o
66obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o 67obj-$(CONFIG_ARCH_OMAP2) += sleep24xx.o
67obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o \ 68obj-$(CONFIG_ARCH_OMAP3) += pm34xx.o sleep34xx.o
68 cpuidle34xx.o 69obj-$(CONFIG_ARCH_OMAP3) += cpuidle34xx.o
69obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o omap-mpuss-lowpower.o \ 70obj-$(CONFIG_ARCH_OMAP4) += pm44xx.o omap-mpuss-lowpower.o
70 cpuidle44xx.o 71obj-$(CONFIG_ARCH_OMAP4) += cpuidle44xx.o
71obj-$(CONFIG_PM_DEBUG) += pm-debug.o 72obj-$(CONFIG_PM_DEBUG) += pm-debug.o
72obj-$(CONFIG_OMAP_SMARTREFLEX) += sr_device.o smartreflex.o 73obj-$(CONFIG_OMAP_SMARTREFLEX) += sr_device.o smartreflex.o
73obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o 74obj-$(CONFIG_OMAP_SMARTREFLEX_CLASS3) += smartreflex-class3.o
@@ -84,90 +85,86 @@ endif
84# PRCM 85# PRCM
85obj-y += prm_common.o 86obj-y += prm_common.o
86obj-$(CONFIG_ARCH_OMAP2) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o 87obj-$(CONFIG_ARCH_OMAP2) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o
87obj-$(CONFIG_ARCH_OMAP3) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o \ 88obj-$(CONFIG_ARCH_OMAP3) += prcm.o cm2xxx_3xxx.o prm2xxx_3xxx.o
88 vc3xxx_data.o vp3xxx_data.o 89obj-$(CONFIG_ARCH_OMAP3) += vc3xxx_data.o vp3xxx_data.o
89# XXX The presence of cm2xxx_3xxx.o on the line below is temporary and 90obj-$(CONFIG_ARCH_OMAP4) += prcm.o cminst44xx.o cm44xx.o
90# will be removed once the OMAP4 part of the codebase is converted to 91obj-$(CONFIG_ARCH_OMAP4) += prcm_mpu44xx.o prminst44xx.o
91# use OMAP4-specific PRCM functions. 92obj-$(CONFIG_ARCH_OMAP4) += vc44xx_data.o vp44xx_data.o prm44xx.o
92obj-$(CONFIG_ARCH_OMAP4) += prcm.o cm2xxx_3xxx.o cminst44xx.o \
93 cm44xx.o prcm_mpu44xx.o \
94 prminst44xx.o vc44xx_data.o \
95 vp44xx_data.o prm44xx.o
96 93
97# OMAP voltage domains 94# OMAP voltage domains
98voltagedomain-common := voltage.o vc.o vp.o 95voltagedomain-common := voltage.o vc.o vp.o
99obj-$(CONFIG_ARCH_OMAP2) += $(voltagedomain-common) \ 96obj-$(CONFIG_ARCH_OMAP2) += $(voltagedomain-common)
100 voltagedomains2xxx_data.o 97obj-$(CONFIG_ARCH_OMAP2) += voltagedomains2xxx_data.o
101obj-$(CONFIG_ARCH_OMAP3) += $(voltagedomain-common) \ 98obj-$(CONFIG_ARCH_OMAP3) += $(voltagedomain-common)
102 voltagedomains3xxx_data.o 99obj-$(CONFIG_ARCH_OMAP3) += voltagedomains3xxx_data.o
103obj-$(CONFIG_ARCH_OMAP4) += $(voltagedomain-common) \ 100obj-$(CONFIG_ARCH_OMAP4) += $(voltagedomain-common)
104 voltagedomains44xx_data.o 101obj-$(CONFIG_ARCH_OMAP4) += voltagedomains44xx_data.o
105 102
106# OMAP powerdomain framework 103# OMAP powerdomain framework
107powerdomain-common += powerdomain.o powerdomain-common.o 104powerdomain-common += powerdomain.o powerdomain-common.o
108obj-$(CONFIG_ARCH_OMAP2) += $(powerdomain-common) \ 105obj-$(CONFIG_ARCH_OMAP2) += $(powerdomain-common)
109 powerdomain2xxx_3xxx.o \ 106obj-$(CONFIG_ARCH_OMAP2) += powerdomains2xxx_data.o
110 powerdomains2xxx_data.o \ 107obj-$(CONFIG_ARCH_OMAP2) += powerdomain2xxx_3xxx.o
111 powerdomains2xxx_3xxx_data.o 108obj-$(CONFIG_ARCH_OMAP2) += powerdomains2xxx_3xxx_data.o
112obj-$(CONFIG_ARCH_OMAP3) += $(powerdomain-common) \ 109obj-$(CONFIG_ARCH_OMAP3) += $(powerdomain-common)
113 powerdomain2xxx_3xxx.o \ 110obj-$(CONFIG_ARCH_OMAP3) += powerdomain2xxx_3xxx.o
114 powerdomains3xxx_data.o \ 111obj-$(CONFIG_ARCH_OMAP3) += powerdomains3xxx_data.o
115 powerdomains2xxx_3xxx_data.o 112obj-$(CONFIG_ARCH_OMAP3) += powerdomains2xxx_3xxx_data.o
116obj-$(CONFIG_ARCH_OMAP4) += $(powerdomain-common) \ 113obj-$(CONFIG_ARCH_OMAP4) += $(powerdomain-common)
117 powerdomain44xx.o \ 114obj-$(CONFIG_ARCH_OMAP4) += powerdomain44xx.o
118 powerdomains44xx_data.o 115obj-$(CONFIG_ARCH_OMAP4) += powerdomains44xx_data.o
119 116
120# PRCM clockdomain control 117# PRCM clockdomain control
121clockdomain-common += clockdomain.o \ 118clockdomain-common += clockdomain.o
122 clockdomains_common_data.o 119clockdomain-common += clockdomains_common_data.o
123obj-$(CONFIG_ARCH_OMAP2) += $(clockdomain-common) \ 120obj-$(CONFIG_ARCH_OMAP2) += $(clockdomain-common)
124 clockdomain2xxx_3xxx.o \ 121obj-$(CONFIG_ARCH_OMAP2) += clockdomain2xxx_3xxx.o
125 clockdomains2xxx_3xxx_data.o 122obj-$(CONFIG_ARCH_OMAP2) += clockdomains2xxx_3xxx_data.o
126obj-$(CONFIG_SOC_OMAP2420) += clockdomains2420_data.o 123obj-$(CONFIG_SOC_OMAP2420) += clockdomains2420_data.o
127obj-$(CONFIG_SOC_OMAP2430) += clockdomains2430_data.o 124obj-$(CONFIG_SOC_OMAP2430) += clockdomains2430_data.o
128obj-$(CONFIG_ARCH_OMAP3) += $(clockdomain-common) \ 125obj-$(CONFIG_ARCH_OMAP3) += $(clockdomain-common)
129 clockdomain2xxx_3xxx.o \ 126obj-$(CONFIG_ARCH_OMAP3) += clockdomain2xxx_3xxx.o
130 clockdomains2xxx_3xxx_data.o \ 127obj-$(CONFIG_ARCH_OMAP3) += clockdomains2xxx_3xxx_data.o
131 clockdomains3xxx_data.o 128obj-$(CONFIG_ARCH_OMAP3) += clockdomains3xxx_data.o
132obj-$(CONFIG_ARCH_OMAP4) += $(clockdomain-common) \ 129obj-$(CONFIG_ARCH_OMAP4) += $(clockdomain-common)
133 clockdomain44xx.o \ 130obj-$(CONFIG_ARCH_OMAP4) += clockdomain44xx.o
134 clockdomains44xx_data.o 131obj-$(CONFIG_ARCH_OMAP4) += clockdomains44xx_data.o
135 132
136# Clock framework 133# Clock framework
137obj-$(CONFIG_ARCH_OMAP2) += $(clock-common) clock2xxx.o \ 134obj-$(CONFIG_ARCH_OMAP2) += $(clock-common) clock2xxx.o
138 clkt2xxx_sys.o \ 135obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_sys.o
139 clkt2xxx_dpllcore.o \ 136obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_dpllcore.o
140 clkt2xxx_virt_prcm_set.o \ 137obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_virt_prcm_set.o
141 clkt2xxx_apll.o clkt2xxx_osc.o \ 138obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_apll.o clkt2xxx_osc.o
142 clkt2xxx_dpll.o clkt_iclk.o 139obj-$(CONFIG_ARCH_OMAP2) += clkt2xxx_dpll.o clkt_iclk.o
143obj-$(CONFIG_SOC_OMAP2420) += clock2420_data.o 140obj-$(CONFIG_SOC_OMAP2420) += clock2420_data.o
144obj-$(CONFIG_SOC_OMAP2430) += clock2430.o clock2430_data.o 141obj-$(CONFIG_SOC_OMAP2430) += clock2430.o clock2430_data.o
145obj-$(CONFIG_ARCH_OMAP3) += $(clock-common) clock3xxx.o \ 142obj-$(CONFIG_ARCH_OMAP3) += $(clock-common) clock3xxx.o
146 clock34xx.o clkt34xx_dpll3m2.o \ 143obj-$(CONFIG_ARCH_OMAP3) += clock34xx.o clkt34xx_dpll3m2.o
147 clock3517.o clock36xx.o \ 144obj-$(CONFIG_ARCH_OMAP3) += clock3517.o clock36xx.o
148 dpll3xxx.o clock3xxx_data.o \ 145obj-$(CONFIG_ARCH_OMAP3) += dpll3xxx.o clock3xxx_data.o
149 clkt_iclk.o 146obj-$(CONFIG_ARCH_OMAP3) += clkt_iclk.o
150obj-$(CONFIG_ARCH_OMAP4) += $(clock-common) clock44xx_data.o \ 147obj-$(CONFIG_ARCH_OMAP4) += $(clock-common) clock44xx_data.o
151 dpll3xxx.o dpll44xx.o 148obj-$(CONFIG_ARCH_OMAP4) += dpll3xxx.o dpll44xx.o
152 149
153# OMAP2 clock rate set data (old "OPP" data) 150# OMAP2 clock rate set data (old "OPP" data)
154obj-$(CONFIG_SOC_OMAP2420) += opp2420_data.o 151obj-$(CONFIG_SOC_OMAP2420) += opp2420_data.o
155obj-$(CONFIG_SOC_OMAP2430) += opp2430_data.o 152obj-$(CONFIG_SOC_OMAP2430) += opp2430_data.o
156 153
157# hwmod data 154# hwmod data
158obj-$(CONFIG_SOC_OMAP2420) += omap_hwmod_2xxx_ipblock_data.o \ 155obj-$(CONFIG_SOC_OMAP2420) += omap_hwmod_2xxx_ipblock_data.o
159 omap_hwmod_2xxx_3xxx_ipblock_data.o \ 156obj-$(CONFIG_SOC_OMAP2420) += omap_hwmod_2xxx_3xxx_ipblock_data.o
160 omap_hwmod_2xxx_interconnect_data.o \ 157obj-$(CONFIG_SOC_OMAP2420) += omap_hwmod_2xxx_interconnect_data.o
161 omap_hwmod_2xxx_3xxx_interconnect_data.o \ 158obj-$(CONFIG_SOC_OMAP2420) += omap_hwmod_2xxx_3xxx_interconnect_data.o
162 omap_hwmod_2420_data.o 159obj-$(CONFIG_SOC_OMAP2420) += omap_hwmod_2420_data.o
163obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2xxx_ipblock_data.o \ 160obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2xxx_ipblock_data.o
164 omap_hwmod_2xxx_3xxx_ipblock_data.o \ 161obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2xxx_3xxx_ipblock_data.o
165 omap_hwmod_2xxx_interconnect_data.o \ 162obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2xxx_interconnect_data.o
166 omap_hwmod_2xxx_3xxx_interconnect_data.o \ 163obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2xxx_3xxx_interconnect_data.o
167 omap_hwmod_2430_data.o 164obj-$(CONFIG_SOC_OMAP2430) += omap_hwmod_2430_data.o
168obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_2xxx_3xxx_ipblock_data.o \ 165obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_2xxx_3xxx_ipblock_data.o
169 omap_hwmod_2xxx_3xxx_interconnect_data.o \ 166obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_2xxx_3xxx_interconnect_data.o
170 omap_hwmod_3xxx_data.o 167obj-$(CONFIG_ARCH_OMAP3) += omap_hwmod_3xxx_data.o
171obj-$(CONFIG_ARCH_OMAP4) += omap_hwmod_44xx_data.o 168obj-$(CONFIG_ARCH_OMAP4) += omap_hwmod_44xx_data.o
172 169
173# EMU peripherals 170# EMU peripherals
@@ -208,23 +205,19 @@ obj-$(CONFIG_MACH_OMAP3EVM) += board-omap3evm.o
208obj-$(CONFIG_MACH_OMAP3_PANDORA) += board-omap3pandora.o 205obj-$(CONFIG_MACH_OMAP3_PANDORA) += board-omap3pandora.o
209obj-$(CONFIG_MACH_OMAP_3430SDP) += board-3430sdp.o 206obj-$(CONFIG_MACH_OMAP_3430SDP) += board-3430sdp.o
210obj-$(CONFIG_MACH_NOKIA_N8X0) += board-n8x0.o 207obj-$(CONFIG_MACH_NOKIA_N8X0) += board-n8x0.o
211obj-$(CONFIG_MACH_NOKIA_RM680) += board-rm680.o \ 208obj-$(CONFIG_MACH_NOKIA_RM680) += board-rm680.o sdram-nokia.o
212 sdram-nokia.o 209obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51.o sdram-nokia.o
213obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51.o \ 210obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51-peripherals.o
214 sdram-nokia.o \ 211obj-$(CONFIG_MACH_NOKIA_RX51) += board-rx51-video.o
215 board-rx51-peripherals.o \ 212obj-$(CONFIG_MACH_OMAP_ZOOM2) += board-zoom.o board-zoom-peripherals.o
216 board-rx51-video.o 213obj-$(CONFIG_MACH_OMAP_ZOOM2) += board-zoom-display.o
217obj-$(CONFIG_MACH_OMAP_ZOOM2) += board-zoom.o \ 214obj-$(CONFIG_MACH_OMAP_ZOOM2) += board-zoom-debugboard.o
218 board-zoom-peripherals.o \ 215obj-$(CONFIG_MACH_OMAP_ZOOM3) += board-zoom.o board-zoom-peripherals.o
219 board-zoom-display.o \ 216obj-$(CONFIG_MACH_OMAP_ZOOM3) += board-zoom-display.o
220 board-zoom-debugboard.o 217obj-$(CONFIG_MACH_OMAP_ZOOM3) += board-zoom-debugboard.o
221obj-$(CONFIG_MACH_OMAP_ZOOM3) += board-zoom.o \ 218obj-$(CONFIG_MACH_OMAP_3630SDP) += board-3630sdp.o
222 board-zoom-peripherals.o \ 219obj-$(CONFIG_MACH_OMAP_3630SDP) += board-zoom-peripherals.o
223 board-zoom-display.o \ 220obj-$(CONFIG_MACH_OMAP_3630SDP) += board-zoom-display.o
224 board-zoom-debugboard.o
225obj-$(CONFIG_MACH_OMAP_3630SDP) += board-3630sdp.o \
226 board-zoom-peripherals.o \
227 board-zoom-display.o
228obj-$(CONFIG_MACH_CM_T35) += board-cm-t35.o 221obj-$(CONFIG_MACH_CM_T35) += board-cm-t35.o
229obj-$(CONFIG_MACH_CM_T3517) += board-cm-t3517.o 222obj-$(CONFIG_MACH_CM_T3517) += board-cm-t3517.o
230obj-$(CONFIG_MACH_IGEP0020) += board-igep0020.o 223obj-$(CONFIG_MACH_IGEP0020) += board-igep0020.o
diff --git a/arch/arm/mach-omap2/board-2430sdp.c b/arch/arm/mach-omap2/board-2430sdp.c
index e658f835d0de..99ca6bad5c30 100644
--- a/arch/arm/mach-omap2/board-2430sdp.c
+++ b/arch/arm/mach-omap2/board-2430sdp.c
@@ -303,6 +303,7 @@ MACHINE_START(OMAP_2430SDP, "OMAP2430 sdp2430 board")
303 .init_irq = omap2_init_irq, 303 .init_irq = omap2_init_irq,
304 .handle_irq = omap2_intc_handle_irq, 304 .handle_irq = omap2_intc_handle_irq,
305 .init_machine = omap_2430sdp_init, 305 .init_machine = omap_2430sdp_init,
306 .init_late = omap2430_init_late,
306 .timer = &omap2_timer, 307 .timer = &omap2_timer,
307 .restart = omap_prcm_restart, 308 .restart = omap_prcm_restart,
308MACHINE_END 309MACHINE_END
diff --git a/arch/arm/mach-omap2/board-3430sdp.c b/arch/arm/mach-omap2/board-3430sdp.c
index 37abb0d49b51..a98c688058a9 100644
--- a/arch/arm/mach-omap2/board-3430sdp.c
+++ b/arch/arm/mach-omap2/board-3430sdp.c
@@ -605,6 +605,7 @@ MACHINE_START(OMAP_3430SDP, "OMAP3430 3430SDP board")
605 .init_irq = omap3_init_irq, 605 .init_irq = omap3_init_irq,
606 .handle_irq = omap3_intc_handle_irq, 606 .handle_irq = omap3_intc_handle_irq,
607 .init_machine = omap_3430sdp_init, 607 .init_machine = omap_3430sdp_init,
608 .init_late = omap3430_init_late,
608 .timer = &omap3_timer, 609 .timer = &omap3_timer,
609 .restart = omap_prcm_restart, 610 .restart = omap_prcm_restart,
610MACHINE_END 611MACHINE_END
diff --git a/arch/arm/mach-omap2/board-3630sdp.c b/arch/arm/mach-omap2/board-3630sdp.c
index 6ef350d1ae4f..2dc9ba523c7a 100644
--- a/arch/arm/mach-omap2/board-3630sdp.c
+++ b/arch/arm/mach-omap2/board-3630sdp.c
@@ -217,6 +217,7 @@ MACHINE_START(OMAP_3630SDP, "OMAP 3630SDP board")
217 .init_irq = omap3_init_irq, 217 .init_irq = omap3_init_irq,
218 .handle_irq = omap3_intc_handle_irq, 218 .handle_irq = omap3_intc_handle_irq,
219 .init_machine = omap_sdp_init, 219 .init_machine = omap_sdp_init,
220 .init_late = omap3630_init_late,
220 .timer = &omap3_timer, 221 .timer = &omap3_timer,
221 .restart = omap_prcm_restart, 222 .restart = omap_prcm_restart,
222MACHINE_END 223MACHINE_END
diff --git a/arch/arm/mach-omap2/board-4430sdp.c b/arch/arm/mach-omap2/board-4430sdp.c
index 94af6cde2e36..8e17284a803f 100644
--- a/arch/arm/mach-omap2/board-4430sdp.c
+++ b/arch/arm/mach-omap2/board-4430sdp.c
@@ -912,6 +912,7 @@ MACHINE_START(OMAP_4430SDP, "OMAP4430 4430SDP board")
912 .init_irq = gic_init_irq, 912 .init_irq = gic_init_irq,
913 .handle_irq = gic_handle_irq, 913 .handle_irq = gic_handle_irq,
914 .init_machine = omap_4430sdp_init, 914 .init_machine = omap_4430sdp_init,
915 .init_late = omap4430_init_late,
915 .timer = &omap4_timer, 916 .timer = &omap4_timer,
916 .restart = omap_prcm_restart, 917 .restart = omap_prcm_restart,
917MACHINE_END 918MACHINE_END
diff --git a/arch/arm/mach-omap2/board-am3517crane.c b/arch/arm/mach-omap2/board-am3517crane.c
index 3b8a53c1f2a8..92432c28673d 100644
--- a/arch/arm/mach-omap2/board-am3517crane.c
+++ b/arch/arm/mach-omap2/board-am3517crane.c
@@ -102,6 +102,7 @@ MACHINE_START(CRANEBOARD, "AM3517/05 CRANEBOARD")
102 .init_irq = omap3_init_irq, 102 .init_irq = omap3_init_irq,
103 .handle_irq = omap3_intc_handle_irq, 103 .handle_irq = omap3_intc_handle_irq,
104 .init_machine = am3517_crane_init, 104 .init_machine = am3517_crane_init,
105 .init_late = am35xx_init_late,
105 .timer = &omap3_timer, 106 .timer = &omap3_timer,
106 .restart = omap_prcm_restart, 107 .restart = omap_prcm_restart,
107MACHINE_END 108MACHINE_END
diff --git a/arch/arm/mach-omap2/board-am3517evm.c b/arch/arm/mach-omap2/board-am3517evm.c
index 99790eb646e8..18f601096ce1 100644
--- a/arch/arm/mach-omap2/board-am3517evm.c
+++ b/arch/arm/mach-omap2/board-am3517evm.c
@@ -385,6 +385,7 @@ MACHINE_START(OMAP3517EVM, "OMAP3517/AM3517 EVM")
385 .init_irq = omap3_init_irq, 385 .init_irq = omap3_init_irq,
386 .handle_irq = omap3_intc_handle_irq, 386 .handle_irq = omap3_intc_handle_irq,
387 .init_machine = am3517_evm_init, 387 .init_machine = am3517_evm_init,
388 .init_late = am35xx_init_late,
388 .timer = &omap3_timer, 389 .timer = &omap3_timer,
389 .restart = omap_prcm_restart, 390 .restart = omap_prcm_restart,
390MACHINE_END 391MACHINE_END
diff --git a/arch/arm/mach-omap2/board-apollon.c b/arch/arm/mach-omap2/board-apollon.c
index 768ece2e9c3b..502c31e123be 100644
--- a/arch/arm/mach-omap2/board-apollon.c
+++ b/arch/arm/mach-omap2/board-apollon.c
@@ -356,6 +356,7 @@ MACHINE_START(OMAP_APOLLON, "OMAP24xx Apollon")
356 .init_irq = omap2_init_irq, 356 .init_irq = omap2_init_irq,
357 .handle_irq = omap2_intc_handle_irq, 357 .handle_irq = omap2_intc_handle_irq,
358 .init_machine = omap_apollon_init, 358 .init_machine = omap_apollon_init,
359 .init_late = omap2420_init_late,
359 .timer = &omap2_timer, 360 .timer = &omap2_timer,
360 .restart = omap_prcm_restart, 361 .restart = omap_prcm_restart,
361MACHINE_END 362MACHINE_END
diff --git a/arch/arm/mach-omap2/board-cm-t35.c b/arch/arm/mach-omap2/board-cm-t35.c
index c03df142ea67..ded100c80a91 100644
--- a/arch/arm/mach-omap2/board-cm-t35.c
+++ b/arch/arm/mach-omap2/board-cm-t35.c
@@ -669,6 +669,7 @@ MACHINE_START(CM_T35, "Compulab CM-T35")
669 .init_irq = omap3_init_irq, 669 .init_irq = omap3_init_irq,
670 .handle_irq = omap3_intc_handle_irq, 670 .handle_irq = omap3_intc_handle_irq,
671 .init_machine = cm_t35_init, 671 .init_machine = cm_t35_init,
672 .init_late = omap35xx_init_late,
672 .timer = &omap3_timer, 673 .timer = &omap3_timer,
673 .restart = omap_prcm_restart, 674 .restart = omap_prcm_restart,
674MACHINE_END 675MACHINE_END
@@ -681,6 +682,7 @@ MACHINE_START(CM_T3730, "Compulab CM-T3730")
681 .init_irq = omap3_init_irq, 682 .init_irq = omap3_init_irq,
682 .handle_irq = omap3_intc_handle_irq, 683 .handle_irq = omap3_intc_handle_irq,
683 .init_machine = cm_t3730_init, 684 .init_machine = cm_t3730_init,
685 .init_late = omap3630_init_late,
684 .timer = &omap3_timer, 686 .timer = &omap3_timer,
685 .restart = omap_prcm_restart, 687 .restart = omap_prcm_restart,
686MACHINE_END 688MACHINE_END
diff --git a/arch/arm/mach-omap2/board-cm-t3517.c b/arch/arm/mach-omap2/board-cm-t3517.c
index 9e66e167e4f3..a33ad4641d9a 100644
--- a/arch/arm/mach-omap2/board-cm-t3517.c
+++ b/arch/arm/mach-omap2/board-cm-t3517.c
@@ -303,6 +303,7 @@ MACHINE_START(CM_T3517, "Compulab CM-T3517")
303 .init_irq = omap3_init_irq, 303 .init_irq = omap3_init_irq,
304 .handle_irq = omap3_intc_handle_irq, 304 .handle_irq = omap3_intc_handle_irq,
305 .init_machine = cm_t3517_init, 305 .init_machine = cm_t3517_init,
306 .init_late = am35xx_init_late,
306 .timer = &omap3_timer, 307 .timer = &omap3_timer,
307 .restart = omap_prcm_restart, 308 .restart = omap_prcm_restart,
308MACHINE_END 309MACHINE_END
diff --git a/arch/arm/mach-omap2/board-devkit8000.c b/arch/arm/mach-omap2/board-devkit8000.c
index b063f0d2faa6..6567c1cd5572 100644
--- a/arch/arm/mach-omap2/board-devkit8000.c
+++ b/arch/arm/mach-omap2/board-devkit8000.c
@@ -644,6 +644,7 @@ MACHINE_START(DEVKIT8000, "OMAP3 Devkit8000")
644 .init_irq = omap3_init_irq, 644 .init_irq = omap3_init_irq,
645 .handle_irq = omap3_intc_handle_irq, 645 .handle_irq = omap3_intc_handle_irq,
646 .init_machine = devkit8000_init, 646 .init_machine = devkit8000_init,
647 .init_late = omap35xx_init_late,
647 .timer = &omap3_secure_timer, 648 .timer = &omap3_secure_timer,
648 .restart = omap_prcm_restart, 649 .restart = omap_prcm_restart,
649MACHINE_END 650MACHINE_END
diff --git a/arch/arm/mach-omap2/board-generic.c b/arch/arm/mach-omap2/board-generic.c
index 7302ba7ff1b9..202934657867 100644
--- a/arch/arm/mach-omap2/board-generic.c
+++ b/arch/arm/mach-omap2/board-generic.c
@@ -125,6 +125,7 @@ DT_MACHINE_START(OMAP4_DT, "Generic OMAP4 (Flattened Device Tree)")
125 .init_irq = omap_init_irq, 125 .init_irq = omap_init_irq,
126 .handle_irq = gic_handle_irq, 126 .handle_irq = gic_handle_irq,
127 .init_machine = omap_generic_init, 127 .init_machine = omap_generic_init,
128 .init_late = omap4430_init_late,
128 .timer = &omap4_timer, 129 .timer = &omap4_timer,
129 .dt_compat = omap4_boards_compat, 130 .dt_compat = omap4_boards_compat,
130 .restart = omap_prcm_restart, 131 .restart = omap_prcm_restart,
diff --git a/arch/arm/mach-omap2/board-h4.c b/arch/arm/mach-omap2/board-h4.c
index 0bbbabe28fcc..876becf8205a 100644
--- a/arch/arm/mach-omap2/board-h4.c
+++ b/arch/arm/mach-omap2/board-h4.c
@@ -398,6 +398,7 @@ MACHINE_START(OMAP_H4, "OMAP2420 H4 board")
398 .init_irq = omap2_init_irq, 398 .init_irq = omap2_init_irq,
399 .handle_irq = omap2_intc_handle_irq, 399 .handle_irq = omap2_intc_handle_irq,
400 .init_machine = omap_h4_init, 400 .init_machine = omap_h4_init,
401 .init_late = omap2420_init_late,
401 .timer = &omap2_timer, 402 .timer = &omap2_timer,
402 .restart = omap_prcm_restart, 403 .restart = omap_prcm_restart,
403MACHINE_END 404MACHINE_END
diff --git a/arch/arm/mach-omap2/board-igep0020.c b/arch/arm/mach-omap2/board-igep0020.c
index 7a274098f67b..74915295482e 100644
--- a/arch/arm/mach-omap2/board-igep0020.c
+++ b/arch/arm/mach-omap2/board-igep0020.c
@@ -650,6 +650,7 @@ MACHINE_START(IGEP0020, "IGEP v2 board")
650 .init_irq = omap3_init_irq, 650 .init_irq = omap3_init_irq,
651 .handle_irq = omap3_intc_handle_irq, 651 .handle_irq = omap3_intc_handle_irq,
652 .init_machine = igep_init, 652 .init_machine = igep_init,
653 .init_late = omap35xx_init_late,
653 .timer = &omap3_timer, 654 .timer = &omap3_timer,
654 .restart = omap_prcm_restart, 655 .restart = omap_prcm_restart,
655MACHINE_END 656MACHINE_END
@@ -662,6 +663,7 @@ MACHINE_START(IGEP0030, "IGEP OMAP3 module")
662 .init_irq = omap3_init_irq, 663 .init_irq = omap3_init_irq,
663 .handle_irq = omap3_intc_handle_irq, 664 .handle_irq = omap3_intc_handle_irq,
664 .init_machine = igep_init, 665 .init_machine = igep_init,
666 .init_late = omap35xx_init_late,
665 .timer = &omap3_timer, 667 .timer = &omap3_timer,
666 .restart = omap_prcm_restart, 668 .restart = omap_prcm_restart,
667MACHINE_END 669MACHINE_END
diff --git a/arch/arm/mach-omap2/board-ldp.c b/arch/arm/mach-omap2/board-ldp.c
index 1b6049567ab4..ef9e82977499 100644
--- a/arch/arm/mach-omap2/board-ldp.c
+++ b/arch/arm/mach-omap2/board-ldp.c
@@ -442,6 +442,7 @@ MACHINE_START(OMAP_LDP, "OMAP LDP board")
442 .init_irq = omap3_init_irq, 442 .init_irq = omap3_init_irq,
443 .handle_irq = omap3_intc_handle_irq, 443 .handle_irq = omap3_intc_handle_irq,
444 .init_machine = omap_ldp_init, 444 .init_machine = omap_ldp_init,
445 .init_late = omap3430_init_late,
445 .timer = &omap3_timer, 446 .timer = &omap3_timer,
446 .restart = omap_prcm_restart, 447 .restart = omap_prcm_restart,
447MACHINE_END 448MACHINE_END
diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
index 518091c5f77c..8ca14e88a31a 100644
--- a/arch/arm/mach-omap2/board-n8x0.c
+++ b/arch/arm/mach-omap2/board-n8x0.c
@@ -694,6 +694,7 @@ MACHINE_START(NOKIA_N800, "Nokia N800")
694 .init_irq = omap2_init_irq, 694 .init_irq = omap2_init_irq,
695 .handle_irq = omap2_intc_handle_irq, 695 .handle_irq = omap2_intc_handle_irq,
696 .init_machine = n8x0_init_machine, 696 .init_machine = n8x0_init_machine,
697 .init_late = omap2420_init_late,
697 .timer = &omap2_timer, 698 .timer = &omap2_timer,
698 .restart = omap_prcm_restart, 699 .restart = omap_prcm_restart,
699MACHINE_END 700MACHINE_END
@@ -706,6 +707,7 @@ MACHINE_START(NOKIA_N810, "Nokia N810")
706 .init_irq = omap2_init_irq, 707 .init_irq = omap2_init_irq,
707 .handle_irq = omap2_intc_handle_irq, 708 .handle_irq = omap2_intc_handle_irq,
708 .init_machine = n8x0_init_machine, 709 .init_machine = n8x0_init_machine,
710 .init_late = omap2420_init_late,
709 .timer = &omap2_timer, 711 .timer = &omap2_timer,
710 .restart = omap_prcm_restart, 712 .restart = omap_prcm_restart,
711MACHINE_END 713MACHINE_END
@@ -718,6 +720,7 @@ MACHINE_START(NOKIA_N810_WIMAX, "Nokia N810 WiMAX")
718 .init_irq = omap2_init_irq, 720 .init_irq = omap2_init_irq,
719 .handle_irq = omap2_intc_handle_irq, 721 .handle_irq = omap2_intc_handle_irq,
720 .init_machine = n8x0_init_machine, 722 .init_machine = n8x0_init_machine,
723 .init_late = omap2420_init_late,
721 .timer = &omap2_timer, 724 .timer = &omap2_timer,
722 .restart = omap_prcm_restart, 725 .restart = omap_prcm_restart,
723MACHINE_END 726MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3beagle.c b/arch/arm/mach-omap2/board-omap3beagle.c
index 2a7b9a9da1db..79c6909eeb78 100644
--- a/arch/arm/mach-omap2/board-omap3beagle.c
+++ b/arch/arm/mach-omap2/board-omap3beagle.c
@@ -543,6 +543,7 @@ MACHINE_START(OMAP3_BEAGLE, "OMAP3 Beagle Board")
543 .init_irq = omap3_init_irq, 543 .init_irq = omap3_init_irq,
544 .handle_irq = omap3_intc_handle_irq, 544 .handle_irq = omap3_intc_handle_irq,
545 .init_machine = omap3_beagle_init, 545 .init_machine = omap3_beagle_init,
546 .init_late = omap3_init_late,
546 .timer = &omap3_secure_timer, 547 .timer = &omap3_secure_timer,
547 .restart = omap_prcm_restart, 548 .restart = omap_prcm_restart,
548MACHINE_END 549MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3evm.c b/arch/arm/mach-omap2/board-omap3evm.c
index ace3c675e9c2..639bd07ea38a 100644
--- a/arch/arm/mach-omap2/board-omap3evm.c
+++ b/arch/arm/mach-omap2/board-omap3evm.c
@@ -671,6 +671,7 @@ MACHINE_START(OMAP3EVM, "OMAP3 EVM")
671 .init_irq = omap3_init_irq, 671 .init_irq = omap3_init_irq,
672 .handle_irq = omap3_intc_handle_irq, 672 .handle_irq = omap3_intc_handle_irq,
673 .init_machine = omap3_evm_init, 673 .init_machine = omap3_evm_init,
674 .init_late = omap35xx_init_late,
674 .timer = &omap3_timer, 675 .timer = &omap3_timer,
675 .restart = omap_prcm_restart, 676 .restart = omap_prcm_restart,
676MACHINE_END 677MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3logic.c b/arch/arm/mach-omap2/board-omap3logic.c
index c008bf8e1c36..932e1778aff9 100644
--- a/arch/arm/mach-omap2/board-omap3logic.c
+++ b/arch/arm/mach-omap2/board-omap3logic.c
@@ -242,6 +242,7 @@ MACHINE_START(OMAP3_TORPEDO, "Logic OMAP3 Torpedo board")
242 .init_irq = omap3_init_irq, 242 .init_irq = omap3_init_irq,
243 .handle_irq = omap3_intc_handle_irq, 243 .handle_irq = omap3_intc_handle_irq,
244 .init_machine = omap3logic_init, 244 .init_machine = omap3logic_init,
245 .init_late = omap35xx_init_late,
245 .timer = &omap3_timer, 246 .timer = &omap3_timer,
246 .restart = omap_prcm_restart, 247 .restart = omap_prcm_restart,
247MACHINE_END 248MACHINE_END
@@ -254,6 +255,7 @@ MACHINE_START(OMAP3530_LV_SOM, "OMAP Logic 3530 LV SOM board")
254 .init_irq = omap3_init_irq, 255 .init_irq = omap3_init_irq,
255 .handle_irq = omap3_intc_handle_irq, 256 .handle_irq = omap3_intc_handle_irq,
256 .init_machine = omap3logic_init, 257 .init_machine = omap3logic_init,
258 .init_late = omap35xx_init_late,
257 .timer = &omap3_timer, 259 .timer = &omap3_timer,
258 .restart = omap_prcm_restart, 260 .restart = omap_prcm_restart,
259MACHINE_END 261MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3pandora.c b/arch/arm/mach-omap2/board-omap3pandora.c
index 33d995d0f075..57aebee44fd0 100644
--- a/arch/arm/mach-omap2/board-omap3pandora.c
+++ b/arch/arm/mach-omap2/board-omap3pandora.c
@@ -622,6 +622,7 @@ MACHINE_START(OMAP3_PANDORA, "Pandora Handheld Console")
622 .init_irq = omap3_init_irq, 622 .init_irq = omap3_init_irq,
623 .handle_irq = omap3_intc_handle_irq, 623 .handle_irq = omap3_intc_handle_irq,
624 .init_machine = omap3pandora_init, 624 .init_machine = omap3pandora_init,
625 .init_late = omap35xx_init_late,
625 .timer = &omap3_timer, 626 .timer = &omap3_timer,
626 .restart = omap_prcm_restart, 627 .restart = omap_prcm_restart,
627MACHINE_END 628MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3stalker.c b/arch/arm/mach-omap2/board-omap3stalker.c
index 4396bae91677..b318f5602e36 100644
--- a/arch/arm/mach-omap2/board-omap3stalker.c
+++ b/arch/arm/mach-omap2/board-omap3stalker.c
@@ -436,6 +436,7 @@ MACHINE_START(SBC3530, "OMAP3 STALKER")
436 .init_irq = omap3_init_irq, 436 .init_irq = omap3_init_irq,
437 .handle_irq = omap3_intc_handle_irq, 437 .handle_irq = omap3_intc_handle_irq,
438 .init_machine = omap3_stalker_init, 438 .init_machine = omap3_stalker_init,
439 .init_late = omap35xx_init_late,
439 .timer = &omap3_secure_timer, 440 .timer = &omap3_secure_timer,
440 .restart = omap_prcm_restart, 441 .restart = omap_prcm_restart,
441MACHINE_END 442MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap3touchbook.c b/arch/arm/mach-omap2/board-omap3touchbook.c
index ae2251fa4a69..485d14d6a8cd 100644
--- a/arch/arm/mach-omap2/board-omap3touchbook.c
+++ b/arch/arm/mach-omap2/board-omap3touchbook.c
@@ -387,6 +387,7 @@ MACHINE_START(TOUCHBOOK, "OMAP3 touchbook Board")
387 .init_irq = omap3_init_irq, 387 .init_irq = omap3_init_irq,
388 .handle_irq = omap3_intc_handle_irq, 388 .handle_irq = omap3_intc_handle_irq,
389 .init_machine = omap3_touchbook_init, 389 .init_machine = omap3_touchbook_init,
390 .init_late = omap3430_init_late,
390 .timer = &omap3_secure_timer, 391 .timer = &omap3_secure_timer,
391 .restart = omap_prcm_restart, 392 .restart = omap_prcm_restart,
392MACHINE_END 393MACHINE_END
diff --git a/arch/arm/mach-omap2/board-omap4panda.c b/arch/arm/mach-omap2/board-omap4panda.c
index 68b8fc9ff010..982fb2622ab8 100644
--- a/arch/arm/mach-omap2/board-omap4panda.c
+++ b/arch/arm/mach-omap2/board-omap4panda.c
@@ -521,6 +521,7 @@ MACHINE_START(OMAP4_PANDA, "OMAP4 Panda board")
521 .init_irq = gic_init_irq, 521 .init_irq = gic_init_irq,
522 .handle_irq = gic_handle_irq, 522 .handle_irq = gic_handle_irq,
523 .init_machine = omap4_panda_init, 523 .init_machine = omap4_panda_init,
524 .init_late = omap4430_init_late,
524 .timer = &omap4_timer, 525 .timer = &omap4_timer,
525 .restart = omap_prcm_restart, 526 .restart = omap_prcm_restart,
526MACHINE_END 527MACHINE_END
diff --git a/arch/arm/mach-omap2/board-overo.c b/arch/arm/mach-omap2/board-overo.c
index 5527c1979a16..8fa2fc3a4c3c 100644
--- a/arch/arm/mach-omap2/board-overo.c
+++ b/arch/arm/mach-omap2/board-overo.c
@@ -554,6 +554,7 @@ MACHINE_START(OVERO, "Gumstix Overo")
554 .init_irq = omap3_init_irq, 554 .init_irq = omap3_init_irq,
555 .handle_irq = omap3_intc_handle_irq, 555 .handle_irq = omap3_intc_handle_irq,
556 .init_machine = overo_init, 556 .init_machine = overo_init,
557 .init_late = omap35xx_init_late,
557 .timer = &omap3_timer, 558 .timer = &omap3_timer,
558 .restart = omap_prcm_restart, 559 .restart = omap_prcm_restart,
559MACHINE_END 560MACHINE_END
diff --git a/arch/arm/mach-omap2/board-rm680.c b/arch/arm/mach-omap2/board-rm680.c
index ae53d71f0ce0..0ad1bb3bdb98 100644
--- a/arch/arm/mach-omap2/board-rm680.c
+++ b/arch/arm/mach-omap2/board-rm680.c
@@ -151,6 +151,7 @@ MACHINE_START(NOKIA_RM680, "Nokia RM-680 board")
151 .init_irq = omap3_init_irq, 151 .init_irq = omap3_init_irq,
152 .handle_irq = omap3_intc_handle_irq, 152 .handle_irq = omap3_intc_handle_irq,
153 .init_machine = rm680_init, 153 .init_machine = rm680_init,
154 .init_late = omap3630_init_late,
154 .timer = &omap3_timer, 155 .timer = &omap3_timer,
155 .restart = omap_prcm_restart, 156 .restart = omap_prcm_restart,
156MACHINE_END 157MACHINE_END
@@ -163,6 +164,7 @@ MACHINE_START(NOKIA_RM696, "Nokia RM-696 board")
163 .init_irq = omap3_init_irq, 164 .init_irq = omap3_init_irq,
164 .handle_irq = omap3_intc_handle_irq, 165 .handle_irq = omap3_intc_handle_irq,
165 .init_machine = rm680_init, 166 .init_machine = rm680_init,
167 .init_late = omap3630_init_late,
166 .timer = &omap3_timer, 168 .timer = &omap3_timer,
167 .restart = omap_prcm_restart, 169 .restart = omap_prcm_restart,
168MACHINE_END 170MACHINE_END
diff --git a/arch/arm/mach-omap2/board-rx51.c b/arch/arm/mach-omap2/board-rx51.c
index 2da92a6ba40a..345dd931f76f 100644
--- a/arch/arm/mach-omap2/board-rx51.c
+++ b/arch/arm/mach-omap2/board-rx51.c
@@ -127,6 +127,7 @@ MACHINE_START(NOKIA_RX51, "Nokia RX-51 board")
127 .init_irq = omap3_init_irq, 127 .init_irq = omap3_init_irq,
128 .handle_irq = omap3_intc_handle_irq, 128 .handle_irq = omap3_intc_handle_irq,
129 .init_machine = rx51_init, 129 .init_machine = rx51_init,
130 .init_late = omap3430_init_late,
130 .timer = &omap3_timer, 131 .timer = &omap3_timer,
131 .restart = omap_prcm_restart, 132 .restart = omap_prcm_restart,
132MACHINE_END 133MACHINE_END
diff --git a/arch/arm/mach-omap2/board-ti8168evm.c b/arch/arm/mach-omap2/board-ti8168evm.c
index ab9a7a9e9d64..d4c8392cadb6 100644
--- a/arch/arm/mach-omap2/board-ti8168evm.c
+++ b/arch/arm/mach-omap2/board-ti8168evm.c
@@ -52,6 +52,7 @@ MACHINE_START(TI8168EVM, "ti8168evm")
52 .init_irq = ti81xx_init_irq, 52 .init_irq = ti81xx_init_irq,
53 .timer = &omap3_timer, 53 .timer = &omap3_timer,
54 .init_machine = ti81xx_evm_init, 54 .init_machine = ti81xx_evm_init,
55 .init_late = ti81xx_init_late,
55 .restart = omap_prcm_restart, 56 .restart = omap_prcm_restart,
56MACHINE_END 57MACHINE_END
57 58
@@ -63,5 +64,6 @@ MACHINE_START(TI8148EVM, "ti8148evm")
63 .init_irq = ti81xx_init_irq, 64 .init_irq = ti81xx_init_irq,
64 .timer = &omap3_timer, 65 .timer = &omap3_timer,
65 .init_machine = ti81xx_evm_init, 66 .init_machine = ti81xx_evm_init,
67 .init_late = ti81xx_init_late,
66 .restart = omap_prcm_restart, 68 .restart = omap_prcm_restart,
67MACHINE_END 69MACHINE_END
diff --git a/arch/arm/mach-omap2/board-zoom.c b/arch/arm/mach-omap2/board-zoom.c
index 5c20bcc57f2b..4e7e56142e6f 100644
--- a/arch/arm/mach-omap2/board-zoom.c
+++ b/arch/arm/mach-omap2/board-zoom.c
@@ -137,6 +137,7 @@ MACHINE_START(OMAP_ZOOM2, "OMAP Zoom2 board")
137 .init_irq = omap3_init_irq, 137 .init_irq = omap3_init_irq,
138 .handle_irq = omap3_intc_handle_irq, 138 .handle_irq = omap3_intc_handle_irq,
139 .init_machine = omap_zoom_init, 139 .init_machine = omap_zoom_init,
140 .init_late = omap3430_init_late,
140 .timer = &omap3_timer, 141 .timer = &omap3_timer,
141 .restart = omap_prcm_restart, 142 .restart = omap_prcm_restart,
142MACHINE_END 143MACHINE_END
@@ -149,6 +150,7 @@ MACHINE_START(OMAP_ZOOM3, "OMAP Zoom3 board")
149 .init_irq = omap3_init_irq, 150 .init_irq = omap3_init_irq,
150 .handle_irq = omap3_intc_handle_irq, 151 .handle_irq = omap3_intc_handle_irq,
151 .init_machine = omap_zoom_init, 152 .init_machine = omap_zoom_init,
153 .init_late = omap3630_init_late,
152 .timer = &omap3_timer, 154 .timer = &omap3_timer,
153 .restart = omap_prcm_restart, 155 .restart = omap_prcm_restart,
154MACHINE_END 156MACHINE_END
diff --git a/arch/arm/mach-omap2/common.h b/arch/arm/mach-omap2/common.h
index d6c9e6180318..be9dfd1abe60 100644
--- a/arch/arm/mach-omap2/common.h
+++ b/arch/arm/mach-omap2/common.h
@@ -55,7 +55,7 @@ static inline void omap34xx_map_common_io(void)
55} 55}
56#endif 56#endif
57 57
58#ifdef CONFIG_SOC_OMAPTI81XX 58#ifdef CONFIG_SOC_TI81XX
59extern void omapti81xx_map_common_io(void); 59extern void omapti81xx_map_common_io(void);
60#else 60#else
61static inline void omapti81xx_map_common_io(void) 61static inline void omapti81xx_map_common_io(void)
@@ -63,7 +63,7 @@ static inline void omapti81xx_map_common_io(void)
63} 63}
64#endif 64#endif
65 65
66#ifdef CONFIG_SOC_OMAPAM33XX 66#ifdef CONFIG_SOC_AM33XX
67extern void omapam33xx_map_common_io(void); 67extern void omapam33xx_map_common_io(void);
68#else 68#else
69static inline void omapam33xx_map_common_io(void) 69static inline void omapam33xx_map_common_io(void)
@@ -79,6 +79,42 @@ static inline void omap44xx_map_common_io(void)
79} 79}
80#endif 80#endif
81 81
82#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP2)
83int omap2_pm_init(void);
84#else
85static inline int omap2_pm_init(void)
86{
87 return 0;
88}
89#endif
90
91#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP3)
92int omap3_pm_init(void);
93#else
94static inline int omap3_pm_init(void)
95{
96 return 0;
97}
98#endif
99
100#if defined(CONFIG_PM) && defined(CONFIG_ARCH_OMAP4)
101int omap4_pm_init(void);
102#else
103static inline int omap4_pm_init(void)
104{
105 return 0;
106}
107#endif
108
109#ifdef CONFIG_OMAP_MUX
110int omap_mux_late_init(void);
111#else
112static inline int omap_mux_late_init(void)
113{
114 return 0;
115}
116#endif
117
82extern void omap2_init_common_infrastructure(void); 118extern void omap2_init_common_infrastructure(void);
83 119
84extern struct sys_timer omap2_timer; 120extern struct sys_timer omap2_timer;
@@ -95,6 +131,17 @@ void omap3_init_early(void); /* Do not use this one */
95void am35xx_init_early(void); 131void am35xx_init_early(void);
96void ti81xx_init_early(void); 132void ti81xx_init_early(void);
97void omap4430_init_early(void); 133void omap4430_init_early(void);
134void omap3_init_late(void); /* Do not use this one */
135void omap4430_init_late(void);
136void omap2420_init_late(void);
137void omap2430_init_late(void);
138void omap3430_init_late(void);
139void omap35xx_init_late(void);
140void omap3630_init_late(void);
141void am35xx_init_late(void);
142void ti81xx_init_late(void);
143void omap4430_init_late(void);
144int omap2_common_pm_late_init(void);
98void omap_prcm_restart(char, const char *); 145void omap_prcm_restart(char, const char *);
99 146
100/* 147/*
diff --git a/arch/arm/mach-omap2/devices.c b/arch/arm/mach-omap2/devices.c
index ae62ece04ef9..7b4b9327e543 100644
--- a/arch/arm/mach-omap2/devices.c
+++ b/arch/arm/mach-omap2/devices.c
@@ -645,7 +645,11 @@ static inline void omap242x_mmc_mux(struct omap_mmc_platform_data
645 645
646void __init omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data) 646void __init omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
647{ 647{
648 char *name = "mmci-omap"; 648 struct platform_device *pdev;
649 struct omap_hwmod *oh;
650 int id = 0;
651 char *oh_name = "msdi1";
652 char *dev_name = "mmci-omap";
649 653
650 if (!mmc_data[0]) { 654 if (!mmc_data[0]) {
651 pr_err("%s fails: Incomplete platform data\n", __func__); 655 pr_err("%s fails: Incomplete platform data\n", __func__);
@@ -653,8 +657,17 @@ void __init omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
653 } 657 }
654 658
655 omap242x_mmc_mux(mmc_data[0]); 659 omap242x_mmc_mux(mmc_data[0]);
656 omap_mmc_add(name, 0, OMAP2_MMC1_BASE, OMAP2420_MMC_SIZE, 660
657 INT_24XX_MMC_IRQ, mmc_data[0]); 661 oh = omap_hwmod_lookup(oh_name);
662 if (!oh) {
663 pr_err("Could not look up %s\n", oh_name);
664 return;
665 }
666 pdev = omap_device_build(dev_name, id, oh, mmc_data[0],
667 sizeof(struct omap_mmc_platform_data), NULL, 0, 0);
668 if (IS_ERR(pdev))
669 WARN(1, "Can'd build omap_device for %s:%s.\n",
670 dev_name, oh->name);
658} 671}
659 672
660#endif 673#endif
diff --git a/arch/arm/mach-omap2/dma.c b/arch/arm/mach-omap2/dma.c
index b19d8496c16e..ff75abe60af2 100644
--- a/arch/arm/mach-omap2/dma.c
+++ b/arch/arm/mach-omap2/dma.c
@@ -227,10 +227,6 @@ static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
227 227
228 dma_stride = OMAP2_DMA_STRIDE; 228 dma_stride = OMAP2_DMA_STRIDE;
229 dma_common_ch_start = CSDP; 229 dma_common_ch_start = CSDP;
230 if (cpu_is_omap3630() || cpu_is_omap44xx())
231 dma_common_ch_end = CCDN;
232 else
233 dma_common_ch_end = CCFN;
234 230
235 p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL); 231 p = kzalloc(sizeof(struct omap_system_dma_plat_info), GFP_KERNEL);
236 if (!p) { 232 if (!p) {
@@ -277,6 +273,13 @@ static int __init omap2_system_dma_init_dev(struct omap_hwmod *oh, void *unused)
277 dev_err(&pdev->dev, "%s: kzalloc fail\n", __func__); 273 dev_err(&pdev->dev, "%s: kzalloc fail\n", __func__);
278 return -ENOMEM; 274 return -ENOMEM;
279 } 275 }
276
277 /* Check the capabilities register for descriptor loading feature */
278 if (dma_read(CAPS_0, 0) & DMA_HAS_DESCRIPTOR_CAPS)
279 dma_common_ch_end = CCDN;
280 else
281 dma_common_ch_end = CCFN;
282
280 return 0; 283 return 0;
281} 284}
282 285
diff --git a/arch/arm/mach-omap2/dsp.c b/arch/arm/mach-omap2/dsp.c
index 3376388b317a..845309f146fe 100644
--- a/arch/arm/mach-omap2/dsp.c
+++ b/arch/arm/mach-omap2/dsp.c
@@ -28,8 +28,6 @@
28 28
29#include <plat/dsp.h> 29#include <plat/dsp.h>
30 30
31extern phys_addr_t omap_dsp_get_mempool_base(void);
32
33static struct platform_device *omap_dsp_pdev; 31static struct platform_device *omap_dsp_pdev;
34 32
35static struct omap_dsp_platform_data omap_dsp_pdata __initdata = { 33static struct omap_dsp_platform_data omap_dsp_pdata __initdata = {
@@ -47,6 +45,31 @@ static struct omap_dsp_platform_data omap_dsp_pdata __initdata = {
47 .dsp_cm_rmw_bits = omap2_cm_rmw_mod_reg_bits, 45 .dsp_cm_rmw_bits = omap2_cm_rmw_mod_reg_bits,
48}; 46};
49 47
48static phys_addr_t omap_dsp_phys_mempool_base;
49
50void __init omap_dsp_reserve_sdram_memblock(void)
51{
52 phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
53 phys_addr_t paddr;
54
55 if (!size)
56 return;
57
58 paddr = arm_memblock_steal(size, SZ_1M);
59 if (!paddr) {
60 pr_err("%s: failed to reserve %llx bytes\n",
61 __func__, (unsigned long long)size);
62 return;
63 }
64
65 omap_dsp_phys_mempool_base = paddr;
66}
67
68static phys_addr_t omap_dsp_get_mempool_base(void)
69{
70 return omap_dsp_phys_mempool_base;
71}
72
50static int __init omap_dsp_init(void) 73static int __init omap_dsp_init(void)
51{ 74{
52 struct platform_device *pdev; 75 struct platform_device *pdev;
diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
index 580e684e8825..46b09dae770e 100644
--- a/arch/arm/mach-omap2/gpmc.c
+++ b/arch/arm/mach-omap2/gpmc.c
@@ -50,6 +50,19 @@
50#define GPMC_ECC_SIZE_CONFIG 0x1fc 50#define GPMC_ECC_SIZE_CONFIG 0x1fc
51#define GPMC_ECC1_RESULT 0x200 51#define GPMC_ECC1_RESULT 0x200
52 52
53/* GPMC ECC control settings */
54#define GPMC_ECC_CTRL_ECCCLEAR 0x100
55#define GPMC_ECC_CTRL_ECCDISABLE 0x000
56#define GPMC_ECC_CTRL_ECCREG1 0x001
57#define GPMC_ECC_CTRL_ECCREG2 0x002
58#define GPMC_ECC_CTRL_ECCREG3 0x003
59#define GPMC_ECC_CTRL_ECCREG4 0x004
60#define GPMC_ECC_CTRL_ECCREG5 0x005
61#define GPMC_ECC_CTRL_ECCREG6 0x006
62#define GPMC_ECC_CTRL_ECCREG7 0x007
63#define GPMC_ECC_CTRL_ECCREG8 0x008
64#define GPMC_ECC_CTRL_ECCREG9 0x009
65
53#define GPMC_CS0_OFFSET 0x60 66#define GPMC_CS0_OFFSET 0x60
54#define GPMC_CS_SIZE 0x30 67#define GPMC_CS_SIZE 0x30
55 68
@@ -860,8 +873,9 @@ int gpmc_enable_hwecc(int cs, int mode, int dev_width, int ecc_size)
860 gpmc_ecc_used = cs; 873 gpmc_ecc_used = cs;
861 874
862 /* clear ecc and enable bits */ 875 /* clear ecc and enable bits */
863 val = ((0x00000001<<8) | 0x00000001); 876 gpmc_write_reg(GPMC_ECC_CONTROL,
864 gpmc_write_reg(GPMC_ECC_CONTROL, val); 877 GPMC_ECC_CTRL_ECCCLEAR |
878 GPMC_ECC_CTRL_ECCREG1);
865 879
866 /* program ecc and result sizes */ 880 /* program ecc and result sizes */
867 val = ((((ecc_size >> 1) - 1) << 22) | (0x0000000F)); 881 val = ((((ecc_size >> 1) - 1) << 22) | (0x0000000F));
@@ -869,13 +883,15 @@ int gpmc_enable_hwecc(int cs, int mode, int dev_width, int ecc_size)
869 883
870 switch (mode) { 884 switch (mode) {
871 case GPMC_ECC_READ: 885 case GPMC_ECC_READ:
872 gpmc_write_reg(GPMC_ECC_CONTROL, 0x101); 886 case GPMC_ECC_WRITE:
887 gpmc_write_reg(GPMC_ECC_CONTROL,
888 GPMC_ECC_CTRL_ECCCLEAR |
889 GPMC_ECC_CTRL_ECCREG1);
873 break; 890 break;
874 case GPMC_ECC_READSYN: 891 case GPMC_ECC_READSYN:
875 gpmc_write_reg(GPMC_ECC_CONTROL, 0x100); 892 gpmc_write_reg(GPMC_ECC_CONTROL,
876 break; 893 GPMC_ECC_CTRL_ECCCLEAR |
877 case GPMC_ECC_WRITE: 894 GPMC_ECC_CTRL_ECCDISABLE);
878 gpmc_write_reg(GPMC_ECC_CONTROL, 0x101);
879 break; 895 break;
880 default: 896 default:
881 printk(KERN_INFO "Error: Unrecognized Mode[%d]!\n", mode); 897 printk(KERN_INFO "Error: Unrecognized Mode[%d]!\n", mode);
diff --git a/arch/arm/mach-omap2/hsmmc.c b/arch/arm/mach-omap2/hsmmc.c
index b0268eaffe13..be697d4e0843 100644
--- a/arch/arm/mach-omap2/hsmmc.c
+++ b/arch/arm/mach-omap2/hsmmc.c
@@ -355,7 +355,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
355 * 355 *
356 * temporary HACK: ocr_mask instead of fixed supply 356 * temporary HACK: ocr_mask instead of fixed supply
357 */ 357 */
358 if (cpu_is_omap3505() || cpu_is_omap3517()) 358 if (soc_is_am35xx())
359 mmc->slots[0].ocr_mask = MMC_VDD_165_195 | 359 mmc->slots[0].ocr_mask = MMC_VDD_165_195 |
360 MMC_VDD_26_27 | 360 MMC_VDD_26_27 |
361 MMC_VDD_27_28 | 361 MMC_VDD_27_28 |
@@ -365,7 +365,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
365 else 365 else
366 mmc->slots[0].ocr_mask = c->ocr_mask; 366 mmc->slots[0].ocr_mask = c->ocr_mask;
367 367
368 if (!cpu_is_omap3517() && !cpu_is_omap3505()) 368 if (!soc_is_am35xx())
369 mmc->slots[0].features |= HSMMC_HAS_PBIAS; 369 mmc->slots[0].features |= HSMMC_HAS_PBIAS;
370 370
371 if (cpu_is_omap44xx() && (omap_rev() > OMAP4430_REV_ES1_0)) 371 if (cpu_is_omap44xx() && (omap_rev() > OMAP4430_REV_ES1_0))
@@ -388,7 +388,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
388 } 388 }
389 } 389 }
390 390
391 if (cpu_is_omap3517() || cpu_is_omap3505()) 391 if (soc_is_am35xx())
392 mmc->slots[0].set_power = nop_mmc_set_power; 392 mmc->slots[0].set_power = nop_mmc_set_power;
393 393
394 /* OMAP3630 HSMMC1 supports only 4-bit */ 394 /* OMAP3630 HSMMC1 supports only 4-bit */
@@ -400,7 +400,7 @@ static int __init omap_hsmmc_pdata_init(struct omap2_hsmmc_info *c,
400 } 400 }
401 break; 401 break;
402 case 2: 402 case 2:
403 if (cpu_is_omap3517() || cpu_is_omap3505()) 403 if (soc_is_am35xx())
404 mmc->slots[0].set_power = am35x_hsmmc2_set_power; 404 mmc->slots[0].set_power = am35x_hsmmc2_set_power;
405 405
406 if (c->ext_clock) 406 if (c->ext_clock)
diff --git a/arch/arm/mach-omap2/id.c b/arch/arm/mach-omap2/id.c
index f1398171d8a2..0389b3264abe 100644
--- a/arch/arm/mach-omap2/id.c
+++ b/arch/arm/mach-omap2/id.c
@@ -185,8 +185,7 @@ static void __init omap3_cpuinfo(void)
185 */ 185 */
186 if (cpu_is_omap3630()) { 186 if (cpu_is_omap3630()) {
187 cpu_name = "OMAP3630"; 187 cpu_name = "OMAP3630";
188 } else if (cpu_is_omap3517()) { 188 } else if (soc_is_am35xx()) {
189 /* AM35xx devices */
190 cpu_name = (omap3_has_sgx()) ? "AM3517" : "AM3505"; 189 cpu_name = (omap3_has_sgx()) ? "AM3517" : "AM3505";
191 } else if (cpu_is_ti816x()) { 190 } else if (cpu_is_ti816x()) {
192 cpu_name = "TI816X"; 191 cpu_name = "TI816X";
@@ -352,13 +351,13 @@ void __init omap3xxx_check_revision(void)
352 */ 351 */
353 switch (rev) { 352 switch (rev) {
354 case 0: 353 case 0:
355 omap_revision = OMAP3517_REV_ES1_0; 354 omap_revision = AM35XX_REV_ES1_0;
356 cpu_rev = "1.0"; 355 cpu_rev = "1.0";
357 break; 356 break;
358 case 1: 357 case 1:
359 /* FALLTHROUGH */ 358 /* FALLTHROUGH */
360 default: 359 default:
361 omap_revision = OMAP3517_REV_ES1_1; 360 omap_revision = AM35XX_REV_ES1_1;
362 cpu_rev = "1.1"; 361 cpu_rev = "1.1";
363 } 362 }
364 break; 363 break;
diff --git a/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h b/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
index d79321b0f2a2..548de90b58c2 100644
--- a/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
+++ b/arch/arm/mach-omap2/include/mach/omap-wakeupgen.h
@@ -16,18 +16,10 @@
16#define OMAP_WKG_ENB_B_0 0x14 16#define OMAP_WKG_ENB_B_0 0x14
17#define OMAP_WKG_ENB_C_0 0x18 17#define OMAP_WKG_ENB_C_0 0x18
18#define OMAP_WKG_ENB_D_0 0x1c 18#define OMAP_WKG_ENB_D_0 0x1c
19#define OMAP_WKG_ENB_SECURE_A_0 0x20
20#define OMAP_WKG_ENB_SECURE_B_0 0x24
21#define OMAP_WKG_ENB_SECURE_C_0 0x28
22#define OMAP_WKG_ENB_SECURE_D_0 0x2c
23#define OMAP_WKG_ENB_A_1 0x410 19#define OMAP_WKG_ENB_A_1 0x410
24#define OMAP_WKG_ENB_B_1 0x414 20#define OMAP_WKG_ENB_B_1 0x414
25#define OMAP_WKG_ENB_C_1 0x418 21#define OMAP_WKG_ENB_C_1 0x418
26#define OMAP_WKG_ENB_D_1 0x41c 22#define OMAP_WKG_ENB_D_1 0x41c
27#define OMAP_WKG_ENB_SECURE_A_1 0x420
28#define OMAP_WKG_ENB_SECURE_B_1 0x424
29#define OMAP_WKG_ENB_SECURE_C_1 0x428
30#define OMAP_WKG_ENB_SECURE_D_1 0x42c
31#define OMAP_AUX_CORE_BOOT_0 0x800 23#define OMAP_AUX_CORE_BOOT_0 0x800
32#define OMAP_AUX_CORE_BOOT_1 0x804 24#define OMAP_AUX_CORE_BOOT_1 0x804
33#define OMAP_PTMSYNCREQ_MASK 0xc00 25#define OMAP_PTMSYNCREQ_MASK 0xc00
diff --git a/arch/arm/mach-omap2/io.c b/arch/arm/mach-omap2/io.c
index 4b9491aa36fa..8d014ba04abc 100644
--- a/arch/arm/mach-omap2/io.c
+++ b/arch/arm/mach-omap2/io.c
@@ -173,7 +173,7 @@ static struct map_desc omap34xx_io_desc[] __initdata = {
173}; 173};
174#endif 174#endif
175 175
176#ifdef CONFIG_SOC_OMAPTI81XX 176#ifdef CONFIG_SOC_TI81XX
177static struct map_desc omapti81xx_io_desc[] __initdata = { 177static struct map_desc omapti81xx_io_desc[] __initdata = {
178 { 178 {
179 .virtual = L4_34XX_VIRT, 179 .virtual = L4_34XX_VIRT,
@@ -184,7 +184,7 @@ static struct map_desc omapti81xx_io_desc[] __initdata = {
184}; 184};
185#endif 185#endif
186 186
187#ifdef CONFIG_SOC_OMAPAM33XX 187#ifdef CONFIG_SOC_AM33XX
188static struct map_desc omapam33xx_io_desc[] __initdata = { 188static struct map_desc omapam33xx_io_desc[] __initdata = {
189 { 189 {
190 .virtual = L4_34XX_VIRT, 190 .virtual = L4_34XX_VIRT,
@@ -216,41 +216,11 @@ static struct map_desc omap44xx_io_desc[] __initdata = {
216 .type = MT_DEVICE, 216 .type = MT_DEVICE,
217 }, 217 },
218 { 218 {
219 .virtual = OMAP44XX_GPMC_VIRT,
220 .pfn = __phys_to_pfn(OMAP44XX_GPMC_PHYS),
221 .length = OMAP44XX_GPMC_SIZE,
222 .type = MT_DEVICE,
223 },
224 {
225 .virtual = OMAP44XX_EMIF1_VIRT,
226 .pfn = __phys_to_pfn(OMAP44XX_EMIF1_PHYS),
227 .length = OMAP44XX_EMIF1_SIZE,
228 .type = MT_DEVICE,
229 },
230 {
231 .virtual = OMAP44XX_EMIF2_VIRT,
232 .pfn = __phys_to_pfn(OMAP44XX_EMIF2_PHYS),
233 .length = OMAP44XX_EMIF2_SIZE,
234 .type = MT_DEVICE,
235 },
236 {
237 .virtual = OMAP44XX_DMM_VIRT,
238 .pfn = __phys_to_pfn(OMAP44XX_DMM_PHYS),
239 .length = OMAP44XX_DMM_SIZE,
240 .type = MT_DEVICE,
241 },
242 {
243 .virtual = L4_PER_44XX_VIRT, 219 .virtual = L4_PER_44XX_VIRT,
244 .pfn = __phys_to_pfn(L4_PER_44XX_PHYS), 220 .pfn = __phys_to_pfn(L4_PER_44XX_PHYS),
245 .length = L4_PER_44XX_SIZE, 221 .length = L4_PER_44XX_SIZE,
246 .type = MT_DEVICE, 222 .type = MT_DEVICE,
247 }, 223 },
248 {
249 .virtual = L4_EMU_44XX_VIRT,
250 .pfn = __phys_to_pfn(L4_EMU_44XX_PHYS),
251 .length = L4_EMU_44XX_SIZE,
252 .type = MT_DEVICE,
253 },
254#ifdef CONFIG_OMAP4_ERRATA_I688 224#ifdef CONFIG_OMAP4_ERRATA_I688
255 { 225 {
256 .virtual = OMAP4_SRAM_VA, 226 .virtual = OMAP4_SRAM_VA,
@@ -286,14 +256,14 @@ void __init omap34xx_map_common_io(void)
286} 256}
287#endif 257#endif
288 258
289#ifdef CONFIG_SOC_OMAPTI81XX 259#ifdef CONFIG_SOC_TI81XX
290void __init omapti81xx_map_common_io(void) 260void __init omapti81xx_map_common_io(void)
291{ 261{
292 iotable_init(omapti81xx_io_desc, ARRAY_SIZE(omapti81xx_io_desc)); 262 iotable_init(omapti81xx_io_desc, ARRAY_SIZE(omapti81xx_io_desc));
293} 263}
294#endif 264#endif
295 265
296#ifdef CONFIG_SOC_OMAPAM33XX 266#ifdef CONFIG_SOC_AM33XX
297void __init omapam33xx_map_common_io(void) 267void __init omapam33xx_map_common_io(void)
298{ 268{
299 iotable_init(omapam33xx_io_desc, ARRAY_SIZE(omapam33xx_io_desc)); 269 iotable_init(omapam33xx_io_desc, ARRAY_SIZE(omapam33xx_io_desc));
@@ -380,6 +350,13 @@ void __init omap2420_init_early(void)
380 omap_hwmod_init_postsetup(); 350 omap_hwmod_init_postsetup();
381 omap2420_clk_init(); 351 omap2420_clk_init();
382} 352}
353
354void __init omap2420_init_late(void)
355{
356 omap_mux_late_init();
357 omap2_common_pm_late_init();
358 omap2_pm_init();
359}
383#endif 360#endif
384 361
385#ifdef CONFIG_SOC_OMAP2430 362#ifdef CONFIG_SOC_OMAP2430
@@ -395,6 +372,13 @@ void __init omap2430_init_early(void)
395 omap_hwmod_init_postsetup(); 372 omap_hwmod_init_postsetup();
396 omap2430_clk_init(); 373 omap2430_clk_init();
397} 374}
375
376void __init omap2430_init_late(void)
377{
378 omap_mux_late_init();
379 omap2_common_pm_late_init();
380 omap2_pm_init();
381}
398#endif 382#endif
399 383
400/* 384/*
@@ -449,6 +433,48 @@ void __init ti81xx_init_early(void)
449 omap_hwmod_init_postsetup(); 433 omap_hwmod_init_postsetup();
450 omap3xxx_clk_init(); 434 omap3xxx_clk_init();
451} 435}
436
437void __init omap3_init_late(void)
438{
439 omap_mux_late_init();
440 omap2_common_pm_late_init();
441 omap3_pm_init();
442}
443
444void __init omap3430_init_late(void)
445{
446 omap_mux_late_init();
447 omap2_common_pm_late_init();
448 omap3_pm_init();
449}
450
451void __init omap35xx_init_late(void)
452{
453 omap_mux_late_init();
454 omap2_common_pm_late_init();
455 omap3_pm_init();
456}
457
458void __init omap3630_init_late(void)
459{
460 omap_mux_late_init();
461 omap2_common_pm_late_init();
462 omap3_pm_init();
463}
464
465void __init am35xx_init_late(void)
466{
467 omap_mux_late_init();
468 omap2_common_pm_late_init();
469 omap3_pm_init();
470}
471
472void __init ti81xx_init_late(void)
473{
474 omap_mux_late_init();
475 omap2_common_pm_late_init();
476 omap3_pm_init();
477}
452#endif 478#endif
453 479
454#ifdef CONFIG_ARCH_OMAP4 480#ifdef CONFIG_ARCH_OMAP4
@@ -465,6 +491,13 @@ void __init omap4430_init_early(void)
465 omap_hwmod_init_postsetup(); 491 omap_hwmod_init_postsetup();
466 omap4xxx_clk_init(); 492 omap4xxx_clk_init();
467} 493}
494
495void __init omap4430_init_late(void)
496{
497 omap_mux_late_init();
498 omap2_common_pm_late_init();
499 omap4_pm_init();
500}
468#endif 501#endif
469 502
470void __init omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0, 503void __init omap_sdrc_init(struct omap_sdrc_params *sdrc_cs0,
diff --git a/arch/arm/mach-omap2/iomap.h b/arch/arm/mach-omap2/iomap.h
index 0812b154f5b5..80b88921faba 100644
--- a/arch/arm/mach-omap2/iomap.h
+++ b/arch/arm/mach-omap2/iomap.h
@@ -37,9 +37,6 @@
37#define OMAP4_L3_PER_IO_OFFSET 0xb1100000 37#define OMAP4_L3_PER_IO_OFFSET 0xb1100000
38#define OMAP4_L3_PER_IO_ADDRESS(pa) IOMEM((pa) + OMAP4_L3_PER_IO_OFFSET) 38#define OMAP4_L3_PER_IO_ADDRESS(pa) IOMEM((pa) + OMAP4_L3_PER_IO_OFFSET)
39 39
40#define OMAP4_GPMC_IO_OFFSET 0xa9000000
41#define OMAP4_GPMC_IO_ADDRESS(pa) IOMEM((pa) + OMAP4_GPMC_IO_OFFSET)
42
43#define OMAP2_EMU_IO_OFFSET 0xaa800000 /* Emulation */ 40#define OMAP2_EMU_IO_OFFSET 0xaa800000 /* Emulation */
44#define OMAP2_EMU_IO_ADDRESS(pa) IOMEM((pa) + OMAP2_EMU_IO_OFFSET) 41#define OMAP2_EMU_IO_ADDRESS(pa) IOMEM((pa) + OMAP2_EMU_IO_OFFSET)
45 42
@@ -170,28 +167,3 @@
170#define L4_ABE_44XX_VIRT (L4_ABE_44XX_PHYS + OMAP2_L4_IO_OFFSET) 167#define L4_ABE_44XX_VIRT (L4_ABE_44XX_PHYS + OMAP2_L4_IO_OFFSET)
171#define L4_ABE_44XX_SIZE SZ_1M 168#define L4_ABE_44XX_SIZE SZ_1M
172 169
173#define L4_EMU_44XX_PHYS L4_EMU_44XX_BASE
174 /* 0x54000000 --> 0xfe800000 */
175#define L4_EMU_44XX_VIRT (L4_EMU_44XX_PHYS + OMAP2_EMU_IO_OFFSET)
176#define L4_EMU_44XX_SIZE SZ_8M
177
178#define OMAP44XX_GPMC_PHYS OMAP44XX_GPMC_BASE
179 /* 0x50000000 --> 0xf9000000 */
180#define OMAP44XX_GPMC_VIRT (OMAP44XX_GPMC_PHYS + OMAP4_GPMC_IO_OFFSET)
181#define OMAP44XX_GPMC_SIZE SZ_1M
182
183
184#define OMAP44XX_EMIF1_PHYS OMAP44XX_EMIF1_BASE
185 /* 0x4c000000 --> 0xfd100000 */
186#define OMAP44XX_EMIF1_VIRT (OMAP44XX_EMIF1_PHYS + OMAP4_L3_PER_IO_OFFSET)
187#define OMAP44XX_EMIF1_SIZE SZ_1M
188
189#define OMAP44XX_EMIF2_PHYS OMAP44XX_EMIF2_BASE
190 /* 0x4d000000 --> 0xfd200000 */
191#define OMAP44XX_EMIF2_SIZE SZ_1M
192#define OMAP44XX_EMIF2_VIRT (OMAP44XX_EMIF1_VIRT + OMAP44XX_EMIF1_SIZE)
193
194#define OMAP44XX_DMM_PHYS OMAP44XX_DMM_BASE
195 /* 0x4e000000 --> 0xfd300000 */
196#define OMAP44XX_DMM_SIZE SZ_1M
197#define OMAP44XX_DMM_VIRT (OMAP44XX_EMIF2_VIRT + OMAP44XX_EMIF2_SIZE)
diff --git a/arch/arm/mach-omap2/irq.c b/arch/arm/mach-omap2/irq.c
index 1ecf54565fe2..fdc4303be563 100644
--- a/arch/arm/mach-omap2/irq.c
+++ b/arch/arm/mach-omap2/irq.c
@@ -231,7 +231,7 @@ static inline void omap_intc_handle_irq(void __iomem *base_addr, struct pt_regs
231 goto out; 231 goto out;
232 232
233 irqnr = readl_relaxed(base_addr + 0xd8); 233 irqnr = readl_relaxed(base_addr + 0xd8);
234#ifdef CONFIG_SOC_OMAPTI81XX 234#ifdef CONFIG_SOC_TI81XX
235 if (irqnr) 235 if (irqnr)
236 goto out; 236 goto out;
237 irqnr = readl_relaxed(base_addr + 0xf8); 237 irqnr = readl_relaxed(base_addr + 0xf8);
diff --git a/arch/arm/mach-omap2/mux.c b/arch/arm/mach-omap2/mux.c
index 3268ee24eada..80e55c5c9998 100644
--- a/arch/arm/mach-omap2/mux.c
+++ b/arch/arm/mach-omap2/mux.c
@@ -788,7 +788,7 @@ static void __init omap_mux_free_names(struct omap_mux *m)
788} 788}
789 789
790/* Free all data except for GPIO pins unless CONFIG_DEBUG_FS is set */ 790/* Free all data except for GPIO pins unless CONFIG_DEBUG_FS is set */
791static int __init omap_mux_late_init(void) 791int __init omap_mux_late_init(void)
792{ 792{
793 struct omap_mux_partition *partition; 793 struct omap_mux_partition *partition;
794 int ret; 794 int ret;
@@ -823,7 +823,6 @@ static int __init omap_mux_late_init(void)
823 823
824 return 0; 824 return 0;
825} 825}
826late_initcall(omap_mux_late_init);
827 826
828static void __init omap_mux_package_fixup(struct omap_mux *p, 827static void __init omap_mux_package_fixup(struct omap_mux *p,
829 struct omap_mux *superset) 828 struct omap_mux *superset)
diff --git a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
index fd48797fa95a..b26d3c9bca16 100644
--- a/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
+++ b/arch/arm/mach-omap2/omap_hwmod_3xxx_data.c
@@ -3306,7 +3306,7 @@ int __init omap3xxx_hwmod_init(void)
3306 rev == OMAP3430_REV_ES2_1 || rev == OMAP3430_REV_ES3_0 || 3306 rev == OMAP3430_REV_ES2_1 || rev == OMAP3430_REV_ES3_0 ||
3307 rev == OMAP3430_REV_ES3_1 || rev == OMAP3430_REV_ES3_1_2) { 3307 rev == OMAP3430_REV_ES3_1 || rev == OMAP3430_REV_ES3_1_2) {
3308 h = omap34xx_hwmod_ocp_ifs; 3308 h = omap34xx_hwmod_ocp_ifs;
3309 } else if (rev == OMAP3517_REV_ES1_0 || rev == OMAP3517_REV_ES1_1) { 3309 } else if (rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1) {
3310 h = am35xx_hwmod_ocp_ifs; 3310 h = am35xx_hwmod_ocp_ifs;
3311 } else if (rev == OMAP3630_REV_ES1_0 || rev == OMAP3630_REV_ES1_1 || 3311 } else if (rev == OMAP3630_REV_ES1_0 || rev == OMAP3630_REV_ES1_1 ||
3312 rev == OMAP3630_REV_ES1_2) { 3312 rev == OMAP3630_REV_ES1_2) {
diff --git a/arch/arm/mach-omap2/pm.c b/arch/arm/mach-omap2/pm.c
index d0c1c9695996..9cb5cede0f50 100644
--- a/arch/arm/mach-omap2/pm.c
+++ b/arch/arm/mach-omap2/pm.c
@@ -295,7 +295,7 @@ static int __init omap2_common_pm_init(void)
295} 295}
296postcore_initcall(omap2_common_pm_init); 296postcore_initcall(omap2_common_pm_init);
297 297
298static int __init omap2_common_pm_late_init(void) 298int __init omap2_common_pm_late_init(void)
299{ 299{
300 /* 300 /*
301 * In the case of DT, the PMIC and SR initialization will be done using 301 * In the case of DT, the PMIC and SR initialization will be done using
@@ -322,4 +322,3 @@ static int __init omap2_common_pm_late_init(void)
322 322
323 return 0; 323 return 0;
324} 324}
325late_initcall(omap2_common_pm_late_init);
diff --git a/arch/arm/mach-omap2/pm24xx.c b/arch/arm/mach-omap2/pm24xx.c
index facfffca9eac..2edeffc923a6 100644
--- a/arch/arm/mach-omap2/pm24xx.c
+++ b/arch/arm/mach-omap2/pm24xx.c
@@ -298,13 +298,10 @@ static void __init prcm_setup_regs(void)
298 WKUP_MOD, PM_WKEN); 298 WKUP_MOD, PM_WKEN);
299} 299}
300 300
301static int __init omap2_pm_init(void) 301int __init omap2_pm_init(void)
302{ 302{
303 u32 l; 303 u32 l;
304 304
305 if (!cpu_is_omap24xx())
306 return -ENODEV;
307
308 printk(KERN_INFO "Power Management for OMAP2 initializing\n"); 305 printk(KERN_INFO "Power Management for OMAP2 initializing\n");
309 l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_REVISION_OFFSET); 306 l = omap2_prm_read_mod_reg(OCP_MOD, OMAP2_PRCM_REVISION_OFFSET);
310 printk(KERN_INFO "PRCM revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f); 307 printk(KERN_INFO "PRCM revision %d.%d\n", (l >> 4) & 0x0f, l & 0x0f);
@@ -370,17 +367,13 @@ static int __init omap2_pm_init(void)
370 * These routines need to be in SRAM as that's the only 367 * These routines need to be in SRAM as that's the only
371 * memory the MPU can see when it wakes up. 368 * memory the MPU can see when it wakes up.
372 */ 369 */
373 if (cpu_is_omap24xx()) { 370 omap2_sram_idle = omap_sram_push(omap24xx_idle_loop_suspend,
374 omap2_sram_idle = omap_sram_push(omap24xx_idle_loop_suspend, 371 omap24xx_idle_loop_suspend_sz);
375 omap24xx_idle_loop_suspend_sz);
376 372
377 omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend, 373 omap2_sram_suspend = omap_sram_push(omap24xx_cpu_suspend,
378 omap24xx_cpu_suspend_sz); 374 omap24xx_cpu_suspend_sz);
379 }
380 375
381 arm_pm_idle = omap2_pm_idle; 376 arm_pm_idle = omap2_pm_idle;
382 377
383 return 0; 378 return 0;
384} 379}
385
386late_initcall(omap2_pm_init);
diff --git a/arch/arm/mach-omap2/pm34xx.c b/arch/arm/mach-omap2/pm34xx.c
index 8b43aefba0ea..a34023d0ca7c 100644
--- a/arch/arm/mach-omap2/pm34xx.c
+++ b/arch/arm/mach-omap2/pm34xx.c
@@ -697,15 +697,12 @@ static void __init pm_errata_configure(void)
697 } 697 }
698} 698}
699 699
700static int __init omap3_pm_init(void) 700int __init omap3_pm_init(void)
701{ 701{
702 struct power_state *pwrst, *tmp; 702 struct power_state *pwrst, *tmp;
703 struct clockdomain *neon_clkdm, *mpu_clkdm; 703 struct clockdomain *neon_clkdm, *mpu_clkdm;
704 int ret; 704 int ret;
705 705
706 if (!cpu_is_omap34xx())
707 return -ENODEV;
708
709 if (!omap3_has_io_chain_ctrl()) 706 if (!omap3_has_io_chain_ctrl())
710 pr_warning("PM: no software I/O chain control; some wakeups may be lost\n"); 707 pr_warning("PM: no software I/O chain control; some wakeups may be lost\n");
711 708
@@ -804,5 +801,3 @@ err2:
804err1: 801err1:
805 return ret; 802 return ret;
806} 803}
807
808late_initcall(omap3_pm_init);
diff --git a/arch/arm/mach-omap2/pm44xx.c b/arch/arm/mach-omap2/pm44xx.c
index 885625352429..ea24174f5707 100644
--- a/arch/arm/mach-omap2/pm44xx.c
+++ b/arch/arm/mach-omap2/pm44xx.c
@@ -141,15 +141,12 @@ static void omap_default_idle(void)
141 * Initializes all powerdomain and clockdomain target states 141 * Initializes all powerdomain and clockdomain target states
142 * and all PRCM settings. 142 * and all PRCM settings.
143 */ 143 */
144static int __init omap4_pm_init(void) 144int __init omap4_pm_init(void)
145{ 145{
146 int ret; 146 int ret;
147 struct clockdomain *emif_clkdm, *mpuss_clkdm, *l3_1_clkdm, *l4wkup; 147 struct clockdomain *emif_clkdm, *mpuss_clkdm, *l3_1_clkdm, *l4wkup;
148 struct clockdomain *ducati_clkdm, *l3_2_clkdm, *l4_per_clkdm; 148 struct clockdomain *ducati_clkdm, *l3_2_clkdm, *l4_per_clkdm;
149 149
150 if (!cpu_is_omap44xx())
151 return -ENODEV;
152
153 if (omap_rev() == OMAP4430_REV_ES1_0) { 150 if (omap_rev() == OMAP4430_REV_ES1_0) {
154 WARN(1, "Power Management not supported on OMAP4430 ES1.0\n"); 151 WARN(1, "Power Management not supported on OMAP4430 ES1.0\n");
155 return -ENODEV; 152 return -ENODEV;
@@ -217,4 +214,3 @@ static int __init omap4_pm_init(void)
217err2: 214err2:
218 return ret; 215 return ret;
219} 216}
220late_initcall(omap4_pm_init);
diff --git a/arch/arm/mach-omap2/powerdomains3xxx_data.c b/arch/arm/mach-omap2/powerdomains3xxx_data.c
index b7ea468eea32..fb0a0a6869d1 100644
--- a/arch/arm/mach-omap2/powerdomains3xxx_data.c
+++ b/arch/arm/mach-omap2/powerdomains3xxx_data.c
@@ -311,7 +311,7 @@ void __init omap3xxx_powerdomains_init(void)
311 rev == OMAP3430_REV_ES3_0 || rev == OMAP3630_REV_ES1_0) 311 rev == OMAP3430_REV_ES3_0 || rev == OMAP3630_REV_ES1_0)
312 pwrdm_register_pwrdms(powerdomains_omap3430es2_es3_0); 312 pwrdm_register_pwrdms(powerdomains_omap3430es2_es3_0);
313 else if (rev == OMAP3430_REV_ES3_1 || rev == OMAP3430_REV_ES3_1_2 || 313 else if (rev == OMAP3430_REV_ES3_1 || rev == OMAP3430_REV_ES3_1_2 ||
314 rev == OMAP3517_REV_ES1_0 || rev == OMAP3517_REV_ES1_1 || 314 rev == AM35XX_REV_ES1_0 || rev == AM35XX_REV_ES1_1 ||
315 rev == OMAP3630_REV_ES1_1 || rev == OMAP3630_REV_ES1_2) 315 rev == OMAP3630_REV_ES1_1 || rev == OMAP3630_REV_ES1_2)
316 pwrdm_register_pwrdms(powerdomains_omap3430es3_1plus); 316 pwrdm_register_pwrdms(powerdomains_omap3430es3_1plus);
317 else 317 else
diff --git a/arch/arm/mach-omap2/timer.c b/arch/arm/mach-omap2/timer.c
index 1b7835865c83..840929bd9dae 100644
--- a/arch/arm/mach-omap2/timer.c
+++ b/arch/arm/mach-omap2/timer.c
@@ -90,7 +90,7 @@ static irqreturn_t omap2_gp_timer_interrupt(int irq, void *dev_id)
90} 90}
91 91
92static struct irqaction omap2_gp_timer_irq = { 92static struct irqaction omap2_gp_timer_irq = {
93 .name = "gp timer", 93 .name = "gp_timer",
94 .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL, 94 .flags = IRQF_DISABLED | IRQF_TIMER | IRQF_IRQPOLL,
95 .handler = omap2_gp_timer_interrupt, 95 .handler = omap2_gp_timer_interrupt,
96}; 96};
@@ -132,7 +132,7 @@ static void omap2_gp_timer_set_mode(enum clock_event_mode mode,
132} 132}
133 133
134static struct clock_event_device clockevent_gpt = { 134static struct clock_event_device clockevent_gpt = {
135 .name = "gp timer", 135 .name = "gp_timer",
136 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT, 136 .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
137 .shift = 32, 137 .shift = 32,
138 .set_next_event = omap2_gp_timer_set_next_event, 138 .set_next_event = omap2_gp_timer_set_next_event,
@@ -236,22 +236,8 @@ static void __init omap2_gp_clockevent_init(int gptimer_id,
236} 236}
237 237
238/* Clocksource code */ 238/* Clocksource code */
239
240#ifdef CONFIG_OMAP_32K_TIMER
241/*
242 * When 32k-timer is enabled, don't use GPTimer for clocksource
243 * instead, just leave default clocksource which uses the 32k
244 * sync counter. See clocksource setup in plat-omap/counter_32k.c
245 */
246
247static void __init omap2_gp_clocksource_init(int unused, const char *dummy)
248{
249 omap_init_clocksource_32k();
250}
251
252#else
253
254static struct omap_dm_timer clksrc; 239static struct omap_dm_timer clksrc;
240static bool use_gptimer_clksrc;
255 241
256/* 242/*
257 * clocksource 243 * clocksource
@@ -262,7 +248,7 @@ static cycle_t clocksource_read_cycles(struct clocksource *cs)
262} 248}
263 249
264static struct clocksource clocksource_gpt = { 250static struct clocksource clocksource_gpt = {
265 .name = "gp timer", 251 .name = "gp_timer",
266 .rating = 300, 252 .rating = 300,
267 .read = clocksource_read_cycles, 253 .read = clocksource_read_cycles,
268 .mask = CLOCKSOURCE_MASK(32), 254 .mask = CLOCKSOURCE_MASK(32),
@@ -278,7 +264,46 @@ static u32 notrace dmtimer_read_sched_clock(void)
278} 264}
279 265
280/* Setup free-running counter for clocksource */ 266/* Setup free-running counter for clocksource */
281static void __init omap2_gp_clocksource_init(int gptimer_id, 267static int __init omap2_sync32k_clocksource_init(void)
268{
269 int ret;
270 struct omap_hwmod *oh;
271 void __iomem *vbase;
272 const char *oh_name = "counter_32k";
273
274 /*
275 * First check hwmod data is available for sync32k counter
276 */
277 oh = omap_hwmod_lookup(oh_name);
278 if (!oh || oh->slaves_cnt == 0)
279 return -ENODEV;
280
281 omap_hwmod_setup_one(oh_name);
282
283 vbase = omap_hwmod_get_mpu_rt_va(oh);
284 if (!vbase) {
285 pr_warn("%s: failed to get counter_32k resource\n", __func__);
286 return -ENXIO;
287 }
288
289 ret = omap_hwmod_enable(oh);
290 if (ret) {
291 pr_warn("%s: failed to enable counter_32k module (%d)\n",
292 __func__, ret);
293 return ret;
294 }
295
296 ret = omap_init_clocksource_32k(vbase);
297 if (ret) {
298 pr_warn("%s: failed to initialize counter_32k as a clocksource (%d)\n",
299 __func__, ret);
300 omap_hwmod_idle(oh);
301 }
302
303 return ret;
304}
305
306static void __init omap2_gptimer_clocksource_init(int gptimer_id,
282 const char *fck_source) 307 const char *fck_source)
283{ 308{
284 int res; 309 int res;
@@ -286,9 +311,6 @@ static void __init omap2_gp_clocksource_init(int gptimer_id,
286 res = omap_dm_timer_init_one(&clksrc, gptimer_id, fck_source); 311 res = omap_dm_timer_init_one(&clksrc, gptimer_id, fck_source);
287 BUG_ON(res); 312 BUG_ON(res);
288 313
289 pr_info("OMAP clocksource: GPTIMER%d at %lu Hz\n",
290 gptimer_id, clksrc.rate);
291
292 __omap_dm_timer_load_start(&clksrc, 314 __omap_dm_timer_load_start(&clksrc,
293 OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR, 0, 1); 315 OMAP_TIMER_CTRL_ST | OMAP_TIMER_CTRL_AR, 0, 1);
294 setup_sched_clock(dmtimer_read_sched_clock, 32, clksrc.rate); 316 setup_sched_clock(dmtimer_read_sched_clock, 32, clksrc.rate);
@@ -296,15 +318,36 @@ static void __init omap2_gp_clocksource_init(int gptimer_id,
296 if (clocksource_register_hz(&clocksource_gpt, clksrc.rate)) 318 if (clocksource_register_hz(&clocksource_gpt, clksrc.rate))
297 pr_err("Could not register clocksource %s\n", 319 pr_err("Could not register clocksource %s\n",
298 clocksource_gpt.name); 320 clocksource_gpt.name);
321 else
322 pr_info("OMAP clocksource: GPTIMER%d at %lu Hz\n",
323 gptimer_id, clksrc.rate);
324}
325
326static void __init omap2_clocksource_init(int gptimer_id,
327 const char *fck_source)
328{
329 /*
330 * First give preference to kernel parameter configuration
331 * by user (clocksource="gp_timer").
332 *
333 * In case of missing kernel parameter for clocksource,
334 * first check for availability for 32k-sync timer, in case
335 * of failure in finding 32k_counter module or registering
336 * it as clocksource, execution will fallback to gp-timer.
337 */
338 if (use_gptimer_clksrc == true)
339 omap2_gptimer_clocksource_init(gptimer_id, fck_source);
340 else if (omap2_sync32k_clocksource_init())
341 /* Fall back to gp-timer code */
342 omap2_gptimer_clocksource_init(gptimer_id, fck_source);
299} 343}
300#endif
301 344
302#define OMAP_SYS_TIMER_INIT(name, clkev_nr, clkev_src, \ 345#define OMAP_SYS_TIMER_INIT(name, clkev_nr, clkev_src, \
303 clksrc_nr, clksrc_src) \ 346 clksrc_nr, clksrc_src) \
304static void __init omap##name##_timer_init(void) \ 347static void __init omap##name##_timer_init(void) \
305{ \ 348{ \
306 omap2_gp_clockevent_init((clkev_nr), clkev_src); \ 349 omap2_gp_clockevent_init((clkev_nr), clkev_src); \
307 omap2_gp_clocksource_init((clksrc_nr), clksrc_src); \ 350 omap2_clocksource_init((clksrc_nr), clksrc_src); \
308} 351}
309 352
310#define OMAP_SYS_TIMER(name) \ 353#define OMAP_SYS_TIMER(name) \
@@ -335,7 +378,7 @@ static DEFINE_TWD_LOCAL_TIMER(twd_local_timer,
335static void __init omap4_timer_init(void) 378static void __init omap4_timer_init(void)
336{ 379{
337 omap2_gp_clockevent_init(1, OMAP4_CLKEV_SOURCE); 380 omap2_gp_clockevent_init(1, OMAP4_CLKEV_SOURCE);
338 omap2_gp_clocksource_init(2, OMAP4_MPU_SOURCE); 381 omap2_clocksource_init(2, OMAP4_MPU_SOURCE);
339#ifdef CONFIG_LOCAL_TIMERS 382#ifdef CONFIG_LOCAL_TIMERS
340 /* Local timers are not supprted on OMAP4430 ES1.0 */ 383 /* Local timers are not supprted on OMAP4430 ES1.0 */
341 if (omap_rev() != OMAP4430_REV_ES1_0) { 384 if (omap_rev() != OMAP4430_REV_ES1_0) {
@@ -503,3 +546,28 @@ static int __init omap2_dm_timer_init(void)
503 return 0; 546 return 0;
504} 547}
505arch_initcall(omap2_dm_timer_init); 548arch_initcall(omap2_dm_timer_init);
549
550/**
551 * omap2_override_clocksource - clocksource override with user configuration
552 *
553 * Allows user to override default clocksource, using kernel parameter
554 * clocksource="gp_timer" (For all OMAP2PLUS architectures)
555 *
556 * Note that, here we are using same standard kernel parameter "clocksource=",
557 * and not introducing any OMAP specific interface.
558 */
559static int __init omap2_override_clocksource(char *str)
560{
561 if (!str)
562 return 0;
563 /*
564 * For OMAP architecture, we only have two options
565 * - sync_32k (default)
566 * - gp_timer (sys_clk based)
567 */
568 if (!strcmp(str, "gp_timer"))
569 use_gptimer_clksrc = true;
570
571 return 0;
572}
573early_param("clocksource", omap2_override_clocksource);
diff --git a/arch/arm/mach-omap2/usb-musb.c b/arch/arm/mach-omap2/usb-musb.c
index 8d5ed775dd56..b19d1b43c12e 100644
--- a/arch/arm/mach-omap2/usb-musb.c
+++ b/arch/arm/mach-omap2/usb-musb.c
@@ -90,7 +90,7 @@ void __init usb_musb_init(struct omap_musb_board_data *musb_board_data)
90 musb_plat.mode = board_data->mode; 90 musb_plat.mode = board_data->mode;
91 musb_plat.extvbus = board_data->extvbus; 91 musb_plat.extvbus = board_data->extvbus;
92 92
93 if (cpu_is_omap3517() || cpu_is_omap3505()) { 93 if (soc_is_am35xx()) {
94 oh_name = "am35x_otg_hs"; 94 oh_name = "am35x_otg_hs";
95 name = "musb-am35x"; 95 name = "musb-am35x";
96 } else if (cpu_is_ti81xx()) { 96 } else if (cpu_is_ti81xx()) {
diff --git a/arch/arm/mach-omap2/voltagedomains3xxx_data.c b/arch/arm/mach-omap2/voltagedomains3xxx_data.c
index 57db2038b23c..d0103c80d040 100644
--- a/arch/arm/mach-omap2/voltagedomains3xxx_data.c
+++ b/arch/arm/mach-omap2/voltagedomains3xxx_data.c
@@ -118,7 +118,7 @@ void __init omap3xxx_voltagedomains_init(void)
118 } 118 }
119#endif 119#endif
120 120
121 if (cpu_is_omap3517() || cpu_is_omap3505()) 121 if (soc_is_am35xx())
122 voltdms = voltagedomains_am35xx; 122 voltdms = voltagedomains_am35xx;
123 else 123 else
124 voltdms = voltagedomains_omap3; 124 voltdms = voltagedomains_omap3;
diff --git a/arch/arm/mach-orion5x/common.c b/arch/arm/mach-orion5x/common.c
index e2e9db492d0c..9148b229d0de 100644
--- a/arch/arm/mach-orion5x/common.c
+++ b/arch/arm/mach-orion5x/common.c
@@ -18,6 +18,7 @@
18#include <linux/mv643xx_i2c.h> 18#include <linux/mv643xx_i2c.h>
19#include <linux/ata_platform.h> 19#include <linux/ata_platform.h>
20#include <linux/delay.h> 20#include <linux/delay.h>
21#include <linux/clk-provider.h>
21#include <net/dsa.h> 22#include <net/dsa.h>
22#include <asm/page.h> 23#include <asm/page.h>
23#include <asm/setup.h> 24#include <asm/setup.h>
@@ -70,6 +71,19 @@ void __init orion5x_map_io(void)
70 71
71 72
72/***************************************************************************** 73/*****************************************************************************
74 * CLK tree
75 ****************************************************************************/
76static struct clk *tclk;
77
78static void __init clk_init(void)
79{
80 tclk = clk_register_fixed_rate(NULL, "tclk", NULL, CLK_IS_ROOT,
81 orion5x_tclk);
82
83 orion_clkdev_init(tclk);
84}
85
86/*****************************************************************************
73 * EHCI0 87 * EHCI0
74 ****************************************************************************/ 88 ****************************************************************************/
75void __init orion5x_ehci0_init(void) 89void __init orion5x_ehci0_init(void)
@@ -95,7 +109,7 @@ void __init orion5x_eth_init(struct mv643xx_eth_platform_data *eth_data)
95{ 109{
96 orion_ge00_init(eth_data, 110 orion_ge00_init(eth_data,
97 ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM, 111 ORION5X_ETH_PHYS_BASE, IRQ_ORION5X_ETH_SUM,
98 IRQ_ORION5X_ETH_ERR, orion5x_tclk); 112 IRQ_ORION5X_ETH_ERR);
99} 113}
100 114
101 115
@@ -132,7 +146,7 @@ void __init orion5x_sata_init(struct mv_sata_platform_data *sata_data)
132 ****************************************************************************/ 146 ****************************************************************************/
133void __init orion5x_spi_init() 147void __init orion5x_spi_init()
134{ 148{
135 orion_spi_init(SPI_PHYS_BASE, orion5x_tclk); 149 orion_spi_init(SPI_PHYS_BASE);
136} 150}
137 151
138 152
@@ -142,7 +156,7 @@ void __init orion5x_spi_init()
142void __init orion5x_uart0_init(void) 156void __init orion5x_uart0_init(void)
143{ 157{
144 orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE, 158 orion_uart0_init(UART0_VIRT_BASE, UART0_PHYS_BASE,
145 IRQ_ORION5X_UART0, orion5x_tclk); 159 IRQ_ORION5X_UART0, tclk);
146} 160}
147 161
148/***************************************************************************** 162/*****************************************************************************
@@ -151,7 +165,7 @@ void __init orion5x_uart0_init(void)
151void __init orion5x_uart1_init(void) 165void __init orion5x_uart1_init(void)
152{ 166{
153 orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE, 167 orion_uart1_init(UART1_VIRT_BASE, UART1_PHYS_BASE,
154 IRQ_ORION5X_UART1, orion5x_tclk); 168 IRQ_ORION5X_UART1, tclk);
155} 169}
156 170
157/***************************************************************************** 171/*****************************************************************************
@@ -179,7 +193,7 @@ static void __init orion5x_crypto_init(void)
179 ****************************************************************************/ 193 ****************************************************************************/
180void __init orion5x_wdt_init(void) 194void __init orion5x_wdt_init(void)
181{ 195{
182 orion_wdt_init(orion5x_tclk); 196 orion_wdt_init();
183} 197}
184 198
185 199
@@ -276,6 +290,9 @@ void __init orion5x_init(void)
276 */ 290 */
277 orion5x_setup_cpu_mbus_bridge(); 291 orion5x_setup_cpu_mbus_bridge();
278 292
293 /* Setup root of clk tree */
294 clk_init();
295
279 /* 296 /*
280 * Don't issue "Wait for Interrupt" instruction if we are 297 * Don't issue "Wait for Interrupt" instruction if we are
281 * running on D0 5281 silicon. 298 * running on D0 5281 silicon.
diff --git a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
index e91bf0ba4e8e..92df49c1b62a 100644
--- a/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
+++ b/arch/arm/mach-orion5x/rd88f6183ap-ge-setup.c
@@ -16,7 +16,6 @@
16#include <linux/mtd/physmap.h> 16#include <linux/mtd/physmap.h>
17#include <linux/mv643xx_eth.h> 17#include <linux/mv643xx_eth.h>
18#include <linux/spi/spi.h> 18#include <linux/spi/spi.h>
19#include <linux/spi/orion_spi.h>
20#include <linux/spi/flash.h> 19#include <linux/spi/flash.h>
21#include <linux/ethtool.h> 20#include <linux/ethtool.h>
22#include <net/dsa.h> 21#include <net/dsa.h>
diff --git a/arch/arm/mach-pnx4008/core.c b/arch/arm/mach-pnx4008/core.c
index be4c92858509..a00d2f1254ed 100644
--- a/arch/arm/mach-pnx4008/core.c
+++ b/arch/arm/mach-pnx4008/core.c
@@ -265,6 +265,17 @@ static void pnx4008_restart(char mode, const char *cmd)
265 soft_restart(0); 265 soft_restart(0);
266} 266}
267 267
268#ifdef CONFIG_PM
269extern int pnx4008_pm_init(void);
270#else
271static inline int pnx4008_pm_init(void) { return 0; }
272#endif
273
274void __init pnx4008_init_late(void)
275{
276 pnx4008_pm_init();
277}
278
268extern struct sys_timer pnx4008_timer; 279extern struct sys_timer pnx4008_timer;
269 280
270MACHINE_START(PNX4008, "Philips PNX4008") 281MACHINE_START(PNX4008, "Philips PNX4008")
@@ -273,6 +284,7 @@ MACHINE_START(PNX4008, "Philips PNX4008")
273 .map_io = pnx4008_map_io, 284 .map_io = pnx4008_map_io,
274 .init_irq = pnx4008_init_irq, 285 .init_irq = pnx4008_init_irq,
275 .init_machine = pnx4008_init, 286 .init_machine = pnx4008_init,
287 .init_late = pnx4008_init_late,
276 .timer = &pnx4008_timer, 288 .timer = &pnx4008_timer,
277 .restart = pnx4008_restart, 289 .restart = pnx4008_restart,
278MACHINE_END 290MACHINE_END
diff --git a/arch/arm/mach-pnx4008/pm.c b/arch/arm/mach-pnx4008/pm.c
index f3e60a049f98..26f8d06b142a 100644
--- a/arch/arm/mach-pnx4008/pm.c
+++ b/arch/arm/mach-pnx4008/pm.c
@@ -124,7 +124,7 @@ static const struct platform_suspend_ops pnx4008_pm_ops = {
124 .valid = pnx4008_pm_valid, 124 .valid = pnx4008_pm_valid,
125}; 125};
126 126
127static int __init pnx4008_pm_init(void) 127int __init pnx4008_pm_init(void)
128{ 128{
129 u32 sram_size_to_allocate; 129 u32 sram_size_to_allocate;
130 130
@@ -151,5 +151,3 @@ static int __init pnx4008_pm_init(void)
151 suspend_set_ops(&pnx4008_pm_ops); 151 suspend_set_ops(&pnx4008_pm_ops);
152 return 0; 152 return 0;
153} 153}
154
155late_initcall(pnx4008_pm_init);
diff --git a/arch/arm/mach-prima2/common.h b/arch/arm/mach-prima2/common.h
index b28a930d4f8a..60d826fc2185 100644
--- a/arch/arm/mach-prima2/common.h
+++ b/arch/arm/mach-prima2/common.h
@@ -24,4 +24,10 @@ static inline void sirfsoc_map_lluart(void) {}
24extern void __init sirfsoc_map_lluart(void); 24extern void __init sirfsoc_map_lluart(void);
25#endif 25#endif
26 26
27#ifdef CONFIG_SUSPEND
28extern int sirfsoc_pm_init(void);
29#else
30static inline int sirfsoc_pm_init(void) { return 0; }
31#endif
32
27#endif 33#endif
diff --git a/arch/arm/mach-prima2/pm.c b/arch/arm/mach-prima2/pm.c
index 26ebb57719df..fb5a7910af35 100644
--- a/arch/arm/mach-prima2/pm.c
+++ b/arch/arm/mach-prima2/pm.c
@@ -85,12 +85,11 @@ static const struct platform_suspend_ops sirfsoc_pm_ops = {
85 .valid = suspend_valid_only_mem, 85 .valid = suspend_valid_only_mem,
86}; 86};
87 87
88static int __init sirfsoc_pm_init(void) 88int __init sirfsoc_pm_init(void)
89{ 89{
90 suspend_set_ops(&sirfsoc_pm_ops); 90 suspend_set_ops(&sirfsoc_pm_ops);
91 return 0; 91 return 0;
92} 92}
93late_initcall(sirfsoc_pm_init);
94 93
95static const struct of_device_id pwrc_ids[] = { 94static const struct of_device_id pwrc_ids[] = {
96 { .compatible = "sirf,prima2-pwrc" }, 95 { .compatible = "sirf,prima2-pwrc" },
diff --git a/arch/arm/mach-prima2/prima2.c b/arch/arm/mach-prima2/prima2.c
index 02b9c05ff990..8f0429d4b79f 100644
--- a/arch/arm/mach-prima2/prima2.c
+++ b/arch/arm/mach-prima2/prima2.c
@@ -25,6 +25,11 @@ void __init sirfsoc_mach_init(void)
25 of_platform_bus_probe(NULL, sirfsoc_of_bus_ids, NULL); 25 of_platform_bus_probe(NULL, sirfsoc_of_bus_ids, NULL);
26} 26}
27 27
28void __init sirfsoc_init_late(void)
29{
30 sirfsoc_pm_init();
31}
32
28static const char *prima2cb_dt_match[] __initdata = { 33static const char *prima2cb_dt_match[] __initdata = {
29 "sirf,prima2-cb", 34 "sirf,prima2-cb",
30 NULL 35 NULL
@@ -39,6 +44,7 @@ MACHINE_START(PRIMA2_EVB, "prima2cb")
39 .timer = &sirfsoc_timer, 44 .timer = &sirfsoc_timer,
40 .dma_zone_size = SZ_256M, 45 .dma_zone_size = SZ_256M,
41 .init_machine = sirfsoc_mach_init, 46 .init_machine = sirfsoc_mach_init,
47 .init_late = sirfsoc_init_late,
42 .dt_compat = prima2cb_dt_match, 48 .dt_compat = prima2cb_dt_match,
43 .restart = sirfsoc_restart, 49 .restart = sirfsoc_restart,
44MACHINE_END 50MACHINE_END
diff --git a/arch/arm/mach-s3c24xx/Kconfig b/arch/arm/mach-s3c24xx/Kconfig
index b34287ab5afd..e24961109b70 100644
--- a/arch/arm/mach-s3c24xx/Kconfig
+++ b/arch/arm/mach-s3c24xx/Kconfig
@@ -518,6 +518,11 @@ config S3C2443_DMA
518 help 518 help
519 Internal config node for S3C2443 DMA support 519 Internal config node for S3C2443 DMA support
520 520
521config S3C2443_SETUP_SPI
522 bool
523 help
524 Common setup code for SPI GPIO configurations
525
521endif # CPU_S3C2443 || CPU_S3C2416 526endif # CPU_S3C2443 || CPU_S3C2416
522 527
523if CPU_S3C2443 528if CPU_S3C2443
diff --git a/arch/arm/mach-s3c24xx/Makefile b/arch/arm/mach-s3c24xx/Makefile
index 3518fe812d5f..0ab6ab15da4c 100644
--- a/arch/arm/mach-s3c24xx/Makefile
+++ b/arch/arm/mach-s3c24xx/Makefile
@@ -14,6 +14,8 @@ obj- :=
14 14
15# core 15# core
16 16
17obj-y += common.o
18
17obj-$(CONFIG_CPU_S3C2410) += s3c2410.o 19obj-$(CONFIG_CPU_S3C2410) += s3c2410.o
18obj-$(CONFIG_S3C2410_DMA) += dma-s3c2410.o 20obj-$(CONFIG_S3C2410_DMA) += dma-s3c2410.o
19obj-$(CONFIG_S3C2410_PM) += pm-s3c2410.o sleep-s3c2410.o 21obj-$(CONFIG_S3C2410_PM) += pm-s3c2410.o sleep-s3c2410.o
@@ -33,6 +35,10 @@ obj-$(CONFIG_S3C2440_DMA) += dma-s3c2440.o
33 35
34obj-$(CONFIG_CPU_S3C2443) += s3c2443.o irq-s3c2443.o clock-s3c2443.o 36obj-$(CONFIG_CPU_S3C2443) += s3c2443.o irq-s3c2443.o clock-s3c2443.o
35 37
38# PM
39
40obj-$(CONFIG_PM) += pm.o irq-pm.o sleep.o
41
36# common code 42# common code
37 43
38obj-$(CONFIG_S3C2443_COMMON) += common-s3c2443.o 44obj-$(CONFIG_S3C2443_COMMON) += common-s3c2443.o
@@ -91,5 +97,6 @@ obj-$(CONFIG_MACH_OSIRIS_DVS) += mach-osiris-dvs.o
91# device setup 97# device setup
92 98
93obj-$(CONFIG_S3C2416_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o 99obj-$(CONFIG_S3C2416_SETUP_SDHCI_GPIO) += setup-sdhci-gpio.o
100obj-$(CONFIG_S3C2443_SETUP_SPI) += setup-spi.o
94obj-$(CONFIG_ARCH_S3C24XX) += setup-i2c.o 101obj-$(CONFIG_ARCH_S3C24XX) += setup-i2c.o
95obj-$(CONFIG_S3C24XX_SETUP_TS) += setup-ts.o 102obj-$(CONFIG_S3C24XX_SETUP_TS) += setup-ts.o
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2416.c b/arch/arm/mach-s3c24xx/clock-s3c2416.c
index dbc9ab4aaca2..8702ecfaab30 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2416.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2416.c
@@ -144,6 +144,7 @@ static struct clk_lookup s3c2416_clk_lookup[] = {
144 CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.0", &hsmmc0_clk), 144 CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.0", &hsmmc0_clk),
145 CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &hsmmc_mux0.clk), 145 CLKDEV_INIT("s3c-sdhci.0", "mmc_busclk.2", &hsmmc_mux0.clk),
146 CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &hsmmc_mux1.clk), 146 CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &hsmmc_mux1.clk),
147 CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk2", &hsspi_mux.clk),
147}; 148};
148 149
149void __init s3c2416_init_clocks(int xtal) 150void __init s3c2416_init_clocks(int xtal)
diff --git a/arch/arm/mach-s3c24xx/clock-s3c2443.c b/arch/arm/mach-s3c24xx/clock-s3c2443.c
index efb3ac359566..a4c5a520d994 100644
--- a/arch/arm/mach-s3c24xx/clock-s3c2443.c
+++ b/arch/arm/mach-s3c24xx/clock-s3c2443.c
@@ -179,6 +179,11 @@ static struct clk *clks[] __initdata = {
179 &clk_hsmmc, 179 &clk_hsmmc,
180}; 180};
181 181
182static struct clk_lookup s3c2443_clk_lookup[] = {
183 CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.2", &clk_hsmmc),
184 CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk2", &clk_hsspi.clk),
185};
186
182void __init s3c2443_init_clocks(int xtal) 187void __init s3c2443_init_clocks(int xtal)
183{ 188{
184 unsigned long epllcon = __raw_readl(S3C2443_EPLLCON); 189 unsigned long epllcon = __raw_readl(S3C2443_EPLLCON);
@@ -210,6 +215,7 @@ void __init s3c2443_init_clocks(int xtal)
210 215
211 s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 216 s3c_register_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
212 s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off)); 217 s3c_disable_clocks(init_clocks_off, ARRAY_SIZE(init_clocks_off));
218 clkdev_add_table(s3c2443_clk_lookup, ARRAY_SIZE(s3c2443_clk_lookup));
213 219
214 s3c_pwmclk_init(); 220 s3c_pwmclk_init();
215} 221}
diff --git a/arch/arm/mach-s3c24xx/common-s3c2443.c b/arch/arm/mach-s3c24xx/common-s3c2443.c
index 460431589f39..aeeb2be283fa 100644
--- a/arch/arm/mach-s3c24xx/common-s3c2443.c
+++ b/arch/arm/mach-s3c24xx/common-s3c2443.c
@@ -424,11 +424,6 @@ static struct clk init_clocks_off[] = {
424 .enable = s3c2443_clkcon_enable_p, 424 .enable = s3c2443_clkcon_enable_p,
425 .ctrlbit = S3C2443_PCLKCON_IIS, 425 .ctrlbit = S3C2443_PCLKCON_IIS,
426 }, { 426 }, {
427 .name = "hsspi",
428 .parent = &clk_p,
429 .enable = s3c2443_clkcon_enable_p,
430 .ctrlbit = S3C2443_PCLKCON_HSSPI,
431 }, {
432 .name = "adc", 427 .name = "adc",
433 .parent = &clk_p, 428 .parent = &clk_p,
434 .enable = s3c2443_clkcon_enable_p, 429 .enable = s3c2443_clkcon_enable_p,
@@ -562,6 +557,14 @@ static struct clk hsmmc1_clk = {
562 .ctrlbit = S3C2443_HCLKCON_HSMMC, 557 .ctrlbit = S3C2443_HCLKCON_HSMMC,
563}; 558};
564 559
560static struct clk hsspi_clk = {
561 .name = "spi",
562 .devname = "s3c64xx-spi.0",
563 .parent = &clk_p,
564 .enable = s3c2443_clkcon_enable_p,
565 .ctrlbit = S3C2443_PCLKCON_HSSPI,
566};
567
565/* EPLLCON compatible enough to get on/off information */ 568/* EPLLCON compatible enough to get on/off information */
566 569
567void __init_or_cpufreq s3c2443_common_setup_clocks(pll_fn get_mpll) 570void __init_or_cpufreq s3c2443_common_setup_clocks(pll_fn get_mpll)
@@ -612,6 +615,7 @@ static struct clk *clks[] __initdata = {
612 &clk_usb_bus, 615 &clk_usb_bus,
613 &clk_armdiv, 616 &clk_armdiv,
614 &hsmmc1_clk, 617 &hsmmc1_clk,
618 &hsspi_clk,
615}; 619};
616 620
617static struct clksrc_clk *clksrcs[] __initdata = { 621static struct clksrc_clk *clksrcs[] __initdata = {
@@ -629,6 +633,7 @@ static struct clk_lookup s3c2443_clk_lookup[] = {
629 CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p), 633 CLKDEV_INIT(NULL, "clk_uart_baud2", &clk_p),
630 CLKDEV_INIT(NULL, "clk_uart_baud3", &clk_esys_uart.clk), 634 CLKDEV_INIT(NULL, "clk_uart_baud3", &clk_esys_uart.clk),
631 CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.0", &hsmmc1_clk), 635 CLKDEV_INIT("s3c-sdhci.1", "mmc_busclk.0", &hsmmc1_clk),
636 CLKDEV_INIT("s3c64xx-spi.0", "spi_busclk0", &hsspi_clk),
632}; 637};
633 638
634void __init s3c2443_common_init_clocks(int xtal, pll_fn get_mpll, 639void __init s3c2443_common_init_clocks(int xtal, pll_fn get_mpll,
diff --git a/arch/arm/plat-s3c24xx/cpu.c b/arch/arm/mach-s3c24xx/common.c
index 290942d9adda..56cdd34cce41 100644
--- a/arch/arm/plat-s3c24xx/cpu.c
+++ b/arch/arm/mach-s3c24xx/common.c
@@ -4,7 +4,7 @@
4 * http://www.simtec.co.uk/products/SWLINUX/ 4 * http://www.simtec.co.uk/products/SWLINUX/
5 * Ben Dooks <ben@simtec.co.uk> 5 * Ben Dooks <ben@simtec.co.uk>
6 * 6 *
7 * S3C24XX CPU Support 7 * Common code for S3C24XX machines
8 * 8 *
9 * This program is free software; you can redistribute it and/or modify 9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by 10 * it under the terms of the GNU General Public License as published by
@@ -41,6 +41,7 @@
41#include <asm/mach/arch.h> 41#include <asm/mach/arch.h>
42#include <asm/mach/map.h> 42#include <asm/mach/map.h>
43 43
44#include <mach/regs-clock.h>
44#include <mach/regs-gpio.h> 45#include <mach/regs-gpio.h>
45#include <plat/regs-serial.h> 46#include <plat/regs-serial.h>
46 47
@@ -52,6 +53,8 @@
52#include <plat/s3c2416.h> 53#include <plat/s3c2416.h>
53#include <plat/s3c244x.h> 54#include <plat/s3c244x.h>
54#include <plat/s3c2443.h> 55#include <plat/s3c2443.h>
56#include <plat/cpu-freq.h>
57#include <plat/pll.h>
55 58
56/* table of supported CPUs */ 59/* table of supported CPUs */
57 60
@@ -234,3 +237,67 @@ void __init s3c24xx_init_io(struct map_desc *mach_desc, int size)
234 237
235 s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids)); 238 s3c_init_cpu(samsung_cpu_id, cpu_ids, ARRAY_SIZE(cpu_ids));
236} 239}
240
241/* Serial port registrations */
242
243static struct resource s3c2410_uart0_resource[] = {
244 [0] = DEFINE_RES_MEM(S3C2410_PA_UART0, SZ_16K),
245 [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX0, \
246 IRQ_S3CUART_ERR0 - IRQ_S3CUART_RX0 + 1, \
247 NULL, IORESOURCE_IRQ)
248};
249
250static struct resource s3c2410_uart1_resource[] = {
251 [0] = DEFINE_RES_MEM(S3C2410_PA_UART1, SZ_16K),
252 [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX1, \
253 IRQ_S3CUART_ERR1 - IRQ_S3CUART_RX1 + 1, \
254 NULL, IORESOURCE_IRQ)
255};
256
257static struct resource s3c2410_uart2_resource[] = {
258 [0] = DEFINE_RES_MEM(S3C2410_PA_UART2, SZ_16K),
259 [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX2, \
260 IRQ_S3CUART_ERR2 - IRQ_S3CUART_RX2 + 1, \
261 NULL, IORESOURCE_IRQ)
262};
263
264static struct resource s3c2410_uart3_resource[] = {
265 [0] = DEFINE_RES_MEM(S3C2443_PA_UART3, SZ_16K),
266 [1] = DEFINE_RES_NAMED(IRQ_S3CUART_RX3, \
267 IRQ_S3CUART_ERR3 - IRQ_S3CUART_RX3 + 1, \
268 NULL, IORESOURCE_IRQ)
269};
270
271struct s3c24xx_uart_resources s3c2410_uart_resources[] __initdata = {
272 [0] = {
273 .resources = s3c2410_uart0_resource,
274 .nr_resources = ARRAY_SIZE(s3c2410_uart0_resource),
275 },
276 [1] = {
277 .resources = s3c2410_uart1_resource,
278 .nr_resources = ARRAY_SIZE(s3c2410_uart1_resource),
279 },
280 [2] = {
281 .resources = s3c2410_uart2_resource,
282 .nr_resources = ARRAY_SIZE(s3c2410_uart2_resource),
283 },
284 [3] = {
285 .resources = s3c2410_uart3_resource,
286 .nr_resources = ARRAY_SIZE(s3c2410_uart3_resource),
287 },
288};
289
290/* initialise all the clocks */
291
292void __init_or_cpufreq s3c24xx_setup_clocks(unsigned long fclk,
293 unsigned long hclk,
294 unsigned long pclk)
295{
296 clk_upll.rate = s3c24xx_get_pll(__raw_readl(S3C2410_UPLLCON),
297 clk_xtal.rate);
298
299 clk_mpll.rate = fclk;
300 clk_h.rate = hclk;
301 clk_p.rate = pclk;
302 clk_f.rate = fclk;
303}
diff --git a/arch/arm/mach-s3c24xx/dma-s3c2443.c b/arch/arm/mach-s3c24xx/dma-s3c2443.c
index e227c472a40a..2d94228d2866 100644
--- a/arch/arm/mach-s3c24xx/dma-s3c2443.c
+++ b/arch/arm/mach-s3c24xx/dma-s3c2443.c
@@ -55,12 +55,20 @@ static struct s3c24xx_dma_map __initdata s3c2443_dma_mappings[] = {
55 .name = "sdi", 55 .name = "sdi",
56 .channels = MAP(S3C2443_DMAREQSEL_SDI), 56 .channels = MAP(S3C2443_DMAREQSEL_SDI),
57 }, 57 },
58 [DMACH_SPI0] = { 58 [DMACH_SPI0_RX] = {
59 .name = "spi0", 59 .name = "spi0-rx",
60 .channels = MAP(S3C2443_DMAREQSEL_SPI0RX),
61 },
62 [DMACH_SPI0_TX] = {
63 .name = "spi0-tx",
60 .channels = MAP(S3C2443_DMAREQSEL_SPI0TX), 64 .channels = MAP(S3C2443_DMAREQSEL_SPI0TX),
61 }, 65 },
62 [DMACH_SPI1] = { /* only on S3C2443/S3C2450 */ 66 [DMACH_SPI1_RX] = { /* only on S3C2443/S3C2450 */
63 .name = "spi1", 67 .name = "spi1-rx",
68 .channels = MAP(S3C2443_DMAREQSEL_SPI1RX),
69 },
70 [DMACH_SPI1_TX] = { /* only on S3C2443/S3C2450 */
71 .name = "spi1-tx",
64 .channels = MAP(S3C2443_DMAREQSEL_SPI1TX), 72 .channels = MAP(S3C2443_DMAREQSEL_SPI1TX),
65 }, 73 },
66 [DMACH_UART0] = { 74 [DMACH_UART0] = {
diff --git a/arch/arm/mach-s3c24xx/include/mach/dma.h b/arch/arm/mach-s3c24xx/include/mach/dma.h
index acbdfecd4186..454831b66037 100644
--- a/arch/arm/mach-s3c24xx/include/mach/dma.h
+++ b/arch/arm/mach-s3c24xx/include/mach/dma.h
@@ -47,6 +47,10 @@ enum dma_ch {
47 DMACH_UART2_SRC2, 47 DMACH_UART2_SRC2,
48 DMACH_UART3, /* s3c2443 has extra uart */ 48 DMACH_UART3, /* s3c2443 has extra uart */
49 DMACH_UART3_SRC2, 49 DMACH_UART3_SRC2,
50 DMACH_SPI0_TX, /* s3c2443/2416/2450 hsspi0 */
51 DMACH_SPI0_RX, /* s3c2443/2416/2450 hsspi0 */
52 DMACH_SPI1_TX, /* s3c2443/2450 hsspi1 */
53 DMACH_SPI1_RX, /* s3c2443/2450 hsspi1 */
50 DMACH_MAX, /* the end entry */ 54 DMACH_MAX, /* the end entry */
51}; 55};
52 56
diff --git a/arch/arm/mach-s3c24xx/include/mach/map.h b/arch/arm/mach-s3c24xx/include/mach/map.h
index 78ae807f1281..8ba381f2dbe1 100644
--- a/arch/arm/mach-s3c24xx/include/mach/map.h
+++ b/arch/arm/mach-s3c24xx/include/mach/map.h
@@ -98,6 +98,8 @@
98 98
99/* SPI */ 99/* SPI */
100#define S3C2410_PA_SPI (0x59000000) 100#define S3C2410_PA_SPI (0x59000000)
101#define S3C2443_PA_SPI0 (0x52000000)
102#define S3C2443_PA_SPI1 S3C2410_PA_SPI
101 103
102/* SDI */ 104/* SDI */
103#define S3C2410_PA_SDI (0x5A000000) 105#define S3C2410_PA_SDI (0x5A000000)
@@ -162,4 +164,7 @@
162#define S3C_PA_WDT S3C2410_PA_WATCHDOG 164#define S3C_PA_WDT S3C2410_PA_WATCHDOG
163#define S3C_PA_NAND S3C24XX_PA_NAND 165#define S3C_PA_NAND S3C24XX_PA_NAND
164 166
167#define S3C_PA_SPI0 S3C2443_PA_SPI0
168#define S3C_PA_SPI1 S3C2443_PA_SPI1
169
165#endif /* __ASM_ARCH_MAP_H */ 170#endif /* __ASM_ARCH_MAP_H */
diff --git a/arch/arm/plat-s3c24xx/irq-pm.c b/arch/arm/mach-s3c24xx/irq-pm.c
index 0efb2e2848c8..0efb2e2848c8 100644
--- a/arch/arm/plat-s3c24xx/irq-pm.c
+++ b/arch/arm/mach-s3c24xx/irq-pm.c
diff --git a/arch/arm/plat-s3c24xx/pm.c b/arch/arm/mach-s3c24xx/pm.c
index 60627e63a254..60627e63a254 100644
--- a/arch/arm/plat-s3c24xx/pm.c
+++ b/arch/arm/mach-s3c24xx/pm.c
diff --git a/arch/arm/mach-s3c24xx/setup-spi.c b/arch/arm/mach-s3c24xx/setup-spi.c
new file mode 100644
index 000000000000..5712c85f39b1
--- /dev/null
+++ b/arch/arm/mach-s3c24xx/setup-spi.c
@@ -0,0 +1,39 @@
1/*
2 * HS-SPI device setup for S3C2443/S3C2416
3 *
4 * Copyright (C) 2011 Samsung Electronics Ltd.
5 * http://www.samsung.com/
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 */
11
12#include <linux/gpio.h>
13#include <linux/platform_device.h>
14
15#include <plat/gpio-cfg.h>
16#include <plat/s3c64xx-spi.h>
17
18#include <mach/hardware.h>
19#include <mach/regs-gpio.h>
20
21#ifdef CONFIG_S3C64XX_DEV_SPI0
22struct s3c64xx_spi_info s3c64xx_spi0_pdata __initdata = {
23 .fifo_lvl_mask = 0x7f,
24 .rx_lvl_offset = 13,
25 .tx_st_done = 21,
26 .high_speed = 1,
27};
28
29int s3c64xx_spi0_cfg_gpio(struct platform_device *pdev)
30{
31 /* enable hsspi bit in misccr */
32 s3c2410_modify_misccr(S3C2416_MISCCR_HSSPI_EN2, 1);
33
34 s3c_gpio_cfgall_range(S3C2410_GPE(11), 3,
35 S3C_GPIO_SFN(2), S3C_GPIO_PULL_UP);
36
37 return 0;
38}
39#endif
diff --git a/arch/arm/plat-s3c24xx/sleep.S b/arch/arm/mach-s3c24xx/sleep.S
index c56612569b40..c56612569b40 100644
--- a/arch/arm/plat-s3c24xx/sleep.S
+++ b/arch/arm/mach-s3c24xx/sleep.S
diff --git a/arch/arm/mach-s3c64xx/common.c b/arch/arm/mach-s3c64xx/common.c
index b313380342a5..be746e33e86c 100644
--- a/arch/arm/mach-s3c64xx/common.c
+++ b/arch/arm/mach-s3c64xx/common.c
@@ -384,3 +384,8 @@ void s3c64xx_restart(char mode, const char *cmd)
384 /* if all else fails, or mode was for soft, jump to 0 */ 384 /* if all else fails, or mode was for soft, jump to 0 */
385 soft_restart(0); 385 soft_restart(0);
386} 386}
387
388void __init s3c64xx_init_late(void)
389{
390 s3c64xx_pm_late_initcall();
391}
diff --git a/arch/arm/mach-s3c64xx/common.h b/arch/arm/mach-s3c64xx/common.h
index 7a10be629aba..6cfc99bdfb37 100644
--- a/arch/arm/mach-s3c64xx/common.h
+++ b/arch/arm/mach-s3c64xx/common.h
@@ -24,6 +24,7 @@ void s3c64xx_register_clocks(unsigned long xtal, unsigned armclk_limit);
24void s3c64xx_setup_clocks(void); 24void s3c64xx_setup_clocks(void);
25 25
26void s3c64xx_restart(char mode, const char *cmd); 26void s3c64xx_restart(char mode, const char *cmd);
27void s3c64xx_init_late(void);
27 28
28#ifdef CONFIG_CPU_S3C6400 29#ifdef CONFIG_CPU_S3C6400
29 30
@@ -51,4 +52,10 @@ extern void s3c6410_init_clocks(int xtal);
51#define s3c6410_init NULL 52#define s3c6410_init NULL
52#endif 53#endif
53 54
55#ifdef CONFIG_PM
56int __init s3c64xx_pm_late_initcall(void);
57#else
58static inline int s3c64xx_pm_late_initcall(void) { return 0; }
59#endif
60
54#endif /* __ARCH_ARM_MACH_S3C64XX_COMMON_H */ 61#endif /* __ARCH_ARM_MACH_S3C64XX_COMMON_H */
diff --git a/arch/arm/mach-s3c64xx/mach-anw6410.c b/arch/arm/mach-s3c64xx/mach-anw6410.c
index f252691fb209..314df0518afd 100644
--- a/arch/arm/mach-s3c64xx/mach-anw6410.c
+++ b/arch/arm/mach-s3c64xx/mach-anw6410.c
@@ -230,6 +230,7 @@ MACHINE_START(ANW6410, "A&W6410")
230 .handle_irq = vic_handle_irq, 230 .handle_irq = vic_handle_irq,
231 .map_io = anw6410_map_io, 231 .map_io = anw6410_map_io,
232 .init_machine = anw6410_machine_init, 232 .init_machine = anw6410_machine_init,
233 .init_late = s3c64xx_init_late,
233 .timer = &s3c24xx_timer, 234 .timer = &s3c24xx_timer,
234 .restart = s3c64xx_restart, 235 .restart = s3c64xx_restart,
235MACHINE_END 236MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-crag6410.c b/arch/arm/mach-s3c64xx/mach-crag6410.c
index aa1137fb47e6..eda5e027b109 100644
--- a/arch/arm/mach-s3c64xx/mach-crag6410.c
+++ b/arch/arm/mach-s3c64xx/mach-crag6410.c
@@ -813,6 +813,7 @@ MACHINE_START(WLF_CRAGG_6410, "Wolfson Cragganmore 6410")
813 .handle_irq = vic_handle_irq, 813 .handle_irq = vic_handle_irq,
814 .map_io = crag6410_map_io, 814 .map_io = crag6410_map_io,
815 .init_machine = crag6410_machine_init, 815 .init_machine = crag6410_machine_init,
816 .init_late = s3c64xx_init_late,
816 .timer = &s3c24xx_timer, 817 .timer = &s3c24xx_timer,
817 .restart = s3c64xx_restart, 818 .restart = s3c64xx_restart,
818MACHINE_END 819MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-hmt.c b/arch/arm/mach-s3c64xx/mach-hmt.c
index 521e07b8501b..1bf6b9da20fc 100644
--- a/arch/arm/mach-s3c64xx/mach-hmt.c
+++ b/arch/arm/mach-s3c64xx/mach-hmt.c
@@ -272,6 +272,7 @@ MACHINE_START(HMT, "Airgoo-HMT")
272 .handle_irq = vic_handle_irq, 272 .handle_irq = vic_handle_irq,
273 .map_io = hmt_map_io, 273 .map_io = hmt_map_io,
274 .init_machine = hmt_machine_init, 274 .init_machine = hmt_machine_init,
275 .init_late = s3c64xx_init_late,
275 .timer = &s3c24xx_timer, 276 .timer = &s3c24xx_timer,
276 .restart = s3c64xx_restart, 277 .restart = s3c64xx_restart,
277MACHINE_END 278MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-mini6410.c b/arch/arm/mach-s3c64xx/mach-mini6410.c
index b2166d4a5538..f8ea61ea3b33 100644
--- a/arch/arm/mach-s3c64xx/mach-mini6410.c
+++ b/arch/arm/mach-s3c64xx/mach-mini6410.c
@@ -339,6 +339,7 @@ MACHINE_START(MINI6410, "MINI6410")
339 .handle_irq = vic_handle_irq, 339 .handle_irq = vic_handle_irq,
340 .map_io = mini6410_map_io, 340 .map_io = mini6410_map_io,
341 .init_machine = mini6410_machine_init, 341 .init_machine = mini6410_machine_init,
342 .init_late = s3c64xx_init_late,
342 .timer = &s3c24xx_timer, 343 .timer = &s3c24xx_timer,
343 .restart = s3c64xx_restart, 344 .restart = s3c64xx_restart,
344MACHINE_END 345MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-ncp.c b/arch/arm/mach-s3c64xx/mach-ncp.c
index 0efa2ba783b2..cad2e05eddf7 100644
--- a/arch/arm/mach-s3c64xx/mach-ncp.c
+++ b/arch/arm/mach-s3c64xx/mach-ncp.c
@@ -104,6 +104,7 @@ MACHINE_START(NCP, "NCP")
104 .handle_irq = vic_handle_irq, 104 .handle_irq = vic_handle_irq,
105 .map_io = ncp_map_io, 105 .map_io = ncp_map_io,
106 .init_machine = ncp_machine_init, 106 .init_machine = ncp_machine_init,
107 .init_late = s3c64xx_init_late,
107 .timer = &s3c24xx_timer, 108 .timer = &s3c24xx_timer,
108 .restart = s3c64xx_restart, 109 .restart = s3c64xx_restart,
109MACHINE_END 110MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-real6410.c b/arch/arm/mach-s3c64xx/mach-real6410.c
index 5c08266cea21..b92d8e17d502 100644
--- a/arch/arm/mach-s3c64xx/mach-real6410.c
+++ b/arch/arm/mach-s3c64xx/mach-real6410.c
@@ -320,6 +320,7 @@ MACHINE_START(REAL6410, "REAL6410")
320 .handle_irq = vic_handle_irq, 320 .handle_irq = vic_handle_irq,
321 .map_io = real6410_map_io, 321 .map_io = real6410_map_io,
322 .init_machine = real6410_machine_init, 322 .init_machine = real6410_machine_init,
323 .init_late = s3c64xx_init_late,
323 .timer = &s3c24xx_timer, 324 .timer = &s3c24xx_timer,
324 .restart = s3c64xx_restart, 325 .restart = s3c64xx_restart,
325MACHINE_END 326MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smartq5.c b/arch/arm/mach-s3c64xx/mach-smartq5.c
index 3f42431d4dda..c5021d0335c6 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq5.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq5.c
@@ -152,6 +152,7 @@ MACHINE_START(SMARTQ5, "SmartQ 5")
152 .handle_irq = vic_handle_irq, 152 .handle_irq = vic_handle_irq,
153 .map_io = smartq_map_io, 153 .map_io = smartq_map_io,
154 .init_machine = smartq5_machine_init, 154 .init_machine = smartq5_machine_init,
155 .init_late = s3c64xx_init_late,
155 .timer = &s3c24xx_timer, 156 .timer = &s3c24xx_timer,
156 .restart = s3c64xx_restart, 157 .restart = s3c64xx_restart,
157MACHINE_END 158MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smartq7.c b/arch/arm/mach-s3c64xx/mach-smartq7.c
index e5c09b6db967..aa9072a4cbef 100644
--- a/arch/arm/mach-s3c64xx/mach-smartq7.c
+++ b/arch/arm/mach-s3c64xx/mach-smartq7.c
@@ -168,6 +168,7 @@ MACHINE_START(SMARTQ7, "SmartQ 7")
168 .handle_irq = vic_handle_irq, 168 .handle_irq = vic_handle_irq,
169 .map_io = smartq_map_io, 169 .map_io = smartq_map_io,
170 .init_machine = smartq7_machine_init, 170 .init_machine = smartq7_machine_init,
171 .init_late = s3c64xx_init_late,
171 .timer = &s3c24xx_timer, 172 .timer = &s3c24xx_timer,
172 .restart = s3c64xx_restart, 173 .restart = s3c64xx_restart,
173MACHINE_END 174MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6400.c b/arch/arm/mach-s3c64xx/mach-smdk6400.c
index 5f096534f4c4..b0f4525c66bd 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6400.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6400.c
@@ -93,6 +93,7 @@ MACHINE_START(SMDK6400, "SMDK6400")
93 .handle_irq = vic_handle_irq, 93 .handle_irq = vic_handle_irq,
94 .map_io = smdk6400_map_io, 94 .map_io = smdk6400_map_io,
95 .init_machine = smdk6400_machine_init, 95 .init_machine = smdk6400_machine_init,
96 .init_late = s3c64xx_init_late,
96 .timer = &s3c24xx_timer, 97 .timer = &s3c24xx_timer,
97 .restart = s3c64xx_restart, 98 .restart = s3c64xx_restart,
98MACHINE_END 99MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/mach-smdk6410.c b/arch/arm/mach-s3c64xx/mach-smdk6410.c
index 7da044f738ac..d44319b09412 100644
--- a/arch/arm/mach-s3c64xx/mach-smdk6410.c
+++ b/arch/arm/mach-s3c64xx/mach-smdk6410.c
@@ -702,6 +702,7 @@ MACHINE_START(SMDK6410, "SMDK6410")
702 .handle_irq = vic_handle_irq, 702 .handle_irq = vic_handle_irq,
703 .map_io = smdk6410_map_io, 703 .map_io = smdk6410_map_io,
704 .init_machine = smdk6410_machine_init, 704 .init_machine = smdk6410_machine_init,
705 .init_late = s3c64xx_init_late,
705 .timer = &s3c24xx_timer, 706 .timer = &s3c24xx_timer,
706 .restart = s3c64xx_restart, 707 .restart = s3c64xx_restart,
707MACHINE_END 708MACHINE_END
diff --git a/arch/arm/mach-s3c64xx/pm.c b/arch/arm/mach-s3c64xx/pm.c
index 7d3e81b9dd06..7feb426fc202 100644
--- a/arch/arm/mach-s3c64xx/pm.c
+++ b/arch/arm/mach-s3c64xx/pm.c
@@ -365,10 +365,9 @@ static __init int s3c64xx_pm_initcall(void)
365} 365}
366arch_initcall(s3c64xx_pm_initcall); 366arch_initcall(s3c64xx_pm_initcall);
367 367
368static __init int s3c64xx_pm_late_initcall(void) 368int __init s3c64xx_pm_late_initcall(void)
369{ 369{
370 pm_genpd_poweroff_unused(); 370 pm_genpd_poweroff_unused();
371 371
372 return 0; 372 return 0;
373} 373}
374late_initcall(s3c64xx_pm_late_initcall);
diff --git a/arch/arm/mach-sa1100/assabet.c b/arch/arm/mach-sa1100/assabet.c
index 375d3f779a88..d1dc7f1a239c 100644
--- a/arch/arm/mach-sa1100/assabet.c
+++ b/arch/arm/mach-sa1100/assabet.c
@@ -538,6 +538,7 @@ MACHINE_START(ASSABET, "Intel-Assabet")
538 .init_irq = sa1100_init_irq, 538 .init_irq = sa1100_init_irq,
539 .timer = &sa1100_timer, 539 .timer = &sa1100_timer,
540 .init_machine = assabet_init, 540 .init_machine = assabet_init,
541 .init_late = sa11x0_init_late,
541#ifdef CONFIG_SA1111 542#ifdef CONFIG_SA1111
542 .dma_zone_size = SZ_1M, 543 .dma_zone_size = SZ_1M,
543#endif 544#endif
diff --git a/arch/arm/mach-sa1100/badge4.c b/arch/arm/mach-sa1100/badge4.c
index e0f0c030258c..b30fb99b587c 100644
--- a/arch/arm/mach-sa1100/badge4.c
+++ b/arch/arm/mach-sa1100/badge4.c
@@ -305,6 +305,7 @@ MACHINE_START(BADGE4, "Hewlett-Packard Laboratories BadgePAD 4")
305 .map_io = badge4_map_io, 305 .map_io = badge4_map_io,
306 .nr_irqs = SA1100_NR_IRQS, 306 .nr_irqs = SA1100_NR_IRQS,
307 .init_irq = sa1100_init_irq, 307 .init_irq = sa1100_init_irq,
308 .init_late = sa11x0_init_late,
308 .timer = &sa1100_timer, 309 .timer = &sa1100_timer,
309#ifdef CONFIG_SA1111 310#ifdef CONFIG_SA1111
310 .dma_zone_size = SZ_1M, 311 .dma_zone_size = SZ_1M,
diff --git a/arch/arm/mach-sa1100/cerf.c b/arch/arm/mach-sa1100/cerf.c
index 4a61f60e0502..09d7f4b4b354 100644
--- a/arch/arm/mach-sa1100/cerf.c
+++ b/arch/arm/mach-sa1100/cerf.c
@@ -134,5 +134,6 @@ MACHINE_START(CERF, "Intrinsyc CerfBoard/CerfCube")
134 .init_irq = cerf_init_irq, 134 .init_irq = cerf_init_irq,
135 .timer = &sa1100_timer, 135 .timer = &sa1100_timer,
136 .init_machine = cerf_init, 136 .init_machine = cerf_init,
137 .init_late = sa11x0_init_late,
137 .restart = sa11x0_restart, 138 .restart = sa11x0_restart,
138MACHINE_END 139MACHINE_END
diff --git a/arch/arm/mach-sa1100/collie.c b/arch/arm/mach-sa1100/collie.c
index c7f418b0cde9..ea5cff38745c 100644
--- a/arch/arm/mach-sa1100/collie.c
+++ b/arch/arm/mach-sa1100/collie.c
@@ -401,5 +401,6 @@ MACHINE_START(COLLIE, "Sharp-Collie")
401 .init_irq = sa1100_init_irq, 401 .init_irq = sa1100_init_irq,
402 .timer = &sa1100_timer, 402 .timer = &sa1100_timer,
403 .init_machine = collie_init, 403 .init_machine = collie_init,
404 .init_late = sa11x0_init_late,
404 .restart = sa11x0_restart, 405 .restart = sa11x0_restart,
405MACHINE_END 406MACHINE_END
diff --git a/arch/arm/mach-sa1100/generic.c b/arch/arm/mach-sa1100/generic.c
index 16be4c56abe3..9db3e98e8b85 100644
--- a/arch/arm/mach-sa1100/generic.c
+++ b/arch/arm/mach-sa1100/generic.c
@@ -359,6 +359,10 @@ static int __init sa1100_init(void)
359 359
360arch_initcall(sa1100_init); 360arch_initcall(sa1100_init);
361 361
362void __init sa11x0_init_late(void)
363{
364 sa11x0_pm_init();
365}
362 366
363/* 367/*
364 * Common I/O mapping: 368 * Common I/O mapping:
diff --git a/arch/arm/mach-sa1100/generic.h b/arch/arm/mach-sa1100/generic.h
index 9eb3b3cd5a63..a5b7c13da3e3 100644
--- a/arch/arm/mach-sa1100/generic.h
+++ b/arch/arm/mach-sa1100/generic.h
@@ -11,6 +11,7 @@ extern void __init sa1100_map_io(void);
11extern void __init sa1100_init_irq(void); 11extern void __init sa1100_init_irq(void);
12extern void __init sa1100_init_gpio(void); 12extern void __init sa1100_init_gpio(void);
13extern void sa11x0_restart(char, const char *); 13extern void sa11x0_restart(char, const char *);
14extern void sa11x0_init_late(void);
14 15
15#define SET_BANK(__nr,__start,__size) \ 16#define SET_BANK(__nr,__start,__size) \
16 mi->bank[__nr].start = (__start), \ 17 mi->bank[__nr].start = (__start), \
@@ -41,3 +42,9 @@ void sa11x0_register_mcp(struct mcp_plat_data *data);
41 42
42struct sa1100fb_mach_info; 43struct sa1100fb_mach_info;
43void sa11x0_register_lcd(struct sa1100fb_mach_info *inf); 44void sa11x0_register_lcd(struct sa1100fb_mach_info *inf);
45
46#ifdef CONFIG_PM
47int sa11x0_pm_init(void);
48#else
49static inline int sa11x0_pm_init(void) { return 0; }
50#endif
diff --git a/arch/arm/mach-sa1100/h3100.c b/arch/arm/mach-sa1100/h3100.c
index b2e8d0f418e0..e1571eab08ae 100644
--- a/arch/arm/mach-sa1100/h3100.c
+++ b/arch/arm/mach-sa1100/h3100.c
@@ -110,6 +110,7 @@ MACHINE_START(H3100, "Compaq iPAQ H3100")
110 .init_irq = sa1100_init_irq, 110 .init_irq = sa1100_init_irq,
111 .timer = &sa1100_timer, 111 .timer = &sa1100_timer,
112 .init_machine = h3100_mach_init, 112 .init_machine = h3100_mach_init,
113 .init_late = sa11x0_init_late,
113 .restart = sa11x0_restart, 114 .restart = sa11x0_restart,
114MACHINE_END 115MACHINE_END
115 116
diff --git a/arch/arm/mach-sa1100/h3600.c b/arch/arm/mach-sa1100/h3600.c
index cb6659f294fe..ba7a2901ab88 100644
--- a/arch/arm/mach-sa1100/h3600.c
+++ b/arch/arm/mach-sa1100/h3600.c
@@ -160,6 +160,7 @@ MACHINE_START(H3600, "Compaq iPAQ H3600")
160 .init_irq = sa1100_init_irq, 160 .init_irq = sa1100_init_irq,
161 .timer = &sa1100_timer, 161 .timer = &sa1100_timer,
162 .init_machine = h3600_mach_init, 162 .init_machine = h3600_mach_init,
163 .init_late = sa11x0_init_late,
163 .restart = sa11x0_restart, 164 .restart = sa11x0_restart,
164MACHINE_END 165MACHINE_END
165 166
diff --git a/arch/arm/mach-sa1100/hackkit.c b/arch/arm/mach-sa1100/hackkit.c
index 5535475bf583..7f86bd911826 100644
--- a/arch/arm/mach-sa1100/hackkit.c
+++ b/arch/arm/mach-sa1100/hackkit.c
@@ -199,5 +199,6 @@ MACHINE_START(HACKKIT, "HackKit Cpu Board")
199 .init_irq = sa1100_init_irq, 199 .init_irq = sa1100_init_irq,
200 .timer = &sa1100_timer, 200 .timer = &sa1100_timer,
201 .init_machine = hackkit_init, 201 .init_machine = hackkit_init,
202 .init_late = sa11x0_init_late,
202 .restart = sa11x0_restart, 203 .restart = sa11x0_restart,
203MACHINE_END 204MACHINE_END
diff --git a/arch/arm/mach-sa1100/jornada720.c b/arch/arm/mach-sa1100/jornada720.c
index ca7a7e834720..e3084f47027d 100644
--- a/arch/arm/mach-sa1100/jornada720.c
+++ b/arch/arm/mach-sa1100/jornada720.c
@@ -348,6 +348,7 @@ MACHINE_START(JORNADA720, "HP Jornada 720")
348 .init_irq = sa1100_init_irq, 348 .init_irq = sa1100_init_irq,
349 .timer = &sa1100_timer, 349 .timer = &sa1100_timer,
350 .init_machine = jornada720_mach_init, 350 .init_machine = jornada720_mach_init,
351 .init_late = sa11x0_init_late,
351#ifdef CONFIG_SA1111 352#ifdef CONFIG_SA1111
352 .dma_zone_size = SZ_1M, 353 .dma_zone_size = SZ_1M,
353#endif 354#endif
diff --git a/arch/arm/mach-sa1100/lart.c b/arch/arm/mach-sa1100/lart.c
index eb6534e0b0d0..b775a0abec0a 100644
--- a/arch/arm/mach-sa1100/lart.c
+++ b/arch/arm/mach-sa1100/lart.c
@@ -147,6 +147,7 @@ MACHINE_START(LART, "LART")
147 .nr_irqs = SA1100_NR_IRQS, 147 .nr_irqs = SA1100_NR_IRQS,
148 .init_irq = sa1100_init_irq, 148 .init_irq = sa1100_init_irq,
149 .init_machine = lart_init, 149 .init_machine = lart_init,
150 .init_late = sa11x0_init_late,
150 .timer = &sa1100_timer, 151 .timer = &sa1100_timer,
151 .restart = sa11x0_restart, 152 .restart = sa11x0_restart,
152MACHINE_END 153MACHINE_END
diff --git a/arch/arm/mach-sa1100/nanoengine.c b/arch/arm/mach-sa1100/nanoengine.c
index 8f6446b9f025..41f69d97066f 100644
--- a/arch/arm/mach-sa1100/nanoengine.c
+++ b/arch/arm/mach-sa1100/nanoengine.c
@@ -112,5 +112,6 @@ MACHINE_START(NANOENGINE, "BSE nanoEngine")
112 .init_irq = sa1100_init_irq, 112 .init_irq = sa1100_init_irq,
113 .timer = &sa1100_timer, 113 .timer = &sa1100_timer,
114 .init_machine = nanoengine_init, 114 .init_machine = nanoengine_init,
115 .init_late = sa11x0_init_late,
115 .restart = sa11x0_restart, 116 .restart = sa11x0_restart,
116MACHINE_END 117MACHINE_END
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index 6c58f01b358a..266db873a4e4 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -89,6 +89,7 @@ void neponset_ncr_frob(unsigned int mask, unsigned int val)
89 WARN(1, "nep_base unset\n"); 89 WARN(1, "nep_base unset\n");
90 } 90 }
91} 91}
92EXPORT_SYMBOL(neponset_ncr_frob);
92 93
93static void neponset_set_mctrl(struct uart_port *port, u_int mctrl) 94static void neponset_set_mctrl(struct uart_port *port, u_int mctrl)
94{ 95{
diff --git a/arch/arm/mach-sa1100/pleb.c b/arch/arm/mach-sa1100/pleb.c
index 1602575a0d5c..37fe0a0a5369 100644
--- a/arch/arm/mach-sa1100/pleb.c
+++ b/arch/arm/mach-sa1100/pleb.c
@@ -135,5 +135,6 @@ MACHINE_START(PLEB, "PLEB")
135 .init_irq = sa1100_init_irq, 135 .init_irq = sa1100_init_irq,
136 .timer = &sa1100_timer, 136 .timer = &sa1100_timer,
137 .init_machine = pleb_init, 137 .init_machine = pleb_init,
138 .init_late = sa11x0_init_late,
138 .restart = sa11x0_restart, 139 .restart = sa11x0_restart,
139MACHINE_END 140MACHINE_END
diff --git a/arch/arm/mach-sa1100/pm.c b/arch/arm/mach-sa1100/pm.c
index 2fa499ec6afe..690cf0ce5c0c 100644
--- a/arch/arm/mach-sa1100/pm.c
+++ b/arch/arm/mach-sa1100/pm.c
@@ -117,10 +117,8 @@ static const struct platform_suspend_ops sa11x0_pm_ops = {
117 .valid = suspend_valid_only_mem, 117 .valid = suspend_valid_only_mem,
118}; 118};
119 119
120static int __init sa11x0_pm_init(void) 120int __init sa11x0_pm_init(void)
121{ 121{
122 suspend_set_ops(&sa11x0_pm_ops); 122 suspend_set_ops(&sa11x0_pm_ops);
123 return 0; 123 return 0;
124} 124}
125
126late_initcall(sa11x0_pm_init);
diff --git a/arch/arm/mach-sa1100/shannon.c b/arch/arm/mach-sa1100/shannon.c
index ca8bf59b9047..5d33fc3108ef 100644
--- a/arch/arm/mach-sa1100/shannon.c
+++ b/arch/arm/mach-sa1100/shannon.c
@@ -104,5 +104,6 @@ MACHINE_START(SHANNON, "Shannon (AKA: Tuxscreen)")
104 .init_irq = sa1100_init_irq, 104 .init_irq = sa1100_init_irq,
105 .timer = &sa1100_timer, 105 .timer = &sa1100_timer,
106 .init_machine = shannon_init, 106 .init_machine = shannon_init,
107 .init_late = sa11x0_init_late,
107 .restart = sa11x0_restart, 108 .restart = sa11x0_restart,
108MACHINE_END 109MACHINE_END
diff --git a/arch/arm/mach-sa1100/simpad.c b/arch/arm/mach-sa1100/simpad.c
index 3efae03cb3d7..fbd53593be54 100644
--- a/arch/arm/mach-sa1100/simpad.c
+++ b/arch/arm/mach-sa1100/simpad.c
@@ -395,6 +395,7 @@ MACHINE_START(SIMPAD, "Simpad")
395 .map_io = simpad_map_io, 395 .map_io = simpad_map_io,
396 .nr_irqs = SA1100_NR_IRQS, 396 .nr_irqs = SA1100_NR_IRQS,
397 .init_irq = sa1100_init_irq, 397 .init_irq = sa1100_init_irq,
398 .init_late = sa11x0_init_late,
398 .timer = &sa1100_timer, 399 .timer = &sa1100_timer,
399 .restart = sa11x0_restart, 400 .restart = sa11x0_restart,
400MACHINE_END 401MACHINE_END
diff --git a/arch/arm/mach-shmobile/Makefile b/arch/arm/mach-shmobile/Makefile
index e6b177bc9410..8aa1962c22a2 100644
--- a/arch/arm/mach-shmobile/Makefile
+++ b/arch/arm/mach-shmobile/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5# Common objects 5# Common objects
6obj-y := timer.o console.o clock.o 6obj-y := timer.o console.o clock.o common.o
7 7
8# CPU objects 8# CPU objects
9obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o 9obj-$(CONFIG_ARCH_SH7367) += setup-sh7367.o clock-sh7367.o intc-sh7367.o
diff --git a/arch/arm/mach-shmobile/board-ag5evm.c b/arch/arm/mach-shmobile/board-ag5evm.c
index 0891ec6e27f5..5a6f22f05e99 100644
--- a/arch/arm/mach-shmobile/board-ag5evm.c
+++ b/arch/arm/mach-shmobile/board-ag5evm.c
@@ -580,5 +580,6 @@ MACHINE_START(AG5EVM, "ag5evm")
580 .init_irq = sh73a0_init_irq, 580 .init_irq = sh73a0_init_irq,
581 .handle_irq = gic_handle_irq, 581 .handle_irq = gic_handle_irq,
582 .init_machine = ag5evm_init, 582 .init_machine = ag5evm_init,
583 .init_late = shmobile_init_late,
583 .timer = &shmobile_timer, 584 .timer = &shmobile_timer,
584MACHINE_END 585MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-ap4evb.c b/arch/arm/mach-shmobile/board-ap4evb.c
index b540b8eb20ca..ace60246a5df 100644
--- a/arch/arm/mach-shmobile/board-ap4evb.c
+++ b/arch/arm/mach-shmobile/board-ap4evb.c
@@ -1469,5 +1469,6 @@ MACHINE_START(AP4EVB, "ap4evb")
1469 .init_irq = sh7372_init_irq, 1469 .init_irq = sh7372_init_irq,
1470 .handle_irq = shmobile_handle_irq_intc, 1470 .handle_irq = shmobile_handle_irq_intc,
1471 .init_machine = ap4evb_init, 1471 .init_machine = ap4evb_init,
1472 .init_late = shmobile_init_late,
1472 .timer = &shmobile_timer, 1473 .timer = &shmobile_timer,
1473MACHINE_END 1474MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-bonito.c b/arch/arm/mach-shmobile/board-bonito.c
index 63ab7062bee3..e9b32cfbf741 100644
--- a/arch/arm/mach-shmobile/board-bonito.c
+++ b/arch/arm/mach-shmobile/board-bonito.c
@@ -500,5 +500,6 @@ MACHINE_START(BONITO, "bonito")
500 .init_irq = r8a7740_init_irq, 500 .init_irq = r8a7740_init_irq,
501 .handle_irq = shmobile_handle_irq_intc, 501 .handle_irq = shmobile_handle_irq_intc,
502 .init_machine = bonito_init, 502 .init_machine = bonito_init,
503 .init_late = shmobile_init_late,
503 .timer = &shmobile_timer, 504 .timer = &shmobile_timer,
504MACHINE_END 505MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-g3evm.c b/arch/arm/mach-shmobile/board-g3evm.c
index 39b6cf85ced6..796fa00ad3c4 100644
--- a/arch/arm/mach-shmobile/board-g3evm.c
+++ b/arch/arm/mach-shmobile/board-g3evm.c
@@ -338,5 +338,6 @@ MACHINE_START(G3EVM, "g3evm")
338 .init_irq = sh7367_init_irq, 338 .init_irq = sh7367_init_irq,
339 .handle_irq = shmobile_handle_irq_intc, 339 .handle_irq = shmobile_handle_irq_intc,
340 .init_machine = g3evm_init, 340 .init_machine = g3evm_init,
341 .init_late = shmobile_init_late,
341 .timer = &shmobile_timer, 342 .timer = &shmobile_timer,
342MACHINE_END 343MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-g4evm.c b/arch/arm/mach-shmobile/board-g4evm.c
index 0e5a39c670bc..f1257321999a 100644
--- a/arch/arm/mach-shmobile/board-g4evm.c
+++ b/arch/arm/mach-shmobile/board-g4evm.c
@@ -381,5 +381,6 @@ MACHINE_START(G4EVM, "g4evm")
381 .init_irq = sh7377_init_irq, 381 .init_irq = sh7377_init_irq,
382 .handle_irq = shmobile_handle_irq_intc, 382 .handle_irq = shmobile_handle_irq_intc,
383 .init_machine = g4evm_init, 383 .init_machine = g4evm_init,
384 .init_late = shmobile_init_late,
384 .timer = &shmobile_timer, 385 .timer = &shmobile_timer,
385MACHINE_END 386MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-kota2.c b/arch/arm/mach-shmobile/board-kota2.c
index 200dcd42a3a0..f60f1b281cc4 100644
--- a/arch/arm/mach-shmobile/board-kota2.c
+++ b/arch/arm/mach-shmobile/board-kota2.c
@@ -521,5 +521,6 @@ MACHINE_START(KOTA2, "kota2")
521 .init_irq = sh73a0_init_irq, 521 .init_irq = sh73a0_init_irq,
522 .handle_irq = gic_handle_irq, 522 .handle_irq = gic_handle_irq,
523 .init_machine = kota2_init, 523 .init_machine = kota2_init,
524 .init_late = shmobile_init_late,
524 .timer = &shmobile_timer, 525 .timer = &shmobile_timer,
525MACHINE_END 526MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-mackerel.c b/arch/arm/mach-shmobile/board-mackerel.c
index 50c67b22d087..b577f7c44678 100644
--- a/arch/arm/mach-shmobile/board-mackerel.c
+++ b/arch/arm/mach-shmobile/board-mackerel.c
@@ -1638,5 +1638,6 @@ MACHINE_START(MACKEREL, "mackerel")
1638 .init_irq = sh7372_init_irq, 1638 .init_irq = sh7372_init_irq,
1639 .handle_irq = shmobile_handle_irq_intc, 1639 .handle_irq = shmobile_handle_irq_intc,
1640 .init_machine = mackerel_init, 1640 .init_machine = mackerel_init,
1641 .init_late = shmobile_init_late,
1641 .timer = &shmobile_timer, 1642 .timer = &shmobile_timer,
1642MACHINE_END 1643MACHINE_END
diff --git a/arch/arm/mach-shmobile/board-marzen.c b/arch/arm/mach-shmobile/board-marzen.c
index ef0e13bf0b3a..14de3787cafc 100644
--- a/arch/arm/mach-shmobile/board-marzen.c
+++ b/arch/arm/mach-shmobile/board-marzen.c
@@ -98,5 +98,6 @@ MACHINE_START(MARZEN, "marzen")
98 .init_irq = r8a7779_init_irq, 98 .init_irq = r8a7779_init_irq,
99 .handle_irq = gic_handle_irq, 99 .handle_irq = gic_handle_irq,
100 .init_machine = marzen_init, 100 .init_machine = marzen_init,
101 .init_late = shmobile_init_late,
101 .timer = &shmobile_timer, 102 .timer = &shmobile_timer,
102MACHINE_END 103MACHINE_END
diff --git a/arch/arm/mach-shmobile/common.c b/arch/arm/mach-shmobile/common.c
new file mode 100644
index 000000000000..608aba9d60d7
--- /dev/null
+++ b/arch/arm/mach-shmobile/common.c
@@ -0,0 +1,24 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License as published by
4 * the Free Software Foundation; version 2 of the License.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
14 *
15 */
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <mach/common.h>
19
20void __init shmobile_init_late(void)
21{
22 shmobile_suspend_init();
23 shmobile_cpuidle_init();
24}
diff --git a/arch/arm/mach-shmobile/cpuidle.c b/arch/arm/mach-shmobile/cpuidle.c
index 7e6559105d40..7b541e911ab4 100644
--- a/arch/arm/mach-shmobile/cpuidle.c
+++ b/arch/arm/mach-shmobile/cpuidle.c
@@ -46,7 +46,7 @@ static struct cpuidle_driver shmobile_cpuidle_driver = {
46 46
47void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv); 47void (*shmobile_cpuidle_setup)(struct cpuidle_driver *drv);
48 48
49static int shmobile_cpuidle_init(void) 49int shmobile_cpuidle_init(void)
50{ 50{
51 struct cpuidle_device *dev = &shmobile_cpuidle_dev; 51 struct cpuidle_device *dev = &shmobile_cpuidle_dev;
52 struct cpuidle_driver *drv = &shmobile_cpuidle_driver; 52 struct cpuidle_driver *drv = &shmobile_cpuidle_driver;
@@ -65,4 +65,3 @@ static int shmobile_cpuidle_init(void)
65 65
66 return 0; 66 return 0;
67} 67}
68late_initcall(shmobile_cpuidle_init);
diff --git a/arch/arm/mach-shmobile/include/mach/common.h b/arch/arm/mach-shmobile/include/mach/common.h
index ff5f12fd742f..01e2bc014f15 100644
--- a/arch/arm/mach-shmobile/include/mach/common.h
+++ b/arch/arm/mach-shmobile/include/mach/common.h
@@ -85,4 +85,18 @@ extern int r8a7779_boot_secondary(unsigned int cpu);
85extern void r8a7779_smp_prepare_cpus(void); 85extern void r8a7779_smp_prepare_cpus(void);
86extern void r8a7779_register_twd(void); 86extern void r8a7779_register_twd(void);
87 87
88extern void shmobile_init_late(void);
89
90#ifdef CONFIG_SUSPEND
91int shmobile_suspend_init(void);
92#else
93static inline int shmobile_suspend_init(void) { return 0; }
94#endif
95
96#ifdef CONFIG_CPU_IDLE
97int shmobile_cpuidle_init(void);
98#else
99static inline int shmobile_cpuidle_init(void) { return 0; }
100#endif
101
88#endif /* __ARCH_MACH_COMMON_H */ 102#endif /* __ARCH_MACH_COMMON_H */
diff --git a/arch/arm/mach-shmobile/suspend.c b/arch/arm/mach-shmobile/suspend.c
index 4d1b86a49923..47d83f7a70b6 100644
--- a/arch/arm/mach-shmobile/suspend.c
+++ b/arch/arm/mach-shmobile/suspend.c
@@ -39,9 +39,8 @@ struct platform_suspend_ops shmobile_suspend_ops = {
39 .valid = suspend_valid_only_mem, 39 .valid = suspend_valid_only_mem,
40}; 40};
41 41
42static int __init shmobile_suspend_init(void) 42int __init shmobile_suspend_init(void)
43{ 43{
44 suspend_set_ops(&shmobile_suspend_ops); 44 suspend_set_ops(&shmobile_suspend_ops);
45 return 0; 45 return 0;
46} 46}
47late_initcall(shmobile_suspend_init);
diff --git a/arch/arm/mach-spear13xx/Kconfig b/arch/arm/mach-spear13xx/Kconfig
new file mode 100644
index 000000000000..eaadc66d96b3
--- /dev/null
+++ b/arch/arm/mach-spear13xx/Kconfig
@@ -0,0 +1,20 @@
1#
2# SPEAr13XX Machine configuration file
3#
4
5if ARCH_SPEAR13XX
6
7menu "SPEAr13xx Implementations"
8config MACH_SPEAR1310
9 bool "SPEAr1310 Machine support with Device Tree"
10 select PINCTRL_SPEAR1310
11 help
12 Supports ST SPEAr1310 machine configured via the device-tree
13
14config MACH_SPEAR1340
15 bool "SPEAr1340 Machine support with Device Tree"
16 select PINCTRL_SPEAR1340
17 help
18 Supports ST SPEAr1340 machine configured via the device-tree
19endmenu
20endif #ARCH_SPEAR13XX
diff --git a/arch/arm/mach-spear13xx/Makefile b/arch/arm/mach-spear13xx/Makefile
new file mode 100644
index 000000000000..3435ea78c15d
--- /dev/null
+++ b/arch/arm/mach-spear13xx/Makefile
@@ -0,0 +1,10 @@
1#
2# Makefile for SPEAr13XX machine series
3#
4
5obj-$(CONFIG_SMP) += headsmp.o platsmp.o
6obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
7
8obj-$(CONFIG_ARCH_SPEAR13XX) += spear13xx.o
9obj-$(CONFIG_MACH_SPEAR1310) += spear1310.o
10obj-$(CONFIG_MACH_SPEAR1340) += spear1340.o
diff --git a/arch/arm/mach-spear13xx/Makefile.boot b/arch/arm/mach-spear13xx/Makefile.boot
new file mode 100644
index 000000000000..403efd7e6d27
--- /dev/null
+++ b/arch/arm/mach-spear13xx/Makefile.boot
@@ -0,0 +1,6 @@
1zreladdr-y += 0x00008000
2params_phys-y := 0x00000100
3initrd_phys-y := 0x00800000
4
5dtb-$(CONFIG_MACH_SPEAR1310) += spear1310-evb.dtb
6dtb-$(CONFIG_MACH_SPEAR1340) += spear1340-evb.dtb
diff --git a/arch/arm/mach-spear13xx/headsmp.S b/arch/arm/mach-spear13xx/headsmp.S
new file mode 100644
index 000000000000..ed85473a047f
--- /dev/null
+++ b/arch/arm/mach-spear13xx/headsmp.S
@@ -0,0 +1,47 @@
1/*
 2 * arch/arm/mach-spear13xx/headsmp.S
3 *
4 * Picked from realview
5 * Copyright (c) 2012 ST Microelectronics Limited
6 * Shiraz Hashim <shiraz.hashim@st.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 */
12
13#include <linux/linkage.h>
14#include <linux/init.h>
15
16 __INIT
17
18/*
19 * spear13xx specific entry point for secondary CPUs. This provides
20 * a "holding pen" into which all secondary cores are held until we're
21 * ready for them to initialise.
22 */
23ENTRY(spear13xx_secondary_startup)
24 mrc p15, 0, r0, c0, c0, 5
25 and r0, r0, #15
26 adr r4, 1f
27 ldmia r4, {r5, r6}
28 sub r4, r4, r5
29 add r6, r6, r4
30pen: ldr r7, [r6]
31 cmp r7, r0
32 bne pen
33
34 /* re-enable coherency */
35 mrc p15, 0, r0, c1, c0, 1
36 orr r0, r0, #(1 << 6) | (1 << 0)
37 mcr p15, 0, r0, c1, c0, 1
38 /*
39 * we've been released from the holding pen: secondary_stack
40 * should now contain the SVC stack for this core
41 */
42 b secondary_startup
43
44 .align
451: .long .
46 .long pen_release
47ENDPROC(spear13xx_secondary_startup)
diff --git a/arch/arm/mach-spear13xx/hotplug.c b/arch/arm/mach-spear13xx/hotplug.c
new file mode 100644
index 000000000000..5c6867b46d09
--- /dev/null
+++ b/arch/arm/mach-spear13xx/hotplug.c
@@ -0,0 +1,119 @@
1/*
2 * linux/arch/arm/mach-spear13xx/hotplug.c
3 *
4 * Copyright (C) 2012 ST Microelectronics Ltd.
5 * Deepak Sikri <deepak.sikri@st.com>
6 *
7 * based upon linux/arch/arm/mach-realview/hotplug.c
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13#include <linux/kernel.h>
14#include <linux/errno.h>
15#include <linux/smp.h>
16#include <asm/cacheflush.h>
17#include <asm/cp15.h>
18#include <asm/smp_plat.h>
19
20extern volatile int pen_release;
21
/*
 * cpu_enter_lowpower - detach this CPU from the coherency domain in
 * preparation for power-down.
 *
 * Flushes all caches first, then in a single asm sequence (so nothing
 * is re-cached in between): invalidates the I-cache (c7,c5,0), clears
 * bit 5 of the auxiliary control register to turn off SMP coherency
 * (the Cortex-A9 SMP bit - TODO confirm for this SoC), and clears
 * CR_C in the system control register to disable the D-cache.
 */
22static inline void cpu_enter_lowpower(void)
23{
24 unsigned int v;
25
26 flush_cache_all();
27 asm volatile(
28 " mcr p15, 0, %1, c7, c5, 0\n"
29 " dsb\n"
30 /*
31 * Turn off coherency
32 */
33 " mrc p15, 0, %0, c1, c0, 1\n"
34 " bic %0, %0, #0x20\n"
35 " mcr p15, 0, %0, c1, c0, 1\n"
36 " mrc p15, 0, %0, c1, c0, 0\n"
37 " bic %0, %0, %2\n"
38 " mcr p15, 0, %0, c1, c0, 0\n"
39 : "=&r" (v)
40 : "r" (0), "Ir" (CR_C)
41 : "cc", "memory");
42}
43
/*
 * cpu_leave_lowpower - undo cpu_enter_lowpower(): re-enable the
 * D-cache (set CR_C in the system control register) and rejoin the SMP
 * coherency domain (set bit 5 of the auxiliary control register).
 * Run when the parked CPU wakes up again (see platform_cpu_die()).
 */
44static inline void cpu_leave_lowpower(void)
45{
46 unsigned int v;
47
48 asm volatile("mrc p15, 0, %0, c1, c0, 0\n"
49 " orr %0, %0, %1\n"
50 " mcr p15, 0, %0, c1, c0, 0\n"
51 " mrc p15, 0, %0, c1, c0, 1\n"
52 " orr %0, %0, #0x20\n"
53 " mcr p15, 0, %0, c1, c0, 1\n"
54 : "=&r" (v)
55 : "Ir" (CR_C)
56 : "cc");
57}
58
/*
 * platform_do_lowpower - park the dying CPU in WFI until it is told to
 * come back.
 *
 * Loops in WFI until the boot CPU writes this CPU's ID into
 * pen_release (see boot_secondary()); any other wakeup is counted in
 * *spurious and reported by the caller.
 *
 * NOTE(review): this compares pen_release against Linux's logical
 * "cpu", while headsmp.S compares pen_release against the hardware
 * MPIDR value - correct only while the two numbering schemes coincide;
 * consider cpu_logical_map(cpu). TODO confirm.
 */
 59static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
 60{
 61 for (;;) {
 62 wfi();
 63
 64 if (pen_release == cpu) {
 65 /*
 66 * OK, proper wakeup, we're done
 67 */
 68 break;
 69 }
 70
 71 /*
 72 * Getting here, means that we have come out of WFI without
 73 * having been woken up - this shouldn't happen
 74 *
 75 * Just note it happening - when we're woken, we can report
 76 * its occurrence.
 77 */
 78 (*spurious)++;
 79 }
 80}
81
/*
 * platform_cpu_kill - asked whether the given CPU was successfully
 * shut down. No per-core power switch is operated on this platform
 * (the dying CPU just parks itself in WFI), so always report success.
 */
 82int platform_cpu_kill(unsigned int cpu)
 83{
 84 return 1;
 85}
86
/*
 * platform_cpu_die - platform-specific code to shut down a CPU.
 *
 * Called with IRQs disabled, on the CPU going offline. Drops out of
 * cache coherency, parks in WFI via platform_do_lowpower(), and on
 * wakeup restores coherency and reports how many spurious (unrequested)
 * wakeups occurred while parked.
 */
 92void __cpuinit platform_cpu_die(unsigned int cpu)
 93{
 94 int spurious = 0;
 95
 96 /*
 97 * we're ready for shutdown now, so do it
 98 */
 99 cpu_enter_lowpower();
 100 platform_do_lowpower(cpu, &spurious);
 101
 102 /*
 103 * bring this CPU back into the world of cache
 104 * coherency, and then restore interrupts
 105 */
 106 cpu_leave_lowpower();
 107
 108 if (spurious)
 109 pr_warn("CPU%u: %u spurious wakeup calls\n", cpu, spurious);
 110}
111
/*
 * platform_cpu_disable - veto hot-unplug of CPUs that must stay online.
 *
 * CPU0 is still too special to take down (it services the clock tick
 * interrupts, among other duties), so refuse with -EPERM; every other
 * core may be unplugged.
 */
int platform_cpu_disable(unsigned int cpu)
{
	if (cpu == 0)
		return -EPERM;

	return 0;
}
diff --git a/arch/arm/mach-spear13xx/include/mach/debug-macro.S b/arch/arm/mach-spear13xx/include/mach/debug-macro.S
new file mode 100644
index 000000000000..ea1564609bd4
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/debug-macro.S
@@ -0,0 +1,14 @@
1/*
2 * arch/arm/mach-spear13xx/include/mach/debug-macro.S
3 *
4 * Debugging macro include header spear13xx machine family
5 *
6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#include <plat/debug-macro.S>
diff --git a/arch/arm/mach-spear13xx/include/mach/dma.h b/arch/arm/mach-spear13xx/include/mach/dma.h
new file mode 100644
index 000000000000..383ab04dc6c9
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/dma.h
@@ -0,0 +1,128 @@
1/*
2 * arch/arm/mach-spear13xx/include/mach/dma.h
3 *
4 * DMA information for SPEAr13xx machine family
5 *
6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#ifndef __MACH_DMA_H
15#define __MACH_DMA_H
16
17/* request id of all the peripherals */
18enum dma_master_info {
19 /* Accessible from only one master */
20 DMA_MASTER_MCIF = 0,
21 DMA_MASTER_FSMC = 1,
22 /* Accessible from both 0 & 1 */
23 DMA_MASTER_MEMORY = 0,
24 DMA_MASTER_ADC = 0,
25 DMA_MASTER_UART0 = 0,
26 DMA_MASTER_SSP0 = 0,
27 DMA_MASTER_I2C0 = 0,
28
29#ifdef CONFIG_MACH_SPEAR1310
30 /* Accessible from only one master */
31 SPEAR1310_DMA_MASTER_JPEG = 1,
32
33 /* Accessible from both 0 & 1 */
34 SPEAR1310_DMA_MASTER_I2S = 0,
35 SPEAR1310_DMA_MASTER_UART1 = 0,
36 SPEAR1310_DMA_MASTER_UART2 = 0,
37 SPEAR1310_DMA_MASTER_UART3 = 0,
38 SPEAR1310_DMA_MASTER_UART4 = 0,
39 SPEAR1310_DMA_MASTER_UART5 = 0,
40 SPEAR1310_DMA_MASTER_I2C1 = 0,
41 SPEAR1310_DMA_MASTER_I2C2 = 0,
42 SPEAR1310_DMA_MASTER_I2C3 = 0,
43 SPEAR1310_DMA_MASTER_I2C4 = 0,
44 SPEAR1310_DMA_MASTER_I2C5 = 0,
45 SPEAR1310_DMA_MASTER_I2C6 = 0,
46 SPEAR1310_DMA_MASTER_I2C7 = 0,
47 SPEAR1310_DMA_MASTER_SSP1 = 0,
48#endif
49
50#ifdef CONFIG_MACH_SPEAR1340
51 /* Accessible from only one master */
52 SPEAR1340_DMA_MASTER_I2S_PLAY = 1,
53 SPEAR1340_DMA_MASTER_I2S_REC = 1,
54 SPEAR1340_DMA_MASTER_I2C1 = 1,
55 SPEAR1340_DMA_MASTER_UART1 = 1,
56
57 /* following are accessible from both master 0 & 1 */
58 SPEAR1340_DMA_MASTER_SPDIF = 0,
59 SPEAR1340_DMA_MASTER_CAM = 1,
60 SPEAR1340_DMA_MASTER_VIDEO_IN = 0,
61 SPEAR1340_DMA_MASTER_MALI = 0,
62#endif
63};
64
65enum request_id {
66 DMA_REQ_ADC = 0,
67 DMA_REQ_SSP0_TX = 4,
68 DMA_REQ_SSP0_RX = 5,
69 DMA_REQ_UART0_TX = 6,
70 DMA_REQ_UART0_RX = 7,
71 DMA_REQ_I2C0_TX = 8,
72 DMA_REQ_I2C0_RX = 9,
73
74#ifdef CONFIG_MACH_SPEAR1310
75 SPEAR1310_DMA_REQ_FROM_JPEG = 2,
76 SPEAR1310_DMA_REQ_TO_JPEG = 3,
77 SPEAR1310_DMA_REQ_I2S_TX = 10,
78 SPEAR1310_DMA_REQ_I2S_RX = 11,
79
80 SPEAR1310_DMA_REQ_I2C1_RX = 0,
81 SPEAR1310_DMA_REQ_I2C1_TX = 1,
82 SPEAR1310_DMA_REQ_I2C2_RX = 2,
83 SPEAR1310_DMA_REQ_I2C2_TX = 3,
84 SPEAR1310_DMA_REQ_I2C3_RX = 4,
85 SPEAR1310_DMA_REQ_I2C3_TX = 5,
86 SPEAR1310_DMA_REQ_I2C4_RX = 6,
87 SPEAR1310_DMA_REQ_I2C4_TX = 7,
88 SPEAR1310_DMA_REQ_I2C5_RX = 8,
89 SPEAR1310_DMA_REQ_I2C5_TX = 9,
90 SPEAR1310_DMA_REQ_I2C6_RX = 10,
91 SPEAR1310_DMA_REQ_I2C6_TX = 11,
92 SPEAR1310_DMA_REQ_UART1_RX = 12,
93 SPEAR1310_DMA_REQ_UART1_TX = 13,
94 SPEAR1310_DMA_REQ_UART2_RX = 14,
95 SPEAR1310_DMA_REQ_UART2_TX = 15,
96 SPEAR1310_DMA_REQ_UART5_RX = 16,
97 SPEAR1310_DMA_REQ_UART5_TX = 17,
98 SPEAR1310_DMA_REQ_SSP1_RX = 18,
99 SPEAR1310_DMA_REQ_SSP1_TX = 19,
100 SPEAR1310_DMA_REQ_I2C7_RX = 20,
101 SPEAR1310_DMA_REQ_I2C7_TX = 21,
102 SPEAR1310_DMA_REQ_UART3_RX = 28,
103 SPEAR1310_DMA_REQ_UART3_TX = 29,
104 SPEAR1310_DMA_REQ_UART4_RX = 30,
105 SPEAR1310_DMA_REQ_UART4_TX = 31,
106#endif
107
108#ifdef CONFIG_MACH_SPEAR1340
109 SPEAR1340_DMA_REQ_SPDIF_TX = 2,
110 SPEAR1340_DMA_REQ_SPDIF_RX = 3,
111 SPEAR1340_DMA_REQ_I2S_TX = 10,
112 SPEAR1340_DMA_REQ_I2S_RX = 11,
113 SPEAR1340_DMA_REQ_UART1_TX = 12,
114 SPEAR1340_DMA_REQ_UART1_RX = 13,
115 SPEAR1340_DMA_REQ_I2C1_TX = 14,
116 SPEAR1340_DMA_REQ_I2C1_RX = 15,
117 SPEAR1340_DMA_REQ_CAM0_EVEN = 0,
118 SPEAR1340_DMA_REQ_CAM0_ODD = 1,
119 SPEAR1340_DMA_REQ_CAM1_EVEN = 2,
120 SPEAR1340_DMA_REQ_CAM1_ODD = 3,
121 SPEAR1340_DMA_REQ_CAM2_EVEN = 4,
122 SPEAR1340_DMA_REQ_CAM2_ODD = 5,
123 SPEAR1340_DMA_REQ_CAM3_EVEN = 6,
124 SPEAR1340_DMA_REQ_CAM3_ODD = 7,
125#endif
126};
127
128#endif /* __MACH_DMA_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/generic.h b/arch/arm/mach-spear13xx/include/mach/generic.h
new file mode 100644
index 000000000000..6d8c45b9f298
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/generic.h
@@ -0,0 +1,49 @@
1/*
2 * arch/arm/mach-spear13xx/include/mach/generic.h
3 *
4 * spear13xx machine family generic header file
5 *
6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#ifndef __MACH_GENERIC_H
15#define __MACH_GENERIC_H
16
17#include <linux/dmaengine.h>
18#include <asm/mach/time.h>
19
20/* Add spear13xx structure declarations here */
21extern struct sys_timer spear13xx_timer;
22extern struct pl022_ssp_controller pl022_plat_data;
23extern struct dw_dma_platform_data dmac_plat_data;
24extern struct dw_dma_slave cf_dma_priv;
25extern struct dw_dma_slave nand_read_dma_priv;
26extern struct dw_dma_slave nand_write_dma_priv;
27
28/* Add spear13xx family function declarations here */
29void __init spear_setup_of_timer(void);
30void __init spear13xx_map_io(void);
31void __init spear13xx_dt_init_irq(void);
32void __init spear13xx_l2x0_init(void);
33bool dw_dma_filter(struct dma_chan *chan, void *slave);
34void spear_restart(char, const char *);
35void spear13xx_secondary_startup(void);
36
37#ifdef CONFIG_MACH_SPEAR1310
38void __init spear1310_clk_init(void);
39#else
40static inline void spear1310_clk_init(void) {}
41#endif
42
43#ifdef CONFIG_MACH_SPEAR1340
44void __init spear1340_clk_init(void);
45#else
46static inline void spear1340_clk_init(void) {}
47#endif
48
49#endif /* __MACH_GENERIC_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/gpio.h b/arch/arm/mach-spear13xx/include/mach/gpio.h
new file mode 100644
index 000000000000..cd6f4f86a56b
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/gpio.h
@@ -0,0 +1,19 @@
1/*
2 * arch/arm/mach-spear13xx/include/mach/gpio.h
3 *
4 * GPIO macros for SPEAr13xx machine family
5 *
6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#ifndef __MACH_GPIO_H
15#define __MACH_GPIO_H
16
17#include <plat/gpio.h>
18
19#endif /* __MACH_GPIO_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/hardware.h b/arch/arm/mach-spear13xx/include/mach/hardware.h
new file mode 100644
index 000000000000..40a8c178f10d
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/hardware.h
@@ -0,0 +1 @@
/* empty */
diff --git a/arch/arm/mach-spear13xx/include/mach/irqs.h b/arch/arm/mach-spear13xx/include/mach/irqs.h
new file mode 100644
index 000000000000..f542a24aa5f2
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/irqs.h
@@ -0,0 +1,20 @@
1/*
2 * arch/arm/mach-spear13xx/include/mach/irqs.h
3 *
4 * IRQ helper macros for spear13xx machine family
5 *
6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#ifndef __MACH_IRQS_H
15#define __MACH_IRQS_H
16
17#define IRQ_GIC_END 160
18#define NR_IRQS IRQ_GIC_END
19
20#endif /* __MACH_IRQS_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/spear.h b/arch/arm/mach-spear13xx/include/mach/spear.h
new file mode 100644
index 000000000000..30c57ef72686
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/spear.h
@@ -0,0 +1,62 @@
1/*
2 * arch/arm/mach-spear13xx/include/mach/spear.h
3 *
4 * spear13xx Machine family specific definition
5 *
6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#ifndef __MACH_SPEAR13XX_H
15#define __MACH_SPEAR13XX_H
16
17#include <asm/memory.h>
18
19#define PERIP_GRP2_BASE UL(0xB3000000)
20#define VA_PERIP_GRP2_BASE UL(0xFE000000)
21#define MCIF_SDHCI_BASE UL(0xB3000000)
22#define SYSRAM0_BASE UL(0xB3800000)
23#define VA_SYSRAM0_BASE UL(0xFE800000)
24#define SYS_LOCATION (VA_SYSRAM0_BASE + 0x600)
25
26#define PERIP_GRP1_BASE UL(0xE0000000)
27#define VA_PERIP_GRP1_BASE UL(0xFD000000)
28#define UART_BASE UL(0xE0000000)
29#define VA_UART_BASE UL(0xFD000000)
30#define SSP_BASE UL(0xE0100000)
31#define MISC_BASE UL(0xE0700000)
32#define VA_MISC_BASE IOMEM(UL(0xFD700000))
33
34#define A9SM_AND_MPMC_BASE UL(0xEC000000)
35#define VA_A9SM_AND_MPMC_BASE UL(0xFC000000)
36
37/* A9SM peripheral offsets */
38#define A9SM_PERIP_BASE UL(0xEC800000)
39#define VA_A9SM_PERIP_BASE UL(0xFC800000)
40#define VA_SCU_BASE (VA_A9SM_PERIP_BASE + 0x00)
41
42#define L2CC_BASE UL(0xED000000)
43#define VA_L2CC_BASE IOMEM(UL(0xFB000000))
44
45/* others */
46#define DMAC0_BASE UL(0xEA800000)
47#define DMAC1_BASE UL(0xEB000000)
48#define MCIF_CF_BASE UL(0xB2800000)
49
50/* Devices present in SPEAr1310 */
51#ifdef CONFIG_MACH_SPEAR1310
52#define SPEAR1310_RAS_GRP1_BASE UL(0xD8000000)
53#define VA_SPEAR1310_RAS_GRP1_BASE UL(0xFA000000)
54#define SPEAR1310_RAS_BASE UL(0xD8400000)
55#define VA_SPEAR1310_RAS_BASE IOMEM(UL(0xFA400000))
56#endif /* CONFIG_MACH_SPEAR1310 */
57
58/* Debug uart for linux, will be used for debug and uncompress messages */
59#define SPEAR_DBG_UART_BASE UART_BASE
60#define VA_SPEAR_DBG_UART_BASE VA_UART_BASE
61
62#endif /* __MACH_SPEAR13XX_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/spear1310_misc_regs.h b/arch/arm/mach-spear13xx/include/mach/spear1310_misc_regs.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/spear1310_misc_regs.h
diff --git a/arch/arm/mach-spear13xx/include/mach/spear1340_misc_regs.h b/arch/arm/mach-spear13xx/include/mach/spear1340_misc_regs.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/spear1340_misc_regs.h
diff --git a/arch/arm/mach-spear13xx/include/mach/timex.h b/arch/arm/mach-spear13xx/include/mach/timex.h
new file mode 100644
index 000000000000..31af3e8d976e
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/timex.h
@@ -0,0 +1,19 @@
1/*
 2 * arch/arm/mach-spear13xx/include/mach/timex.h
 3 *
 4 * SPEAr13xx machine family specific timex definitions
5 *
6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#ifndef __MACH_TIMEX_H
15#define __MACH_TIMEX_H
16
17#include <plat/timex.h>
18
19#endif /* __MACH_TIMEX_H */
diff --git a/arch/arm/mach-spear13xx/include/mach/uncompress.h b/arch/arm/mach-spear13xx/include/mach/uncompress.h
new file mode 100644
index 000000000000..c7840896ae6e
--- /dev/null
+++ b/arch/arm/mach-spear13xx/include/mach/uncompress.h
@@ -0,0 +1,19 @@
1/*
2 * arch/arm/mach-spear13xx/include/mach/uncompress.h
3 *
4 * Serial port stubs for kernel decompress status messages
5 *
6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#ifndef __MACH_UNCOMPRESS_H
15#define __MACH_UNCOMPRESS_H
16
17#include <plat/uncompress.h>
18
19#endif /* __MACH_UNCOMPRESS_H */
diff --git a/arch/arm/mach-spear13xx/platsmp.c b/arch/arm/mach-spear13xx/platsmp.c
new file mode 100644
index 000000000000..f5d07f2663d7
--- /dev/null
+++ b/arch/arm/mach-spear13xx/platsmp.c
@@ -0,0 +1,127 @@
1/*
2 * arch/arm/mach-spear13xx/platsmp.c
3 *
4 * based upon linux/arch/arm/mach-realview/platsmp.c
5 *
6 * Copyright (C) 2012 ST Microelectronics Ltd.
7 * Shiraz Hashim <shiraz.hashim@st.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 */
13
14#include <linux/delay.h>
15#include <linux/jiffies.h>
16#include <linux/io.h>
17#include <linux/smp.h>
18#include <asm/cacheflush.h>
19#include <asm/hardware/gic.h>
20#include <asm/smp_scu.h>
21#include <mach/spear.h>
22
23/*
24 * control for which core is the next to come out of the secondary
25 * boot "holding pen"
26 */
27volatile int __cpuinitdata pen_release = -1;
28static DEFINE_SPINLOCK(boot_lock);
29
30static void __iomem *scu_base = IOMEM(VA_SCU_BASE);
31extern void spear13xx_secondary_startup(void);
32
/*
 * platform_secondary_init - first C code run by a freshly released
 * secondary CPU.
 *
 * Enables this core's GIC CPU interface, signals the boot CPU that we
 * have left the holding pen (pen_release back to -1, ordered with
 * smp_wmb()), then briefly takes boot_lock to serialise against
 * boot_secondary() on the boot CPU.
 */
 33void __cpuinit platform_secondary_init(unsigned int cpu)
 34{
 35 /*
 36 * if any interrupts are already enabled for the primary
 37 * core (e.g. timer irq), then they will not have been enabled
 38 * for us: do so
 39 */
 40 gic_secondary_init(0);
 41
 42 /*
 43 * let the primary processor know we're out of the
 44 * pen, then head off into the C entry point
 45 */
 46 pen_release = -1;
 47 smp_wmb();
 48
 49 /*
 50 * Synchronise with the boot thread.
 51 */
 52 spin_lock(&boot_lock);
 53 spin_unlock(&boot_lock);
 54}
55
/*
 * boot_secondary - release one secondary CPU from the holding pen and
 * wait (up to 1s, polling every 10us) for it to acknowledge by
 * resetting pen_release to -1. Returns 0 on success, -ENOSYS if the
 * CPU never checked in.
 */
 56int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 57{
 58 unsigned long timeout;
 59
 60 /*
 61 * set synchronisation state between this boot processor
 62 * and the secondary one
 63 */
 64 spin_lock(&boot_lock);
 65
 66 /*
 67 * The secondary processor is waiting to be released from
 68 * the holding pen - release it, then wait for it to flag
 69 * that it has been released by resetting pen_release.
 70 *
 71 * Note that "pen_release" is the hardware CPU ID, whereas
 72 * "cpu" is Linux's internal ID.
 73 */
 /*
 * NOTE(review): despite the comment above, what is stored here is
 * Linux's logical "cpu", not the hardware ID that headsmp.S compares
 * against MPIDR - fine only while the two coincide; consider
 * pen_release = cpu_logical_map(cpu). TODO confirm.
 */
 74 pen_release = cpu;
 75 flush_cache_all();
 76 outer_flush_all();
 77
 78 timeout = jiffies + (1 * HZ);
 79 while (time_before(jiffies, timeout)) {
 80 smp_rmb();
 81 if (pen_release == -1)
 82 break;
 83
 84 udelay(10);
 85 }
 86
 87 /*
 88 * now the secondary core is starting up let it run its
 89 * calibrations, then wait for it to finish
 90 */
 91 spin_unlock(&boot_lock);
 92
 93 return pen_release != -1 ? -ENOSYS : 0;
 94}
95
96/*
97 * Initialise the CPU possible map early - this describes the CPUs
98 * which may be present or become present in the system.
99 */
100void __init smp_init_cpus(void)
101{
102 unsigned int i, ncores = scu_get_core_count(scu_base);
103
104 if (ncores > nr_cpu_ids) {
105 pr_warn("SMP: %u cores greater than maximum (%u), clipping\n",
106 ncores, nr_cpu_ids);
107 ncores = nr_cpu_ids;
108 }
109
110 for (i = 0; i < ncores; i++)
111 set_cpu_possible(i, true);
112
113 set_smp_cross_call(gic_raise_softirq);
114}
115
/*
 * platform_smp_prepare_cpus - enable the SCU and publish the physical
 * entry point for the secondary CPUs.
 *
 * The startup address is written to SYS_LOCATION in SRAM; per the
 * comment below, the BootMonitor branches the secondaries there once
 * they receive a soft interrupt.
 */
 116void __init platform_smp_prepare_cpus(unsigned int max_cpus)
 117{
 118
 119 scu_enable(scu_base);
 120
 121 /*
 122 * Write the address of secondary startup into the system-wide location
 123 * (presently it is in SRAM). The BootMonitor waits until it receives a
 124 * soft interrupt, and then the secondary CPU branches to this address.
 125 */
 126 __raw_writel(virt_to_phys(spear13xx_secondary_startup), SYS_LOCATION);
 127}
diff --git a/arch/arm/mach-spear13xx/spear1310.c b/arch/arm/mach-spear13xx/spear1310.c
new file mode 100644
index 000000000000..fefd15b2f380
--- /dev/null
+++ b/arch/arm/mach-spear13xx/spear1310.c
@@ -0,0 +1,88 @@
1/*
2 * arch/arm/mach-spear13xx/spear1310.c
3 *
4 * SPEAr1310 machine source file
5 *
6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#define pr_fmt(fmt) "SPEAr1310: " fmt
15
16#include <linux/amba/pl022.h>
17#include <linux/of_platform.h>
18#include <asm/hardware/gic.h>
19#include <asm/mach/arch.h>
20#include <asm/mach/map.h>
21#include <mach/generic.h>
22#include <mach/spear.h>
23
24/* Base addresses */
25#define SPEAR1310_SSP1_BASE UL(0x5D400000)
26#define SPEAR1310_SATA0_BASE UL(0xB1000000)
27#define SPEAR1310_SATA1_BASE UL(0xB1800000)
28#define SPEAR1310_SATA2_BASE UL(0xB4000000)
29
30/* ssp device registration */
31static struct pl022_ssp_controller ssp1_plat_data = {
32 .bus_id = 0,
33 .enable_dma = 0,
34 .num_chipselect = 3,
35};
36
37/* Add SPEAr1310 auxdata to pass platform data */
38static struct of_dev_auxdata spear1310_auxdata_lookup[] __initdata = {
39 OF_DEV_AUXDATA("arasan,cf-spear1340", MCIF_CF_BASE, NULL, &cf_dma_priv),
40 OF_DEV_AUXDATA("snps,dma-spear1340", DMAC0_BASE, NULL, &dmac_plat_data),
41 OF_DEV_AUXDATA("snps,dma-spear1340", DMAC1_BASE, NULL, &dmac_plat_data),
42 OF_DEV_AUXDATA("arm,pl022", SSP_BASE, NULL, &pl022_plat_data),
43
44 OF_DEV_AUXDATA("arm,pl022", SPEAR1310_SSP1_BASE, NULL, &ssp1_plat_data),
45 {}
46};
47
/*
 * spear1310_dt_init - machine init: instantiate devices from the
 * device tree, passing the auxdata table so the CF/DMA/SSP platform
 * data above reaches the matching drivers.
 */
 48static void __init spear1310_dt_init(void)
 49{
 50 of_platform_populate(NULL, of_default_bus_match_table,
 51 spear1310_auxdata_lookup, NULL);
 52}
53
54static const char * const spear1310_dt_board_compat[] = {
55 "st,spear1310",
56 "st,spear1310-evb",
57 NULL,
58};
59
 60/*
 61 * Following will create 16MB static virtual/physical mappings
 62 * PHYSICAL VIRTUAL
 63 * 0xD8000000 0xFA000000
 64 */
 65struct map_desc spear1310_io_desc[] __initdata = {
 66 {
 67 .virtual = VA_SPEAR1310_RAS_GRP1_BASE,
 68 .pfn = __phys_to_pfn(SPEAR1310_RAS_GRP1_BASE),
 69 .length = SZ_16M,
 70 .type = MT_DEVICE
 71 },
 72};
 73
/*
 * spear1310_map_io - register the SPEAr1310-specific RAS static
 * mapping above, then the common SPEAr13xx I/O mappings.
 */
 74static void __init spear1310_map_io(void)
 75{
 76 iotable_init(spear1310_io_desc, ARRAY_SIZE(spear1310_io_desc));
 77 spear13xx_map_io();
 78}
79
80DT_MACHINE_START(SPEAR1310_DT, "ST SPEAr1310 SoC with Flattened Device Tree")
81 .map_io = spear1310_map_io,
82 .init_irq = spear13xx_dt_init_irq,
83 .handle_irq = gic_handle_irq,
84 .timer = &spear13xx_timer,
85 .init_machine = spear1310_dt_init,
86 .restart = spear_restart,
87 .dt_compat = spear1310_dt_board_compat,
88MACHINE_END
diff --git a/arch/arm/mach-spear13xx/spear1340.c b/arch/arm/mach-spear13xx/spear1340.c
new file mode 100644
index 000000000000..ee38cbc56869
--- /dev/null
+++ b/arch/arm/mach-spear13xx/spear1340.c
@@ -0,0 +1,192 @@
1/*
2 * arch/arm/mach-spear13xx/spear1340.c
3 *
4 * SPEAr1340 machine source file
5 *
6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#define pr_fmt(fmt) "SPEAr1340: " fmt
15
16#include <linux/ahci_platform.h>
17#include <linux/amba/serial.h>
18#include <linux/delay.h>
19#include <linux/dw_dmac.h>
20#include <linux/of_platform.h>
21#include <asm/hardware/gic.h>
22#include <asm/mach/arch.h>
23#include <mach/dma.h>
24#include <mach/generic.h>
25#include <mach/spear.h>
26
27/* Base addresses */
28#define SPEAR1340_SATA_BASE UL(0xB1000000)
29#define SPEAR1340_UART1_BASE UL(0xB4100000)
30
31/* Power Management Registers */
32#define SPEAR1340_PCM_CFG (VA_MISC_BASE + 0x100)
33#define SPEAR1340_PCM_WKUP_CFG (VA_MISC_BASE + 0x104)
34#define SPEAR1340_SWITCH_CTR (VA_MISC_BASE + 0x108)
35
36#define SPEAR1340_PERIP1_SW_RST (VA_MISC_BASE + 0x318)
37#define SPEAR1340_PERIP2_SW_RST (VA_MISC_BASE + 0x31C)
38#define SPEAR1340_PERIP3_SW_RST (VA_MISC_BASE + 0x320)
39
40/* PCIE - SATA configuration registers */
41#define SPEAR1340_PCIE_SATA_CFG (VA_MISC_BASE + 0x424)
42 /* PCIE CFG MASks */
43 #define SPEAR1340_PCIE_CFG_DEVICE_PRESENT (1 << 11)
44 #define SPEAR1340_PCIE_CFG_POWERUP_RESET (1 << 10)
45 #define SPEAR1340_PCIE_CFG_CORE_CLK_EN (1 << 9)
46 #define SPEAR1340_PCIE_CFG_AUX_CLK_EN (1 << 8)
47 #define SPEAR1340_SATA_CFG_TX_CLK_EN (1 << 4)
48 #define SPEAR1340_SATA_CFG_RX_CLK_EN (1 << 3)
49 #define SPEAR1340_SATA_CFG_POWERUP_RESET (1 << 2)
50 #define SPEAR1340_SATA_CFG_PM_CLK_EN (1 << 1)
51 #define SPEAR1340_PCIE_SATA_SEL_PCIE (0)
52 #define SPEAR1340_PCIE_SATA_SEL_SATA (1)
53 #define SPEAR1340_SATA_PCIE_CFG_MASK 0xF1F
54 #define SPEAR1340_PCIE_CFG_VAL (SPEAR1340_PCIE_SATA_SEL_PCIE | \
55 SPEAR1340_PCIE_CFG_AUX_CLK_EN | \
56 SPEAR1340_PCIE_CFG_CORE_CLK_EN | \
57 SPEAR1340_PCIE_CFG_POWERUP_RESET | \
58 SPEAR1340_PCIE_CFG_DEVICE_PRESENT)
59 #define SPEAR1340_SATA_CFG_VAL (SPEAR1340_PCIE_SATA_SEL_SATA | \
60 SPEAR1340_SATA_CFG_PM_CLK_EN | \
61 SPEAR1340_SATA_CFG_POWERUP_RESET | \
62 SPEAR1340_SATA_CFG_RX_CLK_EN | \
63 SPEAR1340_SATA_CFG_TX_CLK_EN)
64
65#define SPEAR1340_PCIE_MIPHY_CFG (VA_MISC_BASE + 0x428)
66 #define SPEAR1340_MIPHY_OSC_BYPASS_EXT (1 << 31)
67 #define SPEAR1340_MIPHY_CLK_REF_DIV2 (1 << 27)
68 #define SPEAR1340_MIPHY_CLK_REF_DIV4 (2 << 27)
69 #define SPEAR1340_MIPHY_CLK_REF_DIV8 (3 << 27)
70 #define SPEAR1340_MIPHY_PLL_RATIO_TOP(x) (x << 0)
71 #define SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA \
72 (SPEAR1340_MIPHY_OSC_BYPASS_EXT | \
73 SPEAR1340_MIPHY_CLK_REF_DIV2 | \
74 SPEAR1340_MIPHY_PLL_RATIO_TOP(60))
75 #define SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK \
76 (SPEAR1340_MIPHY_PLL_RATIO_TOP(120))
77 #define SPEAR1340_PCIE_SATA_MIPHY_CFG_PCIE \
78 (SPEAR1340_MIPHY_OSC_BYPASS_EXT | \
79 SPEAR1340_MIPHY_PLL_RATIO_TOP(25))
80
81static struct dw_dma_slave uart1_dma_param[] = {
82 {
83 /* Tx */
84 .cfg_hi = DWC_CFGH_DST_PER(SPEAR1340_DMA_REQ_UART1_TX),
85 .cfg_lo = 0,
86 .src_master = DMA_MASTER_MEMORY,
87 .dst_master = SPEAR1340_DMA_MASTER_UART1,
88 }, {
89 /* Rx */
90 .cfg_hi = DWC_CFGH_SRC_PER(SPEAR1340_DMA_REQ_UART1_RX),
91 .cfg_lo = 0,
92 .src_master = SPEAR1340_DMA_MASTER_UART1,
93 .dst_master = DMA_MASTER_MEMORY,
94 }
95};
96
97static struct amba_pl011_data uart1_data = {
98 .dma_filter = dw_dma_filter,
99 .dma_tx_param = &uart1_dma_param[0],
100 .dma_rx_param = &uart1_dma_param[1],
101};
102
103/* SATA device registration */
/*
 * sata_miphy_init - switch the shared PCIe/SATA MIPHY into SATA mode
 * and bring the SATA controller out of reset.
 *
 * Sequence (order matters): select SATA and enable its clocks, program
 * the MIPHY for the 25 MHz crystal reference, power on the SATA power
 * domain, then release the controller reset. The msleep(20) calls let
 * each step settle (presumably a hardware requirement - TODO confirm
 * the exact delays).
 *
 * @dev and @addr are unused; the signature matches the
 * ahci_platform_data->init hook (see sata_pdata below). Always
 * returns 0.
 */
 104static int sata_miphy_init(struct device *dev, void __iomem *addr)
 105{
 106 writel(SPEAR1340_SATA_CFG_VAL, SPEAR1340_PCIE_SATA_CFG);
 107 writel(SPEAR1340_PCIE_SATA_MIPHY_CFG_SATA_25M_CRYSTAL_CLK,
 108 SPEAR1340_PCIE_MIPHY_CFG);
 109 /* Switch on sata power domain */
 110 writel((readl(SPEAR1340_PCM_CFG) | (0x800)), SPEAR1340_PCM_CFG);
 111 msleep(20);
 112 /* Disable PCIE SATA Controller reset */
 113 writel((readl(SPEAR1340_PERIP1_SW_RST) & (~0x1000)),
 114 SPEAR1340_PERIP1_SW_RST);
 115 msleep(20);
 116
 117 return 0;
 118}
119
/*
 * sata_miphy_exit - reverse of sata_miphy_init(): clear the PHY and
 * SATA configuration registers, put the controller back into reset,
 * then switch off the SATA power domain, with settle delays between
 * the steps.
 *
 * NOTE(review): not static although it is only referenced through
 * sata_pdata in this file - consider making it static; TODO confirm
 * there are no external users.
 */
 120void sata_miphy_exit(struct device *dev)
 121{
 122 writel(0, SPEAR1340_PCIE_SATA_CFG);
 123 writel(0, SPEAR1340_PCIE_MIPHY_CFG);
 124
 125 /* Enable PCIE SATA Controller reset */
 126 writel((readl(SPEAR1340_PERIP1_SW_RST) | (0x1000)),
 127 SPEAR1340_PERIP1_SW_RST);
 128 msleep(20);
 129 /* Switch off sata power domain */
 130 writel((readl(SPEAR1340_PCM_CFG) & (~0x800)), SPEAR1340_PCM_CFG);
 131 msleep(20);
 132}
133
134int sata_suspend(struct device *dev)
135{
136 if (dev->power.power_state.event == PM_EVENT_FREEZE)
137 return 0;
138
139 sata_miphy_exit(dev);
140
141 return 0;
142}
143
144int sata_resume(struct device *dev)
145{
146 if (dev->power.power_state.event == PM_EVENT_THAW)
147 return 0;
148
149 return sata_miphy_init(dev, NULL);
150}
151
152static struct ahci_platform_data sata_pdata = {
153 .init = sata_miphy_init,
154 .exit = sata_miphy_exit,
155 .suspend = sata_suspend,
156 .resume = sata_resume,
157};
158
159/* Add SPEAr1340 auxdata to pass platform data */
160static struct of_dev_auxdata spear1340_auxdata_lookup[] __initdata = {
161 OF_DEV_AUXDATA("arasan,cf-spear1340", MCIF_CF_BASE, NULL, &cf_dma_priv),
162 OF_DEV_AUXDATA("snps,dma-spear1340", DMAC0_BASE, NULL, &dmac_plat_data),
163 OF_DEV_AUXDATA("snps,dma-spear1340", DMAC1_BASE, NULL, &dmac_plat_data),
164 OF_DEV_AUXDATA("arm,pl022", SSP_BASE, NULL, &pl022_plat_data),
165
166 OF_DEV_AUXDATA("snps,spear-ahci", SPEAR1340_SATA_BASE, NULL,
167 &sata_pdata),
168 OF_DEV_AUXDATA("arm,pl011", SPEAR1340_UART1_BASE, NULL, &uart1_data),
169 {}
170};
171
/*
 * spear1340_dt_init - machine init: instantiate devices from the
 * device tree, passing the auxdata table so the CF/DMA/SSP/SATA/UART
 * platform data above reaches the matching drivers.
 */
 172static void __init spear1340_dt_init(void)
 173{
 174 of_platform_populate(NULL, of_default_bus_match_table,
 175 spear1340_auxdata_lookup, NULL);
 176}
177
178static const char * const spear1340_dt_board_compat[] = {
179 "st,spear1340",
180 "st,spear1340-evb",
181 NULL,
182};
183
184DT_MACHINE_START(SPEAR1340_DT, "ST SPEAr1340 SoC with Flattened Device Tree")
185 .map_io = spear13xx_map_io,
186 .init_irq = spear13xx_dt_init_irq,
187 .handle_irq = gic_handle_irq,
188 .timer = &spear13xx_timer,
189 .init_machine = spear1340_dt_init,
190 .restart = spear_restart,
191 .dt_compat = spear1340_dt_board_compat,
192MACHINE_END
diff --git a/arch/arm/mach-spear13xx/spear13xx.c b/arch/arm/mach-spear13xx/spear13xx.c
new file mode 100644
index 000000000000..50b349ae863d
--- /dev/null
+++ b/arch/arm/mach-spear13xx/spear13xx.c
@@ -0,0 +1,197 @@
1/*
2 * arch/arm/mach-spear13xx/spear13xx.c
3 *
4 * SPEAr13XX machines common source file
5 *
6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#define pr_fmt(fmt) "SPEAr13xx: " fmt
15
16#include <linux/amba/pl022.h>
17#include <linux/clk.h>
18#include <linux/dw_dmac.h>
19#include <linux/err.h>
20#include <linux/of_irq.h>
21#include <asm/hardware/cache-l2x0.h>
22#include <asm/hardware/gic.h>
23#include <asm/mach/map.h>
24#include <asm/smp_twd.h>
25#include <mach/dma.h>
26#include <mach/generic.h>
27#include <mach/spear.h>
28
29/* common dw_dma filter routine to be used by peripherals */
30bool dw_dma_filter(struct dma_chan *chan, void *slave)
31{
32 struct dw_dma_slave *dws = (struct dw_dma_slave *)slave;
33
34 if (chan->device->dev == dws->dma_dev) {
35 chan->private = slave;
36 return true;
37 } else {
38 return false;
39 }
40}
41
42/* ssp device registration */
43static struct dw_dma_slave ssp_dma_param[] = {
44 {
45 /* Tx */
46 .cfg_hi = DWC_CFGH_DST_PER(DMA_REQ_SSP0_TX),
47 .cfg_lo = 0,
48 .src_master = DMA_MASTER_MEMORY,
49 .dst_master = DMA_MASTER_SSP0,
50 }, {
51 /* Rx */
52 .cfg_hi = DWC_CFGH_SRC_PER(DMA_REQ_SSP0_RX),
53 .cfg_lo = 0,
54 .src_master = DMA_MASTER_SSP0,
55 .dst_master = DMA_MASTER_MEMORY,
56 }
57};
58
59struct pl022_ssp_controller pl022_plat_data = {
60 .bus_id = 0,
61 .enable_dma = 1,
62 .dma_filter = dw_dma_filter,
63 .dma_rx_param = &ssp_dma_param[1],
64 .dma_tx_param = &ssp_dma_param[0],
65 .num_chipselect = 3,
66};
67
68/* CF device registration */
69struct dw_dma_slave cf_dma_priv = {
70 .cfg_hi = 0,
71 .cfg_lo = 0,
72 .src_master = 0,
73 .dst_master = 0,
74};
75
76/* dmac device registration */
77struct dw_dma_platform_data dmac_plat_data = {
78 .nr_channels = 8,
79 .chan_allocation_order = CHAN_ALLOCATION_DESCENDING,
80 .chan_priority = CHAN_PRIORITY_DESCENDING,
81};
82
83void __init spear13xx_l2x0_init(void)
84{
85 /*
86 * 512KB (64KB/way), 8-way associativity, parity supported
87 *
88 * FIXME: 9th bit of the Auxiliary Control register must be set
89 * for some spear13xx devices for stable L2 operation.
90 *
91 * Enable Early BRESP, L2 prefetch for Instruction and Data,
92 * write alloc and 'Full line of zero' options
93 *
94 */
95
96 writel_relaxed(0x06, VA_L2CC_BASE + L2X0_PREFETCH_CTRL);
97
98 /*
99 * Program following latencies in order to make
100 * SPEAr1340 work at 600 MHz
101 */
102 writel_relaxed(0x221, VA_L2CC_BASE + L2X0_TAG_LATENCY_CTRL);
103 writel_relaxed(0x441, VA_L2CC_BASE + L2X0_DATA_LATENCY_CTRL);
104 l2x0_init(VA_L2CC_BASE, 0x70A60001, 0xfe00ffff);
105}
106
107/*
108 * Following will create 16MB static virtual/physical mappings
109 * PHYSICAL VIRTUAL
110 * 0xB3000000 0xFE000000
111 * 0xE0000000 0xFD000000
112 * 0xEC000000 0xFC000000
113 * 0xED000000 0xFB000000
114 */
115struct map_desc spear13xx_io_desc[] __initdata = {
116 {
117 .virtual = VA_PERIP_GRP2_BASE,
118 .pfn = __phys_to_pfn(PERIP_GRP2_BASE),
119 .length = SZ_16M,
120 .type = MT_DEVICE
121 }, {
122 .virtual = VA_PERIP_GRP1_BASE,
123 .pfn = __phys_to_pfn(PERIP_GRP1_BASE),
124 .length = SZ_16M,
125 .type = MT_DEVICE
126 }, {
127 .virtual = VA_A9SM_AND_MPMC_BASE,
128 .pfn = __phys_to_pfn(A9SM_AND_MPMC_BASE),
129 .length = SZ_16M,
130 .type = MT_DEVICE
131 }, {
132 .virtual = (unsigned long)VA_L2CC_BASE,
133 .pfn = __phys_to_pfn(L2CC_BASE),
134 .length = SZ_4K,
135 .type = MT_DEVICE
136 },
137};
138
139/* This will create static memory mapping for selected devices */
140void __init spear13xx_map_io(void)
141{
142 iotable_init(spear13xx_io_desc, ARRAY_SIZE(spear13xx_io_desc));
143}
144
145static void __init spear13xx_clk_init(void)
146{
147 if (of_machine_is_compatible("st,spear1310"))
148 spear1310_clk_init();
149 else if (of_machine_is_compatible("st,spear1340"))
150 spear1340_clk_init();
151 else
152 pr_err("%s: Unknown machine\n", __func__);
153}
154
155static void __init spear13xx_timer_init(void)
156{
157 char pclk_name[] = "osc_24m_clk";
158 struct clk *gpt_clk, *pclk;
159
160 spear13xx_clk_init();
161
162 /* get the system timer clock */
163 gpt_clk = clk_get_sys("gpt0", NULL);
164 if (IS_ERR(gpt_clk)) {
165 pr_err("%s:couldn't get clk for gpt\n", __func__);
166 BUG();
167 }
168
169 /* get the suitable parent clock for timer */
170 pclk = clk_get(NULL, pclk_name);
171 if (IS_ERR(pclk)) {
172 pr_err("%s:couldn't get %s as parent for gpt\n", __func__,
173 pclk_name);
174 BUG();
175 }
176
177 clk_set_parent(gpt_clk, pclk);
178 clk_put(gpt_clk);
179 clk_put(pclk);
180
181 spear_setup_of_timer();
182 twd_local_timer_of_register();
183}
184
185struct sys_timer spear13xx_timer = {
186 .init = spear13xx_timer_init,
187};
188
189static const struct of_device_id gic_of_match[] __initconst = {
190 { .compatible = "arm,cortex-a9-gic", .data = gic_of_init },
191 { /* Sentinel */ }
192};
193
194void __init spear13xx_dt_init_irq(void)
195{
196 of_irq_init(gic_of_match);
197}
diff --git a/arch/arm/mach-spear3xx/Makefile b/arch/arm/mach-spear3xx/Makefile
index 17b5d83cf2d5..8d12faa178fd 100644
--- a/arch/arm/mach-spear3xx/Makefile
+++ b/arch/arm/mach-spear3xx/Makefile
@@ -3,7 +3,7 @@
3# 3#
4 4
5# common files 5# common files
6obj-$(CONFIG_ARCH_SPEAR3XX) += spear3xx.o clock.o 6obj-$(CONFIG_ARCH_SPEAR3XX) += spear3xx.o
7 7
8# spear300 specific files 8# spear300 specific files
9obj-$(CONFIG_MACH_SPEAR300) += spear300.o 9obj-$(CONFIG_MACH_SPEAR300) += spear300.o
diff --git a/arch/arm/mach-spear3xx/clock.c b/arch/arm/mach-spear3xx/clock.c
deleted file mode 100644
index cd6c11099083..000000000000
--- a/arch/arm/mach-spear3xx/clock.c
+++ /dev/null
@@ -1,892 +0,0 @@
1/*
2 * arch/arm/mach-spear3xx/clock.c
3 *
4 * SPEAr3xx machines clock framework source file
5 *
6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#include <linux/clkdev.h>
15#include <linux/init.h>
16#include <linux/io.h>
17#include <linux/kernel.h>
18#include <linux/of_platform.h>
19#include <asm/mach-types.h>
20#include <plat/clock.h>
21#include <mach/misc_regs.h>
22#include <mach/spear.h>
23
24#define PLL1_CTR (MISC_BASE + 0x008)
25#define PLL1_FRQ (MISC_BASE + 0x00C)
26#define PLL1_MOD (MISC_BASE + 0x010)
27#define PLL2_CTR (MISC_BASE + 0x014)
28/* PLL_CTR register masks */
29#define PLL_ENABLE 2
30#define PLL_MODE_SHIFT 4
31#define PLL_MODE_MASK 0x3
32#define PLL_MODE_NORMAL 0
33#define PLL_MODE_FRACTION 1
34#define PLL_MODE_DITH_DSB 2
35#define PLL_MODE_DITH_SSB 3
36
37#define PLL2_FRQ (MISC_BASE + 0x018)
38/* PLL FRQ register masks */
39#define PLL_DIV_N_SHIFT 0
40#define PLL_DIV_N_MASK 0xFF
41#define PLL_DIV_P_SHIFT 8
42#define PLL_DIV_P_MASK 0x7
43#define PLL_NORM_FDBK_M_SHIFT 24
44#define PLL_NORM_FDBK_M_MASK 0xFF
45#define PLL_DITH_FDBK_M_SHIFT 16
46#define PLL_DITH_FDBK_M_MASK 0xFFFF
47
48#define PLL2_MOD (MISC_BASE + 0x01C)
49#define PLL_CLK_CFG (MISC_BASE + 0x020)
50#define CORE_CLK_CFG (MISC_BASE + 0x024)
51/* CORE CLK CFG register masks */
52#define PLL_HCLK_RATIO_SHIFT 10
53#define PLL_HCLK_RATIO_MASK 0x3
54#define HCLK_PCLK_RATIO_SHIFT 8
55#define HCLK_PCLK_RATIO_MASK 0x3
56
57#define PERIP_CLK_CFG (MISC_BASE + 0x028)
58/* PERIP_CLK_CFG register masks */
59#define UART_CLK_SHIFT 4
60#define UART_CLK_MASK 0x1
61#define FIRDA_CLK_SHIFT 5
62#define FIRDA_CLK_MASK 0x3
63#define GPT0_CLK_SHIFT 8
64#define GPT1_CLK_SHIFT 11
65#define GPT2_CLK_SHIFT 12
66#define GPT_CLK_MASK 0x1
67#define AUX_CLK_PLL3_VAL 0
68#define AUX_CLK_PLL1_VAL 1
69
70#define PERIP1_CLK_ENB (MISC_BASE + 0x02C)
71/* PERIP1_CLK_ENB register masks */
72#define UART_CLK_ENB 3
73#define SSP_CLK_ENB 5
74#define I2C_CLK_ENB 7
75#define JPEG_CLK_ENB 8
76#define FIRDA_CLK_ENB 10
77#define GPT1_CLK_ENB 11
78#define GPT2_CLK_ENB 12
79#define ADC_CLK_ENB 15
80#define RTC_CLK_ENB 17
81#define GPIO_CLK_ENB 18
82#define DMA_CLK_ENB 19
83#define SMI_CLK_ENB 21
84#define GMAC_CLK_ENB 23
85#define USBD_CLK_ENB 24
86#define USBH_CLK_ENB 25
87#define C3_CLK_ENB 31
88
89#define RAS_CLK_ENB (MISC_BASE + 0x034)
90
91#define PRSC1_CLK_CFG (MISC_BASE + 0x044)
92#define PRSC2_CLK_CFG (MISC_BASE + 0x048)
93#define PRSC3_CLK_CFG (MISC_BASE + 0x04C)
94/* gpt synthesizer register masks */
95#define GPT_MSCALE_SHIFT 0
96#define GPT_MSCALE_MASK 0xFFF
97#define GPT_NSCALE_SHIFT 12
98#define GPT_NSCALE_MASK 0xF
99
100#define AMEM_CLK_CFG (MISC_BASE + 0x050)
101#define EXPI_CLK_CFG (MISC_BASE + 0x054)
102#define CLCD_CLK_SYNT (MISC_BASE + 0x05C)
103#define FIRDA_CLK_SYNT (MISC_BASE + 0x060)
104#define UART_CLK_SYNT (MISC_BASE + 0x064)
105#define GMAC_CLK_SYNT (MISC_BASE + 0x068)
106#define RAS1_CLK_SYNT (MISC_BASE + 0x06C)
107#define RAS2_CLK_SYNT (MISC_BASE + 0x070)
108#define RAS3_CLK_SYNT (MISC_BASE + 0x074)
109#define RAS4_CLK_SYNT (MISC_BASE + 0x078)
110/* aux clk synthesiser register masks for irda to ras4 */
111#define AUX_SYNT_ENB 31
112#define AUX_EQ_SEL_SHIFT 30
113#define AUX_EQ_SEL_MASK 1
114#define AUX_EQ1_SEL 0
115#define AUX_EQ2_SEL 1
116#define AUX_XSCALE_SHIFT 16
117#define AUX_XSCALE_MASK 0xFFF
118#define AUX_YSCALE_SHIFT 0
119#define AUX_YSCALE_MASK 0xFFF
120
121/* root clks */
122/* 32 KHz oscillator clock */
123static struct clk osc_32k_clk = {
124 .flags = ALWAYS_ENABLED,
125 .rate = 32000,
126};
127
128/* 24 MHz oscillator clock */
129static struct clk osc_24m_clk = {
130 .flags = ALWAYS_ENABLED,
131 .rate = 24000000,
132};
133
134/* clock derived from 32 KHz osc clk */
135/* rtc clock */
136static struct clk rtc_clk = {
137 .pclk = &osc_32k_clk,
138 .en_reg = PERIP1_CLK_ENB,
139 .en_reg_bit = RTC_CLK_ENB,
140 .recalc = &follow_parent,
141};
142
143/* clock derived from 24 MHz osc clk */
144/* pll masks structure */
145static struct pll_clk_masks pll1_masks = {
146 .mode_mask = PLL_MODE_MASK,
147 .mode_shift = PLL_MODE_SHIFT,
148 .norm_fdbk_m_mask = PLL_NORM_FDBK_M_MASK,
149 .norm_fdbk_m_shift = PLL_NORM_FDBK_M_SHIFT,
150 .dith_fdbk_m_mask = PLL_DITH_FDBK_M_MASK,
151 .dith_fdbk_m_shift = PLL_DITH_FDBK_M_SHIFT,
152 .div_p_mask = PLL_DIV_P_MASK,
153 .div_p_shift = PLL_DIV_P_SHIFT,
154 .div_n_mask = PLL_DIV_N_MASK,
155 .div_n_shift = PLL_DIV_N_SHIFT,
156};
157
158/* pll1 configuration structure */
159static struct pll_clk_config pll1_config = {
160 .mode_reg = PLL1_CTR,
161 .cfg_reg = PLL1_FRQ,
162 .masks = &pll1_masks,
163};
164
165/* pll rate configuration table, in ascending order of rates */
166struct pll_rate_tbl pll_rtbl[] = {
167 {.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* 266 MHz */
168 {.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* 332 MHz */
169};
170
171/* PLL1 clock */
172static struct clk pll1_clk = {
173 .flags = ENABLED_ON_INIT,
174 .pclk = &osc_24m_clk,
175 .en_reg = PLL1_CTR,
176 .en_reg_bit = PLL_ENABLE,
177 .calc_rate = &pll_calc_rate,
178 .recalc = &pll_clk_recalc,
179 .set_rate = &pll_clk_set_rate,
180 .rate_config = {pll_rtbl, ARRAY_SIZE(pll_rtbl), 1},
181 .private_data = &pll1_config,
182};
183
184/* PLL3 48 MHz clock */
185static struct clk pll3_48m_clk = {
186 .flags = ALWAYS_ENABLED,
187 .pclk = &osc_24m_clk,
188 .rate = 48000000,
189};
190
191/* watch dog timer clock */
192static struct clk wdt_clk = {
193 .flags = ALWAYS_ENABLED,
194 .pclk = &osc_24m_clk,
195 .recalc = &follow_parent,
196};
197
198/* clock derived from pll1 clk */
199/* cpu clock */
200static struct clk cpu_clk = {
201 .flags = ALWAYS_ENABLED,
202 .pclk = &pll1_clk,
203 .recalc = &follow_parent,
204};
205
206/* ahb masks structure */
207static struct bus_clk_masks ahb_masks = {
208 .mask = PLL_HCLK_RATIO_MASK,
209 .shift = PLL_HCLK_RATIO_SHIFT,
210};
211
212/* ahb configuration structure */
213static struct bus_clk_config ahb_config = {
214 .reg = CORE_CLK_CFG,
215 .masks = &ahb_masks,
216};
217
218/* ahb rate configuration table, in ascending order of rates */
219struct bus_rate_tbl bus_rtbl[] = {
220 {.div = 3}, /* == parent divided by 4 */
221 {.div = 2}, /* == parent divided by 3 */
222 {.div = 1}, /* == parent divided by 2 */
223 {.div = 0}, /* == parent divided by 1 */
224};
225
226/* ahb clock */
227static struct clk ahb_clk = {
228 .flags = ALWAYS_ENABLED,
229 .pclk = &pll1_clk,
230 .calc_rate = &bus_calc_rate,
231 .recalc = &bus_clk_recalc,
232 .set_rate = &bus_clk_set_rate,
233 .rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
234 .private_data = &ahb_config,
235};
236
237/* auxiliary synthesizers masks */
238static struct aux_clk_masks aux_masks = {
239 .eq_sel_mask = AUX_EQ_SEL_MASK,
240 .eq_sel_shift = AUX_EQ_SEL_SHIFT,
241 .eq1_mask = AUX_EQ1_SEL,
242 .eq2_mask = AUX_EQ2_SEL,
243 .xscale_sel_mask = AUX_XSCALE_MASK,
244 .xscale_sel_shift = AUX_XSCALE_SHIFT,
245 .yscale_sel_mask = AUX_YSCALE_MASK,
246 .yscale_sel_shift = AUX_YSCALE_SHIFT,
247};
248
249/* uart synth configurations */
250static struct aux_clk_config uart_synth_config = {
251 .synth_reg = UART_CLK_SYNT,
252 .masks = &aux_masks,
253};
254
255/* aux rate configuration table, in ascending order of rates */
256struct aux_rate_tbl aux_rtbl[] = {
257 /* For PLL1 = 332 MHz */
258 {.xscale = 1, .yscale = 8, .eq = 1}, /* 41.5 MHz */
259 {.xscale = 1, .yscale = 4, .eq = 1}, /* 83 MHz */
260 {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
261};
262
263/* uart synth clock */
264static struct clk uart_synth_clk = {
265 .en_reg = UART_CLK_SYNT,
266 .en_reg_bit = AUX_SYNT_ENB,
267 .pclk = &pll1_clk,
268 .calc_rate = &aux_calc_rate,
269 .recalc = &aux_clk_recalc,
270 .set_rate = &aux_clk_set_rate,
271 .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 1},
272 .private_data = &uart_synth_config,
273};
274
275/* uart parents */
276static struct pclk_info uart_pclk_info[] = {
277 {
278 .pclk = &uart_synth_clk,
279 .pclk_val = AUX_CLK_PLL1_VAL,
280 }, {
281 .pclk = &pll3_48m_clk,
282 .pclk_val = AUX_CLK_PLL3_VAL,
283 },
284};
285
286/* uart parent select structure */
287static struct pclk_sel uart_pclk_sel = {
288 .pclk_info = uart_pclk_info,
289 .pclk_count = ARRAY_SIZE(uart_pclk_info),
290 .pclk_sel_reg = PERIP_CLK_CFG,
291 .pclk_sel_mask = UART_CLK_MASK,
292};
293
294/* uart clock */
295static struct clk uart_clk = {
296 .en_reg = PERIP1_CLK_ENB,
297 .en_reg_bit = UART_CLK_ENB,
298 .pclk_sel = &uart_pclk_sel,
299 .pclk_sel_shift = UART_CLK_SHIFT,
300 .recalc = &follow_parent,
301};
302
303/* firda configurations */
304static struct aux_clk_config firda_synth_config = {
305 .synth_reg = FIRDA_CLK_SYNT,
306 .masks = &aux_masks,
307};
308
309/* firda synth clock */
310static struct clk firda_synth_clk = {
311 .en_reg = FIRDA_CLK_SYNT,
312 .en_reg_bit = AUX_SYNT_ENB,
313 .pclk = &pll1_clk,
314 .calc_rate = &aux_calc_rate,
315 .recalc = &aux_clk_recalc,
316 .set_rate = &aux_clk_set_rate,
317 .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 1},
318 .private_data = &firda_synth_config,
319};
320
321/* firda parents */
322static struct pclk_info firda_pclk_info[] = {
323 {
324 .pclk = &firda_synth_clk,
325 .pclk_val = AUX_CLK_PLL1_VAL,
326 }, {
327 .pclk = &pll3_48m_clk,
328 .pclk_val = AUX_CLK_PLL3_VAL,
329 },
330};
331
332/* firda parent select structure */
333static struct pclk_sel firda_pclk_sel = {
334 .pclk_info = firda_pclk_info,
335 .pclk_count = ARRAY_SIZE(firda_pclk_info),
336 .pclk_sel_reg = PERIP_CLK_CFG,
337 .pclk_sel_mask = FIRDA_CLK_MASK,
338};
339
340/* firda clock */
341static struct clk firda_clk = {
342 .en_reg = PERIP1_CLK_ENB,
343 .en_reg_bit = FIRDA_CLK_ENB,
344 .pclk_sel = &firda_pclk_sel,
345 .pclk_sel_shift = FIRDA_CLK_SHIFT,
346 .recalc = &follow_parent,
347};
348
349/* gpt synthesizer masks */
350static struct gpt_clk_masks gpt_masks = {
351 .mscale_sel_mask = GPT_MSCALE_MASK,
352 .mscale_sel_shift = GPT_MSCALE_SHIFT,
353 .nscale_sel_mask = GPT_NSCALE_MASK,
354 .nscale_sel_shift = GPT_NSCALE_SHIFT,
355};
356
357/* gpt rate configuration table, in ascending order of rates */
358struct gpt_rate_tbl gpt_rtbl[] = {
359 /* For pll1 = 332 MHz */
360 {.mscale = 4, .nscale = 0}, /* 41.5 MHz */
361 {.mscale = 2, .nscale = 0}, /* 55.3 MHz */
362 {.mscale = 1, .nscale = 0}, /* 83 MHz */
363};
364
365/* gpt0 synth clk config*/
366static struct gpt_clk_config gpt0_synth_config = {
367 .synth_reg = PRSC1_CLK_CFG,
368 .masks = &gpt_masks,
369};
370
371/* gpt synth clock */
372static struct clk gpt0_synth_clk = {
373 .flags = ALWAYS_ENABLED,
374 .pclk = &pll1_clk,
375 .calc_rate = &gpt_calc_rate,
376 .recalc = &gpt_clk_recalc,
377 .set_rate = &gpt_clk_set_rate,
378 .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
379 .private_data = &gpt0_synth_config,
380};
381
382/* gpt parents */
383static struct pclk_info gpt0_pclk_info[] = {
384 {
385 .pclk = &gpt0_synth_clk,
386 .pclk_val = AUX_CLK_PLL1_VAL,
387 }, {
388 .pclk = &pll3_48m_clk,
389 .pclk_val = AUX_CLK_PLL3_VAL,
390 },
391};
392
393/* gpt parent select structure */
394static struct pclk_sel gpt0_pclk_sel = {
395 .pclk_info = gpt0_pclk_info,
396 .pclk_count = ARRAY_SIZE(gpt0_pclk_info),
397 .pclk_sel_reg = PERIP_CLK_CFG,
398 .pclk_sel_mask = GPT_CLK_MASK,
399};
400
401/* gpt0 timer clock */
402static struct clk gpt0_clk = {
403 .flags = ALWAYS_ENABLED,
404 .pclk_sel = &gpt0_pclk_sel,
405 .pclk_sel_shift = GPT0_CLK_SHIFT,
406 .recalc = &follow_parent,
407};
408
409/* gpt1 synth clk configurations */
410static struct gpt_clk_config gpt1_synth_config = {
411 .synth_reg = PRSC2_CLK_CFG,
412 .masks = &gpt_masks,
413};
414
415/* gpt1 synth clock */
416static struct clk gpt1_synth_clk = {
417 .flags = ALWAYS_ENABLED,
418 .pclk = &pll1_clk,
419 .calc_rate = &gpt_calc_rate,
420 .recalc = &gpt_clk_recalc,
421 .set_rate = &gpt_clk_set_rate,
422 .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
423 .private_data = &gpt1_synth_config,
424};
425
426static struct pclk_info gpt1_pclk_info[] = {
427 {
428 .pclk = &gpt1_synth_clk,
429 .pclk_val = AUX_CLK_PLL1_VAL,
430 }, {
431 .pclk = &pll3_48m_clk,
432 .pclk_val = AUX_CLK_PLL3_VAL,
433 },
434};
435
436/* gpt parent select structure */
437static struct pclk_sel gpt1_pclk_sel = {
438 .pclk_info = gpt1_pclk_info,
439 .pclk_count = ARRAY_SIZE(gpt1_pclk_info),
440 .pclk_sel_reg = PERIP_CLK_CFG,
441 .pclk_sel_mask = GPT_CLK_MASK,
442};
443
444/* gpt1 timer clock */
445static struct clk gpt1_clk = {
446 .en_reg = PERIP1_CLK_ENB,
447 .en_reg_bit = GPT1_CLK_ENB,
448 .pclk_sel = &gpt1_pclk_sel,
449 .pclk_sel_shift = GPT1_CLK_SHIFT,
450 .recalc = &follow_parent,
451};
452
453/* gpt2 synth clk configurations */
454static struct gpt_clk_config gpt2_synth_config = {
455 .synth_reg = PRSC3_CLK_CFG,
456 .masks = &gpt_masks,
457};
458
459/* gpt1 synth clock */
460static struct clk gpt2_synth_clk = {
461 .flags = ALWAYS_ENABLED,
462 .pclk = &pll1_clk,
463 .calc_rate = &gpt_calc_rate,
464 .recalc = &gpt_clk_recalc,
465 .set_rate = &gpt_clk_set_rate,
466 .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
467 .private_data = &gpt2_synth_config,
468};
469
470static struct pclk_info gpt2_pclk_info[] = {
471 {
472 .pclk = &gpt2_synth_clk,
473 .pclk_val = AUX_CLK_PLL1_VAL,
474 }, {
475 .pclk = &pll3_48m_clk,
476 .pclk_val = AUX_CLK_PLL3_VAL,
477 },
478};
479
480/* gpt parent select structure */
481static struct pclk_sel gpt2_pclk_sel = {
482 .pclk_info = gpt2_pclk_info,
483 .pclk_count = ARRAY_SIZE(gpt2_pclk_info),
484 .pclk_sel_reg = PERIP_CLK_CFG,
485 .pclk_sel_mask = GPT_CLK_MASK,
486};
487
488/* gpt2 timer clock */
489static struct clk gpt2_clk = {
490 .en_reg = PERIP1_CLK_ENB,
491 .en_reg_bit = GPT2_CLK_ENB,
492 .pclk_sel = &gpt2_pclk_sel,
493 .pclk_sel_shift = GPT2_CLK_SHIFT,
494 .recalc = &follow_parent,
495};
496
497/* clock derived from pll3 clk */
498/* usbh clock */
499static struct clk usbh_clk = {
500 .pclk = &pll3_48m_clk,
501 .en_reg = PERIP1_CLK_ENB,
502 .en_reg_bit = USBH_CLK_ENB,
503 .recalc = &follow_parent,
504};
505
506/* usbd clock */
507static struct clk usbd_clk = {
508 .pclk = &pll3_48m_clk,
509 .en_reg = PERIP1_CLK_ENB,
510 .en_reg_bit = USBD_CLK_ENB,
511 .recalc = &follow_parent,
512};
513
514/* clock derived from usbh clk */
515/* usbh0 clock */
516static struct clk usbh0_clk = {
517 .flags = ALWAYS_ENABLED,
518 .pclk = &usbh_clk,
519 .recalc = &follow_parent,
520};
521
522/* usbh1 clock */
523static struct clk usbh1_clk = {
524 .flags = ALWAYS_ENABLED,
525 .pclk = &usbh_clk,
526 .recalc = &follow_parent,
527};
528
529/* clock derived from ahb clk */
530/* apb masks structure */
531static struct bus_clk_masks apb_masks = {
532 .mask = HCLK_PCLK_RATIO_MASK,
533 .shift = HCLK_PCLK_RATIO_SHIFT,
534};
535
536/* apb configuration structure */
537static struct bus_clk_config apb_config = {
538 .reg = CORE_CLK_CFG,
539 .masks = &apb_masks,
540};
541
542/* apb clock */
543static struct clk apb_clk = {
544 .flags = ALWAYS_ENABLED,
545 .pclk = &ahb_clk,
546 .calc_rate = &bus_calc_rate,
547 .recalc = &bus_clk_recalc,
548 .set_rate = &bus_clk_set_rate,
549 .rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
550 .private_data = &apb_config,
551};
552
553/* i2c clock */
554static struct clk i2c_clk = {
555 .pclk = &ahb_clk,
556 .en_reg = PERIP1_CLK_ENB,
557 .en_reg_bit = I2C_CLK_ENB,
558 .recalc = &follow_parent,
559};
560
561/* dma clock */
562static struct clk dma_clk = {
563 .pclk = &ahb_clk,
564 .en_reg = PERIP1_CLK_ENB,
565 .en_reg_bit = DMA_CLK_ENB,
566 .recalc = &follow_parent,
567};
568
569/* jpeg clock */
570static struct clk jpeg_clk = {
571 .pclk = &ahb_clk,
572 .en_reg = PERIP1_CLK_ENB,
573 .en_reg_bit = JPEG_CLK_ENB,
574 .recalc = &follow_parent,
575};
576
577/* gmac clock */
578static struct clk gmac_clk = {
579 .pclk = &ahb_clk,
580 .en_reg = PERIP1_CLK_ENB,
581 .en_reg_bit = GMAC_CLK_ENB,
582 .recalc = &follow_parent,
583};
584
585/* smi clock */
586static struct clk smi_clk = {
587 .pclk = &ahb_clk,
588 .en_reg = PERIP1_CLK_ENB,
589 .en_reg_bit = SMI_CLK_ENB,
590 .recalc = &follow_parent,
591};
592
593/* c3 clock */
594static struct clk c3_clk = {
595 .pclk = &ahb_clk,
596 .en_reg = PERIP1_CLK_ENB,
597 .en_reg_bit = C3_CLK_ENB,
598 .recalc = &follow_parent,
599};
600
601/* clock derived from apb clk */
602/* adc clock */
603static struct clk adc_clk = {
604 .pclk = &apb_clk,
605 .en_reg = PERIP1_CLK_ENB,
606 .en_reg_bit = ADC_CLK_ENB,
607 .recalc = &follow_parent,
608};
609
610#if defined(CONFIG_MACH_SPEAR310) || defined(CONFIG_MACH_SPEAR320)
611/* emi clock */
612static struct clk emi_clk = {
613 .flags = ALWAYS_ENABLED,
614 .pclk = &ahb_clk,
615 .recalc = &follow_parent,
616};
617#endif
618
619/* ssp clock */
620static struct clk ssp0_clk = {
621 .pclk = &apb_clk,
622 .en_reg = PERIP1_CLK_ENB,
623 .en_reg_bit = SSP_CLK_ENB,
624 .recalc = &follow_parent,
625};
626
627/* gpio clock */
628static struct clk gpio_clk = {
629 .pclk = &apb_clk,
630 .en_reg = PERIP1_CLK_ENB,
631 .en_reg_bit = GPIO_CLK_ENB,
632 .recalc = &follow_parent,
633};
634
635static struct clk dummy_apb_pclk;
636
637#if defined(CONFIG_MACH_SPEAR300) || defined(CONFIG_MACH_SPEAR310) || \
638 defined(CONFIG_MACH_SPEAR320)
639/* fsmc clock */
640static struct clk fsmc_clk = {
641 .flags = ALWAYS_ENABLED,
642 .pclk = &ahb_clk,
643 .recalc = &follow_parent,
644};
645#endif
646
647/* common clocks to spear310 and spear320 */
648#if defined(CONFIG_MACH_SPEAR310) || defined(CONFIG_MACH_SPEAR320)
649/* uart1 clock */
650static struct clk uart1_clk = {
651 .flags = ALWAYS_ENABLED,
652 .pclk = &apb_clk,
653 .recalc = &follow_parent,
654};
655
656/* uart2 clock */
657static struct clk uart2_clk = {
658 .flags = ALWAYS_ENABLED,
659 .pclk = &apb_clk,
660 .recalc = &follow_parent,
661};
662#endif /* CONFIG_MACH_SPEAR310 || CONFIG_MACH_SPEAR320 */
663
664/* common clocks to spear300 and spear320 */
665#if defined(CONFIG_MACH_SPEAR300) || defined(CONFIG_MACH_SPEAR320)
666/* clcd clock */
667static struct clk clcd_clk = {
668 .flags = ALWAYS_ENABLED,
669 .pclk = &pll3_48m_clk,
670 .recalc = &follow_parent,
671};
672
673/* sdhci clock */
674static struct clk sdhci_clk = {
675 .flags = ALWAYS_ENABLED,
676 .pclk = &ahb_clk,
677 .recalc = &follow_parent,
678};
679#endif /* CONFIG_MACH_SPEAR300 || CONFIG_MACH_SPEAR320 */
680
681/* spear300 machine specific clock structures */
682#ifdef CONFIG_MACH_SPEAR300
683/* gpio1 clock */
684static struct clk gpio1_clk = {
685 .flags = ALWAYS_ENABLED,
686 .pclk = &apb_clk,
687 .recalc = &follow_parent,
688};
689
690/* keyboard clock */
691static struct clk kbd_clk = {
692 .flags = ALWAYS_ENABLED,
693 .pclk = &apb_clk,
694 .recalc = &follow_parent,
695};
696
697#endif
698
699/* spear310 machine specific clock structures */
700#ifdef CONFIG_MACH_SPEAR310
701/* uart3 clock */
702static struct clk uart3_clk = {
703 .flags = ALWAYS_ENABLED,
704 .pclk = &apb_clk,
705 .recalc = &follow_parent,
706};
707
708/* uart4 clock */
709static struct clk uart4_clk = {
710 .flags = ALWAYS_ENABLED,
711 .pclk = &apb_clk,
712 .recalc = &follow_parent,
713};
714
715/* uart5 clock */
716static struct clk uart5_clk = {
717 .flags = ALWAYS_ENABLED,
718 .pclk = &apb_clk,
719 .recalc = &follow_parent,
720};
721#endif
722
723/* spear320 machine specific clock structures */
724#ifdef CONFIG_MACH_SPEAR320
725/* can0 clock */
726static struct clk can0_clk = {
727 .flags = ALWAYS_ENABLED,
728 .pclk = &apb_clk,
729 .recalc = &follow_parent,
730};
731
732/* can1 clock */
733static struct clk can1_clk = {
734 .flags = ALWAYS_ENABLED,
735 .pclk = &apb_clk,
736 .recalc = &follow_parent,
737};
738
739/* i2c1 clock */
740static struct clk i2c1_clk = {
741 .flags = ALWAYS_ENABLED,
742 .pclk = &ahb_clk,
743 .recalc = &follow_parent,
744};
745
746/* ssp1 clock */
747static struct clk ssp1_clk = {
748 .flags = ALWAYS_ENABLED,
749 .pclk = &apb_clk,
750 .recalc = &follow_parent,
751};
752
753/* ssp2 clock */
754static struct clk ssp2_clk = {
755 .flags = ALWAYS_ENABLED,
756 .pclk = &apb_clk,
757 .recalc = &follow_parent,
758};
759
760/* pwm clock */
761static struct clk pwm_clk = {
762 .flags = ALWAYS_ENABLED,
763 .pclk = &apb_clk,
764 .recalc = &follow_parent,
765};
766#endif
767
768/* array of all spear 3xx clock lookups */
769static struct clk_lookup spear_clk_lookups[] = {
770 CLKDEV_INIT(NULL, "apb_pclk", &dummy_apb_pclk),
771 /* root clks */
772 CLKDEV_INIT(NULL, "osc_32k_clk", &osc_32k_clk),
773 CLKDEV_INIT(NULL, "osc_24m_clk", &osc_24m_clk),
774 /* clock derived from 32 KHz osc clk */
775 CLKDEV_INIT("fc900000.rtc", NULL, &rtc_clk),
776 /* clock derived from 24 MHz osc clk */
777 CLKDEV_INIT(NULL, "pll1_clk", &pll1_clk),
778 CLKDEV_INIT(NULL, "pll3_48m_clk", &pll3_48m_clk),
779 CLKDEV_INIT("fc880000.wdt", NULL, &wdt_clk),
780 /* clock derived from pll1 clk */
781 CLKDEV_INIT(NULL, "cpu_clk", &cpu_clk),
782 CLKDEV_INIT(NULL, "ahb_clk", &ahb_clk),
783 CLKDEV_INIT(NULL, "uart_synth_clk", &uart_synth_clk),
784 CLKDEV_INIT(NULL, "firda_synth_clk", &firda_synth_clk),
785 CLKDEV_INIT(NULL, "gpt0_synth_clk", &gpt0_synth_clk),
786 CLKDEV_INIT(NULL, "gpt1_synth_clk", &gpt1_synth_clk),
787 CLKDEV_INIT(NULL, "gpt2_synth_clk", &gpt2_synth_clk),
788 CLKDEV_INIT("d0000000.serial", NULL, &uart_clk),
789 CLKDEV_INIT("firda", NULL, &firda_clk),
790 CLKDEV_INIT("gpt0", NULL, &gpt0_clk),
791 CLKDEV_INIT("gpt1", NULL, &gpt1_clk),
792 CLKDEV_INIT("gpt2", NULL, &gpt2_clk),
793 /* clock derived from pll3 clk */
794 CLKDEV_INIT("designware_udc", NULL, &usbd_clk),
795 CLKDEV_INIT(NULL, "usbh_clk", &usbh_clk),
796 /* clock derived from usbh clk */
797 CLKDEV_INIT(NULL, "usbh.0_clk", &usbh0_clk),
798 CLKDEV_INIT(NULL, "usbh.1_clk", &usbh1_clk),
799 /* clock derived from ahb clk */
800 CLKDEV_INIT(NULL, "apb_clk", &apb_clk),
801 CLKDEV_INIT("d0180000.i2c", NULL, &i2c_clk),
802 CLKDEV_INIT("fc400000.dma", NULL, &dma_clk),
803 CLKDEV_INIT("jpeg", NULL, &jpeg_clk),
804 CLKDEV_INIT("e0800000.eth", NULL, &gmac_clk),
805 CLKDEV_INIT("fc000000.flash", NULL, &smi_clk),
806 CLKDEV_INIT("c3", NULL, &c3_clk),
807 /* clock derived from apb clk */
808 CLKDEV_INIT("adc", NULL, &adc_clk),
809 CLKDEV_INIT("d0100000.spi", NULL, &ssp0_clk),
810 CLKDEV_INIT("fc980000.gpio", NULL, &gpio_clk),
811};
812
813/* array of all spear 300 clock lookups */
814#ifdef CONFIG_MACH_SPEAR300
815static struct clk_lookup spear300_clk_lookups[] = {
816 CLKDEV_INIT("60000000.clcd", NULL, &clcd_clk),
817 CLKDEV_INIT("94000000.flash", NULL, &fsmc_clk),
818 CLKDEV_INIT("a9000000.gpio", NULL, &gpio1_clk),
819 CLKDEV_INIT("a0000000.kbd", NULL, &kbd_clk),
820 CLKDEV_INIT("70000000.sdhci", NULL, &sdhci_clk),
821};
822
823void __init spear300_clk_init(void)
824{
825 int i;
826
827 for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
828 clk_register(&spear_clk_lookups[i]);
829
830 for (i = 0; i < ARRAY_SIZE(spear300_clk_lookups); i++)
831 clk_register(&spear300_clk_lookups[i]);
832
833 clk_init();
834}
835#endif
836
837/* array of all spear 310 clock lookups */
838#ifdef CONFIG_MACH_SPEAR310
839static struct clk_lookup spear310_clk_lookups[] = {
840 CLKDEV_INIT("44000000.flash", NULL, &fsmc_clk),
841 CLKDEV_INIT(NULL, "emi", &emi_clk),
842 CLKDEV_INIT("b2000000.serial", NULL, &uart1_clk),
843 CLKDEV_INIT("b2080000.serial", NULL, &uart2_clk),
844 CLKDEV_INIT("b2100000.serial", NULL, &uart3_clk),
845 CLKDEV_INIT("b2180000.serial", NULL, &uart4_clk),
846 CLKDEV_INIT("b2200000.serial", NULL, &uart5_clk),
847};
848
849void __init spear310_clk_init(void)
850{
851 int i;
852
853 for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
854 clk_register(&spear_clk_lookups[i]);
855
856 for (i = 0; i < ARRAY_SIZE(spear310_clk_lookups); i++)
857 clk_register(&spear310_clk_lookups[i]);
858
859 clk_init();
860}
861#endif
862
863/* array of all spear 320 clock lookups */
864#ifdef CONFIG_MACH_SPEAR320
865static struct clk_lookup spear320_clk_lookups[] = {
866 CLKDEV_INIT("90000000.clcd", NULL, &clcd_clk),
867 CLKDEV_INIT("4c000000.flash", NULL, &fsmc_clk),
868 CLKDEV_INIT("a7000000.i2c", NULL, &i2c1_clk),
869 CLKDEV_INIT(NULL, "emi", &emi_clk),
870 CLKDEV_INIT("pwm", NULL, &pwm_clk),
871 CLKDEV_INIT("70000000.sdhci", NULL, &sdhci_clk),
872 CLKDEV_INIT("c_can_platform.0", NULL, &can0_clk),
873 CLKDEV_INIT("c_can_platform.1", NULL, &can1_clk),
874 CLKDEV_INIT("a5000000.spi", NULL, &ssp1_clk),
875 CLKDEV_INIT("a6000000.spi", NULL, &ssp2_clk),
876 CLKDEV_INIT("a3000000.serial", NULL, &uart1_clk),
877 CLKDEV_INIT("a4000000.serial", NULL, &uart2_clk),
878};
879
880void __init spear320_clk_init(void)
881{
882 int i;
883
884 for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
885 clk_register(&spear_clk_lookups[i]);
886
887 for (i = 0; i < ARRAY_SIZE(spear320_clk_lookups); i++)
888 clk_register(&spear320_clk_lookups[i]);
889
890 clk_init();
891}
892#endif
diff --git a/arch/arm/mach-spear3xx/include/mach/generic.h b/arch/arm/mach-spear3xx/include/mach/generic.h
index bdb304551caf..4a95b9453c2a 100644
--- a/arch/arm/mach-spear3xx/include/mach/generic.h
+++ b/arch/arm/mach-spear3xx/include/mach/generic.h
@@ -27,28 +27,11 @@ extern struct pl022_ssp_controller pl022_plat_data;
27extern struct pl08x_platform_data pl080_plat_data; 27extern struct pl08x_platform_data pl080_plat_data;
28 28
29/* Add spear3xx family function declarations here */ 29/* Add spear3xx family function declarations here */
30void __init spear_setup_timer(resource_size_t base, int irq); 30void __init spear_setup_of_timer(void);
31void __init spear3xx_clk_init(void);
31void __init spear3xx_map_io(void); 32void __init spear3xx_map_io(void);
32void __init spear3xx_dt_init_irq(void); 33void __init spear3xx_dt_init_irq(void);
33 34
34void spear_restart(char, const char *); 35void spear_restart(char, const char *);
35 36
36/* spear300 declarations */
37#ifdef CONFIG_MACH_SPEAR300
38void __init spear300_clk_init(void);
39
40#endif /* CONFIG_MACH_SPEAR300 */
41
42/* spear310 declarations */
43#ifdef CONFIG_MACH_SPEAR310
44void __init spear310_clk_init(void);
45
46#endif /* CONFIG_MACH_SPEAR310 */
47
48/* spear320 declarations */
49#ifdef CONFIG_MACH_SPEAR320
50void __init spear320_clk_init(void);
51
52#endif /* CONFIG_MACH_SPEAR320 */
53
54#endif /* __MACH_GENERIC_H */ 37#endif /* __MACH_GENERIC_H */
diff --git a/arch/arm/mach-spear3xx/include/mach/irqs.h b/arch/arm/mach-spear3xx/include/mach/irqs.h
index 319620a1afb4..51bd62a0254c 100644
--- a/arch/arm/mach-spear3xx/include/mach/irqs.h
+++ b/arch/arm/mach-spear3xx/include/mach/irqs.h
@@ -16,7 +16,6 @@
16 16
17/* FIXME: probe all these from DT */ 17/* FIXME: probe all these from DT */
18#define SPEAR3XX_IRQ_INTRCOMM_RAS_ARM 1 18#define SPEAR3XX_IRQ_INTRCOMM_RAS_ARM 1
19#define SPEAR3XX_IRQ_CPU_GPT1_1 2
20#define SPEAR3XX_IRQ_GEN_RAS_1 28 19#define SPEAR3XX_IRQ_GEN_RAS_1 28
21#define SPEAR3XX_IRQ_GEN_RAS_2 29 20#define SPEAR3XX_IRQ_GEN_RAS_2 29
22#define SPEAR3XX_IRQ_GEN_RAS_3 30 21#define SPEAR3XX_IRQ_GEN_RAS_3 30
diff --git a/arch/arm/mach-spear3xx/include/mach/misc_regs.h b/arch/arm/mach-spear3xx/include/mach/misc_regs.h
index e0ab72e61507..18e2ac576f25 100644
--- a/arch/arm/mach-spear3xx/include/mach/misc_regs.h
+++ b/arch/arm/mach-spear3xx/include/mach/misc_regs.h
@@ -14,6 +14,8 @@
14#ifndef __MACH_MISC_REGS_H 14#ifndef __MACH_MISC_REGS_H
15#define __MACH_MISC_REGS_H 15#define __MACH_MISC_REGS_H
16 16
17#include <mach/spear.h>
18
17#define MISC_BASE IOMEM(VA_SPEAR3XX_ICM3_MISC_REG_BASE) 19#define MISC_BASE IOMEM(VA_SPEAR3XX_ICM3_MISC_REG_BASE)
18#define DMA_CHN_CFG (MISC_BASE + 0x0A0) 20#define DMA_CHN_CFG (MISC_BASE + 0x0A0)
19 21
diff --git a/arch/arm/mach-spear3xx/include/mach/spear.h b/arch/arm/mach-spear3xx/include/mach/spear.h
index 6d4dadc67633..51eb953148a9 100644
--- a/arch/arm/mach-spear3xx/include/mach/spear.h
+++ b/arch/arm/mach-spear3xx/include/mach/spear.h
@@ -26,7 +26,6 @@
26/* ML1 - Multi Layer CPU Subsystem */ 26/* ML1 - Multi Layer CPU Subsystem */
27#define SPEAR3XX_ICM3_ML1_2_BASE UL(0xF0000000) 27#define SPEAR3XX_ICM3_ML1_2_BASE UL(0xF0000000)
28#define VA_SPEAR6XX_ML_CPU_BASE UL(0xF0000000) 28#define VA_SPEAR6XX_ML_CPU_BASE UL(0xF0000000)
29#define SPEAR3XX_CPU_TMR_BASE UL(0xF0000000)
30 29
31/* ICM3 - Basic Subsystem */ 30/* ICM3 - Basic Subsystem */
32#define SPEAR3XX_ICM3_SMI_CTRL_BASE UL(0xFC000000) 31#define SPEAR3XX_ICM3_SMI_CTRL_BASE UL(0xFC000000)
@@ -45,4 +44,17 @@
45#define SPEAR_SYS_CTRL_BASE SPEAR3XX_ICM3_SYS_CTRL_BASE 44#define SPEAR_SYS_CTRL_BASE SPEAR3XX_ICM3_SYS_CTRL_BASE
46#define VA_SPEAR_SYS_CTRL_BASE VA_SPEAR3XX_ICM3_SYS_CTRL_BASE 45#define VA_SPEAR_SYS_CTRL_BASE VA_SPEAR3XX_ICM3_SYS_CTRL_BASE
47 46
47/* SPEAr320 Macros */
48#define SPEAR320_SOC_CONFIG_BASE UL(0xB3000000)
49#define VA_SPEAR320_SOC_CONFIG_BASE UL(0xFE000000)
50#define SPEAR320_CONTROL_REG IOMEM(VA_SPEAR320_SOC_CONFIG_BASE)
51#define SPEAR320_EXT_CTRL_REG IOMEM(VA_SPEAR320_SOC_CONFIG_BASE + 0x0018)
52 #define SPEAR320_UARTX_PCLK_MASK 0x1
53 #define SPEAR320_UART2_PCLK_SHIFT 8
54 #define SPEAR320_UART3_PCLK_SHIFT 9
55 #define SPEAR320_UART4_PCLK_SHIFT 10
56 #define SPEAR320_UART5_PCLK_SHIFT 11
57 #define SPEAR320_UART6_PCLK_SHIFT 12
58 #define SPEAR320_RS485_PCLK_SHIFT 13
59
48#endif /* __MACH_SPEAR3XX_H */ 60#endif /* __MACH_SPEAR3XX_H */
diff --git a/arch/arm/mach-spear3xx/spear300.c b/arch/arm/mach-spear3xx/spear300.c
index f75fe25a620c..f74a05bdb829 100644
--- a/arch/arm/mach-spear3xx/spear300.c
+++ b/arch/arm/mach-spear3xx/spear300.c
@@ -337,7 +337,6 @@ static const char * const spear300_dt_board_compat[] = {
337static void __init spear300_map_io(void) 337static void __init spear300_map_io(void)
338{ 338{
339 spear3xx_map_io(); 339 spear3xx_map_io();
340 spear300_clk_init();
341} 340}
342 341
343DT_MACHINE_START(SPEAR300_DT, "ST SPEAr300 SoC with Flattened Device Tree") 342DT_MACHINE_START(SPEAR300_DT, "ST SPEAr300 SoC with Flattened Device Tree")
diff --git a/arch/arm/mach-spear3xx/spear310.c b/arch/arm/mach-spear3xx/spear310.c
index f0842a58dc02..84dfb0900747 100644
--- a/arch/arm/mach-spear3xx/spear310.c
+++ b/arch/arm/mach-spear3xx/spear310.c
@@ -478,7 +478,6 @@ static const char * const spear310_dt_board_compat[] = {
478static void __init spear310_map_io(void) 478static void __init spear310_map_io(void)
479{ 479{
480 spear3xx_map_io(); 480 spear3xx_map_io();
481 spear310_clk_init();
482} 481}
483 482
484DT_MACHINE_START(SPEAR310_DT, "ST SPEAr310 SoC with Flattened Device Tree") 483DT_MACHINE_START(SPEAR310_DT, "ST SPEAr310 SoC with Flattened Device Tree")
diff --git a/arch/arm/mach-spear3xx/spear320.c b/arch/arm/mach-spear3xx/spear320.c
index e8caeef50a5c..a88fa841d29d 100644
--- a/arch/arm/mach-spear3xx/spear320.c
+++ b/arch/arm/mach-spear3xx/spear320.c
@@ -27,7 +27,6 @@
27#define SPEAR320_UART2_BASE UL(0xA4000000) 27#define SPEAR320_UART2_BASE UL(0xA4000000)
28#define SPEAR320_SSP0_BASE UL(0xA5000000) 28#define SPEAR320_SSP0_BASE UL(0xA5000000)
29#define SPEAR320_SSP1_BASE UL(0xA6000000) 29#define SPEAR320_SSP1_BASE UL(0xA6000000)
30#define SPEAR320_SOC_CONFIG_BASE UL(0xB3000000)
31 30
32/* Interrupt registers offsets and masks */ 31/* Interrupt registers offsets and masks */
33#define SPEAR320_INT_STS_MASK_REG 0x04 32#define SPEAR320_INT_STS_MASK_REG 0x04
@@ -481,10 +480,19 @@ static const char * const spear320_dt_board_compat[] = {
481 NULL, 480 NULL,
482}; 481};
483 482
483struct map_desc spear320_io_desc[] __initdata = {
484 {
485 .virtual = VA_SPEAR320_SOC_CONFIG_BASE,
486 .pfn = __phys_to_pfn(SPEAR320_SOC_CONFIG_BASE),
487 .length = SZ_16M,
488 .type = MT_DEVICE
489 },
490};
491
484static void __init spear320_map_io(void) 492static void __init spear320_map_io(void)
485{ 493{
494 iotable_init(spear320_io_desc, ARRAY_SIZE(spear320_io_desc));
486 spear3xx_map_io(); 495 spear3xx_map_io();
487 spear320_clk_init();
488} 496}
489 497
490DT_MACHINE_START(SPEAR320_DT, "ST SPEAr320 SoC with Flattened Device Tree") 498DT_MACHINE_START(SPEAR320_DT, "ST SPEAr320 SoC with Flattened Device Tree")
diff --git a/arch/arm/mach-spear3xx/spear3xx.c b/arch/arm/mach-spear3xx/spear3xx.c
index 826ac20ef1e7..f22419ed74a8 100644
--- a/arch/arm/mach-spear3xx/spear3xx.c
+++ b/arch/arm/mach-spear3xx/spear3xx.c
@@ -90,6 +90,8 @@ static void __init spear3xx_timer_init(void)
90 char pclk_name[] = "pll3_48m_clk"; 90 char pclk_name[] = "pll3_48m_clk";
91 struct clk *gpt_clk, *pclk; 91 struct clk *gpt_clk, *pclk;
92 92
93 spear3xx_clk_init();
94
93 /* get the system timer clock */ 95 /* get the system timer clock */
94 gpt_clk = clk_get_sys("gpt0", NULL); 96 gpt_clk = clk_get_sys("gpt0", NULL);
95 if (IS_ERR(gpt_clk)) { 97 if (IS_ERR(gpt_clk)) {
@@ -109,7 +111,7 @@ static void __init spear3xx_timer_init(void)
109 clk_put(gpt_clk); 111 clk_put(gpt_clk);
110 clk_put(pclk); 112 clk_put(pclk);
111 113
112 spear_setup_timer(SPEAR3XX_CPU_TMR_BASE, SPEAR3XX_IRQ_CPU_GPT1_1); 114 spear_setup_of_timer();
113} 115}
114 116
115struct sys_timer spear3xx_timer = { 117struct sys_timer spear3xx_timer = {
diff --git a/arch/arm/mach-spear6xx/Makefile b/arch/arm/mach-spear6xx/Makefile
index 76e5750552fc..898831d93f37 100644
--- a/arch/arm/mach-spear6xx/Makefile
+++ b/arch/arm/mach-spear6xx/Makefile
@@ -3,4 +3,4 @@
3# 3#
4 4
5# common files 5# common files
6obj-y += clock.o spear6xx.o 6obj-y += spear6xx.o
diff --git a/arch/arm/mach-spear6xx/clock.c b/arch/arm/mach-spear6xx/clock.c
deleted file mode 100644
index bef77d43db87..000000000000
--- a/arch/arm/mach-spear6xx/clock.c
+++ /dev/null
@@ -1,789 +0,0 @@
1/*
2 * arch/arm/mach-spear6xx/clock.c
3 *
4 * SPEAr6xx machines clock framework source file
5 *
6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#include <linux/init.h>
15#include <linux/io.h>
16#include <linux/kernel.h>
17#include <plat/clock.h>
18#include <mach/misc_regs.h>
19#include <mach/spear.h>
20
21#define PLL1_CTR (MISC_BASE + 0x008)
22#define PLL1_FRQ (MISC_BASE + 0x00C)
23#define PLL1_MOD (MISC_BASE + 0x010)
24#define PLL2_CTR (MISC_BASE + 0x014)
25/* PLL_CTR register masks */
26#define PLL_ENABLE 2
27#define PLL_MODE_SHIFT 4
28#define PLL_MODE_MASK 0x3
29#define PLL_MODE_NORMAL 0
30#define PLL_MODE_FRACTION 1
31#define PLL_MODE_DITH_DSB 2
32#define PLL_MODE_DITH_SSB 3
33
34#define PLL2_FRQ (MISC_BASE + 0x018)
35/* PLL FRQ register masks */
36#define PLL_DIV_N_SHIFT 0
37#define PLL_DIV_N_MASK 0xFF
38#define PLL_DIV_P_SHIFT 8
39#define PLL_DIV_P_MASK 0x7
40#define PLL_NORM_FDBK_M_SHIFT 24
41#define PLL_NORM_FDBK_M_MASK 0xFF
42#define PLL_DITH_FDBK_M_SHIFT 16
43#define PLL_DITH_FDBK_M_MASK 0xFFFF
44
45#define PLL2_MOD (MISC_BASE + 0x01C)
46#define PLL_CLK_CFG (MISC_BASE + 0x020)
47#define CORE_CLK_CFG (MISC_BASE + 0x024)
48/* CORE CLK CFG register masks */
49#define PLL_HCLK_RATIO_SHIFT 10
50#define PLL_HCLK_RATIO_MASK 0x3
51#define HCLK_PCLK_RATIO_SHIFT 8
52#define HCLK_PCLK_RATIO_MASK 0x3
53
54#define PERIP_CLK_CFG (MISC_BASE + 0x028)
55/* PERIP_CLK_CFG register masks */
56#define CLCD_CLK_SHIFT 2
57#define CLCD_CLK_MASK 0x3
58#define UART_CLK_SHIFT 4
59#define UART_CLK_MASK 0x1
60#define FIRDA_CLK_SHIFT 5
61#define FIRDA_CLK_MASK 0x3
62#define GPT0_CLK_SHIFT 8
63#define GPT1_CLK_SHIFT 10
64#define GPT2_CLK_SHIFT 11
65#define GPT3_CLK_SHIFT 12
66#define GPT_CLK_MASK 0x1
67#define AUX_CLK_PLL3_VAL 0
68#define AUX_CLK_PLL1_VAL 1
69
70#define PERIP1_CLK_ENB (MISC_BASE + 0x02C)
71/* PERIP1_CLK_ENB register masks */
72#define UART0_CLK_ENB 3
73#define UART1_CLK_ENB 4
74#define SSP0_CLK_ENB 5
75#define SSP1_CLK_ENB 6
76#define I2C_CLK_ENB 7
77#define JPEG_CLK_ENB 8
78#define FSMC_CLK_ENB 9
79#define FIRDA_CLK_ENB 10
80#define GPT2_CLK_ENB 11
81#define GPT3_CLK_ENB 12
82#define GPIO2_CLK_ENB 13
83#define SSP2_CLK_ENB 14
84#define ADC_CLK_ENB 15
85#define GPT1_CLK_ENB 11
86#define RTC_CLK_ENB 17
87#define GPIO1_CLK_ENB 18
88#define DMA_CLK_ENB 19
89#define SMI_CLK_ENB 21
90#define CLCD_CLK_ENB 22
91#define GMAC_CLK_ENB 23
92#define USBD_CLK_ENB 24
93#define USBH0_CLK_ENB 25
94#define USBH1_CLK_ENB 26
95
96#define PRSC1_CLK_CFG (MISC_BASE + 0x044)
97#define PRSC2_CLK_CFG (MISC_BASE + 0x048)
98#define PRSC3_CLK_CFG (MISC_BASE + 0x04C)
99/* gpt synthesizer register masks */
100#define GPT_MSCALE_SHIFT 0
101#define GPT_MSCALE_MASK 0xFFF
102#define GPT_NSCALE_SHIFT 12
103#define GPT_NSCALE_MASK 0xF
104
105#define AMEM_CLK_CFG (MISC_BASE + 0x050)
106#define EXPI_CLK_CFG (MISC_BASE + 0x054)
107#define CLCD_CLK_SYNT (MISC_BASE + 0x05C)
108#define FIRDA_CLK_SYNT (MISC_BASE + 0x060)
109#define UART_CLK_SYNT (MISC_BASE + 0x064)
110#define GMAC_CLK_SYNT (MISC_BASE + 0x068)
111#define RAS1_CLK_SYNT (MISC_BASE + 0x06C)
112#define RAS2_CLK_SYNT (MISC_BASE + 0x070)
113#define RAS3_CLK_SYNT (MISC_BASE + 0x074)
114#define RAS4_CLK_SYNT (MISC_BASE + 0x078)
115/* aux clk synthesiser register masks for irda to ras4 */
116#define AUX_SYNT_ENB 31
117#define AUX_EQ_SEL_SHIFT 30
118#define AUX_EQ_SEL_MASK 1
119#define AUX_EQ1_SEL 0
120#define AUX_EQ2_SEL 1
121#define AUX_XSCALE_SHIFT 16
122#define AUX_XSCALE_MASK 0xFFF
123#define AUX_YSCALE_SHIFT 0
124#define AUX_YSCALE_MASK 0xFFF
125
126/* root clks */
127/* 32 KHz oscillator clock */
128static struct clk osc_32k_clk = {
129 .flags = ALWAYS_ENABLED,
130 .rate = 32000,
131};
132
133/* 30 MHz oscillator clock */
134static struct clk osc_30m_clk = {
135 .flags = ALWAYS_ENABLED,
136 .rate = 30000000,
137};
138
139/* clock derived from 32 KHz osc clk */
140/* rtc clock */
141static struct clk rtc_clk = {
142 .pclk = &osc_32k_clk,
143 .en_reg = PERIP1_CLK_ENB,
144 .en_reg_bit = RTC_CLK_ENB,
145 .recalc = &follow_parent,
146};
147
148/* clock derived from 30 MHz osc clk */
149/* pll masks structure */
150static struct pll_clk_masks pll1_masks = {
151 .mode_mask = PLL_MODE_MASK,
152 .mode_shift = PLL_MODE_SHIFT,
153 .norm_fdbk_m_mask = PLL_NORM_FDBK_M_MASK,
154 .norm_fdbk_m_shift = PLL_NORM_FDBK_M_SHIFT,
155 .dith_fdbk_m_mask = PLL_DITH_FDBK_M_MASK,
156 .dith_fdbk_m_shift = PLL_DITH_FDBK_M_SHIFT,
157 .div_p_mask = PLL_DIV_P_MASK,
158 .div_p_shift = PLL_DIV_P_SHIFT,
159 .div_n_mask = PLL_DIV_N_MASK,
160 .div_n_shift = PLL_DIV_N_SHIFT,
161};
162
163/* pll1 configuration structure */
164static struct pll_clk_config pll1_config = {
165 .mode_reg = PLL1_CTR,
166 .cfg_reg = PLL1_FRQ,
167 .masks = &pll1_masks,
168};
169
170/* pll rate configuration table, in ascending order of rates */
171struct pll_rate_tbl pll_rtbl[] = {
172 {.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* 266 MHz */
173 {.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* 332 MHz */
174};
175
176/* PLL1 clock */
177static struct clk pll1_clk = {
178 .flags = ENABLED_ON_INIT,
179 .pclk = &osc_30m_clk,
180 .en_reg = PLL1_CTR,
181 .en_reg_bit = PLL_ENABLE,
182 .calc_rate = &pll_calc_rate,
183 .recalc = &pll_clk_recalc,
184 .set_rate = &pll_clk_set_rate,
185 .rate_config = {pll_rtbl, ARRAY_SIZE(pll_rtbl), 1},
186 .private_data = &pll1_config,
187};
188
189/* PLL3 48 MHz clock */
190static struct clk pll3_48m_clk = {
191 .flags = ALWAYS_ENABLED,
192 .pclk = &osc_30m_clk,
193 .rate = 48000000,
194};
195
196/* watch dog timer clock */
197static struct clk wdt_clk = {
198 .flags = ALWAYS_ENABLED,
199 .pclk = &osc_30m_clk,
200 .recalc = &follow_parent,
201};
202
203/* clock derived from pll1 clk */
204/* cpu clock */
205static struct clk cpu_clk = {
206 .flags = ALWAYS_ENABLED,
207 .pclk = &pll1_clk,
208 .recalc = &follow_parent,
209};
210
211/* ahb masks structure */
212static struct bus_clk_masks ahb_masks = {
213 .mask = PLL_HCLK_RATIO_MASK,
214 .shift = PLL_HCLK_RATIO_SHIFT,
215};
216
217/* ahb configuration structure */
218static struct bus_clk_config ahb_config = {
219 .reg = CORE_CLK_CFG,
220 .masks = &ahb_masks,
221};
222
223/* ahb rate configuration table, in ascending order of rates */
224struct bus_rate_tbl bus_rtbl[] = {
225 {.div = 3}, /* == parent divided by 4 */
226 {.div = 2}, /* == parent divided by 3 */
227 {.div = 1}, /* == parent divided by 2 */
228 {.div = 0}, /* == parent divided by 1 */
229};
230
231/* ahb clock */
232static struct clk ahb_clk = {
233 .flags = ALWAYS_ENABLED,
234 .pclk = &pll1_clk,
235 .calc_rate = &bus_calc_rate,
236 .recalc = &bus_clk_recalc,
237 .set_rate = &bus_clk_set_rate,
238 .rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
239 .private_data = &ahb_config,
240};
241
242/* auxiliary synthesizers masks */
243static struct aux_clk_masks aux_masks = {
244 .eq_sel_mask = AUX_EQ_SEL_MASK,
245 .eq_sel_shift = AUX_EQ_SEL_SHIFT,
246 .eq1_mask = AUX_EQ1_SEL,
247 .eq2_mask = AUX_EQ2_SEL,
248 .xscale_sel_mask = AUX_XSCALE_MASK,
249 .xscale_sel_shift = AUX_XSCALE_SHIFT,
250 .yscale_sel_mask = AUX_YSCALE_MASK,
251 .yscale_sel_shift = AUX_YSCALE_SHIFT,
252};
253
254/* uart configurations */
255static struct aux_clk_config uart_synth_config = {
256 .synth_reg = UART_CLK_SYNT,
257 .masks = &aux_masks,
258};
259
260/* aux rate configuration table, in ascending order of rates */
261struct aux_rate_tbl aux_rtbl[] = {
262 /* For PLL1 = 332 MHz */
263 {.xscale = 1, .yscale = 8, .eq = 1}, /* 41.5 MHz */
264 {.xscale = 1, .yscale = 4, .eq = 1}, /* 83 MHz */
265 {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
266};
267
268/* uart synth clock */
269static struct clk uart_synth_clk = {
270 .en_reg = UART_CLK_SYNT,
271 .en_reg_bit = AUX_SYNT_ENB,
272 .pclk = &pll1_clk,
273 .calc_rate = &aux_calc_rate,
274 .recalc = &aux_clk_recalc,
275 .set_rate = &aux_clk_set_rate,
276 .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 2},
277 .private_data = &uart_synth_config,
278};
279
280/* uart parents */
281static struct pclk_info uart_pclk_info[] = {
282 {
283 .pclk = &uart_synth_clk,
284 .pclk_val = AUX_CLK_PLL1_VAL,
285 }, {
286 .pclk = &pll3_48m_clk,
287 .pclk_val = AUX_CLK_PLL3_VAL,
288 },
289};
290
291/* uart parent select structure */
292static struct pclk_sel uart_pclk_sel = {
293 .pclk_info = uart_pclk_info,
294 .pclk_count = ARRAY_SIZE(uart_pclk_info),
295 .pclk_sel_reg = PERIP_CLK_CFG,
296 .pclk_sel_mask = UART_CLK_MASK,
297};
298
299/* uart0 clock */
300static struct clk uart0_clk = {
301 .en_reg = PERIP1_CLK_ENB,
302 .en_reg_bit = UART0_CLK_ENB,
303 .pclk_sel = &uart_pclk_sel,
304 .pclk_sel_shift = UART_CLK_SHIFT,
305 .recalc = &follow_parent,
306};
307
308/* uart1 clock */
309static struct clk uart1_clk = {
310 .en_reg = PERIP1_CLK_ENB,
311 .en_reg_bit = UART1_CLK_ENB,
312 .pclk_sel = &uart_pclk_sel,
313 .pclk_sel_shift = UART_CLK_SHIFT,
314 .recalc = &follow_parent,
315};
316
317/* firda configurations */
318static struct aux_clk_config firda_synth_config = {
319 .synth_reg = FIRDA_CLK_SYNT,
320 .masks = &aux_masks,
321};
322
323/* firda synth clock */
324static struct clk firda_synth_clk = {
325 .en_reg = FIRDA_CLK_SYNT,
326 .en_reg_bit = AUX_SYNT_ENB,
327 .pclk = &pll1_clk,
328 .calc_rate = &aux_calc_rate,
329 .recalc = &aux_clk_recalc,
330 .set_rate = &aux_clk_set_rate,
331 .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 2},
332 .private_data = &firda_synth_config,
333};
334
335/* firda parents */
336static struct pclk_info firda_pclk_info[] = {
337 {
338 .pclk = &firda_synth_clk,
339 .pclk_val = AUX_CLK_PLL1_VAL,
340 }, {
341 .pclk = &pll3_48m_clk,
342 .pclk_val = AUX_CLK_PLL3_VAL,
343 },
344};
345
346/* firda parent select structure */
347static struct pclk_sel firda_pclk_sel = {
348 .pclk_info = firda_pclk_info,
349 .pclk_count = ARRAY_SIZE(firda_pclk_info),
350 .pclk_sel_reg = PERIP_CLK_CFG,
351 .pclk_sel_mask = FIRDA_CLK_MASK,
352};
353
354/* firda clock */
355static struct clk firda_clk = {
356 .en_reg = PERIP1_CLK_ENB,
357 .en_reg_bit = FIRDA_CLK_ENB,
358 .pclk_sel = &firda_pclk_sel,
359 .pclk_sel_shift = FIRDA_CLK_SHIFT,
360 .recalc = &follow_parent,
361};
362
363/* clcd configurations */
364static struct aux_clk_config clcd_synth_config = {
365 .synth_reg = CLCD_CLK_SYNT,
366 .masks = &aux_masks,
367};
368
369/* firda synth clock */
370static struct clk clcd_synth_clk = {
371 .en_reg = CLCD_CLK_SYNT,
372 .en_reg_bit = AUX_SYNT_ENB,
373 .pclk = &pll1_clk,
374 .calc_rate = &aux_calc_rate,
375 .recalc = &aux_clk_recalc,
376 .set_rate = &aux_clk_set_rate,
377 .rate_config = {aux_rtbl, ARRAY_SIZE(aux_rtbl), 2},
378 .private_data = &clcd_synth_config,
379};
380
381/* clcd parents */
382static struct pclk_info clcd_pclk_info[] = {
383 {
384 .pclk = &clcd_synth_clk,
385 .pclk_val = AUX_CLK_PLL1_VAL,
386 }, {
387 .pclk = &pll3_48m_clk,
388 .pclk_val = AUX_CLK_PLL3_VAL,
389 },
390};
391
392/* clcd parent select structure */
393static struct pclk_sel clcd_pclk_sel = {
394 .pclk_info = clcd_pclk_info,
395 .pclk_count = ARRAY_SIZE(clcd_pclk_info),
396 .pclk_sel_reg = PERIP_CLK_CFG,
397 .pclk_sel_mask = CLCD_CLK_MASK,
398};
399
400/* clcd clock */
401static struct clk clcd_clk = {
402 .en_reg = PERIP1_CLK_ENB,
403 .en_reg_bit = CLCD_CLK_ENB,
404 .pclk_sel = &clcd_pclk_sel,
405 .pclk_sel_shift = CLCD_CLK_SHIFT,
406 .recalc = &follow_parent,
407};
408
409/* gpt synthesizer masks */
410static struct gpt_clk_masks gpt_masks = {
411 .mscale_sel_mask = GPT_MSCALE_MASK,
412 .mscale_sel_shift = GPT_MSCALE_SHIFT,
413 .nscale_sel_mask = GPT_NSCALE_MASK,
414 .nscale_sel_shift = GPT_NSCALE_SHIFT,
415};
416
417/* gpt rate configuration table, in ascending order of rates */
418struct gpt_rate_tbl gpt_rtbl[] = {
419 /* For pll1 = 332 MHz */
420 {.mscale = 4, .nscale = 0}, /* 41.5 MHz */
421 {.mscale = 2, .nscale = 0}, /* 55.3 MHz */
422 {.mscale = 1, .nscale = 0}, /* 83 MHz */
423};
424
425/* gpt0 synth clk config*/
426static struct gpt_clk_config gpt0_synth_config = {
427 .synth_reg = PRSC1_CLK_CFG,
428 .masks = &gpt_masks,
429};
430
431/* gpt synth clock */
432static struct clk gpt0_synth_clk = {
433 .flags = ALWAYS_ENABLED,
434 .pclk = &pll1_clk,
435 .calc_rate = &gpt_calc_rate,
436 .recalc = &gpt_clk_recalc,
437 .set_rate = &gpt_clk_set_rate,
438 .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
439 .private_data = &gpt0_synth_config,
440};
441
442/* gpt parents */
443static struct pclk_info gpt0_pclk_info[] = {
444 {
445 .pclk = &gpt0_synth_clk,
446 .pclk_val = AUX_CLK_PLL1_VAL,
447 }, {
448 .pclk = &pll3_48m_clk,
449 .pclk_val = AUX_CLK_PLL3_VAL,
450 },
451};
452
453/* gpt parent select structure */
454static struct pclk_sel gpt0_pclk_sel = {
455 .pclk_info = gpt0_pclk_info,
456 .pclk_count = ARRAY_SIZE(gpt0_pclk_info),
457 .pclk_sel_reg = PERIP_CLK_CFG,
458 .pclk_sel_mask = GPT_CLK_MASK,
459};
460
461/* gpt0 ARM1 subsystem timer clock */
462static struct clk gpt0_clk = {
463 .flags = ALWAYS_ENABLED,
464 .pclk_sel = &gpt0_pclk_sel,
465 .pclk_sel_shift = GPT0_CLK_SHIFT,
466 .recalc = &follow_parent,
467};
468
469
470/* Note: gpt0 and gpt1 share same parent clocks */
471/* gpt parent select structure */
472static struct pclk_sel gpt1_pclk_sel = {
473 .pclk_info = gpt0_pclk_info,
474 .pclk_count = ARRAY_SIZE(gpt0_pclk_info),
475 .pclk_sel_reg = PERIP_CLK_CFG,
476 .pclk_sel_mask = GPT_CLK_MASK,
477};
478
479/* gpt1 timer clock */
480static struct clk gpt1_clk = {
481 .flags = ALWAYS_ENABLED,
482 .pclk_sel = &gpt1_pclk_sel,
483 .pclk_sel_shift = GPT1_CLK_SHIFT,
484 .recalc = &follow_parent,
485};
486
487/* gpt2 synth clk config*/
488static struct gpt_clk_config gpt2_synth_config = {
489 .synth_reg = PRSC2_CLK_CFG,
490 .masks = &gpt_masks,
491};
492
493/* gpt synth clock */
494static struct clk gpt2_synth_clk = {
495 .flags = ALWAYS_ENABLED,
496 .pclk = &pll1_clk,
497 .calc_rate = &gpt_calc_rate,
498 .recalc = &gpt_clk_recalc,
499 .set_rate = &gpt_clk_set_rate,
500 .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
501 .private_data = &gpt2_synth_config,
502};
503
504/* gpt parents */
505static struct pclk_info gpt2_pclk_info[] = {
506 {
507 .pclk = &gpt2_synth_clk,
508 .pclk_val = AUX_CLK_PLL1_VAL,
509 }, {
510 .pclk = &pll3_48m_clk,
511 .pclk_val = AUX_CLK_PLL3_VAL,
512 },
513};
514
515/* gpt parent select structure */
516static struct pclk_sel gpt2_pclk_sel = {
517 .pclk_info = gpt2_pclk_info,
518 .pclk_count = ARRAY_SIZE(gpt2_pclk_info),
519 .pclk_sel_reg = PERIP_CLK_CFG,
520 .pclk_sel_mask = GPT_CLK_MASK,
521};
522
523/* gpt2 timer clock */
524static struct clk gpt2_clk = {
525 .flags = ALWAYS_ENABLED,
526 .pclk_sel = &gpt2_pclk_sel,
527 .pclk_sel_shift = GPT2_CLK_SHIFT,
528 .recalc = &follow_parent,
529};
530
531/* gpt3 synth clk config*/
532static struct gpt_clk_config gpt3_synth_config = {
533 .synth_reg = PRSC3_CLK_CFG,
534 .masks = &gpt_masks,
535};
536
537/* gpt synth clock */
538static struct clk gpt3_synth_clk = {
539 .flags = ALWAYS_ENABLED,
540 .pclk = &pll1_clk,
541 .calc_rate = &gpt_calc_rate,
542 .recalc = &gpt_clk_recalc,
543 .set_rate = &gpt_clk_set_rate,
544 .rate_config = {gpt_rtbl, ARRAY_SIZE(gpt_rtbl), 2},
545 .private_data = &gpt3_synth_config,
546};
547
548/* gpt parents */
549static struct pclk_info gpt3_pclk_info[] = {
550 {
551 .pclk = &gpt3_synth_clk,
552 .pclk_val = AUX_CLK_PLL1_VAL,
553 }, {
554 .pclk = &pll3_48m_clk,
555 .pclk_val = AUX_CLK_PLL3_VAL,
556 },
557};
558
559/* gpt parent select structure */
560static struct pclk_sel gpt3_pclk_sel = {
561 .pclk_info = gpt3_pclk_info,
562 .pclk_count = ARRAY_SIZE(gpt3_pclk_info),
563 .pclk_sel_reg = PERIP_CLK_CFG,
564 .pclk_sel_mask = GPT_CLK_MASK,
565};
566
567/* gpt3 timer clock */
568static struct clk gpt3_clk = {
569 .flags = ALWAYS_ENABLED,
570 .pclk_sel = &gpt3_pclk_sel,
571 .pclk_sel_shift = GPT3_CLK_SHIFT,
572 .recalc = &follow_parent,
573};
574
575/* clock derived from pll3 clk */
576/* usbh0 clock */
577static struct clk usbh0_clk = {
578 .pclk = &pll3_48m_clk,
579 .en_reg = PERIP1_CLK_ENB,
580 .en_reg_bit = USBH0_CLK_ENB,
581 .recalc = &follow_parent,
582};
583
584/* usbh1 clock */
585static struct clk usbh1_clk = {
586 .pclk = &pll3_48m_clk,
587 .en_reg = PERIP1_CLK_ENB,
588 .en_reg_bit = USBH1_CLK_ENB,
589 .recalc = &follow_parent,
590};
591
592/* usbd clock */
593static struct clk usbd_clk = {
594 .pclk = &pll3_48m_clk,
595 .en_reg = PERIP1_CLK_ENB,
596 .en_reg_bit = USBD_CLK_ENB,
597 .recalc = &follow_parent,
598};
599
600/* clock derived from ahb clk */
601/* apb masks structure */
602static struct bus_clk_masks apb_masks = {
603 .mask = HCLK_PCLK_RATIO_MASK,
604 .shift = HCLK_PCLK_RATIO_SHIFT,
605};
606
607/* apb configuration structure */
608static struct bus_clk_config apb_config = {
609 .reg = CORE_CLK_CFG,
610 .masks = &apb_masks,
611};
612
613/* apb clock */
614static struct clk apb_clk = {
615 .flags = ALWAYS_ENABLED,
616 .pclk = &ahb_clk,
617 .calc_rate = &bus_calc_rate,
618 .recalc = &bus_clk_recalc,
619 .set_rate = &bus_clk_set_rate,
620 .rate_config = {bus_rtbl, ARRAY_SIZE(bus_rtbl), 2},
621 .private_data = &apb_config,
622};
623
624/* i2c clock */
625static struct clk i2c_clk = {
626 .pclk = &ahb_clk,
627 .en_reg = PERIP1_CLK_ENB,
628 .en_reg_bit = I2C_CLK_ENB,
629 .recalc = &follow_parent,
630};
631
632/* dma clock */
633static struct clk dma_clk = {
634 .pclk = &ahb_clk,
635 .en_reg = PERIP1_CLK_ENB,
636 .en_reg_bit = DMA_CLK_ENB,
637 .recalc = &follow_parent,
638};
639
640/* jpeg clock */
641static struct clk jpeg_clk = {
642 .pclk = &ahb_clk,
643 .en_reg = PERIP1_CLK_ENB,
644 .en_reg_bit = JPEG_CLK_ENB,
645 .recalc = &follow_parent,
646};
647
648/* gmac clock */
649static struct clk gmac_clk = {
650 .pclk = &ahb_clk,
651 .en_reg = PERIP1_CLK_ENB,
652 .en_reg_bit = GMAC_CLK_ENB,
653 .recalc = &follow_parent,
654};
655
656/* smi clock */
657static struct clk smi_clk = {
658 .pclk = &ahb_clk,
659 .en_reg = PERIP1_CLK_ENB,
660 .en_reg_bit = SMI_CLK_ENB,
661 .recalc = &follow_parent,
662};
663
664/* fsmc clock */
665static struct clk fsmc_clk = {
666 .pclk = &ahb_clk,
667 .en_reg = PERIP1_CLK_ENB,
668 .en_reg_bit = FSMC_CLK_ENB,
669 .recalc = &follow_parent,
670};
671
672/* clock derived from apb clk */
673/* adc clock */
674static struct clk adc_clk = {
675 .pclk = &apb_clk,
676 .en_reg = PERIP1_CLK_ENB,
677 .en_reg_bit = ADC_CLK_ENB,
678 .recalc = &follow_parent,
679};
680
681/* ssp0 clock */
682static struct clk ssp0_clk = {
683 .pclk = &apb_clk,
684 .en_reg = PERIP1_CLK_ENB,
685 .en_reg_bit = SSP0_CLK_ENB,
686 .recalc = &follow_parent,
687};
688
689/* ssp1 clock */
690static struct clk ssp1_clk = {
691 .pclk = &apb_clk,
692 .en_reg = PERIP1_CLK_ENB,
693 .en_reg_bit = SSP1_CLK_ENB,
694 .recalc = &follow_parent,
695};
696
697/* ssp2 clock */
698static struct clk ssp2_clk = {
699 .pclk = &apb_clk,
700 .en_reg = PERIP1_CLK_ENB,
701 .en_reg_bit = SSP2_CLK_ENB,
702 .recalc = &follow_parent,
703};
704
705/* gpio0 ARM subsystem clock */
706static struct clk gpio0_clk = {
707 .flags = ALWAYS_ENABLED,
708 .pclk = &apb_clk,
709 .recalc = &follow_parent,
710};
711
712/* gpio1 clock */
713static struct clk gpio1_clk = {
714 .pclk = &apb_clk,
715 .en_reg = PERIP1_CLK_ENB,
716 .en_reg_bit = GPIO1_CLK_ENB,
717 .recalc = &follow_parent,
718};
719
720/* gpio2 clock */
721static struct clk gpio2_clk = {
722 .pclk = &apb_clk,
723 .en_reg = PERIP1_CLK_ENB,
724 .en_reg_bit = GPIO2_CLK_ENB,
725 .recalc = &follow_parent,
726};
727
728static struct clk dummy_apb_pclk;
729
730/* array of all spear 6xx clock lookups */
731static struct clk_lookup spear_clk_lookups[] = {
732 CLKDEV_INIT(NULL, "apb_pclk", &dummy_apb_pclk),
733 /* root clks */
734 CLKDEV_INIT(NULL, "osc_32k_clk", &osc_32k_clk),
735 CLKDEV_INIT(NULL, "osc_30m_clk", &osc_30m_clk),
736 /* clock derived from 32 KHz os clk */
737 CLKDEV_INIT("rtc-spear", NULL, &rtc_clk),
738 /* clock derived from 30 MHz os clk */
739 CLKDEV_INIT(NULL, "pll1_clk", &pll1_clk),
740 CLKDEV_INIT(NULL, "pll3_48m_clk", &pll3_48m_clk),
741 CLKDEV_INIT("wdt", NULL, &wdt_clk),
742 /* clock derived from pll1 clk */
743 CLKDEV_INIT(NULL, "cpu_clk", &cpu_clk),
744 CLKDEV_INIT(NULL, "ahb_clk", &ahb_clk),
745 CLKDEV_INIT(NULL, "uart_synth_clk", &uart_synth_clk),
746 CLKDEV_INIT(NULL, "firda_synth_clk", &firda_synth_clk),
747 CLKDEV_INIT(NULL, "clcd_synth_clk", &clcd_synth_clk),
748 CLKDEV_INIT(NULL, "gpt0_synth_clk", &gpt0_synth_clk),
749 CLKDEV_INIT(NULL, "gpt2_synth_clk", &gpt2_synth_clk),
750 CLKDEV_INIT(NULL, "gpt3_synth_clk", &gpt3_synth_clk),
751 CLKDEV_INIT("d0000000.serial", NULL, &uart0_clk),
752 CLKDEV_INIT("d0080000.serial", NULL, &uart1_clk),
753 CLKDEV_INIT("firda", NULL, &firda_clk),
754 CLKDEV_INIT("clcd", NULL, &clcd_clk),
755 CLKDEV_INIT("gpt0", NULL, &gpt0_clk),
756 CLKDEV_INIT("gpt1", NULL, &gpt1_clk),
757 CLKDEV_INIT("gpt2", NULL, &gpt2_clk),
758 CLKDEV_INIT("gpt3", NULL, &gpt3_clk),
759 /* clock derived from pll3 clk */
760 CLKDEV_INIT("designware_udc", NULL, &usbd_clk),
761 CLKDEV_INIT(NULL, "usbh.0_clk", &usbh0_clk),
762 CLKDEV_INIT(NULL, "usbh.1_clk", &usbh1_clk),
763 /* clock derived from ahb clk */
764 CLKDEV_INIT(NULL, "apb_clk", &apb_clk),
765 CLKDEV_INIT("d0200000.i2c", NULL, &i2c_clk),
766 CLKDEV_INIT("fc400000.dma", NULL, &dma_clk),
767 CLKDEV_INIT("jpeg", NULL, &jpeg_clk),
768 CLKDEV_INIT("gmac", NULL, &gmac_clk),
769 CLKDEV_INIT("fc000000.flash", NULL, &smi_clk),
770 CLKDEV_INIT("d1800000.flash", NULL, &fsmc_clk),
771 /* clock derived from apb clk */
772 CLKDEV_INIT("adc", NULL, &adc_clk),
773 CLKDEV_INIT("ssp-pl022.0", NULL, &ssp0_clk),
774 CLKDEV_INIT("ssp-pl022.1", NULL, &ssp1_clk),
775 CLKDEV_INIT("ssp-pl022.2", NULL, &ssp2_clk),
776 CLKDEV_INIT("f0100000.gpio", NULL, &gpio0_clk),
777 CLKDEV_INIT("fc980000.gpio", NULL, &gpio1_clk),
778 CLKDEV_INIT("d8100000.gpio", NULL, &gpio2_clk),
779};
780
781void __init spear6xx_clk_init(void)
782{
783 int i;
784
785 for (i = 0; i < ARRAY_SIZE(spear_clk_lookups); i++)
786 clk_register(&spear_clk_lookups[i]);
787
788 clk_init();
789}
diff --git a/arch/arm/mach-spear6xx/include/mach/generic.h b/arch/arm/mach-spear6xx/include/mach/generic.h
index 7167fd331d86..65514b159370 100644
--- a/arch/arm/mach-spear6xx/include/mach/generic.h
+++ b/arch/arm/mach-spear6xx/include/mach/generic.h
@@ -16,7 +16,7 @@
16 16
17#include <linux/init.h> 17#include <linux/init.h>
18 18
19void __init spear_setup_timer(resource_size_t base, int irq); 19void __init spear_setup_of_timer(void);
20void spear_restart(char, const char *); 20void spear_restart(char, const char *);
21void __init spear6xx_clk_init(void); 21void __init spear6xx_clk_init(void);
22 22
diff --git a/arch/arm/mach-spear6xx/include/mach/irqs.h b/arch/arm/mach-spear6xx/include/mach/irqs.h
index 2b735389e74b..37a5c411a866 100644
--- a/arch/arm/mach-spear6xx/include/mach/irqs.h
+++ b/arch/arm/mach-spear6xx/include/mach/irqs.h
@@ -16,9 +16,6 @@
16 16
17/* IRQ definitions */ 17/* IRQ definitions */
18/* VIC 1 */ 18/* VIC 1 */
19/* FIXME: probe this from DT */
20#define IRQ_CPU_GPT1_1 16
21
22#define IRQ_VIC_END 64 19#define IRQ_VIC_END 64
23 20
24/* GPIO pins virtual irqs */ 21/* GPIO pins virtual irqs */
diff --git a/arch/arm/mach-spear6xx/include/mach/misc_regs.h b/arch/arm/mach-spear6xx/include/mach/misc_regs.h
index 2b9aaa6cdd11..179e45774b3a 100644
--- a/arch/arm/mach-spear6xx/include/mach/misc_regs.h
+++ b/arch/arm/mach-spear6xx/include/mach/misc_regs.h
@@ -14,6 +14,8 @@
14#ifndef __MACH_MISC_REGS_H 14#ifndef __MACH_MISC_REGS_H
15#define __MACH_MISC_REGS_H 15#define __MACH_MISC_REGS_H
16 16
17#include <mach/spear.h>
18
17#define MISC_BASE IOMEM(VA_SPEAR6XX_ICM3_MISC_REG_BASE) 19#define MISC_BASE IOMEM(VA_SPEAR6XX_ICM3_MISC_REG_BASE)
18#define DMA_CHN_CFG (MISC_BASE + 0x0A0) 20#define DMA_CHN_CFG (MISC_BASE + 0x0A0)
19 21
diff --git a/arch/arm/mach-spear6xx/include/mach/spear.h b/arch/arm/mach-spear6xx/include/mach/spear.h
index d278ed047a53..cb8ed2f4dc85 100644
--- a/arch/arm/mach-spear6xx/include/mach/spear.h
+++ b/arch/arm/mach-spear6xx/include/mach/spear.h
@@ -25,7 +25,6 @@
25/* ML-1, 2 - Multi Layer CPU Subsystem */ 25/* ML-1, 2 - Multi Layer CPU Subsystem */
26#define SPEAR6XX_ML_CPU_BASE UL(0xF0000000) 26#define SPEAR6XX_ML_CPU_BASE UL(0xF0000000)
27#define VA_SPEAR6XX_ML_CPU_BASE UL(0xF0000000) 27#define VA_SPEAR6XX_ML_CPU_BASE UL(0xF0000000)
28#define SPEAR6XX_CPU_TMR_BASE UL(0xF0000000)
29 28
30/* ICM3 - Basic Subsystem */ 29/* ICM3 - Basic Subsystem */
31#define SPEAR6XX_ICM3_SMI_CTRL_BASE UL(0xFC000000) 30#define SPEAR6XX_ICM3_SMI_CTRL_BASE UL(0xFC000000)
diff --git a/arch/arm/mach-spear6xx/spear6xx.c b/arch/arm/mach-spear6xx/spear6xx.c
index de194dbb8371..2e2e3596583e 100644
--- a/arch/arm/mach-spear6xx/spear6xx.c
+++ b/arch/arm/mach-spear6xx/spear6xx.c
@@ -419,9 +419,6 @@ struct map_desc spear6xx_io_desc[] __initdata = {
419void __init spear6xx_map_io(void) 419void __init spear6xx_map_io(void)
420{ 420{
421 iotable_init(spear6xx_io_desc, ARRAY_SIZE(spear6xx_io_desc)); 421 iotable_init(spear6xx_io_desc, ARRAY_SIZE(spear6xx_io_desc));
422
423 /* This will initialize clock framework */
424 spear6xx_clk_init();
425} 422}
426 423
427static void __init spear6xx_timer_init(void) 424static void __init spear6xx_timer_init(void)
@@ -429,6 +426,8 @@ static void __init spear6xx_timer_init(void)
429 char pclk_name[] = "pll3_48m_clk"; 426 char pclk_name[] = "pll3_48m_clk";
430 struct clk *gpt_clk, *pclk; 427 struct clk *gpt_clk, *pclk;
431 428
429 spear6xx_clk_init();
430
432 /* get the system timer clock */ 431 /* get the system timer clock */
433 gpt_clk = clk_get_sys("gpt0", NULL); 432 gpt_clk = clk_get_sys("gpt0", NULL);
434 if (IS_ERR(gpt_clk)) { 433 if (IS_ERR(gpt_clk)) {
@@ -448,7 +447,7 @@ static void __init spear6xx_timer_init(void)
448 clk_put(gpt_clk); 447 clk_put(gpt_clk);
449 clk_put(pclk); 448 clk_put(pclk);
450 449
451 spear_setup_timer(SPEAR6XX_CPU_TMR_BASE, IRQ_CPU_GPT1_1); 450 spear_setup_of_timer();
452} 451}
453 452
454struct sys_timer spear6xx_timer = { 453struct sys_timer spear6xx_timer = {
diff --git a/arch/arm/mach-tegra/Kconfig b/arch/arm/mach-tegra/Kconfig
index d0f2546706ca..6a113a9bb87a 100644
--- a/arch/arm/mach-tegra/Kconfig
+++ b/arch/arm/mach-tegra/Kconfig
@@ -50,6 +50,14 @@ config TEGRA_PCI
50 depends on ARCH_TEGRA_2x_SOC 50 depends on ARCH_TEGRA_2x_SOC
51 select PCI 51 select PCI
52 52
53config TEGRA_AHB
54 bool "Enable AHB driver for NVIDIA Tegra SoCs"
55 default y
56 help
57 Adds AHB configuration functionality for NVIDIA Tegra SoCs,
58 which controls AHB bus master arbitration and some
59 perfomance parameters(priority, prefech size).
60
53comment "Tegra board type" 61comment "Tegra board type"
54 62
55config MACH_HARMONY 63config MACH_HARMONY
@@ -111,7 +119,7 @@ config MACH_VENTANA
111 Support for the nVidia Ventana development platform 119 Support for the nVidia Ventana development platform
112 120
113choice 121choice
114 prompt "Low-level debug console UART" 122 prompt "Default low-level debug console UART"
115 default TEGRA_DEBUG_UART_NONE 123 default TEGRA_DEBUG_UART_NONE
116 124
117config TEGRA_DEBUG_UART_NONE 125config TEGRA_DEBUG_UART_NONE
@@ -134,6 +142,33 @@ config TEGRA_DEBUG_UARTE
134 142
135endchoice 143endchoice
136 144
145choice
146 prompt "Automatic low-level debug console UART"
147 default TEGRA_DEBUG_UART_AUTO_NONE
148
149config TEGRA_DEBUG_UART_AUTO_NONE
150 bool "None"
151
152config TEGRA_DEBUG_UART_AUTO_ODMDATA
153 bool "Via ODMDATA"
154 help
155 Automatically determines which UART to use for low-level debug based
156 on the ODMDATA value. This value is part of the BCT, and is written
157 to the boot memory device using nvflash, or other flashing tool.
158 When bits 19:18 are 3, then bits 17:15 indicate which UART to use;
159 0/1/2/3/4 are UART A/B/C/D/E.
160
161config TEGRA_DEBUG_UART_AUTO_SCRATCH
162 bool "Via UART scratch register"
163 help
164 Automatically determines which UART to use for low-level debug based
165 on the UART scratch register value. Some bootloaders put ASCII 'D'
166 in this register when they initialize their own console UART output.
167 Using this option allows the kernel to automatically pick the same
168 UART.
169
170endchoice
171
137config TEGRA_SYSTEM_DMA 172config TEGRA_SYSTEM_DMA
138 bool "Enable system DMA driver for NVIDIA Tegra SoCs" 173 bool "Enable system DMA driver for NVIDIA Tegra SoCs"
139 default y 174 default y
diff --git a/arch/arm/mach-tegra/board-dt-tegra20.c b/arch/arm/mach-tegra/board-dt-tegra20.c
index fac3eb1af17e..eb7249db50a5 100644
--- a/arch/arm/mach-tegra/board-dt-tegra20.c
+++ b/arch/arm/mach-tegra/board-dt-tegra20.c
@@ -110,6 +110,7 @@ DT_MACHINE_START(TEGRA_DT, "nVidia Tegra20 (Flattened Device Tree)")
110 .handle_irq = gic_handle_irq, 110 .handle_irq = gic_handle_irq,
111 .timer = &tegra_timer, 111 .timer = &tegra_timer,
112 .init_machine = tegra_dt_init, 112 .init_machine = tegra_dt_init,
113 .init_late = tegra_init_late,
113 .restart = tegra_assert_system_reset, 114 .restart = tegra_assert_system_reset,
114 .dt_compat = tegra20_dt_board_compat, 115 .dt_compat = tegra20_dt_board_compat,
115MACHINE_END 116MACHINE_END
diff --git a/arch/arm/mach-tegra/board-dt-tegra30.c b/arch/arm/mach-tegra/board-dt-tegra30.c
index 5f7c03e972f3..4f76fa7a5da3 100644
--- a/arch/arm/mach-tegra/board-dt-tegra30.c
+++ b/arch/arm/mach-tegra/board-dt-tegra30.c
@@ -51,12 +51,22 @@ struct of_dev_auxdata tegra30_auxdata_lookup[] __initdata = {
51 OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000C500, "tegra-i2c.2", NULL), 51 OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000C500, "tegra-i2c.2", NULL),
52 OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000C700, "tegra-i2c.3", NULL), 52 OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000C700, "tegra-i2c.3", NULL),
53 OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000D000, "tegra-i2c.4", NULL), 53 OF_DEV_AUXDATA("nvidia,tegra20-i2c", 0x7000D000, "tegra-i2c.4", NULL),
54 OF_DEV_AUXDATA("nvidia,tegra30-ahub", 0x70080000, "tegra30-ahub", NULL),
54 {} 55 {}
55}; 56};
56 57
57static __initdata struct tegra_clk_init_table tegra_dt_clk_init_table[] = { 58static __initdata struct tegra_clk_init_table tegra_dt_clk_init_table[] = {
58 /* name parent rate enabled */ 59 /* name parent rate enabled */
59 { "uarta", "pll_p", 408000000, true }, 60 { "uarta", "pll_p", 408000000, true },
61 { "pll_a", "pll_p_out1", 564480000, true },
62 { "pll_a_out0", "pll_a", 11289600, true },
63 { "extern1", "pll_a_out0", 0, true },
64 { "clk_out_1", "extern1", 0, true },
65 { "i2s0", "pll_a_out0", 11289600, false},
66 { "i2s1", "pll_a_out0", 11289600, false},
67 { "i2s2", "pll_a_out0", 11289600, false},
68 { "i2s3", "pll_a_out0", 11289600, false},
69 { "i2s4", "pll_a_out0", 11289600, false},
60 { NULL, NULL, 0, 0}, 70 { NULL, NULL, 0, 0},
61}; 71};
62 72
@@ -80,6 +90,7 @@ DT_MACHINE_START(TEGRA30_DT, "NVIDIA Tegra30 (Flattened Device Tree)")
80 .handle_irq = gic_handle_irq, 90 .handle_irq = gic_handle_irq,
81 .timer = &tegra_timer, 91 .timer = &tegra_timer,
82 .init_machine = tegra30_dt_init, 92 .init_machine = tegra30_dt_init,
93 .init_late = tegra_init_late,
83 .restart = tegra_assert_system_reset, 94 .restart = tegra_assert_system_reset,
84 .dt_compat = tegra30_dt_board_compat, 95 .dt_compat = tegra30_dt_board_compat,
85MACHINE_END 96MACHINE_END
diff --git a/arch/arm/mach-tegra/board-harmony.c b/arch/arm/mach-tegra/board-harmony.c
index b906b3b6077b..e65e837f4013 100644
--- a/arch/arm/mach-tegra/board-harmony.c
+++ b/arch/arm/mach-tegra/board-harmony.c
@@ -192,5 +192,6 @@ MACHINE_START(HARMONY, "harmony")
192 .handle_irq = gic_handle_irq, 192 .handle_irq = gic_handle_irq,
193 .timer = &tegra_timer, 193 .timer = &tegra_timer,
194 .init_machine = tegra_harmony_init, 194 .init_machine = tegra_harmony_init,
195 .init_late = tegra_init_late,
195 .restart = tegra_assert_system_reset, 196 .restart = tegra_assert_system_reset,
196MACHINE_END 197MACHINE_END
diff --git a/arch/arm/mach-tegra/board-paz00.c b/arch/arm/mach-tegra/board-paz00.c
index d0735c70d688..bbc1907e98a6 100644
--- a/arch/arm/mach-tegra/board-paz00.c
+++ b/arch/arm/mach-tegra/board-paz00.c
@@ -162,6 +162,8 @@ static void paz00_i2c_init(void)
162 162
163static void paz00_usb_init(void) 163static void paz00_usb_init(void)
164{ 164{
165 tegra_ehci2_ulpi_phy_config.reset_gpio = TEGRA_ULPI_RST;
166
165 platform_device_register(&tegra_ehci2_device); 167 platform_device_register(&tegra_ehci2_device);
166 platform_device_register(&tegra_ehci3_device); 168 platform_device_register(&tegra_ehci3_device);
167} 169}
@@ -179,7 +181,6 @@ static __initdata struct tegra_clk_init_table paz00_clk_init_table[] = {
179 { "uarta", "pll_p", 216000000, true }, 181 { "uarta", "pll_p", 216000000, true },
180 { "uartc", "pll_p", 216000000, true }, 182 { "uartc", "pll_p", 216000000, true },
181 183
182 { "pll_p_out4", "pll_p", 24000000, true },
183 { "usbd", "clk_m", 12000000, false }, 184 { "usbd", "clk_m", 12000000, false },
184 { "usb2", "clk_m", 12000000, false }, 185 { "usb2", "clk_m", 12000000, false },
185 { "usb3", "clk_m", 12000000, false }, 186 { "usb3", "clk_m", 12000000, false },
@@ -224,5 +225,6 @@ MACHINE_START(PAZ00, "Toshiba AC100 / Dynabook AZ")
224 .handle_irq = gic_handle_irq, 225 .handle_irq = gic_handle_irq,
225 .timer = &tegra_timer, 226 .timer = &tegra_timer,
226 .init_machine = tegra_paz00_init, 227 .init_machine = tegra_paz00_init,
228 .init_late = tegra_init_late,
227 .restart = tegra_assert_system_reset, 229 .restart = tegra_assert_system_reset,
228MACHINE_END 230MACHINE_END
diff --git a/arch/arm/mach-tegra/board-seaboard.c b/arch/arm/mach-tegra/board-seaboard.c
index 79064c7a7907..71e9f3fc7fba 100644
--- a/arch/arm/mach-tegra/board-seaboard.c
+++ b/arch/arm/mach-tegra/board-seaboard.c
@@ -277,6 +277,7 @@ MACHINE_START(SEABOARD, "seaboard")
277 .handle_irq = gic_handle_irq, 277 .handle_irq = gic_handle_irq,
278 .timer = &tegra_timer, 278 .timer = &tegra_timer,
279 .init_machine = tegra_seaboard_init, 279 .init_machine = tegra_seaboard_init,
280 .init_late = tegra_init_late,
280 .restart = tegra_assert_system_reset, 281 .restart = tegra_assert_system_reset,
281MACHINE_END 282MACHINE_END
282 283
@@ -288,6 +289,7 @@ MACHINE_START(KAEN, "kaen")
288 .handle_irq = gic_handle_irq, 289 .handle_irq = gic_handle_irq,
289 .timer = &tegra_timer, 290 .timer = &tegra_timer,
290 .init_machine = tegra_kaen_init, 291 .init_machine = tegra_kaen_init,
292 .init_late = tegra_init_late,
291 .restart = tegra_assert_system_reset, 293 .restart = tegra_assert_system_reset,
292MACHINE_END 294MACHINE_END
293 295
@@ -299,5 +301,6 @@ MACHINE_START(WARIO, "wario")
299 .handle_irq = gic_handle_irq, 301 .handle_irq = gic_handle_irq,
300 .timer = &tegra_timer, 302 .timer = &tegra_timer,
301 .init_machine = tegra_wario_init, 303 .init_machine = tegra_wario_init,
304 .init_late = tegra_init_late,
302 .restart = tegra_assert_system_reset, 305 .restart = tegra_assert_system_reset,
303MACHINE_END 306MACHINE_END
diff --git a/arch/arm/mach-tegra/board-trimslice.c b/arch/arm/mach-tegra/board-trimslice.c
index bc59b379c6fe..776aa9564d5d 100644
--- a/arch/arm/mach-tegra/board-trimslice.c
+++ b/arch/arm/mach-tegra/board-trimslice.c
@@ -118,6 +118,8 @@ static void trimslice_usb_init(void)
118 pdata = tegra_ehci1_device.dev.platform_data; 118 pdata = tegra_ehci1_device.dev.platform_data;
119 pdata->vbus_gpio = TRIMSLICE_GPIO_USB1_MODE; 119 pdata->vbus_gpio = TRIMSLICE_GPIO_USB1_MODE;
120 120
121 tegra_ehci2_ulpi_phy_config.reset_gpio = TEGRA_GPIO_PV0;
122
121 platform_device_register(&tegra_ehci3_device); 123 platform_device_register(&tegra_ehci3_device);
122 platform_device_register(&tegra_ehci2_device); 124 platform_device_register(&tegra_ehci2_device);
123 platform_device_register(&tegra_ehci1_device); 125 platform_device_register(&tegra_ehci1_device);
@@ -176,5 +178,6 @@ MACHINE_START(TRIMSLICE, "trimslice")
176 .handle_irq = gic_handle_irq, 178 .handle_irq = gic_handle_irq,
177 .timer = &tegra_timer, 179 .timer = &tegra_timer,
178 .init_machine = tegra_trimslice_init, 180 .init_machine = tegra_trimslice_init,
181 .init_late = tegra_init_late,
179 .restart = tegra_assert_system_reset, 182 .restart = tegra_assert_system_reset,
180MACHINE_END 183MACHINE_END
diff --git a/arch/arm/mach-tegra/board.h b/arch/arm/mach-tegra/board.h
index 75d1543d77c0..65014968fc6c 100644
--- a/arch/arm/mach-tegra/board.h
+++ b/arch/arm/mach-tegra/board.h
@@ -32,5 +32,19 @@ void __init tegra_init_irq(void);
32void __init tegra_dt_init_irq(void); 32void __init tegra_dt_init_irq(void);
33int __init tegra_pcie_init(bool init_port0, bool init_port1); 33int __init tegra_pcie_init(bool init_port0, bool init_port1);
34 34
35void tegra_init_late(void);
36
37#ifdef CONFIG_DEBUG_FS
38int tegra_clk_debugfs_init(void);
39#else
40static inline int tegra_clk_debugfs_init(void) { return 0; }
41#endif
42
43#if defined(CONFIG_ARCH_TEGRA_2x_SOC) && defined(CONFIG_DEBUG_FS)
44int __init tegra_powergate_debugfs_init(void);
45#else
46static inline int tegra_powergate_debugfs_init(void) { return 0; }
47#endif
48
35extern struct sys_timer tegra_timer; 49extern struct sys_timer tegra_timer;
36#endif 50#endif
diff --git a/arch/arm/mach-tegra/clock.c b/arch/arm/mach-tegra/clock.c
index 8dad8d18cb49..58f981c0819c 100644
--- a/arch/arm/mach-tegra/clock.c
+++ b/arch/arm/mach-tegra/clock.c
@@ -642,7 +642,7 @@ static int clk_debugfs_register(struct clk *c)
642 return 0; 642 return 0;
643} 643}
644 644
645static int __init clk_debugfs_init(void) 645int __init tegra_clk_debugfs_init(void)
646{ 646{
647 struct clk *c; 647 struct clk *c;
648 struct dentry *d; 648 struct dentry *d;
@@ -669,5 +669,4 @@ err_out:
669 return err; 669 return err;
670} 670}
671 671
672late_initcall(clk_debugfs_init);
673#endif 672#endif
diff --git a/arch/arm/mach-tegra/common.c b/arch/arm/mach-tegra/common.c
index 22df10fb9972..204a5c8b0b57 100644
--- a/arch/arm/mach-tegra/common.c
+++ b/arch/arm/mach-tegra/common.c
@@ -82,10 +82,12 @@ static __initdata struct tegra_clk_init_table tegra20_clk_init_table[] = {
82 { "pll_p_out1", "pll_p", 28800000, true }, 82 { "pll_p_out1", "pll_p", 28800000, true },
83 { "pll_p_out2", "pll_p", 48000000, true }, 83 { "pll_p_out2", "pll_p", 48000000, true },
84 { "pll_p_out3", "pll_p", 72000000, true }, 84 { "pll_p_out3", "pll_p", 72000000, true },
85 { "pll_p_out4", "pll_p", 108000000, true }, 85 { "pll_p_out4", "pll_p", 24000000, true },
86 { "sclk", "pll_p_out4", 108000000, true }, 86 { "pll_c", "clk_m", 600000000, true },
87 { "hclk", "sclk", 108000000, true }, 87 { "pll_c_out1", "pll_c", 120000000, true },
88 { "pclk", "hclk", 54000000, true }, 88 { "sclk", "pll_c_out1", 120000000, true },
89 { "hclk", "sclk", 120000000, true },
90 { "pclk", "hclk", 60000000, true },
89 { "csite", NULL, 0, true }, 91 { "csite", NULL, 0, true },
90 { "emc", NULL, 0, true }, 92 { "emc", NULL, 0, true },
91 { "cpu", NULL, 0, true }, 93 { "cpu", NULL, 0, true },
@@ -93,6 +95,17 @@ static __initdata struct tegra_clk_init_table tegra20_clk_init_table[] = {
93}; 95};
94#endif 96#endif
95 97
98#ifdef CONFIG_ARCH_TEGRA_3x_SOC
99static __initdata struct tegra_clk_init_table tegra30_clk_init_table[] = {
100 /* name parent rate enabled */
101 { "clk_m", NULL, 0, true },
102 { "pll_p", "clk_m", 408000000, true },
103 { "pll_p_out1", "pll_p", 9600000, true },
104 { NULL, NULL, 0, 0},
105};
106#endif
107
108
96static void __init tegra_init_cache(u32 tag_latency, u32 data_latency) 109static void __init tegra_init_cache(u32 tag_latency, u32 data_latency)
97{ 110{
98#ifdef CONFIG_CACHE_L2X0 111#ifdef CONFIG_CACHE_L2X0
@@ -127,8 +140,15 @@ void __init tegra30_init_early(void)
127{ 140{
128 tegra_init_fuse(); 141 tegra_init_fuse();
129 tegra30_init_clocks(); 142 tegra30_init_clocks();
143 tegra_clk_init_from_table(tegra30_clk_init_table);
130 tegra_init_cache(0x441, 0x551); 144 tegra_init_cache(0x441, 0x551);
131 tegra_pmc_init(); 145 tegra_pmc_init();
132 tegra_powergate_init(); 146 tegra_powergate_init();
133} 147}
134#endif 148#endif
149
150void __init tegra_init_late(void)
151{
152 tegra_clk_debugfs_init();
153 tegra_powergate_debugfs_init();
154}
diff --git a/arch/arm/mach-tegra/devices.c b/arch/arm/mach-tegra/devices.c
index 2d8dfa2faf8f..c70e65ffa36b 100644
--- a/arch/arm/mach-tegra/devices.c
+++ b/arch/arm/mach-tegra/devices.c
@@ -439,9 +439,8 @@ static struct resource tegra_usb3_resources[] = {
439 }, 439 },
440}; 440};
441 441
442static struct tegra_ulpi_config tegra_ehci2_ulpi_phy_config = { 442struct tegra_ulpi_config tegra_ehci2_ulpi_phy_config = {
443 /* All existing boards use GPIO PV0 for phy reset */ 443 .reset_gpio = -1,
444 .reset_gpio = TEGRA_GPIO_PV0,
445 .clk = "cdev2", 444 .clk = "cdev2",
446}; 445};
447 446
diff --git a/arch/arm/mach-tegra/devices.h b/arch/arm/mach-tegra/devices.h
index 138c642e59f4..4f5052726495 100644
--- a/arch/arm/mach-tegra/devices.h
+++ b/arch/arm/mach-tegra/devices.h
@@ -22,6 +22,10 @@
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/platform_data/tegra_usb.h> 23#include <linux/platform_data/tegra_usb.h>
24 24
25#include <mach/usb_phy.h>
26
27extern struct tegra_ulpi_config tegra_ehci2_ulpi_phy_config;
28
25extern struct tegra_ehci_platform_data tegra_ehci1_pdata; 29extern struct tegra_ehci_platform_data tegra_ehci1_pdata;
26extern struct tegra_ehci_platform_data tegra_ehci2_pdata; 30extern struct tegra_ehci_platform_data tegra_ehci2_pdata;
27extern struct tegra_ehci_platform_data tegra_ehci3_pdata; 31extern struct tegra_ehci_platform_data tegra_ehci3_pdata;
diff --git a/arch/arm/mach-tegra/include/mach/tegra-ahb.h b/arch/arm/mach-tegra/include/mach/tegra-ahb.h
new file mode 100644
index 000000000000..e0f8c84b1d8c
--- /dev/null
+++ b/arch/arm/mach-tegra/include/mach/tegra-ahb.h
@@ -0,0 +1,19 @@
1/*
2 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms and conditions of the GNU General Public License,
6 * version 2, as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope it will be useful, but WITHOUT
9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
11 * more details.
12 */
13
14#ifndef __MACH_TEGRA_AHB_H__
15#define __MACH_TEGRA_AHB_H__
16
17extern int tegra_ahb_enable_smmu(struct device_node *ahb);
18
19#endif /* __MACH_TEGRA_AHB_H__ */
diff --git a/arch/arm/mach-tegra/include/mach/uncompress.h b/arch/arm/mach-tegra/include/mach/uncompress.h
index 5a440f315e57..937c4c50219e 100644
--- a/arch/arm/mach-tegra/include/mach/uncompress.h
+++ b/arch/arm/mach-tegra/include/mach/uncompress.h
@@ -63,52 +63,86 @@ static inline void save_uart_address(void)
63 buf[0] = 0; 63 buf[0] = 0;
64} 64}
65 65
66/* 66static const struct {
67 * Setup before decompression. This is where we do UART selection for 67 u32 base;
68 * earlyprintk and init the uart_base register. 68 u32 reset_reg;
69 */ 69 u32 clock_reg;
70static inline void arch_decomp_setup(void) 70 u32 bit;
71} uarts[] = {
72 {
73 TEGRA_UARTA_BASE,
74 TEGRA_CLK_RESET_BASE + 0x04,
75 TEGRA_CLK_RESET_BASE + 0x10,
76 6,
77 },
78 {
79 TEGRA_UARTB_BASE,
80 TEGRA_CLK_RESET_BASE + 0x04,
81 TEGRA_CLK_RESET_BASE + 0x10,
82 7,
83 },
84 {
85 TEGRA_UARTC_BASE,
86 TEGRA_CLK_RESET_BASE + 0x08,
87 TEGRA_CLK_RESET_BASE + 0x14,
88 23,
89 },
90 {
91 TEGRA_UARTD_BASE,
92 TEGRA_CLK_RESET_BASE + 0x0c,
93 TEGRA_CLK_RESET_BASE + 0x18,
94 1,
95 },
96 {
97 TEGRA_UARTE_BASE,
98 TEGRA_CLK_RESET_BASE + 0x0c,
99 TEGRA_CLK_RESET_BASE + 0x18,
100 2,
101 },
102};
103
104static inline bool uart_clocked(int i)
105{
106 if (*(u8 *)uarts[i].reset_reg & BIT(uarts[i].bit))
107 return false;
108
109 if (!(*(u8 *)uarts[i].clock_reg & BIT(uarts[i].bit)))
110 return false;
111
112 return true;
113}
114
115#ifdef CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA
116int auto_odmdata(void)
117{
118 volatile u32 *pmc = (volatile u32 *)TEGRA_PMC_BASE;
119 u32 odmdata = pmc[0xa0 / 4];
120
121 /*
122 * Bits 19:18 are the console type: 0=default, 1=none, 2==DCC, 3==UART
123 * Some boards apparently swap the last two values, but we don't have
124 * any way of catering for that here, so we just accept either. If this
125 * doesn't make sense for your board, just don't enable this feature.
126 *
127 * Bits 17:15 indicate the UART to use, 0/1/2/3/4 are UART A/B/C/D/E.
128 */
129
130 switch ((odmdata >> 18) & 3) {
131 case 2:
132 case 3:
133 break;
134 default:
135 return -1;
136 }
137
138 return (odmdata >> 15) & 7;
139}
140#endif
141
142#ifdef CONFIG_TEGRA_DEBUG_UART_AUTO_SCRATCH
143int auto_scratch(void)
71{ 144{
72 static const struct {
73 u32 base;
74 u32 reset_reg;
75 u32 clock_reg;
76 u32 bit;
77 } uarts[] = {
78 {
79 TEGRA_UARTA_BASE,
80 TEGRA_CLK_RESET_BASE + 0x04,
81 TEGRA_CLK_RESET_BASE + 0x10,
82 6,
83 },
84 {
85 TEGRA_UARTB_BASE,
86 TEGRA_CLK_RESET_BASE + 0x04,
87 TEGRA_CLK_RESET_BASE + 0x10,
88 7,
89 },
90 {
91 TEGRA_UARTC_BASE,
92 TEGRA_CLK_RESET_BASE + 0x08,
93 TEGRA_CLK_RESET_BASE + 0x14,
94 23,
95 },
96 {
97 TEGRA_UARTD_BASE,
98 TEGRA_CLK_RESET_BASE + 0x0c,
99 TEGRA_CLK_RESET_BASE + 0x18,
100 1,
101 },
102 {
103 TEGRA_UARTE_BASE,
104 TEGRA_CLK_RESET_BASE + 0x0c,
105 TEGRA_CLK_RESET_BASE + 0x18,
106 2,
107 },
108 };
109 int i; 145 int i;
110 volatile u32 *apb_misc = (volatile u32 *)TEGRA_APB_MISC_BASE;
111 u32 chip, div;
112 146
113 /* 147 /*
114 * Look for the first UART that: 148 * Look for the first UART that:
@@ -125,20 +159,60 @@ static inline void arch_decomp_setup(void)
125 * back to what's specified in TEGRA_DEBUG_UART_BASE. 159 * back to what's specified in TEGRA_DEBUG_UART_BASE.
126 */ 160 */
127 for (i = 0; i < ARRAY_SIZE(uarts); i++) { 161 for (i = 0; i < ARRAY_SIZE(uarts); i++) {
128 if (*(u8 *)uarts[i].reset_reg & BIT(uarts[i].bit)) 162 if (!uart_clocked(i))
129 continue;
130
131 if (!(*(u8 *)uarts[i].clock_reg & BIT(uarts[i].bit)))
132 continue; 163 continue;
133 164
134 uart = (volatile u8 *)uarts[i].base; 165 uart = (volatile u8 *)uarts[i].base;
135 if (uart[UART_SCR << DEBUG_UART_SHIFT] != 'D') 166 if (uart[UART_SCR << DEBUG_UART_SHIFT] != 'D')
136 continue; 167 continue;
137 168
138 break; 169 return i;
139 } 170 }
140 if (i == ARRAY_SIZE(uarts)) 171
141 uart = (volatile u8 *)TEGRA_DEBUG_UART_BASE; 172 return -1;
173}
174#endif
175
176/*
177 * Setup before decompression. This is where we do UART selection for
178 * earlyprintk and init the uart_base register.
179 */
180static inline void arch_decomp_setup(void)
181{
182 int uart_id, auto_uart_id;
183 volatile u32 *apb_misc = (volatile u32 *)TEGRA_APB_MISC_BASE;
184 u32 chip, div;
185
186#if defined(CONFIG_TEGRA_DEBUG_UARTA)
187 uart_id = 0;
188#elif defined(CONFIG_TEGRA_DEBUG_UARTB)
189 uart_id = 1;
190#elif defined(CONFIG_TEGRA_DEBUG_UARTC)
191 uart_id = 2;
192#elif defined(CONFIG_TEGRA_DEBUG_UARTD)
193 uart_id = 3;
194#elif defined(CONFIG_TEGRA_DEBUG_UARTE)
195 uart_id = 4;
196#else
197 uart_id = -1;
198#endif
199
200#if defined(CONFIG_TEGRA_DEBUG_UART_AUTO_ODMDATA)
201 auto_uart_id = auto_odmdata();
202#elif defined(CONFIG_TEGRA_DEBUG_UART_AUTO_SCRATCH)
203 auto_uart_id = auto_scratch();
204#else
205 auto_uart_id = -1;
206#endif
207 if (auto_uart_id != -1)
208 uart_id = auto_uart_id;
209
210 if (uart_id < 0 || uart_id >= ARRAY_SIZE(uarts) ||
211 !uart_clocked(uart_id))
212 uart = NULL;
213 else
214 uart = (volatile u8 *)uarts[uart_id].base;
215
142 save_uart_address(); 216 save_uart_address();
143 if (uart == NULL) 217 if (uart == NULL)
144 return; 218 return;
diff --git a/arch/arm/mach-tegra/include/mach/usb_phy.h b/arch/arm/mach-tegra/include/mach/usb_phy.h
index de1a0f602b28..935ce9f65590 100644
--- a/arch/arm/mach-tegra/include/mach/usb_phy.h
+++ b/arch/arm/mach-tegra/include/mach/usb_phy.h
@@ -61,8 +61,8 @@ struct tegra_usb_phy {
61 struct usb_phy *ulpi; 61 struct usb_phy *ulpi;
62}; 62};
63 63
64struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs, 64struct tegra_usb_phy *tegra_usb_phy_open(struct device *dev, int instance,
65 void *config, enum tegra_usb_phy_mode phy_mode); 65 void __iomem *regs, void *config, enum tegra_usb_phy_mode phy_mode);
66 66
67int tegra_usb_phy_power_on(struct tegra_usb_phy *phy); 67int tegra_usb_phy_power_on(struct tegra_usb_phy *phy);
68 68
diff --git a/arch/arm/mach-tegra/powergate.c b/arch/arm/mach-tegra/powergate.c
index c238699ae86f..f5b12fb4ff12 100644
--- a/arch/arm/mach-tegra/powergate.c
+++ b/arch/arm/mach-tegra/powergate.c
@@ -234,7 +234,7 @@ static const struct file_operations powergate_fops = {
234 .release = single_release, 234 .release = single_release,
235}; 235};
236 236
237static int __init powergate_debugfs_init(void) 237int __init tegra_powergate_debugfs_init(void)
238{ 238{
239 struct dentry *d; 239 struct dentry *d;
240 int err = -ENOMEM; 240 int err = -ENOMEM;
@@ -247,6 +247,4 @@ static int __init powergate_debugfs_init(void)
247 return err; 247 return err;
248} 248}
249 249
250late_initcall(powergate_debugfs_init);
251
252#endif 250#endif
diff --git a/arch/arm/mach-tegra/tegra2_clocks.c b/arch/arm/mach-tegra/tegra2_clocks.c
index bae09b859891..b59315ce3691 100644
--- a/arch/arm/mach-tegra/tegra2_clocks.c
+++ b/arch/arm/mach-tegra/tegra2_clocks.c
@@ -1486,6 +1486,10 @@ static struct clk tegra_clk_m = {
1486}; 1486};
1487 1487
1488static struct clk_pll_freq_table tegra_pll_c_freq_table[] = { 1488static struct clk_pll_freq_table tegra_pll_c_freq_table[] = {
1489 { 12000000, 600000000, 600, 12, 1, 8 },
1490 { 13000000, 600000000, 600, 13, 1, 8 },
1491 { 19200000, 600000000, 500, 16, 1, 6 },
1492 { 26000000, 600000000, 600, 26, 1, 8 },
1489 { 0, 0, 0, 0, 0, 0 }, 1493 { 0, 0, 0, 0, 0, 0 },
1490}; 1494};
1491 1495
diff --git a/arch/arm/mach-tegra/tegra30_clocks.c b/arch/arm/mach-tegra/tegra30_clocks.c
index 6d08b53f92d2..e33fe4b14a2a 100644
--- a/arch/arm/mach-tegra/tegra30_clocks.c
+++ b/arch/arm/mach-tegra/tegra30_clocks.c
@@ -3015,6 +3015,15 @@ struct clk_duplicate tegra_clk_duplicates[] = {
3015 CLK_DUPLICATE("sbc6", "spi_slave_tegra.5", NULL), 3015 CLK_DUPLICATE("sbc6", "spi_slave_tegra.5", NULL),
3016 CLK_DUPLICATE("twd", "smp_twd", NULL), 3016 CLK_DUPLICATE("twd", "smp_twd", NULL),
3017 CLK_DUPLICATE("vcp", "nvavp", "vcp"), 3017 CLK_DUPLICATE("vcp", "nvavp", "vcp"),
3018 CLK_DUPLICATE("i2s0", NULL, "i2s0"),
3019 CLK_DUPLICATE("i2s1", NULL, "i2s1"),
3020 CLK_DUPLICATE("i2s2", NULL, "i2s2"),
3021 CLK_DUPLICATE("i2s3", NULL, "i2s3"),
3022 CLK_DUPLICATE("i2s4", NULL, "i2s4"),
3023 CLK_DUPLICATE("dam0", NULL, "dam0"),
3024 CLK_DUPLICATE("dam1", NULL, "dam1"),
3025 CLK_DUPLICATE("dam2", NULL, "dam2"),
3026 CLK_DUPLICATE("spdif_in", NULL, "spdif_in"),
3018}; 3027};
3019 3028
3020struct clk *tegra_ptr_clks[] = { 3029struct clk *tegra_ptr_clks[] = {
diff --git a/arch/arm/mach-tegra/usb_phy.c b/arch/arm/mach-tegra/usb_phy.c
index d71d2fed6721..54e353c8e304 100644
--- a/arch/arm/mach-tegra/usb_phy.c
+++ b/arch/arm/mach-tegra/usb_phy.c
@@ -26,6 +26,7 @@
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <linux/io.h> 27#include <linux/io.h>
28#include <linux/gpio.h> 28#include <linux/gpio.h>
29#include <linux/of_gpio.h>
29#include <linux/usb/otg.h> 30#include <linux/usb/otg.h>
30#include <linux/usb/ulpi.h> 31#include <linux/usb/ulpi.h>
31#include <asm/mach-types.h> 32#include <asm/mach-types.h>
@@ -654,8 +655,8 @@ static void ulpi_phy_power_off(struct tegra_usb_phy *phy)
654 clk_disable(phy->clk); 655 clk_disable(phy->clk);
655} 656}
656 657
657struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs, 658struct tegra_usb_phy *tegra_usb_phy_open(struct device *dev, int instance,
658 void *config, enum tegra_usb_phy_mode phy_mode) 659 void __iomem *regs, void *config, enum tegra_usb_phy_mode phy_mode)
659{ 660{
660 struct tegra_usb_phy *phy; 661 struct tegra_usb_phy *phy;
661 struct tegra_ulpi_config *ulpi_config; 662 struct tegra_ulpi_config *ulpi_config;
@@ -711,6 +712,16 @@ struct tegra_usb_phy *tegra_usb_phy_open(int instance, void __iomem *regs,
711 err = -ENXIO; 712 err = -ENXIO;
712 goto err1; 713 goto err1;
713 } 714 }
715 if (!gpio_is_valid(ulpi_config->reset_gpio))
716 ulpi_config->reset_gpio =
717 of_get_named_gpio(dev->of_node,
718 "nvidia,phy-reset-gpio", 0);
719 if (!gpio_is_valid(ulpi_config->reset_gpio)) {
720 pr_err("%s: invalid reset gpio: %d\n", __func__,
721 ulpi_config->reset_gpio);
722 err = -EINVAL;
723 goto err1;
724 }
714 gpio_request(ulpi_config->reset_gpio, "ulpi_phy_reset_b"); 725 gpio_request(ulpi_config->reset_gpio, "ulpi_phy_reset_b");
715 gpio_direction_output(ulpi_config->reset_gpio, 0); 726 gpio_direction_output(ulpi_config->reset_gpio, 0);
716 phy->ulpi = otg_ulpi_create(&ulpi_viewport_access_ops, 0); 727 phy->ulpi = otg_ulpi_create(&ulpi_viewport_access_ops, 0);
diff --git a/arch/arm/mach-ux500/board-mop500.c b/arch/arm/mach-ux500/board-mop500.c
index f943687acaf0..fba8adea421e 100644
--- a/arch/arm/mach-ux500/board-mop500.c
+++ b/arch/arm/mach-ux500/board-mop500.c
@@ -206,7 +206,7 @@ static struct resource ab8500_resources[] = {
206}; 206};
207 207
208struct platform_device ab8500_device = { 208struct platform_device ab8500_device = {
209 .name = "ab8500-i2c", 209 .name = "ab8500-core",
210 .id = 0, 210 .id = 0,
211 .dev = { 211 .dev = {
212 .platform_data = &ab8500_platdata, 212 .platform_data = &ab8500_platdata,
@@ -785,6 +785,7 @@ MACHINE_START(U8500, "ST-Ericsson MOP500 platform")
785 .timer = &ux500_timer, 785 .timer = &ux500_timer,
786 .handle_irq = gic_handle_irq, 786 .handle_irq = gic_handle_irq,
787 .init_machine = mop500_init_machine, 787 .init_machine = mop500_init_machine,
788 .init_late = ux500_init_late,
788MACHINE_END 789MACHINE_END
789 790
790MACHINE_START(HREFV60, "ST-Ericsson U8500 Platform HREFv60+") 791MACHINE_START(HREFV60, "ST-Ericsson U8500 Platform HREFv60+")
@@ -794,6 +795,7 @@ MACHINE_START(HREFV60, "ST-Ericsson U8500 Platform HREFv60+")
794 .timer = &ux500_timer, 795 .timer = &ux500_timer,
795 .handle_irq = gic_handle_irq, 796 .handle_irq = gic_handle_irq,
796 .init_machine = hrefv60_init_machine, 797 .init_machine = hrefv60_init_machine,
798 .init_late = ux500_init_late,
797MACHINE_END 799MACHINE_END
798 800
799MACHINE_START(SNOWBALL, "Calao Systems Snowball platform") 801MACHINE_START(SNOWBALL, "Calao Systems Snowball platform")
@@ -804,6 +806,7 @@ MACHINE_START(SNOWBALL, "Calao Systems Snowball platform")
804 .timer = &ux500_timer, 806 .timer = &ux500_timer,
805 .handle_irq = gic_handle_irq, 807 .handle_irq = gic_handle_irq,
806 .init_machine = snowball_init_machine, 808 .init_machine = snowball_init_machine,
809 .init_late = ux500_init_late,
807MACHINE_END 810MACHINE_END
808 811
809#ifdef CONFIG_MACH_UX500_DT 812#ifdef CONFIG_MACH_UX500_DT
@@ -918,6 +921,7 @@ DT_MACHINE_START(U8500_DT, "ST-Ericsson U8500 platform (Device Tree Support)")
918 .timer = &ux500_timer, 921 .timer = &ux500_timer,
919 .handle_irq = gic_handle_irq, 922 .handle_irq = gic_handle_irq,
920 .init_machine = u8500_init_machine, 923 .init_machine = u8500_init_machine,
924 .init_late = ux500_init_late,
921 .dt_compat = u8500_dt_board_compat, 925 .dt_compat = u8500_dt_board_compat,
922MACHINE_END 926MACHINE_END
923#endif 927#endif
diff --git a/arch/arm/mach-ux500/clock.c b/arch/arm/mach-ux500/clock.c
index 1762c4728f1e..8d73b066a18d 100644
--- a/arch/arm/mach-ux500/clock.c
+++ b/arch/arm/mach-ux500/clock.c
@@ -635,7 +635,7 @@ static int clk_debugfs_register(struct clk *c)
635 return 0; 635 return 0;
636} 636}
637 637
638static int __init clk_debugfs_init(void) 638int __init clk_debugfs_init(void)
639{ 639{
640 struct clk *c; 640 struct clk *c;
641 struct dentry *d; 641 struct dentry *d;
@@ -657,7 +657,6 @@ err_out:
657 return err; 657 return err;
658} 658}
659 659
660late_initcall(clk_debugfs_init);
661#endif /* defined(CONFIG_DEBUG_FS) */ 660#endif /* defined(CONFIG_DEBUG_FS) */
662 661
663unsigned long clk_smp_twd_rate = 500000000; 662unsigned long clk_smp_twd_rate = 500000000;
@@ -696,12 +695,11 @@ static struct notifier_block clk_twd_cpufreq_nb = {
696 .notifier_call = clk_twd_cpufreq_transition, 695 .notifier_call = clk_twd_cpufreq_transition,
697}; 696};
698 697
699static int clk_init_smp_twd_cpufreq(void) 698int clk_init_smp_twd_cpufreq(void)
700{ 699{
701 return cpufreq_register_notifier(&clk_twd_cpufreq_nb, 700 return cpufreq_register_notifier(&clk_twd_cpufreq_nb,
702 CPUFREQ_TRANSITION_NOTIFIER); 701 CPUFREQ_TRANSITION_NOTIFIER);
703} 702}
704late_initcall(clk_init_smp_twd_cpufreq);
705 703
706#endif 704#endif
707 705
diff --git a/arch/arm/mach-ux500/clock.h b/arch/arm/mach-ux500/clock.h
index d776ada08dbf..65d27a13f46d 100644
--- a/arch/arm/mach-ux500/clock.h
+++ b/arch/arm/mach-ux500/clock.h
@@ -150,3 +150,15 @@ struct clk clk_##_name = { \
150 150
151int __init clk_db8500_ed_fixup(void); 151int __init clk_db8500_ed_fixup(void);
152int __init clk_init(void); 152int __init clk_init(void);
153
154#ifdef CONFIG_DEBUG_FS
155int clk_debugfs_init(void);
156#else
157static inline int clk_debugfs_init(void) { return 0; }
158#endif
159
160#ifdef CONFIG_CPU_FREQ
161int clk_init_smp_twd_cpufreq(void);
162#else
163static inline int clk_init_smp_twd_cpufreq(void) { return 0; }
164#endif
diff --git a/arch/arm/mach-ux500/cpu.c b/arch/arm/mach-ux500/cpu.c
index a29a0e3adcf9..e2360e7c770d 100644
--- a/arch/arm/mach-ux500/cpu.c
+++ b/arch/arm/mach-ux500/cpu.c
@@ -73,6 +73,12 @@ void __init ux500_init_irq(void)
73 clk_init(); 73 clk_init();
74} 74}
75 75
76void __init ux500_init_late(void)
77{
78 clk_debugfs_init();
79 clk_init_smp_twd_cpufreq();
80}
81
76static const char * __init ux500_get_machine(void) 82static const char * __init ux500_get_machine(void)
77{ 83{
78 return kasprintf(GFP_KERNEL, "DB%4x", dbx500_partnumber()); 84 return kasprintf(GFP_KERNEL, "DB%4x", dbx500_partnumber());
diff --git a/arch/arm/mach-ux500/include/mach/setup.h b/arch/arm/mach-ux500/include/mach/setup.h
index 4e369f1645ec..8b7ed82a2866 100644
--- a/arch/arm/mach-ux500/include/mach/setup.h
+++ b/arch/arm/mach-ux500/include/mach/setup.h
@@ -20,6 +20,7 @@ extern void __init u8500_map_io(void);
20extern struct device * __init u8500_init_devices(void); 20extern struct device * __init u8500_init_devices(void);
21 21
22extern void __init ux500_init_irq(void); 22extern void __init ux500_init_irq(void);
23extern void __init ux500_init_late(void);
23 24
24extern struct device *ux500_soc_device_init(const char *soc_id); 25extern struct device *ux500_soc_device_init(const char *soc_id);
25 26
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index db23ae4aaaab..ea6b43154090 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -17,8 +17,12 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/device.h> 18#include <linux/device.h>
19#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
20#include <linux/dma-contiguous.h>
20#include <linux/highmem.h> 21#include <linux/highmem.h>
22#include <linux/memblock.h>
21#include <linux/slab.h> 23#include <linux/slab.h>
24#include <linux/iommu.h>
25#include <linux/vmalloc.h>
22 26
23#include <asm/memory.h> 27#include <asm/memory.h>
24#include <asm/highmem.h> 28#include <asm/highmem.h>
@@ -26,9 +30,112 @@
26#include <asm/tlbflush.h> 30#include <asm/tlbflush.h>
27#include <asm/sizes.h> 31#include <asm/sizes.h>
28#include <asm/mach/arch.h> 32#include <asm/mach/arch.h>
33#include <asm/dma-iommu.h>
34#include <asm/mach/map.h>
35#include <asm/system_info.h>
36#include <asm/dma-contiguous.h>
29 37
30#include "mm.h" 38#include "mm.h"
31 39
40/*
41 * The DMA API is built upon the notion of "buffer ownership". A buffer
42 * is either exclusively owned by the CPU (and therefore may be accessed
43 * by it) or exclusively owned by the DMA device. These helper functions
44 * represent the transitions between these two ownership states.
45 *
46 * Note, however, that on later ARMs, this notion does not work due to
47 * speculative prefetches. We model our approach on the assumption that
48 * the CPU does do speculative prefetches, which means we clean caches
49 * before transfers and delay cache invalidation until transfer completion.
50 *
51 */
52static void __dma_page_cpu_to_dev(struct page *, unsigned long,
53 size_t, enum dma_data_direction);
54static void __dma_page_dev_to_cpu(struct page *, unsigned long,
55 size_t, enum dma_data_direction);
56
57/**
58 * arm_dma_map_page - map a portion of a page for streaming DMA
59 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
60 * @page: page that buffer resides in
61 * @offset: offset into page for start of buffer
62 * @size: size of buffer to map
63 * @dir: DMA transfer direction
64 *
65 * Ensure that any data held in the cache is appropriately discarded
66 * or written back.
67 *
68 * The device owns this memory once this call has completed. The CPU
69 * can regain ownership by calling dma_unmap_page().
70 */
71static dma_addr_t arm_dma_map_page(struct device *dev, struct page *page,
72 unsigned long offset, size_t size, enum dma_data_direction dir,
73 struct dma_attrs *attrs)
74{
75 if (!arch_is_coherent())
76 __dma_page_cpu_to_dev(page, offset, size, dir);
77 return pfn_to_dma(dev, page_to_pfn(page)) + offset;
78}
79
80/**
81 * arm_dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
82 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
83 * @handle: DMA address of buffer
84 * @size: size of buffer (same as passed to dma_map_page)
85 * @dir: DMA transfer direction (same as passed to dma_map_page)
86 *
87 * Unmap a page streaming mode DMA translation. The handle and size
88 * must match what was provided in the previous dma_map_page() call.
89 * All other usages are undefined.
90 *
91 * After this call, reads by the CPU to the buffer are guaranteed to see
92 * whatever the device wrote there.
93 */
94static void arm_dma_unmap_page(struct device *dev, dma_addr_t handle,
95 size_t size, enum dma_data_direction dir,
96 struct dma_attrs *attrs)
97{
98 if (!arch_is_coherent())
99 __dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
100 handle & ~PAGE_MASK, size, dir);
101}
102
103static void arm_dma_sync_single_for_cpu(struct device *dev,
104 dma_addr_t handle, size_t size, enum dma_data_direction dir)
105{
106 unsigned int offset = handle & (PAGE_SIZE - 1);
107 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
108 if (!arch_is_coherent())
109 __dma_page_dev_to_cpu(page, offset, size, dir);
110}
111
112static void arm_dma_sync_single_for_device(struct device *dev,
113 dma_addr_t handle, size_t size, enum dma_data_direction dir)
114{
115 unsigned int offset = handle & (PAGE_SIZE - 1);
116 struct page *page = pfn_to_page(dma_to_pfn(dev, handle-offset));
117 if (!arch_is_coherent())
118 __dma_page_cpu_to_dev(page, offset, size, dir);
119}
120
121static int arm_dma_set_mask(struct device *dev, u64 dma_mask);
122
123struct dma_map_ops arm_dma_ops = {
124 .alloc = arm_dma_alloc,
125 .free = arm_dma_free,
126 .mmap = arm_dma_mmap,
127 .map_page = arm_dma_map_page,
128 .unmap_page = arm_dma_unmap_page,
129 .map_sg = arm_dma_map_sg,
130 .unmap_sg = arm_dma_unmap_sg,
131 .sync_single_for_cpu = arm_dma_sync_single_for_cpu,
132 .sync_single_for_device = arm_dma_sync_single_for_device,
133 .sync_sg_for_cpu = arm_dma_sync_sg_for_cpu,
134 .sync_sg_for_device = arm_dma_sync_sg_for_device,
135 .set_dma_mask = arm_dma_set_mask,
136};
137EXPORT_SYMBOL(arm_dma_ops);
138
32static u64 get_coherent_dma_mask(struct device *dev) 139static u64 get_coherent_dma_mask(struct device *dev)
33{ 140{
34 u64 mask = (u64)arm_dma_limit; 141 u64 mask = (u64)arm_dma_limit;
@@ -56,6 +163,21 @@ static u64 get_coherent_dma_mask(struct device *dev)
56 return mask; 163 return mask;
57} 164}
58 165
166static void __dma_clear_buffer(struct page *page, size_t size)
167{
168 void *ptr;
169 /*
170 * Ensure that the allocated pages are zeroed, and that any data
171 * lurking in the kernel direct-mapped region is invalidated.
172 */
173 ptr = page_address(page);
174 if (ptr) {
175 memset(ptr, 0, size);
176 dmac_flush_range(ptr, ptr + size);
177 outer_flush_range(__pa(ptr), __pa(ptr) + size);
178 }
179}
180
59/* 181/*
60 * Allocate a DMA buffer for 'dev' of size 'size' using the 182 * Allocate a DMA buffer for 'dev' of size 'size' using the
61 * specified gfp mask. Note that 'size' must be page aligned. 183 * specified gfp mask. Note that 'size' must be page aligned.
@@ -64,23 +186,6 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
64{ 186{
65 unsigned long order = get_order(size); 187 unsigned long order = get_order(size);
66 struct page *page, *p, *e; 188 struct page *page, *p, *e;
67 void *ptr;
68 u64 mask = get_coherent_dma_mask(dev);
69
70#ifdef CONFIG_DMA_API_DEBUG
71 u64 limit = (mask + 1) & ~mask;
72 if (limit && size >= limit) {
73 dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
74 size, mask);
75 return NULL;
76 }
77#endif
78
79 if (!mask)
80 return NULL;
81
82 if (mask < 0xffffffffULL)
83 gfp |= GFP_DMA;
84 189
85 page = alloc_pages(gfp, order); 190 page = alloc_pages(gfp, order);
86 if (!page) 191 if (!page)
@@ -93,14 +198,7 @@ static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gf
93 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++) 198 for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
94 __free_page(p); 199 __free_page(p);
95 200
96 /* 201 __dma_clear_buffer(page, size);
97 * Ensure that the allocated pages are zeroed, and that any data
98 * lurking in the kernel direct-mapped region is invalidated.
99 */
100 ptr = page_address(page);
101 memset(ptr, 0, size);
102 dmac_flush_range(ptr, ptr + size);
103 outer_flush_range(__pa(ptr), __pa(ptr) + size);
104 202
105 return page; 203 return page;
106} 204}
@@ -170,6 +268,11 @@ static int __init consistent_init(void)
170 unsigned long base = consistent_base; 268 unsigned long base = consistent_base;
171 unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT; 269 unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;
172 270
271#ifndef CONFIG_ARM_DMA_USE_IOMMU
272 if (cpu_architecture() >= CPU_ARCH_ARMv6)
273 return 0;
274#endif
275
173 consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL); 276 consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
174 if (!consistent_pte) { 277 if (!consistent_pte) {
175 pr_err("%s: no memory\n", __func__); 278 pr_err("%s: no memory\n", __func__);
@@ -184,14 +287,14 @@ static int __init consistent_init(void)
184 287
185 pud = pud_alloc(&init_mm, pgd, base); 288 pud = pud_alloc(&init_mm, pgd, base);
186 if (!pud) { 289 if (!pud) {
187 printk(KERN_ERR "%s: no pud tables\n", __func__); 290 pr_err("%s: no pud tables\n", __func__);
188 ret = -ENOMEM; 291 ret = -ENOMEM;
189 break; 292 break;
190 } 293 }
191 294
192 pmd = pmd_alloc(&init_mm, pud, base); 295 pmd = pmd_alloc(&init_mm, pud, base);
193 if (!pmd) { 296 if (!pmd) {
194 printk(KERN_ERR "%s: no pmd tables\n", __func__); 297 pr_err("%s: no pmd tables\n", __func__);
195 ret = -ENOMEM; 298 ret = -ENOMEM;
196 break; 299 break;
197 } 300 }
@@ -199,7 +302,7 @@ static int __init consistent_init(void)
199 302
200 pte = pte_alloc_kernel(pmd, base); 303 pte = pte_alloc_kernel(pmd, base);
201 if (!pte) { 304 if (!pte) {
202 printk(KERN_ERR "%s: no pte tables\n", __func__); 305 pr_err("%s: no pte tables\n", __func__);
203 ret = -ENOMEM; 306 ret = -ENOMEM;
204 break; 307 break;
205 } 308 }
@@ -210,9 +313,101 @@ static int __init consistent_init(void)
210 313
211 return ret; 314 return ret;
212} 315}
213
214core_initcall(consistent_init); 316core_initcall(consistent_init);
215 317
318static void *__alloc_from_contiguous(struct device *dev, size_t size,
319 pgprot_t prot, struct page **ret_page);
320
321static struct arm_vmregion_head coherent_head = {
322 .vm_lock = __SPIN_LOCK_UNLOCKED(&coherent_head.vm_lock),
323 .vm_list = LIST_HEAD_INIT(coherent_head.vm_list),
324};
325
326size_t coherent_pool_size = DEFAULT_CONSISTENT_DMA_SIZE / 8;
327
328static int __init early_coherent_pool(char *p)
329{
330 coherent_pool_size = memparse(p, &p);
331 return 0;
332}
333early_param("coherent_pool", early_coherent_pool);
334
335/*
336 * Initialise the coherent pool for atomic allocations.
337 */
338static int __init coherent_init(void)
339{
340 pgprot_t prot = pgprot_dmacoherent(pgprot_kernel);
341 size_t size = coherent_pool_size;
342 struct page *page;
343 void *ptr;
344
345 if (cpu_architecture() < CPU_ARCH_ARMv6)
346 return 0;
347
348 ptr = __alloc_from_contiguous(NULL, size, prot, &page);
349 if (ptr) {
350 coherent_head.vm_start = (unsigned long) ptr;
351 coherent_head.vm_end = (unsigned long) ptr + size;
352 printk(KERN_INFO "DMA: preallocated %u KiB pool for atomic coherent allocations\n",
353 (unsigned)size / 1024);
354 return 0;
355 }
356 printk(KERN_ERR "DMA: failed to allocate %u KiB pool for atomic coherent allocation\n",
357 (unsigned)size / 1024);
358 return -ENOMEM;
359}
360/*
361 * CMA is activated by core_initcall, so we must be called after it.
362 */
363postcore_initcall(coherent_init);
364
365struct dma_contig_early_reserve {
366 phys_addr_t base;
367 unsigned long size;
368};
369
370static struct dma_contig_early_reserve dma_mmu_remap[MAX_CMA_AREAS] __initdata;
371
372static int dma_mmu_remap_num __initdata;
373
374void __init dma_contiguous_early_fixup(phys_addr_t base, unsigned long size)
375{
376 dma_mmu_remap[dma_mmu_remap_num].base = base;
377 dma_mmu_remap[dma_mmu_remap_num].size = size;
378 dma_mmu_remap_num++;
379}
380
381void __init dma_contiguous_remap(void)
382{
383 int i;
384 for (i = 0; i < dma_mmu_remap_num; i++) {
385 phys_addr_t start = dma_mmu_remap[i].base;
386 phys_addr_t end = start + dma_mmu_remap[i].size;
387 struct map_desc map;
388 unsigned long addr;
389
390 if (end > arm_lowmem_limit)
391 end = arm_lowmem_limit;
392 if (start >= end)
393 return;
394
395 map.pfn = __phys_to_pfn(start);
396 map.virtual = __phys_to_virt(start);
397 map.length = end - start;
398 map.type = MT_MEMORY_DMA_READY;
399
400 /*
401 * Clear previous low-memory mapping
402 */
403 for (addr = __phys_to_virt(start); addr < __phys_to_virt(end);
404 addr += PMD_SIZE)
405 pmd_clear(pmd_off_k(addr));
406
407 iotable_init(&map, 1);
408 }
409}
410
216static void * 411static void *
217__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot, 412__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
218 const void *caller) 413 const void *caller)
@@ -222,7 +417,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
222 int bit; 417 int bit;
223 418
224 if (!consistent_pte) { 419 if (!consistent_pte) {
225 printk(KERN_ERR "%s: not initialised\n", __func__); 420 pr_err("%s: not initialised\n", __func__);
226 dump_stack(); 421 dump_stack();
227 return NULL; 422 return NULL;
228 } 423 }
@@ -249,7 +444,7 @@ __dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot,
249 u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1); 444 u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
250 445
251 pte = consistent_pte[idx] + off; 446 pte = consistent_pte[idx] + off;
252 c->vm_pages = page; 447 c->priv = page;
253 448
254 do { 449 do {
255 BUG_ON(!pte_none(*pte)); 450 BUG_ON(!pte_none(*pte));
@@ -281,14 +476,14 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
281 476
282 c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr); 477 c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
283 if (!c) { 478 if (!c) {
284 printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n", 479 pr_err("%s: trying to free invalid coherent area: %p\n",
285 __func__, cpu_addr); 480 __func__, cpu_addr);
286 dump_stack(); 481 dump_stack();
287 return; 482 return;
288 } 483 }
289 484
290 if ((c->vm_end - c->vm_start) != size) { 485 if ((c->vm_end - c->vm_start) != size) {
291 printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n", 486 pr_err("%s: freeing wrong coherent size (%ld != %d)\n",
292 __func__, c->vm_end - c->vm_start, size); 487 __func__, c->vm_end - c->vm_start, size);
293 dump_stack(); 488 dump_stack();
294 size = c->vm_end - c->vm_start; 489 size = c->vm_end - c->vm_start;
@@ -310,8 +505,8 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
310 } 505 }
311 506
312 if (pte_none(pte) || !pte_present(pte)) 507 if (pte_none(pte) || !pte_present(pte))
313 printk(KERN_CRIT "%s: bad page in kernel page table\n", 508 pr_crit("%s: bad page in kernel page table\n",
314 __func__); 509 __func__);
315 } while (size -= PAGE_SIZE); 510 } while (size -= PAGE_SIZE);
316 511
317 flush_tlb_kernel_range(c->vm_start, c->vm_end); 512 flush_tlb_kernel_range(c->vm_start, c->vm_end);
@@ -319,20 +514,182 @@ static void __dma_free_remap(void *cpu_addr, size_t size)
319 arm_vmregion_free(&consistent_head, c); 514 arm_vmregion_free(&consistent_head, c);
320} 515}
321 516
517static int __dma_update_pte(pte_t *pte, pgtable_t token, unsigned long addr,
518 void *data)
519{
520 struct page *page = virt_to_page(addr);
521 pgprot_t prot = *(pgprot_t *)data;
522
523 set_pte_ext(pte, mk_pte(page, prot), 0);
524 return 0;
525}
526
527static void __dma_remap(struct page *page, size_t size, pgprot_t prot)
528{
529 unsigned long start = (unsigned long) page_address(page);
530 unsigned end = start + size;
531
532 apply_to_page_range(&init_mm, start, size, __dma_update_pte, &prot);
533 dsb();
534 flush_tlb_kernel_range(start, end);
535}
536
537static void *__alloc_remap_buffer(struct device *dev, size_t size, gfp_t gfp,
538 pgprot_t prot, struct page **ret_page,
539 const void *caller)
540{
541 struct page *page;
542 void *ptr;
543 page = __dma_alloc_buffer(dev, size, gfp);
544 if (!page)
545 return NULL;
546
547 ptr = __dma_alloc_remap(page, size, gfp, prot, caller);
548 if (!ptr) {
549 __dma_free_buffer(page, size);
550 return NULL;
551 }
552
553 *ret_page = page;
554 return ptr;
555}
556
557static void *__alloc_from_pool(struct device *dev, size_t size,
558 struct page **ret_page, const void *caller)
559{
560 struct arm_vmregion *c;
561 size_t align;
562
563 if (!coherent_head.vm_start) {
564 printk(KERN_ERR "%s: coherent pool not initialised!\n",
565 __func__);
566 dump_stack();
567 return NULL;
568 }
569
570 /*
571 * Align the region allocation - allocations from pool are rather
572 * small, so align them to their order in pages, minimum is a page
573 * size. This helps reduce fragmentation of the DMA space.
574 */
575 align = PAGE_SIZE << get_order(size);
576 c = arm_vmregion_alloc(&coherent_head, align, size, 0, caller);
577 if (c) {
578 void *ptr = (void *)c->vm_start;
579 struct page *page = virt_to_page(ptr);
580 *ret_page = page;
581 return ptr;
582 }
583 return NULL;
584}
585
586static int __free_from_pool(void *cpu_addr, size_t size)
587{
588 unsigned long start = (unsigned long)cpu_addr;
589 unsigned long end = start + size;
590 struct arm_vmregion *c;
591
592 if (start < coherent_head.vm_start || end > coherent_head.vm_end)
593 return 0;
594
595 c = arm_vmregion_find_remove(&coherent_head, (unsigned long)start);
596
597 if ((c->vm_end - c->vm_start) != size) {
598 printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
599 __func__, c->vm_end - c->vm_start, size);
600 dump_stack();
601 size = c->vm_end - c->vm_start;
602 }
603
604 arm_vmregion_free(&coherent_head, c);
605 return 1;
606}
607
608static void *__alloc_from_contiguous(struct device *dev, size_t size,
609 pgprot_t prot, struct page **ret_page)
610{
611 unsigned long order = get_order(size);
612 size_t count = size >> PAGE_SHIFT;
613 struct page *page;
614
615 page = dma_alloc_from_contiguous(dev, count, order);
616 if (!page)
617 return NULL;
618
619 __dma_clear_buffer(page, size);
620 __dma_remap(page, size, prot);
621
622 *ret_page = page;
623 return page_address(page);
624}
625
626static void __free_from_contiguous(struct device *dev, struct page *page,
627 size_t size)
628{
629 __dma_remap(page, size, pgprot_kernel);
630 dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
631}
632
633static inline pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot)
634{
635 prot = dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs) ?
636 pgprot_writecombine(prot) :
637 pgprot_dmacoherent(prot);
638 return prot;
639}
640
641#define nommu() 0
642
322#else /* !CONFIG_MMU */ 643#else /* !CONFIG_MMU */
323 644
324#define __dma_alloc_remap(page, size, gfp, prot, c) page_address(page) 645#define nommu() 1
325#define __dma_free_remap(addr, size) do { } while (0) 646
647#define __get_dma_pgprot(attrs, prot) __pgprot(0)
648#define __alloc_remap_buffer(dev, size, gfp, prot, ret, c) NULL
649#define __alloc_from_pool(dev, size, ret_page, c) NULL
650#define __alloc_from_contiguous(dev, size, prot, ret) NULL
651#define __free_from_pool(cpu_addr, size) 0
652#define __free_from_contiguous(dev, page, size) do { } while (0)
653#define __dma_free_remap(cpu_addr, size) do { } while (0)
326 654
327#endif /* CONFIG_MMU */ 655#endif /* CONFIG_MMU */
328 656
329static void * 657static void *__alloc_simple_buffer(struct device *dev, size_t size, gfp_t gfp,
330__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp, 658 struct page **ret_page)
331 pgprot_t prot, const void *caller) 659{
660 struct page *page;
661 page = __dma_alloc_buffer(dev, size, gfp);
662 if (!page)
663 return NULL;
664
665 *ret_page = page;
666 return page_address(page);
667}
668
669
670
671static void *__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
672 gfp_t gfp, pgprot_t prot, const void *caller)
332{ 673{
674 u64 mask = get_coherent_dma_mask(dev);
333 struct page *page; 675 struct page *page;
334 void *addr; 676 void *addr;
335 677
678#ifdef CONFIG_DMA_API_DEBUG
679 u64 limit = (mask + 1) & ~mask;
680 if (limit && size >= limit) {
681 dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
682 size, mask);
683 return NULL;
684 }
685#endif
686
687 if (!mask)
688 return NULL;
689
690 if (mask < 0xffffffffULL)
691 gfp |= GFP_DMA;
692
336 /* 693 /*
337 * Following is a work-around (a.k.a. hack) to prevent pages 694 * Following is a work-around (a.k.a. hack) to prevent pages
338 * with __GFP_COMP being passed to split_page() which cannot 695 * with __GFP_COMP being passed to split_page() which cannot
@@ -342,22 +699,20 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
342 */ 699 */
343 gfp &= ~(__GFP_COMP); 700 gfp &= ~(__GFP_COMP);
344 701
345 *handle = ~0; 702 *handle = DMA_ERROR_CODE;
346 size = PAGE_ALIGN(size); 703 size = PAGE_ALIGN(size);
347 704
348 page = __dma_alloc_buffer(dev, size, gfp); 705 if (arch_is_coherent() || nommu())
349 if (!page) 706 addr = __alloc_simple_buffer(dev, size, gfp, &page);
350 return NULL; 707 else if (cpu_architecture() < CPU_ARCH_ARMv6)
351 708 addr = __alloc_remap_buffer(dev, size, gfp, prot, &page, caller);
352 if (!arch_is_coherent()) 709 else if (gfp & GFP_ATOMIC)
353 addr = __dma_alloc_remap(page, size, gfp, prot, caller); 710 addr = __alloc_from_pool(dev, size, &page, caller);
354 else 711 else
355 addr = page_address(page); 712 addr = __alloc_from_contiguous(dev, size, prot, &page);
356 713
357 if (addr) 714 if (addr)
358 *handle = pfn_to_dma(dev, page_to_pfn(page)); 715 *handle = pfn_to_dma(dev, page_to_pfn(page));
359 else
360 __dma_free_buffer(page, size);
361 716
362 return addr; 717 return addr;
363} 718}
@@ -366,138 +721,71 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
366 * Allocate DMA-coherent memory space and return both the kernel remapped 721 * Allocate DMA-coherent memory space and return both the kernel remapped
367 * virtual and bus address for that space. 722 * virtual and bus address for that space.
368 */ 723 */
369void * 724void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
370dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) 725 gfp_t gfp, struct dma_attrs *attrs)
371{ 726{
727 pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
372 void *memory; 728 void *memory;
373 729
374 if (dma_alloc_from_coherent(dev, size, handle, &memory)) 730 if (dma_alloc_from_coherent(dev, size, handle, &memory))
375 return memory; 731 return memory;
376 732
377 return __dma_alloc(dev, size, handle, gfp, 733 return __dma_alloc(dev, size, handle, gfp, prot,
378 pgprot_dmacoherent(pgprot_kernel),
379 __builtin_return_address(0)); 734 __builtin_return_address(0));
380} 735}
381EXPORT_SYMBOL(dma_alloc_coherent);
382 736
383/* 737/*
384 * Allocate a writecombining region, in much the same way as 738 * Create userspace mapping for the DMA-coherent memory.
385 * dma_alloc_coherent above.
386 */ 739 */
387void * 740int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
388dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp) 741 void *cpu_addr, dma_addr_t dma_addr, size_t size,
389{ 742 struct dma_attrs *attrs)
390 return __dma_alloc(dev, size, handle, gfp,
391 pgprot_writecombine(pgprot_kernel),
392 __builtin_return_address(0));
393}
394EXPORT_SYMBOL(dma_alloc_writecombine);
395
396static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
397 void *cpu_addr, dma_addr_t dma_addr, size_t size)
398{ 743{
399 int ret = -ENXIO; 744 int ret = -ENXIO;
400#ifdef CONFIG_MMU 745#ifdef CONFIG_MMU
401 unsigned long user_size, kern_size; 746 unsigned long pfn = dma_to_pfn(dev, dma_addr);
402 struct arm_vmregion *c; 747 vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
403 748
404 user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; 749 if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
750 return ret;
405 751
406 c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr); 752 ret = remap_pfn_range(vma, vma->vm_start,
407 if (c) { 753 pfn + vma->vm_pgoff,
408 unsigned long off = vma->vm_pgoff; 754 vma->vm_end - vma->vm_start,
409 755 vma->vm_page_prot);
410 kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;
411
412 if (off < kern_size &&
413 user_size <= (kern_size - off)) {
414 ret = remap_pfn_range(vma, vma->vm_start,
415 page_to_pfn(c->vm_pages) + off,
416 user_size << PAGE_SHIFT,
417 vma->vm_page_prot);
418 }
419 }
420#endif /* CONFIG_MMU */ 756#endif /* CONFIG_MMU */
421 757
422 return ret; 758 return ret;
423} 759}
424 760
425int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
426 void *cpu_addr, dma_addr_t dma_addr, size_t size)
427{
428 vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
429 return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
430}
431EXPORT_SYMBOL(dma_mmap_coherent);
432
433int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
434 void *cpu_addr, dma_addr_t dma_addr, size_t size)
435{
436 vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
437 return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
438}
439EXPORT_SYMBOL(dma_mmap_writecombine);
440
441/* 761/*
442 * free a page as defined by the above mapping. 762 * Free a buffer as defined by the above mapping.
443 * Must not be called with IRQs disabled.
444 */ 763 */
445void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle) 764void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
765 dma_addr_t handle, struct dma_attrs *attrs)
446{ 766{
447 WARN_ON(irqs_disabled()); 767 struct page *page = pfn_to_page(dma_to_pfn(dev, handle));
448 768
449 if (dma_release_from_coherent(dev, get_order(size), cpu_addr)) 769 if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
450 return; 770 return;
451 771
452 size = PAGE_ALIGN(size); 772 size = PAGE_ALIGN(size);
453 773
454 if (!arch_is_coherent()) 774 if (arch_is_coherent() || nommu()) {
775 __dma_free_buffer(page, size);
776 } else if (cpu_architecture() < CPU_ARCH_ARMv6) {
455 __dma_free_remap(cpu_addr, size); 777 __dma_free_remap(cpu_addr, size);
456 778 __dma_free_buffer(page, size);
457 __dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
458}
459EXPORT_SYMBOL(dma_free_coherent);
460
461/*
462 * Make an area consistent for devices.
463 * Note: Drivers should NOT use this function directly, as it will break
464 * platforms with CONFIG_DMABOUNCE.
465 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
466 */
467void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
468 enum dma_data_direction dir)
469{
470 unsigned long paddr;
471
472 BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
473
474 dmac_map_area(kaddr, size, dir);
475
476 paddr = __pa(kaddr);
477 if (dir == DMA_FROM_DEVICE) {
478 outer_inv_range(paddr, paddr + size);
479 } else { 779 } else {
480 outer_clean_range(paddr, paddr + size); 780 if (__free_from_pool(cpu_addr, size))
481 } 781 return;
482 /* FIXME: non-speculating: flush on bidirectional mappings? */ 782 /*
483} 783 * Non-atomic allocations cannot be freed with IRQs disabled
484EXPORT_SYMBOL(___dma_single_cpu_to_dev); 784 */
485 785 WARN_ON(irqs_disabled());
486void ___dma_single_dev_to_cpu(const void *kaddr, size_t size, 786 __free_from_contiguous(dev, page, size);
487 enum dma_data_direction dir)
488{
489 BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));
490
491 /* FIXME: non-speculating: not required */
492 /* don't bother invalidating if DMA to device */
493 if (dir != DMA_TO_DEVICE) {
494 unsigned long paddr = __pa(kaddr);
495 outer_inv_range(paddr, paddr + size);
496 } 787 }
497
498 dmac_unmap_area(kaddr, size, dir);
499} 788}
500EXPORT_SYMBOL(___dma_single_dev_to_cpu);
501 789
502static void dma_cache_maint_page(struct page *page, unsigned long offset, 790static void dma_cache_maint_page(struct page *page, unsigned long offset,
503 size_t size, enum dma_data_direction dir, 791 size_t size, enum dma_data_direction dir,
@@ -543,7 +831,13 @@ static void dma_cache_maint_page(struct page *page, unsigned long offset,
543 } while (left); 831 } while (left);
544} 832}
545 833
546void ___dma_page_cpu_to_dev(struct page *page, unsigned long off, 834/*
835 * Make an area consistent for devices.
836 * Note: Drivers should NOT use this function directly, as it will break
837 * platforms with CONFIG_DMABOUNCE.
838 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
839 */
840static void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
547 size_t size, enum dma_data_direction dir) 841 size_t size, enum dma_data_direction dir)
548{ 842{
549 unsigned long paddr; 843 unsigned long paddr;
@@ -558,9 +852,8 @@ void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
558 } 852 }
559 /* FIXME: non-speculating: flush on bidirectional mappings? */ 853 /* FIXME: non-speculating: flush on bidirectional mappings? */
560} 854}
561EXPORT_SYMBOL(___dma_page_cpu_to_dev);
562 855
563void ___dma_page_dev_to_cpu(struct page *page, unsigned long off, 856static void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
564 size_t size, enum dma_data_direction dir) 857 size_t size, enum dma_data_direction dir)
565{ 858{
566 unsigned long paddr = page_to_phys(page) + off; 859 unsigned long paddr = page_to_phys(page) + off;
@@ -578,10 +871,9 @@ void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
578 if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE) 871 if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
579 set_bit(PG_dcache_clean, &page->flags); 872 set_bit(PG_dcache_clean, &page->flags);
580} 873}
581EXPORT_SYMBOL(___dma_page_dev_to_cpu);
582 874
583/** 875/**
584 * dma_map_sg - map a set of SG buffers for streaming mode DMA 876 * arm_dma_map_sg - map a set of SG buffers for streaming mode DMA
585 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 877 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
586 * @sg: list of buffers 878 * @sg: list of buffers
587 * @nents: number of buffers to map 879 * @nents: number of buffers to map
@@ -596,32 +888,32 @@ EXPORT_SYMBOL(___dma_page_dev_to_cpu);
596 * Device ownership issues as mentioned for dma_map_single are the same 888 * Device ownership issues as mentioned for dma_map_single are the same
597 * here. 889 * here.
598 */ 890 */
599int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, 891int arm_dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
600 enum dma_data_direction dir) 892 enum dma_data_direction dir, struct dma_attrs *attrs)
601{ 893{
894 struct dma_map_ops *ops = get_dma_ops(dev);
602 struct scatterlist *s; 895 struct scatterlist *s;
603 int i, j; 896 int i, j;
604 897
605 BUG_ON(!valid_dma_direction(dir));
606
607 for_each_sg(sg, s, nents, i) { 898 for_each_sg(sg, s, nents, i) {
608 s->dma_address = __dma_map_page(dev, sg_page(s), s->offset, 899#ifdef CONFIG_NEED_SG_DMA_LENGTH
609 s->length, dir); 900 s->dma_length = s->length;
901#endif
902 s->dma_address = ops->map_page(dev, sg_page(s), s->offset,
903 s->length, dir, attrs);
610 if (dma_mapping_error(dev, s->dma_address)) 904 if (dma_mapping_error(dev, s->dma_address))
611 goto bad_mapping; 905 goto bad_mapping;
612 } 906 }
613 debug_dma_map_sg(dev, sg, nents, nents, dir);
614 return nents; 907 return nents;
615 908
616 bad_mapping: 909 bad_mapping:
617 for_each_sg(sg, s, i, j) 910 for_each_sg(sg, s, i, j)
618 __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); 911 ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
619 return 0; 912 return 0;
620} 913}
621EXPORT_SYMBOL(dma_map_sg);
622 914
623/** 915/**
624 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg 916 * arm_dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
625 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 917 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
626 * @sg: list of buffers 918 * @sg: list of buffers
627 * @nents: number of buffers to unmap (same as was passed to dma_map_sg) 919 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
@@ -630,70 +922,55 @@ EXPORT_SYMBOL(dma_map_sg);
630 * Unmap a set of streaming mode DMA translations. Again, CPU access 922 * Unmap a set of streaming mode DMA translations. Again, CPU access
631 * rules concerning calls here are the same as for dma_unmap_single(). 923 * rules concerning calls here are the same as for dma_unmap_single().
632 */ 924 */
633void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents, 925void arm_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
634 enum dma_data_direction dir) 926 enum dma_data_direction dir, struct dma_attrs *attrs)
635{ 927{
928 struct dma_map_ops *ops = get_dma_ops(dev);
636 struct scatterlist *s; 929 struct scatterlist *s;
637 int i;
638 930
639 debug_dma_unmap_sg(dev, sg, nents, dir); 931 int i;
640 932
641 for_each_sg(sg, s, nents, i) 933 for_each_sg(sg, s, nents, i)
642 __dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir); 934 ops->unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir, attrs);
643} 935}
644EXPORT_SYMBOL(dma_unmap_sg);
645 936
646/** 937/**
647 * dma_sync_sg_for_cpu 938 * arm_dma_sync_sg_for_cpu
648 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 939 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
649 * @sg: list of buffers 940 * @sg: list of buffers
650 * @nents: number of buffers to map (returned from dma_map_sg) 941 * @nents: number of buffers to map (returned from dma_map_sg)
651 * @dir: DMA transfer direction (same as was passed to dma_map_sg) 942 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
652 */ 943 */
653void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, 944void arm_dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
654 int nents, enum dma_data_direction dir) 945 int nents, enum dma_data_direction dir)
655{ 946{
947 struct dma_map_ops *ops = get_dma_ops(dev);
656 struct scatterlist *s; 948 struct scatterlist *s;
657 int i; 949 int i;
658 950
659 for_each_sg(sg, s, nents, i) { 951 for_each_sg(sg, s, nents, i)
660 if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0, 952 ops->sync_single_for_cpu(dev, sg_dma_address(s), s->length,
661 sg_dma_len(s), dir)) 953 dir);
662 continue;
663
664 __dma_page_dev_to_cpu(sg_page(s), s->offset,
665 s->length, dir);
666 }
667
668 debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
669} 954}
670EXPORT_SYMBOL(dma_sync_sg_for_cpu);
671 955
672/** 956/**
673 * dma_sync_sg_for_device 957 * arm_dma_sync_sg_for_device
674 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices 958 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
675 * @sg: list of buffers 959 * @sg: list of buffers
676 * @nents: number of buffers to map (returned from dma_map_sg) 960 * @nents: number of buffers to map (returned from dma_map_sg)
677 * @dir: DMA transfer direction (same as was passed to dma_map_sg) 961 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
678 */ 962 */
679void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, 963void arm_dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
680 int nents, enum dma_data_direction dir) 964 int nents, enum dma_data_direction dir)
681{ 965{
966 struct dma_map_ops *ops = get_dma_ops(dev);
682 struct scatterlist *s; 967 struct scatterlist *s;
683 int i; 968 int i;
684 969
685 for_each_sg(sg, s, nents, i) { 970 for_each_sg(sg, s, nents, i)
686 if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0, 971 ops->sync_single_for_device(dev, sg_dma_address(s), s->length,
687 sg_dma_len(s), dir)) 972 dir);
688 continue;
689
690 __dma_page_cpu_to_dev(sg_page(s), s->offset,
691 s->length, dir);
692 }
693
694 debug_dma_sync_sg_for_device(dev, sg, nents, dir);
695} 973}
696EXPORT_SYMBOL(dma_sync_sg_for_device);
697 974
698/* 975/*
699 * Return whether the given device DMA address mask can be supported 976 * Return whether the given device DMA address mask can be supported
@@ -709,18 +986,15 @@ int dma_supported(struct device *dev, u64 mask)
709} 986}
710EXPORT_SYMBOL(dma_supported); 987EXPORT_SYMBOL(dma_supported);
711 988
712int dma_set_mask(struct device *dev, u64 dma_mask) 989static int arm_dma_set_mask(struct device *dev, u64 dma_mask)
713{ 990{
714 if (!dev->dma_mask || !dma_supported(dev, dma_mask)) 991 if (!dev->dma_mask || !dma_supported(dev, dma_mask))
715 return -EIO; 992 return -EIO;
716 993
717#ifndef CONFIG_DMABOUNCE
718 *dev->dma_mask = dma_mask; 994 *dev->dma_mask = dma_mask;
719#endif
720 995
721 return 0; 996 return 0;
722} 997}
723EXPORT_SYMBOL(dma_set_mask);
724 998
725#define PREALLOC_DMA_DEBUG_ENTRIES 4096 999#define PREALLOC_DMA_DEBUG_ENTRIES 4096
726 1000
@@ -733,3 +1007,679 @@ static int __init dma_debug_do_init(void)
733 return 0; 1007 return 0;
734} 1008}
735fs_initcall(dma_debug_do_init); 1009fs_initcall(dma_debug_do_init);
1010
1011#ifdef CONFIG_ARM_DMA_USE_IOMMU
1012
1013/* IOMMU */
1014
1015static inline dma_addr_t __alloc_iova(struct dma_iommu_mapping *mapping,
1016 size_t size)
1017{
1018 unsigned int order = get_order(size);
1019 unsigned int align = 0;
1020 unsigned int count, start;
1021 unsigned long flags;
1022
1023 count = ((PAGE_ALIGN(size) >> PAGE_SHIFT) +
1024 (1 << mapping->order) - 1) >> mapping->order;
1025
1026 if (order > mapping->order)
1027 align = (1 << (order - mapping->order)) - 1;
1028
1029 spin_lock_irqsave(&mapping->lock, flags);
1030 start = bitmap_find_next_zero_area(mapping->bitmap, mapping->bits, 0,
1031 count, align);
1032 if (start > mapping->bits) {
1033 spin_unlock_irqrestore(&mapping->lock, flags);
1034 return DMA_ERROR_CODE;
1035 }
1036
1037 bitmap_set(mapping->bitmap, start, count);
1038 spin_unlock_irqrestore(&mapping->lock, flags);
1039
1040 return mapping->base + (start << (mapping->order + PAGE_SHIFT));
1041}
1042
1043static inline void __free_iova(struct dma_iommu_mapping *mapping,
1044 dma_addr_t addr, size_t size)
1045{
1046 unsigned int start = (addr - mapping->base) >>
1047 (mapping->order + PAGE_SHIFT);
1048 unsigned int count = ((size >> PAGE_SHIFT) +
1049 (1 << mapping->order) - 1) >> mapping->order;
1050 unsigned long flags;
1051
1052 spin_lock_irqsave(&mapping->lock, flags);
1053 bitmap_clear(mapping->bitmap, start, count);
1054 spin_unlock_irqrestore(&mapping->lock, flags);
1055}
1056
1057static struct page **__iommu_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
1058{
1059 struct page **pages;
1060 int count = size >> PAGE_SHIFT;
1061 int array_size = count * sizeof(struct page *);
1062 int i = 0;
1063
1064 if (array_size <= PAGE_SIZE)
1065 pages = kzalloc(array_size, gfp);
1066 else
1067 pages = vzalloc(array_size);
1068 if (!pages)
1069 return NULL;
1070
1071 while (count) {
1072 int j, order = __ffs(count);
1073
1074 pages[i] = alloc_pages(gfp | __GFP_NOWARN, order);
1075 while (!pages[i] && order)
1076 pages[i] = alloc_pages(gfp | __GFP_NOWARN, --order);
1077 if (!pages[i])
1078 goto error;
1079
1080 if (order)
1081 split_page(pages[i], order);
1082 j = 1 << order;
1083 while (--j)
1084 pages[i + j] = pages[i] + j;
1085
1086 __dma_clear_buffer(pages[i], PAGE_SIZE << order);
1087 i += 1 << order;
1088 count -= 1 << order;
1089 }
1090
1091 return pages;
1092error:
1093 while (--i)
1094 if (pages[i])
1095 __free_pages(pages[i], 0);
1096 if (array_size < PAGE_SIZE)
1097 kfree(pages);
1098 else
1099 vfree(pages);
1100 return NULL;
1101}
1102
1103static int __iommu_free_buffer(struct device *dev, struct page **pages, size_t size)
1104{
1105 int count = size >> PAGE_SHIFT;
1106 int array_size = count * sizeof(struct page *);
1107 int i;
1108 for (i = 0; i < count; i++)
1109 if (pages[i])
1110 __free_pages(pages[i], 0);
1111 if (array_size < PAGE_SIZE)
1112 kfree(pages);
1113 else
1114 vfree(pages);
1115 return 0;
1116}
1117
/*
 * Create a CPU mapping for a specified pages
 *
 * Allocates a virtual region from the consistent mapping area and
 * points its PTEs at the given order-0 pages with protection @prot.
 * The pages array is stashed in c->priv so the mmap/free paths can
 * find it again.  Returns the kernel virtual address, or NULL.
 */
static void *
__iommu_alloc_remap(struct page **pages, size_t size, gfp_t gfp, pgprot_t prot)
{
	struct arm_vmregion *c;
	size_t align;
	size_t count = size >> PAGE_SHIFT;
	int bit;

	/* consistent PTE tables are created at boot; bail out if absent */
	if (!consistent_pte[0]) {
		pr_err("%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	/*
	 * Align the virtual region allocation - maximum alignment is
	 * a section size, minimum is a page size. This helps reduce
	 * fragmentation of the DMA space, and also prevents allocations
	 * smaller than a section from crossing a section boundary.
	 */
	bit = fls(size - 1);
	if (bit > SECTION_SHIFT)
		bit = SECTION_SHIFT;
	align = 1 << bit;

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = arm_vmregion_alloc(&consistent_head, align, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM), NULL);
	if (c) {
		pte_t *pte;
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		int i = 0;
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->priv = pages;

		/* walk the PTE tables, advancing to the next table when
		 * the in-table offset wraps at PTRS_PER_PTE */
		do {
			BUG_ON(!pte_none(*pte));

			set_pte_ext(pte, mk_pte(pages[i], prot), 0);
			pte++;
			off++;
			i++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (i < count);

		/* ensure the PTE writes are visible before first use */
		dsb();

		return (void *)c->vm_start;
	}
	return NULL;
}
1179
/*
 * Create a mapping in device IO address space for specified pages
 *
 * Allocates an IOVA range and maps the pages into the device's IOMMU
 * domain, coalescing physically contiguous runs of pages into single
 * iommu_map() calls.  Returns the device address, or DMA_ERROR_CODE.
 */
static dma_addr_t
__iommu_create_mapping(struct device *dev, struct page **pages, size_t size)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	dma_addr_t dma_addr, iova;
	int i, ret = DMA_ERROR_CODE;

	dma_addr = __alloc_iova(mapping, size);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	iova = dma_addr;
	for (i = 0; i < count; ) {
		/* find the end of the physically contiguous run at i */
		unsigned int next_pfn = page_to_pfn(pages[i]) + 1;
		phys_addr_t phys = page_to_phys(pages[i]);
		unsigned int len, j;

		for (j = i + 1; j < count; j++, next_pfn++)
			if (page_to_pfn(pages[j]) != next_pfn)
				break;

		len = (j - i) << PAGE_SHIFT;
		ret = iommu_map(mapping->domain, iova, phys, len, 0);
		if (ret < 0)
			goto fail;
		iova += len;
		i = j;
	}
	return dma_addr;
fail:
	/* undo the partial mapping and give the IOVA range back */
	iommu_unmap(mapping->domain, dma_addr, iova-dma_addr);
	__free_iova(mapping, dma_addr, size);
	return DMA_ERROR_CODE;
}
1218
1219static int __iommu_remove_mapping(struct device *dev, dma_addr_t iova, size_t size)
1220{
1221 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1222
1223 /*
1224 * add optional in-page offset from iova to size and align
1225 * result to page size
1226 */
1227 size = PAGE_ALIGN((iova & ~PAGE_MASK) + size);
1228 iova &= PAGE_MASK;
1229
1230 iommu_unmap(mapping->domain, iova, size);
1231 __free_iova(mapping, iova, size);
1232 return 0;
1233}
1234
/*
 * IOMMU-aware coherent allocation: grab backing pages, map them into
 * the device's IO address space, then give the CPU a remapped view.
 * On success *handle holds the device address and the kernel virtual
 * address is returned; on failure *handle is DMA_ERROR_CODE and NULL
 * is returned, with all intermediate state unwound.
 */
static void *arm_iommu_alloc_attrs(struct device *dev, size_t size,
	    dma_addr_t *handle, gfp_t gfp, struct dma_attrs *attrs)
{
	pgprot_t prot = __get_dma_pgprot(attrs, pgprot_kernel);
	struct page **pages;
	void *addr = NULL;

	*handle = DMA_ERROR_CODE;
	size = PAGE_ALIGN(size);

	pages = __iommu_alloc_buffer(dev, size, gfp);
	if (!pages)
		return NULL;

	*handle = __iommu_create_mapping(dev, pages, size);
	if (*handle == DMA_ERROR_CODE)
		goto err_buffer;

	addr = __iommu_alloc_remap(pages, size, gfp, prot);
	if (!addr)
		goto err_mapping;

	return addr;

err_mapping:
	__iommu_remove_mapping(dev, *handle, size);
err_buffer:
	__iommu_free_buffer(dev, pages, size);
	return NULL;
}
1265
/*
 * Map an IOMMU coherent buffer into userspace, page by page, by looking
 * up the page array stashed in the vmregion's priv by
 * __iommu_alloc_remap().  Returns 0 on success (including when the
 * region is not found) or the vm_insert_page() error.
 *
 * NOTE(review): the do/while trusts the VMA length — there is no check
 * that the pages[] array actually has (vm_end - vm_start) / PAGE_SIZE
 * entries; confirm callers cannot mmap beyond the allocation size.
 */
static int arm_iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size,
		    struct dma_attrs *attrs)
{
	struct arm_vmregion *c;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot);
	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);

	if (c) {
		struct page **pages = c->priv;

		unsigned long uaddr = vma->vm_start;
		unsigned long usize = vma->vm_end - vma->vm_start;
		int i = 0;

		do {
			int ret;

			ret = vm_insert_page(vma, uaddr, pages[i++]);
			if (ret) {
				pr_err("Remapping memory, error: %d\n", ret);
				return ret;
			}

			uaddr += PAGE_SIZE;
			usize -= PAGE_SIZE;
		} while (usize > 0);
	}
	return 0;
}
1297
/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 *
 * Reverses arm_iommu_alloc_attrs(): removes the CPU remap, the IOMMU
 * translation, and the backing pages (recovered from the vmregion's
 * priv pointer).  Silently does nothing if cpu_addr is not a known
 * consistent-region address.
 */
void arm_iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			  dma_addr_t handle, struct dma_attrs *attrs)
{
	struct arm_vmregion *c;
	size = PAGE_ALIGN(size);

	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
	if (c) {
		struct page **pages = c->priv;
		__dma_free_remap(cpu_addr, size);
		__iommu_remove_mapping(dev, handle, size);
		__iommu_free_buffer(dev, pages, size);
	}
}
1316
/*
 * Map a part of the scatter-gather list into contiguous io address space
 *
 * Allocates one IOVA range of @size bytes and maps each SG entry's
 * pages into it back to back, performing CPU-to-device cache
 * maintenance per entry on non-coherent systems.  *handle receives the
 * base device address on success; returns 0 or a negative errno.
 */
static int __map_sg_chunk(struct device *dev, struct scatterlist *sg,
			  size_t size, dma_addr_t *handle,
			  enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova, iova_base;
	int ret = 0;
	unsigned int count;
	struct scatterlist *s;

	size = PAGE_ALIGN(size);
	*handle = DMA_ERROR_CODE;

	iova_base = iova = __alloc_iova(mapping, size);
	if (iova == DMA_ERROR_CODE)
		return -ENOMEM;

	/* count advances in pages until the whole chunk is mapped */
	for (count = 0, s = sg; count < (size >> PAGE_SHIFT); s = sg_next(s)) {
		phys_addr_t phys = page_to_phys(sg_page(s));
		unsigned int len = PAGE_ALIGN(s->offset + s->length);

		if (!arch_is_coherent())
			__dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);

		ret = iommu_map(mapping->domain, iova, phys, len, 0);
		if (ret < 0)
			goto fail;
		count += len >> PAGE_SHIFT;
		iova += len;
	}
	*handle = iova_base;

	return 0;
fail:
	/* unmap only what was mapped, but release the full IOVA range */
	iommu_unmap(mapping->domain, iova_base, count * PAGE_SIZE);
	__free_iova(mapping, iova_base, size);
	return ret;
}
1358
/**
 * arm_iommu_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * The scatter gather list elements are merged together (if possible) and
 * tagged with the appropriate dma address and length. They are obtained via
 * sg_dma_{address,length}.
 *
 * Entries are accumulated into one IOVA chunk until an entry would
 * break IOVA contiguity (non-zero offset, non-page-aligned running
 * size, or the device's max segment size exceeded), at which point the
 * accumulated run is mapped via __map_sg_chunk().  Returns the number
 * of merged DMA segments, or 0 on failure.
 */
int arm_iommu_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	struct scatterlist *s = sg, *dma = sg, *start = sg;
	int i, count = 0;
	unsigned int offset = s->offset;
	unsigned int size = s->offset + s->length;
	unsigned int max = dma_get_max_seg_size(dev);

	for (i = 1; i < nents; i++) {
		s = sg_next(s);

		/* poison entries that end up unused after merging */
		s->dma_address = DMA_ERROR_CODE;
		s->dma_length = 0;

		if (s->offset || (size & ~PAGE_MASK) || size + s->length > max) {
			/* flush the run accumulated so far into one chunk */
			if (__map_sg_chunk(dev, start, size, &dma->dma_address,
			    dir) < 0)
				goto bad_mapping;

			dma->dma_address += offset;
			dma->dma_length = size - offset;

			size = offset = s->offset;
			start = s;
			dma = sg_next(dma);
			count += 1;
		}
		size += s->length;
	}
	/* map the final accumulated run */
	if (__map_sg_chunk(dev, start, size, &dma->dma_address, dir) < 0)
		goto bad_mapping;

	dma->dma_address += offset;
	dma->dma_length = size - offset;

	return count+1;

bad_mapping:
	for_each_sg(sg, s, count, i)
		__iommu_remove_mapping(dev, sg_dma_address(s), sg_dma_len(s));
	return 0;
}
1414
1415/**
1416 * arm_iommu_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
1417 * @dev: valid struct device pointer
1418 * @sg: list of buffers
1419 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
1420 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1421 *
1422 * Unmap a set of streaming mode DMA translations. Again, CPU access
1423 * rules concerning calls here are the same as for dma_unmap_single().
1424 */
1425void arm_iommu_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
1426 enum dma_data_direction dir, struct dma_attrs *attrs)
1427{
1428 struct scatterlist *s;
1429 int i;
1430
1431 for_each_sg(sg, s, nents, i) {
1432 if (sg_dma_len(s))
1433 __iommu_remove_mapping(dev, sg_dma_address(s),
1434 sg_dma_len(s));
1435 if (!arch_is_coherent())
1436 __dma_page_dev_to_cpu(sg_page(s), s->offset,
1437 s->length, dir);
1438 }
1439}
1440
1441/**
1442 * arm_iommu_sync_sg_for_cpu
1443 * @dev: valid struct device pointer
1444 * @sg: list of buffers
1445 * @nents: number of buffers to map (returned from dma_map_sg)
1446 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1447 */
1448void arm_iommu_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
1449 int nents, enum dma_data_direction dir)
1450{
1451 struct scatterlist *s;
1452 int i;
1453
1454 for_each_sg(sg, s, nents, i)
1455 if (!arch_is_coherent())
1456 __dma_page_dev_to_cpu(sg_page(s), s->offset, s->length, dir);
1457
1458}
1459
1460/**
1461 * arm_iommu_sync_sg_for_device
1462 * @dev: valid struct device pointer
1463 * @sg: list of buffers
1464 * @nents: number of buffers to map (returned from dma_map_sg)
1465 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
1466 */
1467void arm_iommu_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
1468 int nents, enum dma_data_direction dir)
1469{
1470 struct scatterlist *s;
1471 int i;
1472
1473 for_each_sg(sg, s, nents, i)
1474 if (!arch_is_coherent())
1475 __dma_page_cpu_to_dev(sg_page(s), s->offset, s->length, dir);
1476}
1477
1478
/**
 * arm_iommu_map_page
 * @dev: valid struct device pointer
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * IOMMU aware version of arm_dma_map_page()
 *
 * Performs CPU-to-device cache maintenance (unless coherent), allocates
 * a page-aligned IOVA range covering offset+size, and installs the
 * translation.  Returns the device address of the buffer start, or
 * DMA_ERROR_CODE.
 */
static dma_addr_t arm_iommu_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir,
	     struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t dma_addr;
	int ret, len = PAGE_ALIGN(size + offset);

	if (!arch_is_coherent())
		__dma_page_cpu_to_dev(page, offset, size, dir);

	dma_addr = __alloc_iova(mapping, len);
	if (dma_addr == DMA_ERROR_CODE)
		return dma_addr;

	ret = iommu_map(mapping->domain, dma_addr, page_to_phys(page), len, 0);
	if (ret < 0)
		goto fail;

	/* the in-page offset survives in the returned device address */
	return dma_addr + offset;
fail:
	__free_iova(mapping, dma_addr, len);
	return DMA_ERROR_CODE;
}
1513
/**
 * arm_iommu_unmap_page
 * @dev: valid struct device pointer
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * IOMMU aware version of arm_dma_unmap_page()
 *
 * Resolves the CPU page behind the IOVA, performs device-to-CPU cache
 * maintenance (unless coherent), then removes the translation and
 * releases the IOVA range.
 */
static void arm_iommu_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	int offset = handle & ~PAGE_MASK;
	int len = PAGE_ALIGN(size + offset);

	/* a zero IOVA means the handle never came from arm_iommu_map_page */
	if (!iova)
		return;

	if (!arch_is_coherent())
		__dma_page_dev_to_cpu(page, offset, size, dir);

	iommu_unmap(mapping->domain, iova, len);
	__free_iova(mapping, iova, len);
}
1542
/*
 * IOMMU aware version of dma_sync_single_for_cpu(): translate the IOVA
 * back to its CPU page and invalidate/clean so the CPU sees the data
 * the device wrote.  No cache maintenance on coherent systems.
 */
static void arm_iommu_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	struct dma_iommu_mapping *mapping = dev->archdata.mapping;
	dma_addr_t iova = handle & PAGE_MASK;
	struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
	unsigned int offset = handle & ~PAGE_MASK;

	if (!iova)
		return;

	if (!arch_is_coherent())
		__dma_page_dev_to_cpu(page, offset, size, dir);
}
1557
1558static void arm_iommu_sync_single_for_device(struct device *dev,
1559 dma_addr_t handle, size_t size, enum dma_data_direction dir)
1560{
1561 struct dma_iommu_mapping *mapping = dev->archdata.mapping;
1562 dma_addr_t iova = handle & PAGE_MASK;
1563 struct page *page = phys_to_page(iommu_iova_to_phys(mapping->domain, iova));
1564 unsigned int offset = handle & ~PAGE_MASK;
1565
1566 if (!iova)
1567 return;
1568
1569 __dma_page_cpu_to_dev(page, offset, size, dir);
1570}
1571
/*
 * dma_map_ops installed on a device by arm_iommu_attach_device(); every
 * entry routes the generic DMA API through the IOMMU-aware paths above.
 */
struct dma_map_ops iommu_ops = {
	.alloc		= arm_iommu_alloc_attrs,
	.free		= arm_iommu_free_attrs,
	.mmap		= arm_iommu_mmap_attrs,

	.map_page		= arm_iommu_map_page,
	.unmap_page		= arm_iommu_unmap_page,
	.sync_single_for_cpu	= arm_iommu_sync_single_for_cpu,
	.sync_single_for_device	= arm_iommu_sync_single_for_device,

	.map_sg			= arm_iommu_map_sg,
	.unmap_sg		= arm_iommu_unmap_sg,
	.sync_sg_for_cpu	= arm_iommu_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_iommu_sync_sg_for_device,
};
1587
/**
 * arm_iommu_create_mapping
 * @bus: pointer to the bus holding the client device (for IOMMU calls)
 * @base: start address of the valid IO address space
 * @size: size of the valid IO address space
 * @order: accuracy of the IO addresses allocations
 *
 * Creates a mapping structure which holds information about used/unused
 * IO address ranges, which is required to perform memory allocation and
 * mapping with IOMMU aware functions.
 *
 * The client device need to be attached to the mapping with
 * arm_iommu_attach_device function.
 */
struct dma_iommu_mapping *
arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size,
			 int order)
{
	unsigned int count = size >> (PAGE_SHIFT + order);
	unsigned int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
	struct dma_iommu_mapping *mapping;
	int err = -ENOMEM;

	/* size must cover at least one allocation chunk */
	if (!count)
		return ERR_PTR(-EINVAL);

	mapping = kzalloc(sizeof(struct dma_iommu_mapping), GFP_KERNEL);
	if (!mapping)
		goto err;

	mapping->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!mapping->bitmap)
		goto err2;

	mapping->base = base;
	/*
	 * NOTE(review): bits is derived from bitmap_size, which is
	 * rounded up to whole longs, so it can exceed count — the IOVA
	 * allocator may then hand out addresses past base + size.
	 * Verify whether bits should simply be count.
	 */
	mapping->bits = BITS_PER_BYTE * bitmap_size;
	mapping->order = order;
	spin_lock_init(&mapping->lock);

	mapping->domain = iommu_domain_alloc(bus);
	if (!mapping->domain)
		goto err3;

	/* refcounted: released via arm_iommu_release_mapping() */
	kref_init(&mapping->kref);
	return mapping;
err3:
	kfree(mapping->bitmap);
err2:
	kfree(mapping);
err:
	return ERR_PTR(err);
}
1640
1641static void release_iommu_mapping(struct kref *kref)
1642{
1643 struct dma_iommu_mapping *mapping =
1644 container_of(kref, struct dma_iommu_mapping, kref);
1645
1646 iommu_domain_free(mapping->domain);
1647 kfree(mapping->bitmap);
1648 kfree(mapping);
1649}
1650
1651void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping)
1652{
1653 if (mapping)
1654 kref_put(&mapping->kref, release_iommu_mapping);
1655}
1656
/**
 * arm_iommu_attach_device
 * @dev: valid struct device pointer
 * @mapping: io address space mapping structure (returned from
 *	arm_iommu_create_mapping)
 *
 * Attaches specified io address space mapping to the provided device,
 * this replaces the dma operations (dma_map_ops pointer) with the
 * IOMMU aware version. More than one client might be attached to
 * the same io address space mapping.
 *
 * Takes a reference on @mapping, which the caller balances by
 * arm_iommu_release_mapping().  Returns 0 or the iommu_attach_device()
 * error.
 */
int arm_iommu_attach_device(struct device *dev,
			    struct dma_iommu_mapping *mapping)
{
	int err;

	err = iommu_attach_device(mapping->domain, dev);
	if (err)
		return err;

	kref_get(&mapping->kref);
	dev->archdata.mapping = mapping;
	/* from here on the generic DMA API goes through iommu_ops */
	set_dma_ops(dev, &iommu_ops);

	pr_info("Attached IOMMU controller to %s device.\n", dev_name(dev));
	return 0;
}
1684
1685#endif
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 8f5813bbffb5..c21d06c7dd7e 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -20,6 +20,7 @@
20#include <linux/highmem.h> 20#include <linux/highmem.h>
21#include <linux/gfp.h> 21#include <linux/gfp.h>
22#include <linux/memblock.h> 22#include <linux/memblock.h>
23#include <linux/dma-contiguous.h>
23 24
24#include <asm/mach-types.h> 25#include <asm/mach-types.h>
25#include <asm/memblock.h> 26#include <asm/memblock.h>
@@ -226,6 +227,17 @@ static void __init arm_adjust_dma_zone(unsigned long *size, unsigned long *hole,
226} 227}
227#endif 228#endif
228 229
/*
 * Record the machine's DMA zone configuration early in boot: take the
 * zone size from the machine descriptor when one is given and compute
 * the corresponding DMA address limit, otherwise leave the limit at the
 * full 32-bit range.  Compiled out when CONFIG_ZONE_DMA is not set.
 */
void __init setup_dma_zone(struct machine_desc *mdesc)
{
#ifdef CONFIG_ZONE_DMA
	if (mdesc->dma_zone_size) {
		arm_dma_zone_size = mdesc->dma_zone_size;
		arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
	} else
		arm_dma_limit = 0xffffffff;
#endif
}
240
229static void __init arm_bootmem_free(unsigned long min, unsigned long max_low, 241static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
230 unsigned long max_high) 242 unsigned long max_high)
231{ 243{
@@ -273,12 +285,9 @@ static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
273 * Adjust the sizes according to any special requirements for 285 * Adjust the sizes according to any special requirements for
274 * this machine type. 286 * this machine type.
275 */ 287 */
276 if (arm_dma_zone_size) { 288 if (arm_dma_zone_size)
277 arm_adjust_dma_zone(zone_size, zhole_size, 289 arm_adjust_dma_zone(zone_size, zhole_size,
278 arm_dma_zone_size >> PAGE_SHIFT); 290 arm_dma_zone_size >> PAGE_SHIFT);
279 arm_dma_limit = PHYS_OFFSET + arm_dma_zone_size - 1;
280 } else
281 arm_dma_limit = 0xffffffff;
282#endif 291#endif
283 292
284 free_area_init_node(0, zone_size, min, zhole_size); 293 free_area_init_node(0, zone_size, min, zhole_size);
@@ -364,6 +373,12 @@ void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
364 if (mdesc->reserve) 373 if (mdesc->reserve)
365 mdesc->reserve(); 374 mdesc->reserve();
366 375
376 /*
 377 * reserve memory for DMA contiguous allocations,
378 * must come from DMA area inside low memory
379 */
380 dma_contiguous_reserve(min(arm_dma_limit, arm_lowmem_limit));
381
367 arm_memblock_steal_permitted = false; 382 arm_memblock_steal_permitted = false;
368 memblock_allow_resize(); 383 memblock_allow_resize();
369 memblock_dump_all(); 384 memblock_dump_all();
diff --git a/arch/arm/mm/mm.h b/arch/arm/mm/mm.h
index 27f4a619b35d..93dc0c17cdcb 100644
--- a/arch/arm/mm/mm.h
+++ b/arch/arm/mm/mm.h
@@ -67,5 +67,8 @@ extern u32 arm_dma_limit;
67#define arm_dma_limit ((u32)~0) 67#define arm_dma_limit ((u32)~0)
68#endif 68#endif
69 69
70extern phys_addr_t arm_lowmem_limit;
71
70void __init bootmem_init(void); 72void __init bootmem_init(void);
71void arm_mm_memblock_reserve(void); 73void arm_mm_memblock_reserve(void);
74void dma_contiguous_remap(void);
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index aa78de8bfdd3..e5dad60b558b 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -288,6 +288,11 @@ static struct mem_type mem_types[] = {
288 PMD_SECT_UNCACHED | PMD_SECT_XN, 288 PMD_SECT_UNCACHED | PMD_SECT_XN,
289 .domain = DOMAIN_KERNEL, 289 .domain = DOMAIN_KERNEL,
290 }, 290 },
291 [MT_MEMORY_DMA_READY] = {
292 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
293 .prot_l1 = PMD_TYPE_TABLE,
294 .domain = DOMAIN_KERNEL,
295 },
291}; 296};
292 297
293const struct mem_type *get_mem_type(unsigned int type) 298const struct mem_type *get_mem_type(unsigned int type)
@@ -429,6 +434,7 @@ static void __init build_mem_type_table(void)
429 if (arch_is_coherent() && cpu_is_xsc3()) { 434 if (arch_is_coherent() && cpu_is_xsc3()) {
430 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 435 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
431 mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; 436 mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
437 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
432 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; 438 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
433 mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; 439 mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
434 } 440 }
@@ -460,6 +466,7 @@ static void __init build_mem_type_table(void)
460 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED; 466 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
461 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S; 467 mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
462 mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED; 468 mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
469 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
463 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S; 470 mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
464 mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED; 471 mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
465 } 472 }
@@ -512,6 +519,7 @@ static void __init build_mem_type_table(void)
512 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask; 519 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
513 mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd; 520 mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
514 mem_types[MT_MEMORY].prot_pte |= kern_pgprot; 521 mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
522 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
515 mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask; 523 mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
516 mem_types[MT_ROM].prot_sect |= cp->pmd; 524 mem_types[MT_ROM].prot_sect |= cp->pmd;
517 525
@@ -596,7 +604,7 @@ static void __init alloc_init_section(pud_t *pud, unsigned long addr,
596 * L1 entries, whereas PGDs refer to a group of L1 entries making 604 * L1 entries, whereas PGDs refer to a group of L1 entries making
597 * up one logical pointer to an L2 table. 605 * up one logical pointer to an L2 table.
598 */ 606 */
599 if (((addr | end | phys) & ~SECTION_MASK) == 0) { 607 if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0) {
600 pmd_t *p = pmd; 608 pmd_t *p = pmd;
601 609
602#ifndef CONFIG_ARM_LPAE 610#ifndef CONFIG_ARM_LPAE
@@ -814,7 +822,7 @@ static int __init early_vmalloc(char *arg)
814} 822}
815early_param("vmalloc", early_vmalloc); 823early_param("vmalloc", early_vmalloc);
816 824
817static phys_addr_t lowmem_limit __initdata = 0; 825phys_addr_t arm_lowmem_limit __initdata = 0;
818 826
819void __init sanity_check_meminfo(void) 827void __init sanity_check_meminfo(void)
820{ 828{
@@ -897,8 +905,8 @@ void __init sanity_check_meminfo(void)
897 bank->size = newsize; 905 bank->size = newsize;
898 } 906 }
899#endif 907#endif
900 if (!bank->highmem && bank->start + bank->size > lowmem_limit) 908 if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
901 lowmem_limit = bank->start + bank->size; 909 arm_lowmem_limit = bank->start + bank->size;
902 910
903 j++; 911 j++;
904 } 912 }
@@ -923,8 +931,8 @@ void __init sanity_check_meminfo(void)
923 } 931 }
924#endif 932#endif
925 meminfo.nr_banks = j; 933 meminfo.nr_banks = j;
926 high_memory = __va(lowmem_limit - 1) + 1; 934 high_memory = __va(arm_lowmem_limit - 1) + 1;
927 memblock_set_current_limit(lowmem_limit); 935 memblock_set_current_limit(arm_lowmem_limit);
928} 936}
929 937
930static inline void prepare_page_table(void) 938static inline void prepare_page_table(void)
@@ -949,8 +957,8 @@ static inline void prepare_page_table(void)
949 * Find the end of the first block of lowmem. 957 * Find the end of the first block of lowmem.
950 */ 958 */
951 end = memblock.memory.regions[0].base + memblock.memory.regions[0].size; 959 end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
952 if (end >= lowmem_limit) 960 if (end >= arm_lowmem_limit)
953 end = lowmem_limit; 961 end = arm_lowmem_limit;
954 962
955 /* 963 /*
956 * Clear out all the kernel space mappings, except for the first 964 * Clear out all the kernel space mappings, except for the first
@@ -1093,8 +1101,8 @@ static void __init map_lowmem(void)
1093 phys_addr_t end = start + reg->size; 1101 phys_addr_t end = start + reg->size;
1094 struct map_desc map; 1102 struct map_desc map;
1095 1103
1096 if (end > lowmem_limit) 1104 if (end > arm_lowmem_limit)
1097 end = lowmem_limit; 1105 end = arm_lowmem_limit;
1098 if (start >= end) 1106 if (start >= end)
1099 break; 1107 break;
1100 1108
@@ -1115,11 +1123,12 @@ void __init paging_init(struct machine_desc *mdesc)
1115{ 1123{
1116 void *zero_page; 1124 void *zero_page;
1117 1125
1118 memblock_set_current_limit(lowmem_limit); 1126 memblock_set_current_limit(arm_lowmem_limit);
1119 1127
1120 build_mem_type_table(); 1128 build_mem_type_table();
1121 prepare_page_table(); 1129 prepare_page_table();
1122 map_lowmem(); 1130 map_lowmem();
1131 dma_contiguous_remap();
1123 devicemaps_init(mdesc); 1132 devicemaps_init(mdesc);
1124 kmap_init(); 1133 kmap_init();
1125 1134
diff --git a/arch/arm/mm/vmregion.h b/arch/arm/mm/vmregion.h
index 162be662c088..bf312c354a21 100644
--- a/arch/arm/mm/vmregion.h
+++ b/arch/arm/mm/vmregion.h
@@ -17,7 +17,7 @@ struct arm_vmregion {
17 struct list_head vm_list; 17 struct list_head vm_list;
18 unsigned long vm_start; 18 unsigned long vm_start;
19 unsigned long vm_end; 19 unsigned long vm_end;
20 struct page *vm_pages; 20 void *priv;
21 int vm_active; 21 int vm_active;
22 const void *caller; 22 const void *caller;
23}; 23};
diff --git a/arch/arm/plat-mxc/clock.c b/arch/arm/plat-mxc/clock.c
index 2ed3ab173add..5079787273d2 100644
--- a/arch/arm/plat-mxc/clock.c
+++ b/arch/arm/plat-mxc/clock.c
@@ -41,6 +41,7 @@
41#include <mach/clock.h> 41#include <mach/clock.h>
42#include <mach/hardware.h> 42#include <mach/hardware.h>
43 43
44#ifndef CONFIG_COMMON_CLK
44static LIST_HEAD(clocks); 45static LIST_HEAD(clocks);
45static DEFINE_MUTEX(clocks_mutex); 46static DEFINE_MUTEX(clocks_mutex);
46 47
@@ -200,6 +201,16 @@ struct clk *clk_get_parent(struct clk *clk)
200} 201}
201EXPORT_SYMBOL(clk_get_parent); 202EXPORT_SYMBOL(clk_get_parent);
202 203
204#else
205
206/*
207 * Lock to protect the clock module (ccm) registers. Used
208 * on all i.MXs
209 */
210DEFINE_SPINLOCK(imx_ccm_lock);
211
212#endif /* CONFIG_COMMON_CLK */
213
203/* 214/*
204 * Get the resulting clock rate from a PLL register value and the input 215 * Get the resulting clock rate from a PLL register value and the input
205 * frequency. PLLs with this register layout can at least be found on 216 * frequency. PLLs with this register layout can at least be found on
diff --git a/arch/arm/plat-mxc/include/mach/clock.h b/arch/arm/plat-mxc/include/mach/clock.h
index 753a5988d85c..bd940c795cbb 100644
--- a/arch/arm/plat-mxc/include/mach/clock.h
+++ b/arch/arm/plat-mxc/include/mach/clock.h
@@ -23,6 +23,7 @@
23#ifndef __ASSEMBLY__ 23#ifndef __ASSEMBLY__
24#include <linux/list.h> 24#include <linux/list.h>
25 25
26#ifndef CONFIG_COMMON_CLK
26struct module; 27struct module;
27 28
28struct clk { 29struct clk {
@@ -59,6 +60,9 @@ struct clk {
59 60
60int clk_register(struct clk *clk); 61int clk_register(struct clk *clk);
61void clk_unregister(struct clk *clk); 62void clk_unregister(struct clk *clk);
63#endif /* CONFIG_COMMON_CLK */
64
65extern spinlock_t imx_ccm_lock;
62 66
63unsigned long mxc_decode_pll(unsigned int pll, u32 f_ref); 67unsigned long mxc_decode_pll(unsigned int pll, u32 f_ref);
64 68
diff --git a/arch/arm/plat-mxc/include/mach/common.h b/arch/arm/plat-mxc/include/mach/common.h
index 0319c4a0cafa..cf663d84e7c1 100644
--- a/arch/arm/plat-mxc/include/mach/common.h
+++ b/arch/arm/plat-mxc/include/mach/common.h
@@ -53,6 +53,7 @@ extern void imx35_soc_init(void);
53extern void imx50_soc_init(void); 53extern void imx50_soc_init(void);
54extern void imx51_soc_init(void); 54extern void imx51_soc_init(void);
55extern void imx53_soc_init(void); 55extern void imx53_soc_init(void);
56extern void imx51_init_late(void);
56extern void epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq); 57extern void epit_timer_init(struct clk *timer_clk, void __iomem *base, int irq);
57extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int); 58extern void mxc_timer_init(struct clk *timer_clk, void __iomem *, int);
58extern int mx1_clocks_init(unsigned long fref); 59extern int mx1_clocks_init(unsigned long fref);
@@ -149,4 +150,10 @@ extern void imx6q_pm_init(void);
149static inline void imx6q_pm_init(void) {} 150static inline void imx6q_pm_init(void) {}
150#endif 151#endif
151 152
153#ifdef CONFIG_NEON
154extern int mx51_neon_fixup(void);
155#else
156static inline int mx51_neon_fixup(void) { return 0; }
157#endif
158
152#endif 159#endif
diff --git a/arch/arm/plat-mxc/include/mach/debug-macro.S b/arch/arm/plat-mxc/include/mach/debug-macro.S
index 8ddda365f1a0..761e45f9456f 100644
--- a/arch/arm/plat-mxc/include/mach/debug-macro.S
+++ b/arch/arm/plat-mxc/include/mach/debug-macro.S
@@ -24,6 +24,8 @@
24#define UART_PADDR MX51_UART1_BASE_ADDR 24#define UART_PADDR MX51_UART1_BASE_ADDR
25#elif defined (CONFIG_DEBUG_IMX50_IMX53_UART) 25#elif defined (CONFIG_DEBUG_IMX50_IMX53_UART)
26#define UART_PADDR MX53_UART1_BASE_ADDR 26#define UART_PADDR MX53_UART1_BASE_ADDR
27#elif defined (CONFIG_DEBUG_IMX6Q_UART2)
28#define UART_PADDR MX6Q_UART2_BASE_ADDR
27#elif defined (CONFIG_DEBUG_IMX6Q_UART4) 29#elif defined (CONFIG_DEBUG_IMX6Q_UART4)
28#define UART_PADDR MX6Q_UART4_BASE_ADDR 30#define UART_PADDR MX6Q_UART4_BASE_ADDR
29#endif 31#endif
diff --git a/arch/arm/plat-mxc/include/mach/mx6q.h b/arch/arm/plat-mxc/include/mach/mx6q.h
index 254a561a2799..f7e7dbac8f4b 100644
--- a/arch/arm/plat-mxc/include/mach/mx6q.h
+++ b/arch/arm/plat-mxc/include/mach/mx6q.h
@@ -27,6 +27,8 @@
27#define MX6Q_CCM_SIZE 0x4000 27#define MX6Q_CCM_SIZE 0x4000
28#define MX6Q_ANATOP_BASE_ADDR 0x020c8000 28#define MX6Q_ANATOP_BASE_ADDR 0x020c8000
29#define MX6Q_ANATOP_SIZE 0x1000 29#define MX6Q_ANATOP_SIZE 0x1000
30#define MX6Q_UART2_BASE_ADDR 0x021e8000
31#define MX6Q_UART2_SIZE 0x4000
30#define MX6Q_UART4_BASE_ADDR 0x021f0000 32#define MX6Q_UART4_BASE_ADDR 0x021f0000
31#define MX6Q_UART4_SIZE 0x4000 33#define MX6Q_UART4_SIZE 0x4000
32 34
diff --git a/arch/arm/plat-mxc/time.c b/arch/arm/plat-mxc/time.c
index 7daf7c9a413b..99f958ca6cb8 100644
--- a/arch/arm/plat-mxc/time.c
+++ b/arch/arm/plat-mxc/time.c
@@ -25,6 +25,7 @@
25#include <linux/irq.h> 25#include <linux/irq.h>
26#include <linux/clockchips.h> 26#include <linux/clockchips.h>
27#include <linux/clk.h> 27#include <linux/clk.h>
28#include <linux/err.h>
28 29
29#include <mach/hardware.h> 30#include <mach/hardware.h>
30#include <asm/sched_clock.h> 31#include <asm/sched_clock.h>
@@ -282,6 +283,19 @@ static int __init mxc_clockevent_init(struct clk *timer_clk)
282void __init mxc_timer_init(struct clk *timer_clk, void __iomem *base, int irq) 283void __init mxc_timer_init(struct clk *timer_clk, void __iomem *base, int irq)
283{ 284{
284 uint32_t tctl_val; 285 uint32_t tctl_val;
286 struct clk *timer_ipg_clk;
287
288 if (!timer_clk) {
289 timer_clk = clk_get_sys("imx-gpt.0", "per");
290 if (IS_ERR(timer_clk)) {
291 pr_err("i.MX timer: unable to get clk\n");
292 return;
293 }
294
295 timer_ipg_clk = clk_get_sys("imx-gpt.0", "ipg");
296 if (!IS_ERR(timer_ipg_clk))
297 clk_prepare_enable(timer_ipg_clk);
298 }
285 299
286 clk_prepare_enable(timer_clk); 300 clk_prepare_enable(timer_clk);
287 301
diff --git a/arch/arm/plat-omap/counter_32k.c b/arch/arm/plat-omap/counter_32k.c
index 44ae077dbc28..2132c4f389e1 100644
--- a/arch/arm/plat-omap/counter_32k.c
+++ b/arch/arm/plat-omap/counter_32k.c
@@ -28,19 +28,20 @@
28 28
29#include <plat/clock.h> 29#include <plat/clock.h>
30 30
31/* OMAP2_32KSYNCNT_CR_OFF: offset of 32ksync counter register */
32#define OMAP2_32KSYNCNT_CR_OFF 0x10
33
31/* 34/*
32 * 32KHz clocksource ... always available, on pretty most chips except 35 * 32KHz clocksource ... always available, on pretty most chips except
33 * OMAP 730 and 1510. Other timers could be used as clocksources, with 36 * OMAP 730 and 1510. Other timers could be used as clocksources, with
34 * higher resolution in free-running counter modes (e.g. 12 MHz xtal), 37 * higher resolution in free-running counter modes (e.g. 12 MHz xtal),
35 * but systems won't necessarily want to spend resources that way. 38 * but systems won't necessarily want to spend resources that way.
36 */ 39 */
37static void __iomem *timer_32k_base; 40static void __iomem *sync32k_cnt_reg;
38
39#define OMAP16XX_TIMER_32K_SYNCHRONIZED 0xfffbc410
40 41
41static u32 notrace omap_32k_read_sched_clock(void) 42static u32 notrace omap_32k_read_sched_clock(void)
42{ 43{
43 return timer_32k_base ? __raw_readl(timer_32k_base) : 0; 44 return sync32k_cnt_reg ? __raw_readl(sync32k_cnt_reg) : 0;
44} 45}
45 46
46/** 47/**
@@ -60,7 +61,7 @@ static void omap_read_persistent_clock(struct timespec *ts)
60 struct timespec *tsp = &persistent_ts; 61 struct timespec *tsp = &persistent_ts;
61 62
62 last_cycles = cycles; 63 last_cycles = cycles;
63 cycles = timer_32k_base ? __raw_readl(timer_32k_base) : 0; 64 cycles = sync32k_cnt_reg ? __raw_readl(sync32k_cnt_reg) : 0;
64 delta = cycles - last_cycles; 65 delta = cycles - last_cycles;
65 66
66 nsecs = clocksource_cyc2ns(delta, persistent_mult, persistent_shift); 67 nsecs = clocksource_cyc2ns(delta, persistent_mult, persistent_shift);
@@ -69,55 +70,41 @@ static void omap_read_persistent_clock(struct timespec *ts)
69 *ts = *tsp; 70 *ts = *tsp;
70} 71}
71 72
72int __init omap_init_clocksource_32k(void) 73/**
74 * omap_init_clocksource_32k - setup and register counter 32k as a
75 * kernel clocksource
76 * @pbase: base addr of counter_32k module
77 * @size: size of counter_32k to map
78 *
79 * Returns 0 upon success or negative error code upon failure.
80 *
81 */
82int __init omap_init_clocksource_32k(void __iomem *vbase)
73{ 83{
74 static char err[] __initdata = KERN_ERR 84 int ret;
75 "%s: can't register clocksource!\n"; 85
76 86 /*
77 if (cpu_is_omap16xx() || cpu_class_is_omap2()) { 87 * 32k sync Counter register offset is at 0x10
78 u32 pbase; 88 */
79 unsigned long size = SZ_4K; 89 sync32k_cnt_reg = vbase + OMAP2_32KSYNCNT_CR_OFF;
80 void __iomem *base; 90
81 struct clk *sync_32k_ick; 91 /*
82 92 * 120000 rough estimate from the calculations in
83 if (cpu_is_omap16xx()) { 93 * __clocksource_updatefreq_scale.
84 pbase = OMAP16XX_TIMER_32K_SYNCHRONIZED; 94 */
85 size = SZ_1K; 95 clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
86 } else if (cpu_is_omap2420()) 96 32768, NSEC_PER_SEC, 120000);
87 pbase = OMAP2420_32KSYNCT_BASE + 0x10; 97
88 else if (cpu_is_omap2430()) 98 ret = clocksource_mmio_init(sync32k_cnt_reg, "32k_counter", 32768,
89 pbase = OMAP2430_32KSYNCT_BASE + 0x10; 99 250, 32, clocksource_mmio_readl_up);
90 else if (cpu_is_omap34xx()) 100 if (ret) {
91 pbase = OMAP3430_32KSYNCT_BASE + 0x10; 101 pr_err("32k_counter: can't register clocksource\n");
92 else if (cpu_is_omap44xx()) 102 return ret;
93 pbase = OMAP4430_32KSYNCT_BASE + 0x10;
94 else
95 return -ENODEV;
96
97 /* For this to work we must have a static mapping in io.c for this area */
98 base = ioremap(pbase, size);
99 if (!base)
100 return -ENODEV;
101
102 sync_32k_ick = clk_get(NULL, "omap_32ksync_ick");
103 if (!IS_ERR(sync_32k_ick))
104 clk_enable(sync_32k_ick);
105
106 timer_32k_base = base;
107
108 /*
109 * 120000 rough estimate from the calculations in
110 * __clocksource_updatefreq_scale.
111 */
112 clocks_calc_mult_shift(&persistent_mult, &persistent_shift,
113 32768, NSEC_PER_SEC, 120000);
114
115 if (clocksource_mmio_init(base, "32k_counter", 32768, 250, 32,
116 clocksource_mmio_readl_up))
117 printk(err, "32k_counter");
118
119 setup_sched_clock(omap_32k_read_sched_clock, 32, 32768);
120 register_persistent_clock(NULL, omap_read_persistent_clock);
121 } 103 }
104
105 setup_sched_clock(omap_32k_read_sched_clock, 32, 32768);
106 register_persistent_clock(NULL, omap_read_persistent_clock);
107 pr_info("OMAP clocksource: 32k_counter at 32768 Hz\n");
108
122 return 0; 109 return 0;
123} 110}
diff --git a/arch/arm/plat-omap/devices.c b/arch/arm/plat-omap/devices.c
index 09b07d252892..1cba9273d2cb 100644
--- a/arch/arm/plat-omap/devices.c
+++ b/arch/arm/plat-omap/devices.c
@@ -28,54 +28,6 @@
28#include <plat/menelaus.h> 28#include <plat/menelaus.h>
29#include <plat/omap44xx.h> 29#include <plat/omap44xx.h>
30 30
31#if defined(CONFIG_MMC_OMAP) || defined(CONFIG_MMC_OMAP_MODULE) || \
32 defined(CONFIG_MMC_OMAP_HS) || defined(CONFIG_MMC_OMAP_HS_MODULE)
33
34#define OMAP_MMC_NR_RES 2
35
36/*
37 * Register MMC devices. Called from mach-omap1 and mach-omap2 device init.
38 */
39int __init omap_mmc_add(const char *name, int id, unsigned long base,
40 unsigned long size, unsigned int irq,
41 struct omap_mmc_platform_data *data)
42{
43 struct platform_device *pdev;
44 struct resource res[OMAP_MMC_NR_RES];
45 int ret;
46
47 pdev = platform_device_alloc(name, id);
48 if (!pdev)
49 return -ENOMEM;
50
51 memset(res, 0, OMAP_MMC_NR_RES * sizeof(struct resource));
52 res[0].start = base;
53 res[0].end = base + size - 1;
54 res[0].flags = IORESOURCE_MEM;
55 res[1].start = res[1].end = irq;
56 res[1].flags = IORESOURCE_IRQ;
57
58 ret = platform_device_add_resources(pdev, res, ARRAY_SIZE(res));
59 if (ret == 0)
60 ret = platform_device_add_data(pdev, data, sizeof(*data));
61 if (ret)
62 goto fail;
63
64 ret = platform_device_add(pdev);
65 if (ret)
66 goto fail;
67
68 /* return device handle to board setup code */
69 data->dev = &pdev->dev;
70 return 0;
71
72fail:
73 platform_device_put(pdev);
74 return ret;
75}
76
77#endif
78
79/*-------------------------------------------------------------------------*/ 31/*-------------------------------------------------------------------------*/
80 32
81#if defined(CONFIG_HW_RANDOM_OMAP) || defined(CONFIG_HW_RANDOM_OMAP_MODULE) 33#if defined(CONFIG_HW_RANDOM_OMAP) || defined(CONFIG_HW_RANDOM_OMAP_MODULE)
@@ -109,79 +61,6 @@ static void omap_init_rng(void)
109static inline void omap_init_rng(void) {} 61static inline void omap_init_rng(void) {}
110#endif 62#endif
111 63
112/*-------------------------------------------------------------------------*/
113
114/* Numbering for the SPI-capable controllers when used for SPI:
115 * spi = 1
116 * uwire = 2
117 * mmc1..2 = 3..4
118 * mcbsp1..3 = 5..7
119 */
120
121#if defined(CONFIG_SPI_OMAP_UWIRE) || defined(CONFIG_SPI_OMAP_UWIRE_MODULE)
122
123#define OMAP_UWIRE_BASE 0xfffb3000
124
125static struct resource uwire_resources[] = {
126 {
127 .start = OMAP_UWIRE_BASE,
128 .end = OMAP_UWIRE_BASE + 0x20,
129 .flags = IORESOURCE_MEM,
130 },
131};
132
133static struct platform_device omap_uwire_device = {
134 .name = "omap_uwire",
135 .id = -1,
136 .num_resources = ARRAY_SIZE(uwire_resources),
137 .resource = uwire_resources,
138};
139
140static void omap_init_uwire(void)
141{
142 /* FIXME define and use a boot tag; not all boards will be hooking
143 * up devices to the microwire controller, and multi-board configs
144 * mean that CONFIG_SPI_OMAP_UWIRE may be configured anyway...
145 */
146
147 /* board-specific code must configure chipselects (only a few
148 * are normally used) and SCLK/SDI/SDO (each has two choices).
149 */
150 (void) platform_device_register(&omap_uwire_device);
151}
152#else
153static inline void omap_init_uwire(void) {}
154#endif
155
156#if defined(CONFIG_TIDSPBRIDGE) || defined(CONFIG_TIDSPBRIDGE_MODULE)
157
158static phys_addr_t omap_dsp_phys_mempool_base;
159
160void __init omap_dsp_reserve_sdram_memblock(void)
161{
162 phys_addr_t size = CONFIG_TIDSPBRIDGE_MEMPOOL_SIZE;
163 phys_addr_t paddr;
164
165 if (!size)
166 return;
167
168 paddr = arm_memblock_steal(size, SZ_1M);
169 if (!paddr) {
170 pr_err("%s: failed to reserve %llx bytes\n",
171 __func__, (unsigned long long)size);
172 return;
173 }
174
175 omap_dsp_phys_mempool_base = paddr;
176}
177
178phys_addr_t omap_dsp_get_mempool_base(void)
179{
180 return omap_dsp_phys_mempool_base;
181}
182EXPORT_SYMBOL(omap_dsp_get_mempool_base);
183#endif
184
185/* 64/*
186 * This gets called after board-specific INIT_MACHINE, and initializes most 65 * This gets called after board-specific INIT_MACHINE, and initializes most
187 * on-chip peripherals accessible on this board (except for few like USB): 66 * on-chip peripherals accessible on this board (except for few like USB):
@@ -208,7 +87,6 @@ static int __init omap_init_devices(void)
208 * in alphabetical order so they're easier to sort through. 87 * in alphabetical order so they're easier to sort through.
209 */ 88 */
210 omap_init_rng(); 89 omap_init_rng();
211 omap_init_uwire();
212 return 0; 90 return 0;
213} 91}
214arch_initcall(omap_init_devices); 92arch_initcall(omap_init_devices);
diff --git a/arch/arm/plat-omap/dma.c b/arch/arm/plat-omap/dma.c
index 987e6101267d..cb16ade437cb 100644
--- a/arch/arm/plat-omap/dma.c
+++ b/arch/arm/plat-omap/dma.c
@@ -852,7 +852,7 @@ omap_dma_set_prio_lch(int lch, unsigned char read_prio,
852 } 852 }
853 l = p->dma_read(CCR, lch); 853 l = p->dma_read(CCR, lch);
854 l &= ~((1 << 6) | (1 << 26)); 854 l &= ~((1 << 6) | (1 << 26));
855 if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx()) 855 if (cpu_class_is_omap2() && !cpu_is_omap242x())
856 l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26); 856 l |= ((read_prio & 0x1) << 6) | ((write_prio & 0x1) << 26);
857 else 857 else
858 l |= ((read_prio & 0x1) << 6); 858 l |= ((read_prio & 0x1) << 6);
@@ -2080,7 +2080,7 @@ static int __devinit omap_system_dma_probe(struct platform_device *pdev)
2080 } 2080 }
2081 } 2081 }
2082 2082
2083 if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx()) 2083 if (cpu_class_is_omap2() && !cpu_is_omap242x())
2084 omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE, 2084 omap_dma_set_global_params(DMA_DEFAULT_ARB_RATE,
2085 DMA_DEFAULT_FIFO_DEPTH, 0); 2085 DMA_DEFAULT_FIFO_DEPTH, 0);
2086 2086
diff --git a/arch/arm/plat-omap/dmtimer.c b/arch/arm/plat-omap/dmtimer.c
index c4ed35e89fbd..3b0cfeb33d05 100644
--- a/arch/arm/plat-omap/dmtimer.c
+++ b/arch/arm/plat-omap/dmtimer.c
@@ -82,8 +82,6 @@ static void omap_dm_timer_write_reg(struct omap_dm_timer *timer, u32 reg,
82 82
83static void omap_timer_restore_context(struct omap_dm_timer *timer) 83static void omap_timer_restore_context(struct omap_dm_timer *timer)
84{ 84{
85 __raw_writel(timer->context.tiocp_cfg,
86 timer->io_base + OMAP_TIMER_OCP_CFG_OFFSET);
87 if (timer->revision == 1) 85 if (timer->revision == 1)
88 __raw_writel(timer->context.tistat, timer->sys_stat); 86 __raw_writel(timer->context.tistat, timer->sys_stat);
89 87
diff --git a/arch/arm/plat-omap/include/plat/common.h b/arch/arm/plat-omap/include/plat/common.h
index a557b8484e6c..d1cb6f527b7e 100644
--- a/arch/arm/plat-omap/include/plat/common.h
+++ b/arch/arm/plat-omap/include/plat/common.h
@@ -30,7 +30,7 @@
30#include <plat/i2c.h> 30#include <plat/i2c.h>
31#include <plat/omap_hwmod.h> 31#include <plat/omap_hwmod.h>
32 32
33extern int __init omap_init_clocksource_32k(void); 33extern int __init omap_init_clocksource_32k(void __iomem *vbase);
34 34
35extern void __init omap_check_revision(void); 35extern void __init omap_check_revision(void);
36 36
diff --git a/arch/arm/plat-omap/include/plat/cpu.h b/arch/arm/plat-omap/include/plat/cpu.h
index 4bdf14ec6747..297245dba66e 100644
--- a/arch/arm/plat-omap/include/plat/cpu.h
+++ b/arch/arm/plat-omap/include/plat/cpu.h
@@ -121,6 +121,7 @@ IS_OMAP_CLASS(16xx, 0x16)
121IS_OMAP_CLASS(24xx, 0x24) 121IS_OMAP_CLASS(24xx, 0x24)
122IS_OMAP_CLASS(34xx, 0x34) 122IS_OMAP_CLASS(34xx, 0x34)
123IS_OMAP_CLASS(44xx, 0x44) 123IS_OMAP_CLASS(44xx, 0x44)
124IS_AM_CLASS(35xx, 0x35)
124IS_AM_CLASS(33xx, 0x33) 125IS_AM_CLASS(33xx, 0x33)
125 126
126IS_TI_CLASS(81xx, 0x81) 127IS_TI_CLASS(81xx, 0x81)
@@ -148,6 +149,7 @@ IS_AM_SUBCLASS(335x, 0x335)
148#define cpu_is_ti81xx() 0 149#define cpu_is_ti81xx() 0
149#define cpu_is_ti816x() 0 150#define cpu_is_ti816x() 0
150#define cpu_is_ti814x() 0 151#define cpu_is_ti814x() 0
152#define soc_is_am35xx() 0
151#define cpu_is_am33xx() 0 153#define cpu_is_am33xx() 0
152#define cpu_is_am335x() 0 154#define cpu_is_am335x() 0
153#define cpu_is_omap44xx() 0 155#define cpu_is_omap44xx() 0
@@ -357,6 +359,7 @@ IS_OMAP_TYPE(3517, 0x3517)
357# undef cpu_is_ti81xx 359# undef cpu_is_ti81xx
358# undef cpu_is_ti816x 360# undef cpu_is_ti816x
359# undef cpu_is_ti814x 361# undef cpu_is_ti814x
362# undef soc_is_am35xx
360# undef cpu_is_am33xx 363# undef cpu_is_am33xx
361# undef cpu_is_am335x 364# undef cpu_is_am335x
362# define cpu_is_omap3430() is_omap3430() 365# define cpu_is_omap3430() is_omap3430()
@@ -378,6 +381,7 @@ IS_OMAP_TYPE(3517, 0x3517)
378# define cpu_is_ti81xx() is_ti81xx() 381# define cpu_is_ti81xx() is_ti81xx()
379# define cpu_is_ti816x() is_ti816x() 382# define cpu_is_ti816x() is_ti816x()
380# define cpu_is_ti814x() is_ti814x() 383# define cpu_is_ti814x() is_ti814x()
384# define soc_is_am35xx() is_am35xx()
381# define cpu_is_am33xx() is_am33xx() 385# define cpu_is_am33xx() is_am33xx()
382# define cpu_is_am335x() is_am335x() 386# define cpu_is_am335x() is_am335x()
383#endif 387#endif
@@ -433,6 +437,10 @@ IS_OMAP_TYPE(3517, 0x3517)
433#define TI8148_REV_ES2_0 (TI814X_CLASS | (0x1 << 8)) 437#define TI8148_REV_ES2_0 (TI814X_CLASS | (0x1 << 8))
434#define TI8148_REV_ES2_1 (TI814X_CLASS | (0x2 << 8)) 438#define TI8148_REV_ES2_1 (TI814X_CLASS | (0x2 << 8))
435 439
440#define AM35XX_CLASS 0x35170034
441#define AM35XX_REV_ES1_0 AM35XX_CLASS
442#define AM35XX_REV_ES1_1 (AM35XX_CLASS | (0x1 << 8))
443
436#define AM335X_CLASS 0x33500034 444#define AM335X_CLASS 0x33500034
437#define AM335X_REV_ES1_0 AM335X_CLASS 445#define AM335X_REV_ES1_0 AM335X_CLASS
438 446
diff --git a/arch/arm/plat-omap/include/plat/dma.h b/arch/arm/plat-omap/include/plat/dma.h
index 42afb4c45517..c5811d4409b0 100644
--- a/arch/arm/plat-omap/include/plat/dma.h
+++ b/arch/arm/plat-omap/include/plat/dma.h
@@ -312,6 +312,11 @@
312#define CLEAR_CSR_ON_READ BIT(0xC) 312#define CLEAR_CSR_ON_READ BIT(0xC)
313#define IS_WORD_16 BIT(0xD) 313#define IS_WORD_16 BIT(0xD)
314 314
315/* Defines for DMA Capabilities */
316#define DMA_HAS_TRANSPARENT_CAPS (0x1 << 18)
317#define DMA_HAS_CONSTANT_FILL_CAPS (0x1 << 19)
318#define DMA_HAS_DESCRIPTOR_CAPS (0x3 << 20)
319
315enum omap_reg_offsets { 320enum omap_reg_offsets {
316 321
317GCR, GSCR, GRST1, HW_ID, 322GCR, GSCR, GRST1, HW_ID,
diff --git a/arch/arm/plat-omap/include/plat/dmtimer.h b/arch/arm/plat-omap/include/plat/dmtimer.h
index bdf871a84d62..5da73562e486 100644
--- a/arch/arm/plat-omap/include/plat/dmtimer.h
+++ b/arch/arm/plat-omap/include/plat/dmtimer.h
@@ -75,7 +75,6 @@ struct clk;
75 75
76struct timer_regs { 76struct timer_regs {
77 u32 tidr; 77 u32 tidr;
78 u32 tiocp_cfg;
79 u32 tistat; 78 u32 tistat;
80 u32 tisr; 79 u32 tisr;
81 u32 tier; 80 u32 tier;
diff --git a/arch/arm/plat-omap/include/plat/mmc.h b/arch/arm/plat-omap/include/plat/mmc.h
index 3e7ae0f0215f..a7754a886d42 100644
--- a/arch/arm/plat-omap/include/plat/mmc.h
+++ b/arch/arm/plat-omap/include/plat/mmc.h
@@ -177,9 +177,6 @@ extern void omap_mmc_notify_cover_event(struct device *dev, int slot,
177void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data, 177void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
178 int nr_controllers); 178 int nr_controllers);
179void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data); 179void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data);
180int omap_mmc_add(const char *name, int id, unsigned long base,
181 unsigned long size, unsigned int irq,
182 struct omap_mmc_platform_data *data);
183#else 180#else
184static inline void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data, 181static inline void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
185 int nr_controllers) 182 int nr_controllers)
@@ -188,12 +185,6 @@ static inline void omap1_init_mmc(struct omap_mmc_platform_data **mmc_data,
188static inline void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data) 185static inline void omap242x_init_mmc(struct omap_mmc_platform_data **mmc_data)
189{ 186{
190} 187}
191static inline int omap_mmc_add(const char *name, int id, unsigned long base,
192 unsigned long size, unsigned int irq,
193 struct omap_mmc_platform_data *data)
194{
195 return 0;
196}
197 188
198#endif 189#endif
199 190
diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c
index 74daf5ed1432..61fd837624a8 100644
--- a/arch/arm/plat-orion/common.c
+++ b/arch/arm/plat-orion/common.c
@@ -14,15 +14,41 @@
14#include <linux/dma-mapping.h> 14#include <linux/dma-mapping.h>
15#include <linux/serial_8250.h> 15#include <linux/serial_8250.h>
16#include <linux/ata_platform.h> 16#include <linux/ata_platform.h>
17#include <linux/clk.h>
18#include <linux/clkdev.h>
17#include <linux/mv643xx_eth.h> 19#include <linux/mv643xx_eth.h>
18#include <linux/mv643xx_i2c.h> 20#include <linux/mv643xx_i2c.h>
19#include <net/dsa.h> 21#include <net/dsa.h>
20#include <linux/spi/orion_spi.h>
21#include <plat/orion_wdt.h>
22#include <plat/mv_xor.h> 22#include <plat/mv_xor.h>
23#include <plat/ehci-orion.h> 23#include <plat/ehci-orion.h>
24#include <mach/bridge-regs.h> 24#include <mach/bridge-regs.h>
25 25
26/* Create a clkdev entry for a given device/clk */
27void __init orion_clkdev_add(const char *con_id, const char *dev_id,
28 struct clk *clk)
29{
30 struct clk_lookup *cl;
31
32 cl = clkdev_alloc(clk, con_id, dev_id);
33 if (cl)
34 clkdev_add(cl);
35}
36
37/* Create clkdev entries for all orion platforms except kirkwood.
38 Kirkwood has gated clocks for some of its peripherals, so creates
39 its own clkdev entries. For all the other orion devices, create
40 clkdev entries to the tclk. */
41void __init orion_clkdev_init(struct clk *tclk)
42{
43 orion_clkdev_add(NULL, "orion_spi.0", tclk);
44 orion_clkdev_add(NULL, "orion_spi.1", tclk);
45 orion_clkdev_add(NULL, MV643XX_ETH_NAME ".0", tclk);
46 orion_clkdev_add(NULL, MV643XX_ETH_NAME ".1", tclk);
47 orion_clkdev_add(NULL, MV643XX_ETH_NAME ".2", tclk);
48 orion_clkdev_add(NULL, MV643XX_ETH_NAME ".3", tclk);
49 orion_clkdev_add(NULL, "orion_wdt", tclk);
50}
51
26/* Fill in the resources structure and link it into the platform 52/* Fill in the resources structure and link it into the platform
27 device structure. There is always a memory region, and nearly 53 device structure. There is always a memory region, and nearly
28 always an interrupt.*/ 54 always an interrupt.*/
@@ -49,6 +75,12 @@ static void fill_resources(struct platform_device *device,
49/***************************************************************************** 75/*****************************************************************************
50 * UART 76 * UART
51 ****************************************************************************/ 77 ****************************************************************************/
78static unsigned long __init uart_get_clk_rate(struct clk *clk)
79{
80 clk_prepare_enable(clk);
81 return clk_get_rate(clk);
82}
83
52static void __init uart_complete( 84static void __init uart_complete(
53 struct platform_device *orion_uart, 85 struct platform_device *orion_uart,
54 struct plat_serial8250_port *data, 86 struct plat_serial8250_port *data,
@@ -56,12 +88,12 @@ static void __init uart_complete(
56 unsigned int membase, 88 unsigned int membase,
57 resource_size_t mapbase, 89 resource_size_t mapbase,
58 unsigned int irq, 90 unsigned int irq,
59 unsigned int uartclk) 91 struct clk *clk)
60{ 92{
61 data->mapbase = mapbase; 93 data->mapbase = mapbase;
62 data->membase = (void __iomem *)membase; 94 data->membase = (void __iomem *)membase;
63 data->irq = irq; 95 data->irq = irq;
64 data->uartclk = uartclk; 96 data->uartclk = uart_get_clk_rate(clk);
65 orion_uart->dev.platform_data = data; 97 orion_uart->dev.platform_data = data;
66 98
67 fill_resources(orion_uart, resources, mapbase, 0xff, irq); 99 fill_resources(orion_uart, resources, mapbase, 0xff, irq);
@@ -90,10 +122,10 @@ static struct platform_device orion_uart0 = {
90void __init orion_uart0_init(unsigned int membase, 122void __init orion_uart0_init(unsigned int membase,
91 resource_size_t mapbase, 123 resource_size_t mapbase,
92 unsigned int irq, 124 unsigned int irq,
93 unsigned int uartclk) 125 struct clk *clk)
94{ 126{
95 uart_complete(&orion_uart0, orion_uart0_data, orion_uart0_resources, 127 uart_complete(&orion_uart0, orion_uart0_data, orion_uart0_resources,
96 membase, mapbase, irq, uartclk); 128 membase, mapbase, irq, clk);
97} 129}
98 130
99/***************************************************************************** 131/*****************************************************************************
@@ -118,10 +150,10 @@ static struct platform_device orion_uart1 = {
118void __init orion_uart1_init(unsigned int membase, 150void __init orion_uart1_init(unsigned int membase,
119 resource_size_t mapbase, 151 resource_size_t mapbase,
120 unsigned int irq, 152 unsigned int irq,
121 unsigned int uartclk) 153 struct clk *clk)
122{ 154{
123 uart_complete(&orion_uart1, orion_uart1_data, orion_uart1_resources, 155 uart_complete(&orion_uart1, orion_uart1_data, orion_uart1_resources,
124 membase, mapbase, irq, uartclk); 156 membase, mapbase, irq, clk);
125} 157}
126 158
127/***************************************************************************** 159/*****************************************************************************
@@ -146,10 +178,10 @@ static struct platform_device orion_uart2 = {
146void __init orion_uart2_init(unsigned int membase, 178void __init orion_uart2_init(unsigned int membase,
147 resource_size_t mapbase, 179 resource_size_t mapbase,
148 unsigned int irq, 180 unsigned int irq,
149 unsigned int uartclk) 181 struct clk *clk)
150{ 182{
151 uart_complete(&orion_uart2, orion_uart2_data, orion_uart2_resources, 183 uart_complete(&orion_uart2, orion_uart2_data, orion_uart2_resources,
152 membase, mapbase, irq, uartclk); 184 membase, mapbase, irq, clk);
153} 185}
154 186
155/***************************************************************************** 187/*****************************************************************************
@@ -174,10 +206,10 @@ static struct platform_device orion_uart3 = {
174void __init orion_uart3_init(unsigned int membase, 206void __init orion_uart3_init(unsigned int membase,
175 resource_size_t mapbase, 207 resource_size_t mapbase,
176 unsigned int irq, 208 unsigned int irq,
177 unsigned int uartclk) 209 struct clk *clk)
178{ 210{
179 uart_complete(&orion_uart3, orion_uart3_data, orion_uart3_resources, 211 uart_complete(&orion_uart3, orion_uart3_data, orion_uart3_resources,
180 membase, mapbase, irq, uartclk); 212 membase, mapbase, irq, clk);
181} 213}
182 214
183/***************************************************************************** 215/*****************************************************************************
@@ -203,13 +235,11 @@ void __init orion_rtc_init(unsigned long mapbase,
203 ****************************************************************************/ 235 ****************************************************************************/
204static __init void ge_complete( 236static __init void ge_complete(
205 struct mv643xx_eth_shared_platform_data *orion_ge_shared_data, 237 struct mv643xx_eth_shared_platform_data *orion_ge_shared_data,
206 int tclk,
207 struct resource *orion_ge_resource, unsigned long irq, 238 struct resource *orion_ge_resource, unsigned long irq,
208 struct platform_device *orion_ge_shared, 239 struct platform_device *orion_ge_shared,
209 struct mv643xx_eth_platform_data *eth_data, 240 struct mv643xx_eth_platform_data *eth_data,
210 struct platform_device *orion_ge) 241 struct platform_device *orion_ge)
211{ 242{
212 orion_ge_shared_data->t_clk = tclk;
213 orion_ge_resource->start = irq; 243 orion_ge_resource->start = irq;
214 orion_ge_resource->end = irq; 244 orion_ge_resource->end = irq;
215 eth_data->shared = orion_ge_shared; 245 eth_data->shared = orion_ge_shared;
@@ -260,12 +290,11 @@ static struct platform_device orion_ge00 = {
260void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data, 290void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
261 unsigned long mapbase, 291 unsigned long mapbase,
262 unsigned long irq, 292 unsigned long irq,
263 unsigned long irq_err, 293 unsigned long irq_err)
264 int tclk)
265{ 294{
266 fill_resources(&orion_ge00_shared, orion_ge00_shared_resources, 295 fill_resources(&orion_ge00_shared, orion_ge00_shared_resources,
267 mapbase + 0x2000, SZ_16K - 1, irq_err); 296 mapbase + 0x2000, SZ_16K - 1, irq_err);
268 ge_complete(&orion_ge00_shared_data, tclk, 297 ge_complete(&orion_ge00_shared_data,
269 orion_ge00_resources, irq, &orion_ge00_shared, 298 orion_ge00_resources, irq, &orion_ge00_shared,
270 eth_data, &orion_ge00); 299 eth_data, &orion_ge00);
271} 300}
@@ -313,12 +342,11 @@ static struct platform_device orion_ge01 = {
313void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data, 342void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
314 unsigned long mapbase, 343 unsigned long mapbase,
315 unsigned long irq, 344 unsigned long irq,
316 unsigned long irq_err, 345 unsigned long irq_err)
317 int tclk)
318{ 346{
319 fill_resources(&orion_ge01_shared, orion_ge01_shared_resources, 347 fill_resources(&orion_ge01_shared, orion_ge01_shared_resources,
320 mapbase + 0x2000, SZ_16K - 1, irq_err); 348 mapbase + 0x2000, SZ_16K - 1, irq_err);
321 ge_complete(&orion_ge01_shared_data, tclk, 349 ge_complete(&orion_ge01_shared_data,
322 orion_ge01_resources, irq, &orion_ge01_shared, 350 orion_ge01_resources, irq, &orion_ge01_shared,
323 eth_data, &orion_ge01); 351 eth_data, &orion_ge01);
324} 352}
@@ -366,12 +394,11 @@ static struct platform_device orion_ge10 = {
366void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data, 394void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
367 unsigned long mapbase, 395 unsigned long mapbase,
368 unsigned long irq, 396 unsigned long irq,
369 unsigned long irq_err, 397 unsigned long irq_err)
370 int tclk)
371{ 398{
372 fill_resources(&orion_ge10_shared, orion_ge10_shared_resources, 399 fill_resources(&orion_ge10_shared, orion_ge10_shared_resources,
373 mapbase + 0x2000, SZ_16K - 1, irq_err); 400 mapbase + 0x2000, SZ_16K - 1, irq_err);
374 ge_complete(&orion_ge10_shared_data, tclk, 401 ge_complete(&orion_ge10_shared_data,
375 orion_ge10_resources, irq, &orion_ge10_shared, 402 orion_ge10_resources, irq, &orion_ge10_shared,
376 eth_data, &orion_ge10); 403 eth_data, &orion_ge10);
377} 404}
@@ -419,12 +446,11 @@ static struct platform_device orion_ge11 = {
419void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data, 446void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
420 unsigned long mapbase, 447 unsigned long mapbase,
421 unsigned long irq, 448 unsigned long irq,
422 unsigned long irq_err, 449 unsigned long irq_err)
423 int tclk)
424{ 450{
425 fill_resources(&orion_ge11_shared, orion_ge11_shared_resources, 451 fill_resources(&orion_ge11_shared, orion_ge11_shared_resources,
426 mapbase + 0x2000, SZ_16K - 1, irq_err); 452 mapbase + 0x2000, SZ_16K - 1, irq_err);
427 ge_complete(&orion_ge11_shared_data, tclk, 453 ge_complete(&orion_ge11_shared_data,
428 orion_ge11_resources, irq, &orion_ge11_shared, 454 orion_ge11_resources, irq, &orion_ge11_shared,
429 eth_data, &orion_ge11); 455 eth_data, &orion_ge11);
430} 456}
@@ -521,44 +547,32 @@ void __init orion_i2c_1_init(unsigned long mapbase,
521/***************************************************************************** 547/*****************************************************************************
522 * SPI 548 * SPI
523 ****************************************************************************/ 549 ****************************************************************************/
524static struct orion_spi_info orion_spi_plat_data;
525static struct resource orion_spi_resources; 550static struct resource orion_spi_resources;
526 551
527static struct platform_device orion_spi = { 552static struct platform_device orion_spi = {
528 .name = "orion_spi", 553 .name = "orion_spi",
529 .id = 0, 554 .id = 0,
530 .dev = {
531 .platform_data = &orion_spi_plat_data,
532 },
533}; 555};
534 556
535static struct orion_spi_info orion_spi_1_plat_data;
536static struct resource orion_spi_1_resources; 557static struct resource orion_spi_1_resources;
537 558
538static struct platform_device orion_spi_1 = { 559static struct platform_device orion_spi_1 = {
539 .name = "orion_spi", 560 .name = "orion_spi",
540 .id = 1, 561 .id = 1,
541 .dev = {
542 .platform_data = &orion_spi_1_plat_data,
543 },
544}; 562};
545 563
546/* Note: The SPI silicon core does have interrupts. However the 564/* Note: The SPI silicon core does have interrupts. However the
547 * current Linux software driver does not use interrupts. */ 565 * current Linux software driver does not use interrupts. */
548 566
549void __init orion_spi_init(unsigned long mapbase, 567void __init orion_spi_init(unsigned long mapbase)
550 unsigned long tclk)
551{ 568{
552 orion_spi_plat_data.tclk = tclk;
553 fill_resources(&orion_spi, &orion_spi_resources, 569 fill_resources(&orion_spi, &orion_spi_resources,
554 mapbase, SZ_512 - 1, NO_IRQ); 570 mapbase, SZ_512 - 1, NO_IRQ);
555 platform_device_register(&orion_spi); 571 platform_device_register(&orion_spi);
556} 572}
557 573
558void __init orion_spi_1_init(unsigned long mapbase, 574void __init orion_spi_1_init(unsigned long mapbase)
559 unsigned long tclk)
560{ 575{
561 orion_spi_1_plat_data.tclk = tclk;
562 fill_resources(&orion_spi_1, &orion_spi_1_resources, 576 fill_resources(&orion_spi_1, &orion_spi_1_resources,
563 mapbase, SZ_512 - 1, NO_IRQ); 577 mapbase, SZ_512 - 1, NO_IRQ);
564 platform_device_register(&orion_spi_1); 578 platform_device_register(&orion_spi_1);
@@ -567,24 +581,18 @@ void __init orion_spi_1_init(unsigned long mapbase,
567/***************************************************************************** 581/*****************************************************************************
568 * Watchdog 582 * Watchdog
569 ****************************************************************************/ 583 ****************************************************************************/
570static struct orion_wdt_platform_data orion_wdt_data;
571
572static struct resource orion_wdt_resource = 584static struct resource orion_wdt_resource =
573 DEFINE_RES_MEM(TIMER_VIRT_BASE, 0x28); 585 DEFINE_RES_MEM(TIMER_VIRT_BASE, 0x28);
574 586
575static struct platform_device orion_wdt_device = { 587static struct platform_device orion_wdt_device = {
576 .name = "orion_wdt", 588 .name = "orion_wdt",
577 .id = -1, 589 .id = -1,
578 .dev = {
579 .platform_data = &orion_wdt_data,
580 },
581 .resource = &orion_wdt_resource,
582 .num_resources = 1, 590 .num_resources = 1,
591 .resource = &orion_wdt_resource,
583}; 592};
584 593
585void __init orion_wdt_init(unsigned long tclk) 594void __init orion_wdt_init(void)
586{ 595{
587 orion_wdt_data.tclk = tclk;
588 platform_device_register(&orion_wdt_device); 596 platform_device_register(&orion_wdt_device);
589} 597}
590 598
diff --git a/arch/arm/plat-orion/include/plat/common.h b/arch/arm/plat-orion/include/plat/common.h
index a7fa005a5a0e..e00fdb213609 100644
--- a/arch/arm/plat-orion/include/plat/common.h
+++ b/arch/arm/plat-orion/include/plat/common.h
@@ -16,22 +16,22 @@ struct dsa_platform_data;
16void __init orion_uart0_init(unsigned int membase, 16void __init orion_uart0_init(unsigned int membase,
17 resource_size_t mapbase, 17 resource_size_t mapbase,
18 unsigned int irq, 18 unsigned int irq,
19 unsigned int uartclk); 19 struct clk *clk);
20 20
21void __init orion_uart1_init(unsigned int membase, 21void __init orion_uart1_init(unsigned int membase,
22 resource_size_t mapbase, 22 resource_size_t mapbase,
23 unsigned int irq, 23 unsigned int irq,
24 unsigned int uartclk); 24 struct clk *clk);
25 25
26void __init orion_uart2_init(unsigned int membase, 26void __init orion_uart2_init(unsigned int membase,
27 resource_size_t mapbase, 27 resource_size_t mapbase,
28 unsigned int irq, 28 unsigned int irq,
29 unsigned int uartclk); 29 struct clk *clk);
30 30
31void __init orion_uart3_init(unsigned int membase, 31void __init orion_uart3_init(unsigned int membase,
32 resource_size_t mapbase, 32 resource_size_t mapbase,
33 unsigned int irq, 33 unsigned int irq,
34 unsigned int uartclk); 34 struct clk *clk);
35 35
36void __init orion_rtc_init(unsigned long mapbase, 36void __init orion_rtc_init(unsigned long mapbase,
37 unsigned long irq); 37 unsigned long irq);
@@ -39,29 +39,26 @@ void __init orion_rtc_init(unsigned long mapbase,
39void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data, 39void __init orion_ge00_init(struct mv643xx_eth_platform_data *eth_data,
40 unsigned long mapbase, 40 unsigned long mapbase,
41 unsigned long irq, 41 unsigned long irq,
42 unsigned long irq_err, 42 unsigned long irq_err);
43 int tclk);
44 43
45void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data, 44void __init orion_ge01_init(struct mv643xx_eth_platform_data *eth_data,
46 unsigned long mapbase, 45 unsigned long mapbase,
47 unsigned long irq, 46 unsigned long irq,
48 unsigned long irq_err, 47 unsigned long irq_err);
49 int tclk);
50 48
51void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data, 49void __init orion_ge10_init(struct mv643xx_eth_platform_data *eth_data,
52 unsigned long mapbase, 50 unsigned long mapbase,
53 unsigned long irq, 51 unsigned long irq,
54 unsigned long irq_err, 52 unsigned long irq_err);
55 int tclk);
56 53
57void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data, 54void __init orion_ge11_init(struct mv643xx_eth_platform_data *eth_data,
58 unsigned long mapbase, 55 unsigned long mapbase,
59 unsigned long irq, 56 unsigned long irq,
60 unsigned long irq_err, 57 unsigned long irq_err);
61 int tclk);
62 58
63void __init orion_ge00_switch_init(struct dsa_platform_data *d, 59void __init orion_ge00_switch_init(struct dsa_platform_data *d,
64 int irq); 60 int irq);
61
65void __init orion_i2c_init(unsigned long mapbase, 62void __init orion_i2c_init(unsigned long mapbase,
66 unsigned long irq, 63 unsigned long irq,
67 unsigned long freq_m); 64 unsigned long freq_m);
@@ -70,13 +67,11 @@ void __init orion_i2c_1_init(unsigned long mapbase,
70 unsigned long irq, 67 unsigned long irq,
71 unsigned long freq_m); 68 unsigned long freq_m);
72 69
73void __init orion_spi_init(unsigned long mapbase, 70void __init orion_spi_init(unsigned long mapbase);
74 unsigned long tclk);
75 71
76void __init orion_spi_1_init(unsigned long mapbase, 72void __init orion_spi_1_init(unsigned long mapbase);
77 unsigned long tclk);
78 73
79void __init orion_wdt_init(unsigned long tclk); 74void __init orion_wdt_init(void);
80 75
81void __init orion_xor0_init(unsigned long mapbase_low, 76void __init orion_xor0_init(unsigned long mapbase_low,
82 unsigned long mapbase_high, 77 unsigned long mapbase_high,
@@ -106,4 +101,9 @@ void __init orion_crypto_init(unsigned long mapbase,
106 unsigned long srambase, 101 unsigned long srambase,
107 unsigned long sram_size, 102 unsigned long sram_size,
108 unsigned long irq); 103 unsigned long irq);
104
105void __init orion_clkdev_add(const char *con_id, const char *dev_id,
106 struct clk *clk);
107
108void __init orion_clkdev_init(struct clk *tclk);
109#endif 109#endif
diff --git a/arch/arm/plat-orion/include/plat/orion_wdt.h b/arch/arm/plat-orion/include/plat/orion_wdt.h
deleted file mode 100644
index 665c362a2fba..000000000000
--- a/arch/arm/plat-orion/include/plat/orion_wdt.h
+++ /dev/null
@@ -1,18 +0,0 @@
1/*
2 * arch/arm/plat-orion/include/plat/orion_wdt.h
3 *
4 * This file is licensed under the terms of the GNU General Public
5 * License version 2. This program is licensed "as is" without any
6 * warranty of any kind, whether express or implied.
7 */
8
9#ifndef __PLAT_ORION_WDT_H
10#define __PLAT_ORION_WDT_H
11
12struct orion_wdt_platform_data {
13 u32 tclk; /* no <linux/clk.h> support yet */
14};
15
16
17#endif
18
diff --git a/arch/arm/plat-orion/pcie.c b/arch/arm/plat-orion/pcie.c
index 86dbb5bdb172..f20a321088a2 100644
--- a/arch/arm/plat-orion/pcie.c
+++ b/arch/arm/plat-orion/pcie.c
@@ -52,12 +52,12 @@
52#define PCIE_DEBUG_SOFT_RESET (1<<20) 52#define PCIE_DEBUG_SOFT_RESET (1<<20)
53 53
54 54
55u32 __init orion_pcie_dev_id(void __iomem *base) 55u32 orion_pcie_dev_id(void __iomem *base)
56{ 56{
57 return readl(base + PCIE_DEV_ID_OFF) >> 16; 57 return readl(base + PCIE_DEV_ID_OFF) >> 16;
58} 58}
59 59
60u32 __init orion_pcie_rev(void __iomem *base) 60u32 orion_pcie_rev(void __iomem *base)
61{ 61{
62 return readl(base + PCIE_DEV_REV_OFF) & 0xff; 62 return readl(base + PCIE_DEV_REV_OFF) & 0xff;
63} 63}
diff --git a/arch/arm/plat-pxa/include/plat/pxa27x_keypad.h b/arch/arm/plat-pxa/include/plat/pxa27x_keypad.h
index abcc36eb1242..5ce8d5e6ea51 100644
--- a/arch/arm/plat-pxa/include/plat/pxa27x_keypad.h
+++ b/arch/arm/plat-pxa/include/plat/pxa27x_keypad.h
@@ -44,6 +44,10 @@ struct pxa27x_keypad_platform_data {
44 /* direct keys */ 44 /* direct keys */
45 int direct_key_num; 45 int direct_key_num;
46 unsigned int direct_key_map[MAX_DIRECT_KEY_NUM]; 46 unsigned int direct_key_map[MAX_DIRECT_KEY_NUM];
47 /* the key output may be low active */
48 int direct_key_low_active;
49 /* give board a chance to choose the start direct key */
50 unsigned int direct_key_mask;
47 51
48 /* rotary encoders 0 */ 52 /* rotary encoders 0 */
49 int enable_rotary0; 53 int enable_rotary0;
diff --git a/arch/arm/plat-s3c24xx/Makefile b/arch/arm/plat-s3c24xx/Makefile
index 2467b800cc76..9f60549c8da1 100644
--- a/arch/arm/plat-s3c24xx/Makefile
+++ b/arch/arm/plat-s3c24xx/Makefile
@@ -12,10 +12,7 @@ obj- :=
12 12
13# Core files 13# Core files
14 14
15obj-y += cpu.o
16obj-y += irq.o 15obj-y += irq.o
17obj-y += dev-uart.o
18obj-y += clock.o
19obj-$(CONFIG_S3C24XX_DCLK) += clock-dclk.o 16obj-$(CONFIG_S3C24XX_DCLK) += clock-dclk.o
20 17
21obj-$(CONFIG_CPU_FREQ_S3C24XX) += cpu-freq.o 18obj-$(CONFIG_CPU_FREQ_S3C24XX) += cpu-freq.o
@@ -23,9 +20,6 @@ obj-$(CONFIG_CPU_FREQ_S3C24XX_DEBUGFS) += cpu-freq-debugfs.o
23 20
24# Architecture dependent builds 21# Architecture dependent builds
25 22
26obj-$(CONFIG_PM) += pm.o
27obj-$(CONFIG_PM) += irq-pm.o
28obj-$(CONFIG_PM) += sleep.o
29obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o 23obj-$(CONFIG_S3C2410_CLOCK) += s3c2410-clock.o
30obj-$(CONFIG_S3C24XX_DMA) += dma.o 24obj-$(CONFIG_S3C24XX_DMA) += dma.o
31obj-$(CONFIG_S3C2410_IOTIMING) += s3c2410-iotiming.o 25obj-$(CONFIG_S3C2410_IOTIMING) += s3c2410-iotiming.o
diff --git a/arch/arm/plat-s3c24xx/clock.c b/arch/arm/plat-s3c24xx/clock.c
deleted file mode 100644
index 931d26d1a54b..000000000000
--- a/arch/arm/plat-s3c24xx/clock.c
+++ /dev/null
@@ -1,59 +0,0 @@
1/* linux/arch/arm/plat-s3c24xx/clock.c
2 *
3 * Copyright (c) 2004-2005 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * S3C24XX Core clock control support
7 *
8 * Based on, and code from linux/arch/arm/mach-versatile/clock.c
9 **
10 ** Copyright (C) 2004 ARM Limited.
11 ** Written by Deep Blue Solutions Limited.
12 *
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License as published by
16 * the Free Software Foundation; either version 2 of the License, or
17 * (at your option) any later version.
18 *
19 * This program is distributed in the hope that it will be useful,
20 * but WITHOUT ANY WARRANTY; without even the implied warranty of
21 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
22 * GNU General Public License for more details.
23 *
24 * You should have received a copy of the GNU General Public License
25 * along with this program; if not, write to the Free Software
26 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
27*/
28
29#include <linux/init.h>
30#include <linux/kernel.h>
31#include <linux/clk.h>
32#include <linux/io.h>
33
34#include <mach/hardware.h>
35#include <asm/irq.h>
36
37#include <mach/regs-clock.h>
38#include <mach/regs-gpio.h>
39
40#include <plat/cpu-freq.h>
41
42#include <plat/clock.h>
43#include <plat/cpu.h>
44#include <plat/pll.h>
45
46/* initialise all the clocks */
47
48void __init_or_cpufreq s3c24xx_setup_clocks(unsigned long fclk,
49 unsigned long hclk,
50 unsigned long pclk)
51{
52 clk_upll.rate = s3c24xx_get_pll(__raw_readl(S3C2410_UPLLCON),
53 clk_xtal.rate);
54
55 clk_mpll.rate = fclk;
56 clk_h.rate = hclk;
57 clk_p.rate = pclk;
58 clk_f.rate = fclk;
59}
diff --git a/arch/arm/plat-s3c24xx/dev-uart.c b/arch/arm/plat-s3c24xx/dev-uart.c
deleted file mode 100644
index 9ab22e662fff..000000000000
--- a/arch/arm/plat-s3c24xx/dev-uart.c
+++ /dev/null
@@ -1,100 +0,0 @@
1/* linux/arch/arm/plat-s3c24xx/dev-uart.c
2 *
3 * Copyright (c) 2004 Simtec Electronics
4 * Ben Dooks <ben@simtec.co.uk>
5 *
6 * Base S3C24XX UART resource and platform device definitions
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#include <linux/kernel.h>
14#include <linux/types.h>
15#include <linux/interrupt.h>
16#include <linux/list.h>
17#include <linux/serial_core.h>
18#include <linux/platform_device.h>
19
20#include <asm/mach/arch.h>
21#include <asm/mach/map.h>
22#include <asm/mach/irq.h>
23#include <mach/hardware.h>
24#include <mach/map.h>
25
26#include <plat/devs.h>
27#include <plat/regs-serial.h>
28
29/* Serial port registrations */
30
31static struct resource s3c2410_uart0_resource[] = {
32 [0] = {
33 .start = S3C2410_PA_UART0,
34 .end = S3C2410_PA_UART0 + 0x3fff,
35 .flags = IORESOURCE_MEM,
36 },
37 [1] = {
38 .start = IRQ_S3CUART_RX0,
39 .end = IRQ_S3CUART_ERR0,
40 .flags = IORESOURCE_IRQ,
41 }
42};
43
44static struct resource s3c2410_uart1_resource[] = {
45 [0] = {
46 .start = S3C2410_PA_UART1,
47 .end = S3C2410_PA_UART1 + 0x3fff,
48 .flags = IORESOURCE_MEM,
49 },
50 [1] = {
51 .start = IRQ_S3CUART_RX1,
52 .end = IRQ_S3CUART_ERR1,
53 .flags = IORESOURCE_IRQ,
54 }
55};
56
57static struct resource s3c2410_uart2_resource[] = {
58 [0] = {
59 .start = S3C2410_PA_UART2,
60 .end = S3C2410_PA_UART2 + 0x3fff,
61 .flags = IORESOURCE_MEM,
62 },
63 [1] = {
64 .start = IRQ_S3CUART_RX2,
65 .end = IRQ_S3CUART_ERR2,
66 .flags = IORESOURCE_IRQ,
67 }
68};
69
70static struct resource s3c2410_uart3_resource[] = {
71 [0] = {
72 .start = S3C2443_PA_UART3,
73 .end = S3C2443_PA_UART3 + 0x3fff,
74 .flags = IORESOURCE_MEM,
75 },
76 [1] = {
77 .start = IRQ_S3CUART_RX3,
78 .end = IRQ_S3CUART_ERR3,
79 .flags = IORESOURCE_IRQ,
80 },
81};
82
83struct s3c24xx_uart_resources s3c2410_uart_resources[] __initdata = {
84 [0] = {
85 .resources = s3c2410_uart0_resource,
86 .nr_resources = ARRAY_SIZE(s3c2410_uart0_resource),
87 },
88 [1] = {
89 .resources = s3c2410_uart1_resource,
90 .nr_resources = ARRAY_SIZE(s3c2410_uart1_resource),
91 },
92 [2] = {
93 .resources = s3c2410_uart2_resource,
94 .nr_resources = ARRAY_SIZE(s3c2410_uart2_resource),
95 },
96 [3] = {
97 .resources = s3c2410_uart3_resource,
98 .nr_resources = ARRAY_SIZE(s3c2410_uart3_resource),
99 },
100};
diff --git a/arch/arm/plat-s5p/Kconfig b/arch/arm/plat-s5p/Kconfig
deleted file mode 100644
index 96bea3202304..000000000000
--- a/arch/arm/plat-s5p/Kconfig
+++ /dev/null
@@ -1,140 +0,0 @@
1# arch/arm/plat-s5p/Kconfig
2#
3# Copyright (c) 2009 Samsung Electronics Co., Ltd.
4# http://www.samsung.com/
5#
6# Licensed under GPLv2
7
8config PLAT_S5P
9 bool
10 depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
11 default y
12 select ARM_VIC if !ARCH_EXYNOS
13 select ARM_GIC if ARCH_EXYNOS
14 select GIC_NON_BANKED if ARCH_EXYNOS4
15 select NO_IOPORT
16 select ARCH_REQUIRE_GPIOLIB
17 select S3C_GPIO_TRACK
18 select S5P_GPIO_DRVSTR
19 select SAMSUNG_GPIOLIB_4BIT
20 select PLAT_SAMSUNG
21 select SAMSUNG_CLKSRC
22 select SAMSUNG_IRQ_VIC_TIMER
23 help
24 Base platform code for Samsung's S5P series SoC.
25
26config S5P_EXT_INT
27 bool
28 help
29 Use the external interrupts (other than GPIO interrupts.)
30 Note: Do not choose this for S5P6440 and S5P6450.
31
32config S5P_GPIO_INT
33 bool
34 help
35 Common code for the GPIO interrupts (other than external interrupts.)
36
37config S5P_HRT
38 bool
39 select SAMSUNG_DEV_PWM
40 help
41 Use the High Resolution timer support
42
43config S5P_DEV_UART
44 def_bool y
45 depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210)
46
47config S5P_PM
48 bool
49 help
50 Common code for power management support on S5P and newer SoCs
51 Note: Do not select this for S5P6440 and S5P6450.
52
53comment "System MMU"
54
55config S5P_SYSTEM_MMU
56 bool "S5P SYSTEM MMU"
57 depends on ARCH_EXYNOS4
58 help
59 Say Y here if you want to enable System MMU
60
61config S5P_SLEEP
62 bool
63 help
64 Internal config node to apply common S5P sleep management code.
65 Can be selected by S5P and newer SoCs with similar sleep procedure.
66
67config S5P_DEV_FIMC0
68 bool
69 help
70 Compile in platform device definitions for FIMC controller 0
71
72config S5P_DEV_FIMC1
73 bool
74 help
75 Compile in platform device definitions for FIMC controller 1
76
77config S5P_DEV_FIMC2
78 bool
79 help
80 Compile in platform device definitions for FIMC controller 2
81
82config S5P_DEV_FIMC3
83 bool
84 help
85 Compile in platform device definitions for FIMC controller 3
86
87config S5P_DEV_JPEG
88 bool
89 help
90 Compile in platform device definitions for JPEG codec
91
92config S5P_DEV_G2D
93 bool
94 help
95 Compile in platform device definitions for G2D device
96
97config S5P_DEV_FIMD0
98 bool
99 help
100 Compile in platform device definitions for FIMD controller 0
101
102config S5P_DEV_I2C_HDMIPHY
103 bool
104 help
105 Compile in platform device definitions for I2C HDMIPHY controller
106
107config S5P_DEV_MFC
108 bool
109 help
110 Compile in platform device definitions for MFC
111
112config S5P_DEV_ONENAND
113 bool
114 help
115 Compile in platform device definition for OneNAND controller
116
117config S5P_DEV_CSIS0
118 bool
119 help
120 Compile in platform device definitions for MIPI-CSIS channel 0
121
122config S5P_DEV_CSIS1
123 bool
124 help
125 Compile in platform device definitions for MIPI-CSIS channel 1
126
127config S5P_DEV_TV
128 bool
129 help
130 Compile in platform device definition for TV interface
131
132config S5P_DEV_USB_EHCI
133 bool
134 help
135 Compile in platform device definition for USB EHCI
136
137config S5P_SETUP_MIPIPHY
138 bool
139 help
140 Compile in common setup code for MIPI-CSIS and MIPI-DSIM devices
diff --git a/arch/arm/plat-s5p/Makefile b/arch/arm/plat-s5p/Makefile
deleted file mode 100644
index 4bd824136659..000000000000
--- a/arch/arm/plat-s5p/Makefile
+++ /dev/null
@@ -1,28 +0,0 @@
1# arch/arm/plat-s5p/Makefile
2#
3# Copyright (c) 2009 Samsung Electronics Co., Ltd.
4# http://www.samsung.com/
5#
6# Licensed under GPLv2
7
8obj-y :=
9obj-m :=
10obj-n := dummy.o
11obj- :=
12
13# Core files
14
15obj-y += clock.o
16obj-y += irq.o
17obj-$(CONFIG_S5P_EXT_INT) += irq-eint.o
18obj-$(CONFIG_S5P_GPIO_INT) += irq-gpioint.o
19obj-$(CONFIG_S5P_SYSTEM_MMU) += sysmmu.o
20obj-$(CONFIG_S5P_PM) += pm.o irq-pm.o
21obj-$(CONFIG_S5P_SLEEP) += sleep.o
22obj-$(CONFIG_S5P_HRT) += s5p-time.o
23
24# devices
25
26obj-$(CONFIG_S5P_DEV_UART) += dev-uart.o
27obj-$(CONFIG_S5P_DEV_MFC) += dev-mfc.o
28obj-$(CONFIG_S5P_SETUP_MIPIPHY) += setup-mipiphy.o
diff --git a/arch/arm/plat-s5p/sysmmu.c b/arch/arm/plat-s5p/sysmmu.c
deleted file mode 100644
index c8bec9c7655d..000000000000
--- a/arch/arm/plat-s5p/sysmmu.c
+++ /dev/null
@@ -1,313 +0,0 @@
1/* linux/arch/arm/plat-s5p/sysmmu.c
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/io.h>
12#include <linux/interrupt.h>
13#include <linux/platform_device.h>
14#include <linux/export.h>
15
16#include <asm/pgtable.h>
17
18#include <mach/map.h>
19#include <mach/regs-sysmmu.h>
20#include <plat/sysmmu.h>
21
22#define CTRL_ENABLE 0x5
23#define CTRL_BLOCK 0x7
24#define CTRL_DISABLE 0x0
25
26static struct device *dev;
27
28static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
29 S5P_PAGE_FAULT_ADDR,
30 S5P_AR_FAULT_ADDR,
31 S5P_AW_FAULT_ADDR,
32 S5P_DEFAULT_SLAVE_ADDR,
33 S5P_AR_FAULT_ADDR,
34 S5P_AR_FAULT_ADDR,
35 S5P_AW_FAULT_ADDR,
36 S5P_AW_FAULT_ADDR
37};
38
39static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
40 "PAGE FAULT",
41 "AR MULTI-HIT FAULT",
42 "AW MULTI-HIT FAULT",
43 "BUS ERROR",
44 "AR SECURITY PROTECTION FAULT",
45 "AR ACCESS PROTECTION FAULT",
46 "AW SECURITY PROTECTION FAULT",
47 "AW ACCESS PROTECTION FAULT"
48};
49
50static int (*fault_handlers[S5P_SYSMMU_TOTAL_IPNUM])(
51 enum S5P_SYSMMU_INTERRUPT_TYPE itype,
52 unsigned long pgtable_base,
53 unsigned long fault_addr);
54
55/*
56 * If adjacent 2 bits are true, the system MMU is enabled.
57 * The system MMU is disabled, otherwise.
58 */
59static unsigned long sysmmu_states;
60
61static inline void set_sysmmu_active(sysmmu_ips ips)
62{
63 sysmmu_states |= 3 << (ips * 2);
64}
65
66static inline void set_sysmmu_inactive(sysmmu_ips ips)
67{
68 sysmmu_states &= ~(3 << (ips * 2));
69}
70
71static inline int is_sysmmu_active(sysmmu_ips ips)
72{
73 return sysmmu_states & (3 << (ips * 2));
74}
75
76static void __iomem *sysmmusfrs[S5P_SYSMMU_TOTAL_IPNUM];
77
78static inline void sysmmu_block(sysmmu_ips ips)
79{
80 __raw_writel(CTRL_BLOCK, sysmmusfrs[ips] + S5P_MMU_CTRL);
81 dev_dbg(dev, "%s is blocked.\n", sysmmu_ips_name[ips]);
82}
83
84static inline void sysmmu_unblock(sysmmu_ips ips)
85{
86 __raw_writel(CTRL_ENABLE, sysmmusfrs[ips] + S5P_MMU_CTRL);
87 dev_dbg(dev, "%s is unblocked.\n", sysmmu_ips_name[ips]);
88}
89
90static inline void __sysmmu_tlb_invalidate(sysmmu_ips ips)
91{
92 __raw_writel(0x1, sysmmusfrs[ips] + S5P_MMU_FLUSH);
93 dev_dbg(dev, "TLB of %s is invalidated.\n", sysmmu_ips_name[ips]);
94}
95
96static inline void __sysmmu_set_ptbase(sysmmu_ips ips, unsigned long pgd)
97{
98 if (unlikely(pgd == 0)) {
99 pgd = (unsigned long)ZERO_PAGE(0);
100 __raw_writel(0x20, sysmmusfrs[ips] + S5P_MMU_CFG); /* 4KB LV1 */
101 } else {
102 __raw_writel(0x0, sysmmusfrs[ips] + S5P_MMU_CFG); /* 16KB LV1 */
103 }
104
105 __raw_writel(pgd, sysmmusfrs[ips] + S5P_PT_BASE_ADDR);
106
107 dev_dbg(dev, "Page table base of %s is initialized with 0x%08lX.\n",
108 sysmmu_ips_name[ips], pgd);
109 __sysmmu_tlb_invalidate(ips);
110}
111
112void sysmmu_set_fault_handler(sysmmu_ips ips,
113 int (*handler)(enum S5P_SYSMMU_INTERRUPT_TYPE itype,
114 unsigned long pgtable_base,
115 unsigned long fault_addr))
116{
117 BUG_ON(!((ips >= SYSMMU_MDMA) && (ips < S5P_SYSMMU_TOTAL_IPNUM)));
118 fault_handlers[ips] = handler;
119}
120
121static irqreturn_t s5p_sysmmu_irq(int irq, void *dev_id)
122{
123 /* SYSMMU is in blocked when interrupt occurred. */
124 unsigned long base = 0;
125 sysmmu_ips ips = (sysmmu_ips)dev_id;
126 enum S5P_SYSMMU_INTERRUPT_TYPE itype;
127
128 itype = (enum S5P_SYSMMU_INTERRUPT_TYPE)
129 __ffs(__raw_readl(sysmmusfrs[ips] + S5P_INT_STATUS));
130
131 BUG_ON(!((itype >= 0) && (itype < 8)));
132
133 dev_alert(dev, "%s occurred by %s.\n", sysmmu_fault_name[itype],
134 sysmmu_ips_name[ips]);
135
136 if (fault_handlers[ips]) {
137 unsigned long addr;
138
139 base = __raw_readl(sysmmusfrs[ips] + S5P_PT_BASE_ADDR);
140 addr = __raw_readl(sysmmusfrs[ips] + fault_reg_offset[itype]);
141
142 if (fault_handlers[ips](itype, base, addr)) {
143 __raw_writel(1 << itype,
144 sysmmusfrs[ips] + S5P_INT_CLEAR);
145 dev_notice(dev, "%s from %s is resolved."
146 " Retrying translation.\n",
147 sysmmu_fault_name[itype], sysmmu_ips_name[ips]);
148 } else {
149 base = 0;
150 }
151 }
152
153 sysmmu_unblock(ips);
154
155 if (!base)
156 dev_notice(dev, "%s from %s is not handled.\n",
157 sysmmu_fault_name[itype], sysmmu_ips_name[ips]);
158
159 return IRQ_HANDLED;
160}
161
162void s5p_sysmmu_set_tablebase_pgd(sysmmu_ips ips, unsigned long pgd)
163{
164 if (is_sysmmu_active(ips)) {
165 sysmmu_block(ips);
166 __sysmmu_set_ptbase(ips, pgd);
167 sysmmu_unblock(ips);
168 } else {
169 dev_dbg(dev, "%s is disabled. "
170 "Skipping initializing page table base.\n",
171 sysmmu_ips_name[ips]);
172 }
173}
174
175void s5p_sysmmu_enable(sysmmu_ips ips, unsigned long pgd)
176{
177 if (!is_sysmmu_active(ips)) {
178 sysmmu_clk_enable(ips);
179
180 __sysmmu_set_ptbase(ips, pgd);
181
182 __raw_writel(CTRL_ENABLE, sysmmusfrs[ips] + S5P_MMU_CTRL);
183
184 set_sysmmu_active(ips);
185 dev_dbg(dev, "%s is enabled.\n", sysmmu_ips_name[ips]);
186 } else {
187 dev_dbg(dev, "%s is already enabled.\n", sysmmu_ips_name[ips]);
188 }
189}
190
191void s5p_sysmmu_disable(sysmmu_ips ips)
192{
193 if (is_sysmmu_active(ips)) {
194 __raw_writel(CTRL_DISABLE, sysmmusfrs[ips] + S5P_MMU_CTRL);
195 set_sysmmu_inactive(ips);
196 sysmmu_clk_disable(ips);
197 dev_dbg(dev, "%s is disabled.\n", sysmmu_ips_name[ips]);
198 } else {
199 dev_dbg(dev, "%s is already disabled.\n", sysmmu_ips_name[ips]);
200 }
201}
202
203void s5p_sysmmu_tlb_invalidate(sysmmu_ips ips)
204{
205 if (is_sysmmu_active(ips)) {
206 sysmmu_block(ips);
207 __sysmmu_tlb_invalidate(ips);
208 sysmmu_unblock(ips);
209 } else {
210 dev_dbg(dev, "%s is disabled. "
211 "Skipping invalidating TLB.\n", sysmmu_ips_name[ips]);
212 }
213}
214
215static int s5p_sysmmu_probe(struct platform_device *pdev)
216{
217 int i, ret;
218 struct resource *res, *mem;
219
220 dev = &pdev->dev;
221
222 for (i = 0; i < S5P_SYSMMU_TOTAL_IPNUM; i++) {
223 int irq;
224
225 sysmmu_clk_init(dev, i);
226 sysmmu_clk_disable(i);
227
228 res = platform_get_resource(pdev, IORESOURCE_MEM, i);
229 if (!res) {
230 dev_err(dev, "Failed to get the resource of %s.\n",
231 sysmmu_ips_name[i]);
232 ret = -ENODEV;
233 goto err_res;
234 }
235
236 mem = request_mem_region(res->start, resource_size(res),
237 pdev->name);
238 if (!mem) {
239 dev_err(dev, "Failed to request the memory region of %s.\n",
240 sysmmu_ips_name[i]);
241 ret = -EBUSY;
242 goto err_res;
243 }
244
245 sysmmusfrs[i] = ioremap(res->start, resource_size(res));
246 if (!sysmmusfrs[i]) {
247 dev_err(dev, "Failed to ioremap() for %s.\n",
248 sysmmu_ips_name[i]);
249 ret = -ENXIO;
250 goto err_reg;
251 }
252
253 irq = platform_get_irq(pdev, i);
254 if (irq <= 0) {
255 dev_err(dev, "Failed to get the IRQ resource of %s.\n",
256 sysmmu_ips_name[i]);
257 ret = -ENOENT;
258 goto err_map;
259 }
260
261 if (request_irq(irq, s5p_sysmmu_irq, IRQF_DISABLED,
262 pdev->name, (void *)i)) {
263 dev_err(dev, "Failed to request IRQ for %s.\n",
264 sysmmu_ips_name[i]);
265 ret = -ENOENT;
266 goto err_map;
267 }
268 }
269
270 return 0;
271
272err_map:
273 iounmap(sysmmusfrs[i]);
274err_reg:
275 release_mem_region(mem->start, resource_size(mem));
276err_res:
277 return ret;
278}
279
280static int s5p_sysmmu_remove(struct platform_device *pdev)
281{
282 return 0;
283}
284int s5p_sysmmu_runtime_suspend(struct device *dev)
285{
286 return 0;
287}
288
289int s5p_sysmmu_runtime_resume(struct device *dev)
290{
291 return 0;
292}
293
294const struct dev_pm_ops s5p_sysmmu_pm_ops = {
295 .runtime_suspend = s5p_sysmmu_runtime_suspend,
296 .runtime_resume = s5p_sysmmu_runtime_resume,
297};
298
299static struct platform_driver s5p_sysmmu_driver = {
300 .probe = s5p_sysmmu_probe,
301 .remove = s5p_sysmmu_remove,
302 .driver = {
303 .owner = THIS_MODULE,
304 .name = "s5p-sysmmu",
305 .pm = &s5p_sysmmu_pm_ops,
306 }
307};
308
309static int __init s5p_sysmmu_init(void)
310{
311 return platform_driver_register(&s5p_sysmmu_driver);
312}
313arch_initcall(s5p_sysmmu_init);
diff --git a/arch/arm/plat-samsung/Kconfig b/arch/arm/plat-samsung/Kconfig
index a0ffc77da809..a2fae4ea0936 100644
--- a/arch/arm/plat-samsung/Kconfig
+++ b/arch/arm/plat-samsung/Kconfig
@@ -13,6 +13,24 @@ config PLAT_SAMSUNG
13 help 13 help
14 Base platform code for all Samsung SoC based systems 14 Base platform code for all Samsung SoC based systems
15 15
16config PLAT_S5P
17 bool
18 depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
19 default y
20 select ARM_VIC if !ARCH_EXYNOS
21 select ARM_GIC if ARCH_EXYNOS
22 select GIC_NON_BANKED if ARCH_EXYNOS4
23 select NO_IOPORT
24 select ARCH_REQUIRE_GPIOLIB
25 select S3C_GPIO_TRACK
26 select S5P_GPIO_DRVSTR
27 select SAMSUNG_GPIOLIB_4BIT
28 select PLAT_SAMSUNG
29 select SAMSUNG_CLKSRC
30 select SAMSUNG_IRQ_VIC_TIMER
31 help
32 Base platform code for Samsung's S5P series SoC.
33
16if PLAT_SAMSUNG 34if PLAT_SAMSUNG
17 35
18# boot configurations 36# boot configurations
@@ -50,6 +68,14 @@ config S3C_LOWLEVEL_UART_PORT
50 this configuration should be between zero and two. The port 68 this configuration should be between zero and two. The port
51 must have been initialised by the boot-loader before use. 69 must have been initialised by the boot-loader before use.
52 70
71# timer options
72
73config S5P_HRT
74 bool
75 select SAMSUNG_DEV_PWM
76 help
77 Use the High Resolution timer support
78
53# clock options 79# clock options
54 80
55config SAMSUNG_CLKSRC 81config SAMSUNG_CLKSRC
@@ -58,6 +84,11 @@ config SAMSUNG_CLKSRC
58 Select the clock code for the clksrc implementation 84 Select the clock code for the clksrc implementation
59 used by newer systems such as the S3C64XX. 85 used by newer systems such as the S3C64XX.
60 86
87config S5P_CLOCK
88 def_bool (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
89 help
90 Support common clock part for ARCH_S5P and ARCH_EXYNOS SoCs
91
61# options for IRQ support 92# options for IRQ support
62 93
63config SAMSUNG_IRQ_VIC_TIMER 94config SAMSUNG_IRQ_VIC_TIMER
@@ -65,6 +96,22 @@ config SAMSUNG_IRQ_VIC_TIMER
65 help 96 help
66 Internal configuration to build the VIC timer interrupt code. 97 Internal configuration to build the VIC timer interrupt code.
67 98
99config S5P_IRQ
100 def_bool (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210 || ARCH_EXYNOS)
101 help
102 Support common interrupt part for ARCH_S5P and ARCH_EXYNOS SoCs
103
104config S5P_EXT_INT
105 bool
106 help
107 Use the external interrupts (other than GPIO interrupts.)
108 Note: Do not choose this for S5P6440 and S5P6450.
109
110config S5P_GPIO_INT
111 bool
112 help
113 Common code for the GPIO interrupts (other than external interrupts.)
114
68# options for gpio configuration support 115# options for gpio configuration support
69 116
70config SAMSUNG_GPIOLIB_4BIT 117config SAMSUNG_GPIOLIB_4BIT
@@ -117,6 +164,12 @@ config S3C_GPIO_TRACK
117 Internal configuration option to enable the s3c specific gpio 164 Internal configuration option to enable the s3c specific gpio
118 chip tracking if the platform requires it. 165 chip tracking if the platform requires it.
119 166
167# uart options
168
169config S5P_DEV_UART
170 def_bool y
171 depends on (ARCH_S5P64X0 || ARCH_S5PC100 || ARCH_S5PV210)
172
120# ADC driver 173# ADC driver
121 174
122config S3C_ADC 175config S3C_ADC
@@ -274,6 +327,76 @@ config SAMSUNG_DEV_BACKLIGHT
274 help 327 help
275 Compile in platform device definition LCD backlight with PWM Timer 328 Compile in platform device definition LCD backlight with PWM Timer
276 329
330config S5P_DEV_CSIS0
331 bool
332 help
333 Compile in platform device definitions for MIPI-CSIS channel 0
334
335config S5P_DEV_CSIS1
336 bool
337 help
338 Compile in platform device definitions for MIPI-CSIS channel 1
339
340config S5P_DEV_FIMC0
341 bool
342 help
343 Compile in platform device definitions for FIMC controller 0
344
345config S5P_DEV_FIMC1
346 bool
347 help
348 Compile in platform device definitions for FIMC controller 1
349
350config S5P_DEV_FIMC2
351 bool
352 help
353 Compile in platform device definitions for FIMC controller 2
354
355config S5P_DEV_FIMC3
356 bool
357 help
358 Compile in platform device definitions for FIMC controller 3
359
360config S5P_DEV_FIMD0
361 bool
362 help
363 Compile in platform device definitions for FIMD controller 0
364
365config S5P_DEV_G2D
366 bool
367 help
368 Compile in platform device definitions for G2D device
369
370config S5P_DEV_I2C_HDMIPHY
371 bool
372 help
373 Compile in platform device definitions for I2C HDMIPHY controller
374
375config S5P_DEV_JPEG
376 bool
377 help
378 Compile in platform device definitions for JPEG codec
379
380config S5P_DEV_MFC
381 bool
382 help
383 Compile in setup memory (init) code for MFC
384
385config S5P_DEV_ONENAND
386 bool
387 help
388 Compile in platform device definition for OneNAND controller
389
390config S5P_DEV_TV
391 bool
392 help
393 Compile in platform device definition for TV interface
394
395config S5P_DEV_USB_EHCI
396 bool
397 help
398 Compile in platform device definition for USB EHCI
399
277config S3C24XX_PWM 400config S3C24XX_PWM
278 bool "PWM device support" 401 bool "PWM device support"
279 select HAVE_PWM 402 select HAVE_PWM
@@ -281,6 +404,11 @@ config S3C24XX_PWM
281 Support for exporting the PWM timer blocks via the pwm device 404 Support for exporting the PWM timer blocks via the pwm device
282 system 405 system
283 406
407config S5P_SETUP_MIPIPHY
408 bool
409 help
410 Compile in common setup code for MIPI-CSIS and MIPI-DSIM devices
411
284# DMA 412# DMA
285 413
286config S3C_DMA 414config S3C_DMA
@@ -291,7 +419,7 @@ config S3C_DMA
291config SAMSUNG_DMADEV 419config SAMSUNG_DMADEV
292 bool 420 bool
293 select DMADEVICES 421 select DMADEVICES
294 select PL330_DMA if (CPU_EXYNOS4210 || CPU_S5PV210 || CPU_S5PC100 || \ 422 select PL330_DMA if (ARCH_EXYNOS5 || ARCH_EXYNOS4 || CPU_S5PV210 || CPU_S5PC100 || \
295 CPU_S5P6450 || CPU_S5P6440) 423 CPU_S5P6450 || CPU_S5P6440)
296 select ARM_AMBA 424 select ARM_AMBA
297 help 425 help
@@ -351,6 +479,18 @@ config SAMSUNG_WAKEMASK
351 and above. This code allows a set of interrupt to wakeup-mask 479 and above. This code allows a set of interrupt to wakeup-mask
352 mappings. See <plat/wakeup-mask.h> 480 mappings. See <plat/wakeup-mask.h>
353 481
482config S5P_PM
483 bool
484 help
485 Common code for power management support on S5P and newer SoCs
486 Note: Do not select this for S5P6440 and S5P6450.
487
488config S5P_SLEEP
489 bool
490 help
491 Internal config node to apply common S5P sleep management code.
492 Can be selected by S5P and newer SoCs with similar sleep procedure.
493
354comment "Power Domain" 494comment "Power Domain"
355 495
356config SAMSUNG_PD 496config SAMSUNG_PD
diff --git a/arch/arm/plat-samsung/Makefile b/arch/arm/plat-samsung/Makefile
index 6012366f33cb..860b2db4db15 100644
--- a/arch/arm/plat-samsung/Makefile
+++ b/arch/arm/plat-samsung/Makefile
@@ -13,12 +13,18 @@ obj- :=
13 13
14obj-y += init.o cpu.o 14obj-y += init.o cpu.o
15obj-$(CONFIG_ARCH_USES_GETTIMEOFFSET) += time.o 15obj-$(CONFIG_ARCH_USES_GETTIMEOFFSET) += time.o
16obj-$(CONFIG_S5P_HRT) += s5p-time.o
17
16obj-y += clock.o 18obj-y += clock.o
17obj-y += pwm-clock.o 19obj-y += pwm-clock.o
18 20
19obj-$(CONFIG_SAMSUNG_CLKSRC) += clock-clksrc.o 21obj-$(CONFIG_SAMSUNG_CLKSRC) += clock-clksrc.o
22obj-$(CONFIG_S5P_CLOCK) += s5p-clock.o
20 23
21obj-$(CONFIG_SAMSUNG_IRQ_VIC_TIMER) += irq-vic-timer.o 24obj-$(CONFIG_SAMSUNG_IRQ_VIC_TIMER) += irq-vic-timer.o
25obj-$(CONFIG_S5P_IRQ) += s5p-irq.o
26obj-$(CONFIG_S5P_EXT_INT) += s5p-irq-eint.o
27obj-$(CONFIG_S5P_GPIO_INT) += s5p-irq-gpioint.o
22 28
23# ADC 29# ADC
24 30
@@ -30,9 +36,13 @@ obj-y += platformdata.o
30 36
31obj-y += devs.o 37obj-y += devs.o
32obj-y += dev-uart.o 38obj-y += dev-uart.o
39obj-$(CONFIG_S5P_DEV_MFC) += s5p-dev-mfc.o
40obj-$(CONFIG_S5P_DEV_UART) += s5p-dev-uart.o
33 41
34obj-$(CONFIG_SAMSUNG_DEV_BACKLIGHT) += dev-backlight.o 42obj-$(CONFIG_SAMSUNG_DEV_BACKLIGHT) += dev-backlight.o
35 43
44obj-$(CONFIG_S5P_SETUP_MIPIPHY) += setup-mipiphy.o
45
36# DMA support 46# DMA support
37 47
38obj-$(CONFIG_S3C_DMA) += dma.o s3c-dma-ops.o 48obj-$(CONFIG_S3C_DMA) += dma.o s3c-dma-ops.o
@@ -47,6 +57,9 @@ obj-$(CONFIG_SAMSUNG_PM_CHECK) += pm-check.o
47 57
48obj-$(CONFIG_SAMSUNG_WAKEMASK) += wakeup-mask.o 58obj-$(CONFIG_SAMSUNG_WAKEMASK) += wakeup-mask.o
49 59
60obj-$(CONFIG_S5P_PM) += s5p-pm.o s5p-irq-pm.o
61obj-$(CONFIG_S5P_SLEEP) += s5p-sleep.o
62
50# PD support 63# PD support
51 64
52obj-$(CONFIG_SAMSUNG_PD) += pd.o 65obj-$(CONFIG_SAMSUNG_PD) += pd.o
diff --git a/arch/arm/plat-samsung/include/plat/cpu.h b/arch/arm/plat-samsung/include/plat/cpu.h
index 787ceaca0be8..0721293fad63 100644
--- a/arch/arm/plat-samsung/include/plat/cpu.h
+++ b/arch/arm/plat-samsung/include/plat/cpu.h
@@ -202,7 +202,7 @@ extern struct bus_type s3c2443_subsys;
202extern struct bus_type s3c6410_subsys; 202extern struct bus_type s3c6410_subsys;
203extern struct bus_type s5p64x0_subsys; 203extern struct bus_type s5p64x0_subsys;
204extern struct bus_type s5pv210_subsys; 204extern struct bus_type s5pv210_subsys;
205extern struct bus_type exynos4_subsys; 205extern struct bus_type exynos_subsys;
206 206
207extern void (*s5pc1xx_idle)(void); 207extern void (*s5pc1xx_idle)(void);
208 208
diff --git a/arch/arm/plat-samsung/include/plat/devs.h b/arch/arm/plat-samsung/include/plat/devs.h
index 2155d4af62a3..61ca2f356c52 100644
--- a/arch/arm/plat-samsung/include/plat/devs.h
+++ b/arch/arm/plat-samsung/include/plat/devs.h
@@ -133,7 +133,8 @@ extern struct platform_device exynos4_device_pcm1;
133extern struct platform_device exynos4_device_pcm2; 133extern struct platform_device exynos4_device_pcm2;
134extern struct platform_device exynos4_device_pd[]; 134extern struct platform_device exynos4_device_pd[];
135extern struct platform_device exynos4_device_spdif; 135extern struct platform_device exynos4_device_spdif;
136extern struct platform_device exynos4_device_sysmmu; 136
137extern struct platform_device exynos_device_drm;
137 138
138extern struct platform_device samsung_asoc_dma; 139extern struct platform_device samsung_asoc_dma;
139extern struct platform_device samsung_asoc_idma; 140extern struct platform_device samsung_asoc_idma;
diff --git a/arch/arm/plat-samsung/include/plat/dma-pl330.h b/arch/arm/plat-samsung/include/plat/dma-pl330.h
index 0670f37aaaed..d384a8016b47 100644
--- a/arch/arm/plat-samsung/include/plat/dma-pl330.h
+++ b/arch/arm/plat-samsung/include/plat/dma-pl330.h
@@ -90,6 +90,7 @@ enum dma_ch {
90 DMACH_MIPI_HSI5, 90 DMACH_MIPI_HSI5,
91 DMACH_MIPI_HSI6, 91 DMACH_MIPI_HSI6,
92 DMACH_MIPI_HSI7, 92 DMACH_MIPI_HSI7,
93 DMACH_DISP1,
93 DMACH_MTOM_0, 94 DMACH_MTOM_0,
94 DMACH_MTOM_1, 95 DMACH_MTOM_1,
95 DMACH_MTOM_2, 96 DMACH_MTOM_2,
diff --git a/arch/arm/plat-samsung/include/plat/s5p-clock.h b/arch/arm/plat-samsung/include/plat/s5p-clock.h
index 1de4b32f98e9..8364b4bea8b8 100644
--- a/arch/arm/plat-samsung/include/plat/s5p-clock.h
+++ b/arch/arm/plat-samsung/include/plat/s5p-clock.h
@@ -32,8 +32,10 @@ extern struct clk clk_48m;
32extern struct clk s5p_clk_27m; 32extern struct clk s5p_clk_27m;
33extern struct clk clk_fout_apll; 33extern struct clk clk_fout_apll;
34extern struct clk clk_fout_bpll; 34extern struct clk clk_fout_bpll;
35extern struct clk clk_fout_bpll_div2;
35extern struct clk clk_fout_cpll; 36extern struct clk clk_fout_cpll;
36extern struct clk clk_fout_mpll; 37extern struct clk clk_fout_mpll;
38extern struct clk clk_fout_mpll_div2;
37extern struct clk clk_fout_epll; 39extern struct clk clk_fout_epll;
38extern struct clk clk_fout_dpll; 40extern struct clk clk_fout_dpll;
39extern struct clk clk_fout_vpll; 41extern struct clk clk_fout_vpll;
@@ -42,8 +44,10 @@ extern struct clk clk_vpll;
42 44
43extern struct clksrc_sources clk_src_apll; 45extern struct clksrc_sources clk_src_apll;
44extern struct clksrc_sources clk_src_bpll; 46extern struct clksrc_sources clk_src_bpll;
47extern struct clksrc_sources clk_src_bpll_fout;
45extern struct clksrc_sources clk_src_cpll; 48extern struct clksrc_sources clk_src_cpll;
46extern struct clksrc_sources clk_src_mpll; 49extern struct clksrc_sources clk_src_mpll;
50extern struct clksrc_sources clk_src_mpll_fout;
47extern struct clksrc_sources clk_src_epll; 51extern struct clksrc_sources clk_src_epll;
48extern struct clksrc_sources clk_src_dpll; 52extern struct clksrc_sources clk_src_dpll;
49 53
diff --git a/arch/arm/plat-samsung/include/plat/sysmmu.h b/arch/arm/plat-samsung/include/plat/sysmmu.h
deleted file mode 100644
index 5fe8ee01a5ba..000000000000
--- a/arch/arm/plat-samsung/include/plat/sysmmu.h
+++ /dev/null
@@ -1,95 +0,0 @@
1/* linux/arch/arm/plat-samsung/include/plat/sysmmu.h
2 *
3 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * Samsung System MMU driver for S5P platform
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11*/
12
13#ifndef __PLAT_SAMSUNG_SYSMMU_H
14#define __PLAT_SAMSUNG_SYSMMU_H __FILE__
15
16enum S5P_SYSMMU_INTERRUPT_TYPE {
17 SYSMMU_PAGEFAULT,
18 SYSMMU_AR_MULTIHIT,
19 SYSMMU_AW_MULTIHIT,
20 SYSMMU_BUSERROR,
21 SYSMMU_AR_SECURITY,
22 SYSMMU_AR_ACCESS,
23 SYSMMU_AW_SECURITY,
24 SYSMMU_AW_PROTECTION, /* 7 */
25 SYSMMU_FAULTS_NUM
26};
27
28#ifdef CONFIG_S5P_SYSTEM_MMU
29
30#include <mach/sysmmu.h>
31
32/**
33 * s5p_sysmmu_enable() - enable system mmu of ip
34 * @ips: The ip connected system mmu.
35 * #pgd: Base physical address of the 1st level page table
36 *
37 * This function enable system mmu to transfer address
38 * from virtual address to physical address
39 */
40void s5p_sysmmu_enable(sysmmu_ips ips, unsigned long pgd);
41
42/**
43 * s5p_sysmmu_disable() - disable sysmmu mmu of ip
44 * @ips: The ip connected system mmu.
45 *
46 * This function disable system mmu to transfer address
47 * from virtual address to physical address
48 */
49void s5p_sysmmu_disable(sysmmu_ips ips);
50
51/**
52 * s5p_sysmmu_set_tablebase_pgd() - set page table base address to refer page table
53 * @ips: The ip connected system mmu.
54 * @pgd: The page table base address.
55 *
56 * This function set page table base address
57 * When system mmu transfer address from virtaul address to physical address,
58 * system mmu refer address information from page table
59 */
60void s5p_sysmmu_set_tablebase_pgd(sysmmu_ips ips, unsigned long pgd);
61
62/**
63 * s5p_sysmmu_tlb_invalidate() - flush all TLB entry in system mmu
64 * @ips: The ip connected system mmu.
65 *
66 * This function flush all TLB entry in system mmu
67 */
68void s5p_sysmmu_tlb_invalidate(sysmmu_ips ips);
69
70/** s5p_sysmmu_set_fault_handler() - Fault handler for System MMUs
71 * @itype: type of fault.
72 * @pgtable_base: the physical address of page table base. This is 0 if @ips is
73 * SYSMMU_BUSERROR.
74 * @fault_addr: the device (virtual) address that the System MMU tried to
75 * translated. This is 0 if @ips is SYSMMU_BUSERROR.
76 * Called when interrupt occurred by the System MMUs
77 * The device drivers of peripheral devices that has a System MMU can implement
78 * a fault handler to resolve address translation fault by System MMU.
79 * The meanings of return value and parameters are described below.
80
81 * return value: non-zero if the fault is correctly resolved.
82 * zero if the fault is not handled.
83 */
84void s5p_sysmmu_set_fault_handler(sysmmu_ips ips,
85 int (*handler)(enum S5P_SYSMMU_INTERRUPT_TYPE itype,
86 unsigned long pgtable_base,
87 unsigned long fault_addr));
88#else
89#define s5p_sysmmu_enable(ips, pgd) do { } while (0)
90#define s5p_sysmmu_disable(ips) do { } while (0)
91#define s5p_sysmmu_set_tablebase_pgd(ips, pgd) do { } while (0)
92#define s5p_sysmmu_tlb_invalidate(ips) do { } while (0)
93#define s5p_sysmmu_set_fault_handler(ips, handler) do { } while (0)
94#endif
95#endif /* __ASM_PLAT_SYSMMU_H */
diff --git a/arch/arm/plat-s5p/clock.c b/arch/arm/plat-samsung/s5p-clock.c
index f68a9bb11948..031a61899bef 100644
--- a/arch/arm/plat-s5p/clock.c
+++ b/arch/arm/plat-samsung/s5p-clock.c
@@ -1,5 +1,4 @@
1/* linux/arch/arm/plat-s5p/clock.c 1/*
2 *
3 * Copyright 2009 Samsung Electronics Co., Ltd. 2 * Copyright 2009 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/ 3 * http://www.samsung.com/
5 * 4 *
@@ -68,6 +67,11 @@ struct clk clk_fout_bpll = {
68 .id = -1, 67 .id = -1,
69}; 68};
70 69
70struct clk clk_fout_bpll_div2 = {
71 .name = "fout_bpll_div2",
72 .id = -1,
73};
74
71/* CPLL clock output */ 75/* CPLL clock output */
72 76
73struct clk clk_fout_cpll = { 77struct clk clk_fout_cpll = {
@@ -83,6 +87,11 @@ struct clk clk_fout_mpll = {
83 .id = -1, 87 .id = -1,
84}; 88};
85 89
90struct clk clk_fout_mpll_div2 = {
91 .name = "fout_mpll_div2",
92 .id = -1,
93};
94
86/* EPLL clock output */ 95/* EPLL clock output */
87struct clk clk_fout_epll = { 96struct clk clk_fout_epll = {
88 .name = "fout_epll", 97 .name = "fout_epll",
@@ -126,6 +135,16 @@ struct clksrc_sources clk_src_bpll = {
126 .nr_sources = ARRAY_SIZE(clk_src_bpll_list), 135 .nr_sources = ARRAY_SIZE(clk_src_bpll_list),
127}; 136};
128 137
138static struct clk *clk_src_bpll_fout_list[] = {
139 [0] = &clk_fout_bpll_div2,
140 [1] = &clk_fout_bpll,
141};
142
143struct clksrc_sources clk_src_bpll_fout = {
144 .sources = clk_src_bpll_fout_list,
145 .nr_sources = ARRAY_SIZE(clk_src_bpll_fout_list),
146};
147
129/* Possible clock sources for CPLL Mux */ 148/* Possible clock sources for CPLL Mux */
130static struct clk *clk_src_cpll_list[] = { 149static struct clk *clk_src_cpll_list[] = {
131 [0] = &clk_fin_cpll, 150 [0] = &clk_fin_cpll,
@@ -148,6 +167,16 @@ struct clksrc_sources clk_src_mpll = {
148 .nr_sources = ARRAY_SIZE(clk_src_mpll_list), 167 .nr_sources = ARRAY_SIZE(clk_src_mpll_list),
149}; 168};
150 169
170static struct clk *clk_src_mpll_fout_list[] = {
171 [0] = &clk_fout_mpll_div2,
172 [1] = &clk_fout_mpll,
173};
174
175struct clksrc_sources clk_src_mpll_fout = {
176 .sources = clk_src_mpll_fout_list,
177 .nr_sources = ARRAY_SIZE(clk_src_mpll_fout_list),
178};
179
151/* Possible clock sources for EPLL Mux */ 180/* Possible clock sources for EPLL Mux */
152static struct clk *clk_src_epll_list[] = { 181static struct clk *clk_src_epll_list[] = {
153 [0] = &clk_fin_epll, 182 [0] = &clk_fin_epll,
diff --git a/arch/arm/plat-s5p/dev-mfc.c b/arch/arm/plat-samsung/s5p-dev-mfc.c
index a30d36b7f61b..ad6089465e2a 100644
--- a/arch/arm/plat-s5p/dev-mfc.c
+++ b/arch/arm/plat-samsung/s5p-dev-mfc.c
@@ -1,5 +1,4 @@
1/* linux/arch/arm/plat-s5p/dev-mfc.c 1/*
2 *
3 * Copyright (C) 2010-2011 Samsung Electronics Co.Ltd 2 * Copyright (C) 2010-2011 Samsung Electronics Co.Ltd
4 * 3 *
5 * Base S5P MFC resource and device definitions 4 * Base S5P MFC resource and device definitions
@@ -9,7 +8,6 @@
9 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
10 */ 9 */
11 10
12
13#include <linux/kernel.h> 11#include <linux/kernel.h>
14#include <linux/interrupt.h> 12#include <linux/interrupt.h>
15#include <linux/platform_device.h> 13#include <linux/platform_device.h>
diff --git a/arch/arm/plat-s5p/dev-uart.c b/arch/arm/plat-samsung/s5p-dev-uart.c
index c9308db36183..cafa3deddcc1 100644
--- a/arch/arm/plat-s5p/dev-uart.c
+++ b/arch/arm/plat-samsung/s5p-dev-uart.c
@@ -1,6 +1,5 @@
1/* linux/arch/arm/plat-s5p/dev-uart.c 1/*
2 * 2 * Copyright (c) 2009,2012 Samsung Electronics Co., Ltd.
3 * Copyright (c) 2009 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/ 3 * http://www.samsung.com/
5 * 4 *
6 * Base S5P UART resource and device definitions 5 * Base S5P UART resource and device definitions
@@ -14,6 +13,7 @@
14#include <linux/types.h> 13#include <linux/types.h>
15#include <linux/interrupt.h> 14#include <linux/interrupt.h>
16#include <linux/list.h> 15#include <linux/list.h>
16#include <linux/ioport.h>
17#include <linux/platform_device.h> 17#include <linux/platform_device.h>
18 18
19#include <asm/mach/arch.h> 19#include <asm/mach/arch.h>
@@ -26,86 +26,38 @@
26 /* Serial port registrations */ 26 /* Serial port registrations */
27 27
28static struct resource s5p_uart0_resource[] = { 28static struct resource s5p_uart0_resource[] = {
29 [0] = { 29 [0] = DEFINE_RES_MEM(S5P_PA_UART0, S5P_SZ_UART),
30 .start = S5P_PA_UART0, 30 [1] = DEFINE_RES_IRQ(IRQ_UART0),
31 .end = S5P_PA_UART0 + S5P_SZ_UART - 1,
32 .flags = IORESOURCE_MEM,
33 },
34 [1] = {
35 .start = IRQ_UART0,
36 .end = IRQ_UART0,
37 .flags = IORESOURCE_IRQ,
38 },
39}; 31};
40 32
41static struct resource s5p_uart1_resource[] = { 33static struct resource s5p_uart1_resource[] = {
42 [0] = { 34 [0] = DEFINE_RES_MEM(S5P_PA_UART1, S5P_SZ_UART),
43 .start = S5P_PA_UART1, 35 [1] = DEFINE_RES_IRQ(IRQ_UART1),
44 .end = S5P_PA_UART1 + S5P_SZ_UART - 1,
45 .flags = IORESOURCE_MEM,
46 },
47 [1] = {
48 .start = IRQ_UART1,
49 .end = IRQ_UART1,
50 .flags = IORESOURCE_IRQ,
51 },
52}; 36};
53 37
54static struct resource s5p_uart2_resource[] = { 38static struct resource s5p_uart2_resource[] = {
55 [0] = { 39 [0] = DEFINE_RES_MEM(S5P_PA_UART2, S5P_SZ_UART),
56 .start = S5P_PA_UART2, 40 [1] = DEFINE_RES_IRQ(IRQ_UART2),
57 .end = S5P_PA_UART2 + S5P_SZ_UART - 1,
58 .flags = IORESOURCE_MEM,
59 },
60 [1] = {
61 .start = IRQ_UART2,
62 .end = IRQ_UART2,
63 .flags = IORESOURCE_IRQ,
64 },
65}; 41};
66 42
67static struct resource s5p_uart3_resource[] = { 43static struct resource s5p_uart3_resource[] = {
68#if CONFIG_SERIAL_SAMSUNG_UARTS > 3 44#if CONFIG_SERIAL_SAMSUNG_UARTS > 3
69 [0] = { 45 [0] = DEFINE_RES_MEM(S5P_PA_UART3, S5P_SZ_UART),
70 .start = S5P_PA_UART3, 46 [1] = DEFINE_RES_IRQ(IRQ_UART3),
71 .end = S5P_PA_UART3 + S5P_SZ_UART - 1,
72 .flags = IORESOURCE_MEM,
73 },
74 [1] = {
75 .start = IRQ_UART3,
76 .end = IRQ_UART3,
77 .flags = IORESOURCE_IRQ,
78 },
79#endif 47#endif
80}; 48};
81 49
82static struct resource s5p_uart4_resource[] = { 50static struct resource s5p_uart4_resource[] = {
83#if CONFIG_SERIAL_SAMSUNG_UARTS > 4 51#if CONFIG_SERIAL_SAMSUNG_UARTS > 4
84 [0] = { 52 [0] = DEFINE_RES_MEM(S5P_PA_UART4, S5P_SZ_UART),
85 .start = S5P_PA_UART4, 53 [1] = DEFINE_RES_IRQ(IRQ_UART4),
86 .end = S5P_PA_UART4 + S5P_SZ_UART - 1,
87 .flags = IORESOURCE_MEM,
88 },
89 [1] = {
90 .start = IRQ_UART4,
91 .end = IRQ_UART4,
92 .flags = IORESOURCE_IRQ,
93 },
94#endif 54#endif
95}; 55};
96 56
97static struct resource s5p_uart5_resource[] = { 57static struct resource s5p_uart5_resource[] = {
98#if CONFIG_SERIAL_SAMSUNG_UARTS > 5 58#if CONFIG_SERIAL_SAMSUNG_UARTS > 5
99 [0] = { 59 [0] = DEFINE_RES_MEM(S5P_PA_UART5, S5P_SZ_UART),
100 .start = S5P_PA_UART5, 60 [1] = DEFINE_RES_IRQ(IRQ_UART5),
101 .end = S5P_PA_UART5 + S5P_SZ_UART - 1,
102 .flags = IORESOURCE_MEM,
103 },
104 [1] = {
105 .start = IRQ_UART5,
106 .end = IRQ_UART5,
107 .flags = IORESOURCE_IRQ,
108 },
109#endif 61#endif
110}; 62};
111 63
diff --git a/arch/arm/plat-s5p/irq-eint.c b/arch/arm/plat-samsung/s5p-irq-eint.c
index 139c050918c5..33bd3f3d20f5 100644
--- a/arch/arm/plat-s5p/irq-eint.c
+++ b/arch/arm/plat-samsung/s5p-irq-eint.c
@@ -1,5 +1,4 @@
1/* linux/arch/arm/plat-s5p/irq-eint.c 1/*
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd. 2 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com 3 * http://www.samsung.com
5 * 4 *
diff --git a/arch/arm/plat-s5p/irq-gpioint.c b/arch/arm/plat-samsung/s5p-irq-gpioint.c
index 82c7311017a2..f9431fe5b06e 100644
--- a/arch/arm/plat-s5p/irq-gpioint.c
+++ b/arch/arm/plat-samsung/s5p-irq-gpioint.c
@@ -1,5 +1,4 @@
1/* linux/arch/arm/plat-s5p/irq-gpioint.c 1/*
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd. 2 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * Author: Kyungmin Park <kyungmin.park@samsung.com> 3 * Author: Kyungmin Park <kyungmin.park@samsung.com>
5 * Author: Joonyoung Shim <jy0922.shim@samsung.com> 4 * Author: Joonyoung Shim <jy0922.shim@samsung.com>
diff --git a/arch/arm/plat-s5p/irq-pm.c b/arch/arm/plat-samsung/s5p-irq-pm.c
index d1bfecae6c9f..7c1e3b7072fc 100644
--- a/arch/arm/plat-s5p/irq-pm.c
+++ b/arch/arm/plat-samsung/s5p-irq-pm.c
@@ -1,5 +1,4 @@
1/* linux/arch/arm/plat-s5p/irq-pm.c 1/*
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd. 2 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com 3 * http://www.samsung.com
5 * 4 *
diff --git a/arch/arm/plat-s5p/irq.c b/arch/arm/plat-samsung/s5p-irq.c
index afdaa1082b9f..dfb47d638f03 100644
--- a/arch/arm/plat-s5p/irq.c
+++ b/arch/arm/plat-samsung/s5p-irq.c
@@ -1,5 +1,4 @@
1/* arch/arm/plat-s5p/irq.c 1/*
2 *
3 * Copyright (c) 2009 Samsung Electronics Co., Ltd. 2 * Copyright (c) 2009 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/ 3 * http://www.samsung.com/
5 * 4 *
diff --git a/arch/arm/plat-s5p/pm.c b/arch/arm/plat-samsung/s5p-pm.c
index d15dc47b0e3d..0747468f0936 100644
--- a/arch/arm/plat-s5p/pm.c
+++ b/arch/arm/plat-samsung/s5p-pm.c
@@ -1,5 +1,4 @@
1/* linux/arch/arm/plat-s5p/pm.c 1/*
2 *
3 * Copyright (c) 2010 Samsung Electronics Co., Ltd. 2 * Copyright (c) 2010 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com 3 * http://www.samsung.com
5 * 4 *
diff --git a/arch/arm/plat-s5p/sleep.S b/arch/arm/plat-samsung/s5p-sleep.S
index 006bd01eda02..bdf6dadf8790 100644
--- a/arch/arm/plat-s5p/sleep.S
+++ b/arch/arm/plat-samsung/s5p-sleep.S
@@ -1,5 +1,4 @@
1/* linux/arch/arm/plat-s5p/sleep.S 1/*
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 2 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com 3 * http://www.samsung.com
5 * 4 *
diff --git a/arch/arm/plat-s5p/s5p-time.c b/arch/arm/plat-samsung/s5p-time.c
index 17c0a2c58dfd..028b6e877eb9 100644
--- a/arch/arm/plat-s5p/s5p-time.c
+++ b/arch/arm/plat-samsung/s5p-time.c
@@ -1,5 +1,4 @@
1/* linux/arch/arm/plat-s5p/s5p-time.c 1/*
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd. 2 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com/ 3 * http://www.samsung.com/
5 * 4 *
diff --git a/arch/arm/plat-s5p/setup-mipiphy.c b/arch/arm/plat-samsung/setup-mipiphy.c
index 683c466c0e6a..683c466c0e6a 100644
--- a/arch/arm/plat-s5p/setup-mipiphy.c
+++ b/arch/arm/plat-samsung/setup-mipiphy.c
diff --git a/arch/arm/plat-spear/Kconfig b/arch/arm/plat-spear/Kconfig
index 387655b5ce05..4404f82d5979 100644
--- a/arch/arm/plat-spear/Kconfig
+++ b/arch/arm/plat-spear/Kconfig
@@ -8,6 +8,17 @@ choice
8 prompt "ST SPEAr Family" 8 prompt "ST SPEAr Family"
9 default ARCH_SPEAR3XX 9 default ARCH_SPEAR3XX
10 10
11config ARCH_SPEAR13XX
12 bool "ST SPEAr13xx with Device Tree"
13 select ARM_GIC
14 select CPU_V7
15 select USE_OF
16 select HAVE_SMP
17 select MIGHT_HAVE_CACHE_L2X0
18 select PINCTRL
19 help
20 Supports for ARM's SPEAR13XX family
21
11config ARCH_SPEAR3XX 22config ARCH_SPEAR3XX
12 bool "ST SPEAr3xx with Device Tree" 23 bool "ST SPEAr3xx with Device Tree"
13 select ARM_VIC 24 select ARM_VIC
@@ -27,6 +38,7 @@ config ARCH_SPEAR6XX
27endchoice 38endchoice
28 39
29# Adding SPEAr machine specific configuration files 40# Adding SPEAr machine specific configuration files
41source "arch/arm/mach-spear13xx/Kconfig"
30source "arch/arm/mach-spear3xx/Kconfig" 42source "arch/arm/mach-spear3xx/Kconfig"
31source "arch/arm/mach-spear6xx/Kconfig" 43source "arch/arm/mach-spear6xx/Kconfig"
32 44
diff --git a/arch/arm/plat-spear/Makefile b/arch/arm/plat-spear/Makefile
index 7744802c83e7..2607bd05c525 100644
--- a/arch/arm/plat-spear/Makefile
+++ b/arch/arm/plat-spear/Makefile
@@ -3,6 +3,7 @@
3# 3#
4 4
5# Common support 5# Common support
6obj-y := clock.o restart.o time.o pl080.o 6obj-y := restart.o time.o
7 7
8obj-$(CONFIG_ARCH_SPEAR3XX) += shirq.o 8obj-$(CONFIG_ARCH_SPEAR3XX) += pl080.o shirq.o
9obj-$(CONFIG_ARCH_SPEAR6XX) += pl080.o
diff --git a/arch/arm/plat-spear/clock.c b/arch/arm/plat-spear/clock.c
deleted file mode 100644
index 67dd00381ea6..000000000000
--- a/arch/arm/plat-spear/clock.c
+++ /dev/null
@@ -1,1005 +0,0 @@
1/*
2 * arch/arm/plat-spear/clock.c
3 *
4 * Clock framework for SPEAr platform
5 *
6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#include <linux/bug.h>
15#include <linux/clk.h>
16#include <linux/debugfs.h>
17#include <linux/err.h>
18#include <linux/io.h>
19#include <linux/list.h>
20#include <linux/module.h>
21#include <linux/spinlock.h>
22#include <plat/clock.h>
23
24static DEFINE_SPINLOCK(clocks_lock);
25static LIST_HEAD(root_clks);
26#ifdef CONFIG_DEBUG_FS
27static LIST_HEAD(clocks);
28#endif
29
30static void propagate_rate(struct clk *, int on_init);
31#ifdef CONFIG_DEBUG_FS
32static int clk_debugfs_reparent(struct clk *);
33#endif
34
35static int generic_clk_enable(struct clk *clk)
36{
37 unsigned int val;
38
39 if (!clk->en_reg)
40 return -EFAULT;
41
42 val = readl(clk->en_reg);
43 if (unlikely(clk->flags & RESET_TO_ENABLE))
44 val &= ~(1 << clk->en_reg_bit);
45 else
46 val |= 1 << clk->en_reg_bit;
47
48 writel(val, clk->en_reg);
49
50 return 0;
51}
52
53static void generic_clk_disable(struct clk *clk)
54{
55 unsigned int val;
56
57 if (!clk->en_reg)
58 return;
59
60 val = readl(clk->en_reg);
61 if (unlikely(clk->flags & RESET_TO_ENABLE))
62 val |= 1 << clk->en_reg_bit;
63 else
64 val &= ~(1 << clk->en_reg_bit);
65
66 writel(val, clk->en_reg);
67}
68
69/* generic clk ops */
70static struct clkops generic_clkops = {
71 .enable = generic_clk_enable,
72 .disable = generic_clk_disable,
73};
74
75/* returns current programmed clocks clock info structure */
76static struct pclk_info *pclk_info_get(struct clk *clk)
77{
78 unsigned int val, i;
79 struct pclk_info *info = NULL;
80
81 val = (readl(clk->pclk_sel->pclk_sel_reg) >> clk->pclk_sel_shift)
82 & clk->pclk_sel->pclk_sel_mask;
83
84 for (i = 0; i < clk->pclk_sel->pclk_count; i++) {
85 if (clk->pclk_sel->pclk_info[i].pclk_val == val)
86 info = &clk->pclk_sel->pclk_info[i];
87 }
88
89 return info;
90}
91
92/*
93 * Set Update pclk, and pclk_info of clk and add clock sibling node to current
94 * parents children list
95 */
96static void clk_reparent(struct clk *clk, struct pclk_info *pclk_info)
97{
98 unsigned long flags;
99
100 spin_lock_irqsave(&clocks_lock, flags);
101 list_del(&clk->sibling);
102 list_add(&clk->sibling, &pclk_info->pclk->children);
103
104 clk->pclk = pclk_info->pclk;
105 spin_unlock_irqrestore(&clocks_lock, flags);
106
107#ifdef CONFIG_DEBUG_FS
108 clk_debugfs_reparent(clk);
109#endif
110}
111
112static void do_clk_disable(struct clk *clk)
113{
114 if (!clk)
115 return;
116
117 if (!clk->usage_count) {
118 WARN_ON(1);
119 return;
120 }
121
122 clk->usage_count--;
123
124 if (clk->usage_count == 0) {
125 /*
126 * Surely, there are no active childrens or direct users
127 * of this clock
128 */
129 if (clk->pclk)
130 do_clk_disable(clk->pclk);
131
132 if (clk->ops && clk->ops->disable)
133 clk->ops->disable(clk);
134 }
135}
136
137static int do_clk_enable(struct clk *clk)
138{
139 int ret = 0;
140
141 if (!clk)
142 return -EFAULT;
143
144 if (clk->usage_count == 0) {
145 if (clk->pclk) {
146 ret = do_clk_enable(clk->pclk);
147 if (ret)
148 goto err;
149 }
150 if (clk->ops && clk->ops->enable) {
151 ret = clk->ops->enable(clk);
152 if (ret) {
153 if (clk->pclk)
154 do_clk_disable(clk->pclk);
155 goto err;
156 }
157 }
158 /*
159 * Since the clock is going to be used for the first
160 * time please reclac
161 */
162 if (clk->recalc) {
163 ret = clk->recalc(clk);
164 if (ret)
165 goto err;
166 }
167 }
168 clk->usage_count++;
169err:
170 return ret;
171}
172
173/*
174 * clk_enable - inform the system when the clock source should be running.
175 * @clk: clock source
176 *
177 * If the clock can not be enabled/disabled, this should return success.
178 *
179 * Returns success (0) or negative errno.
180 */
181int clk_enable(struct clk *clk)
182{
183 unsigned long flags;
184 int ret = 0;
185
186 spin_lock_irqsave(&clocks_lock, flags);
187 ret = do_clk_enable(clk);
188 spin_unlock_irqrestore(&clocks_lock, flags);
189 return ret;
190}
191EXPORT_SYMBOL(clk_enable);
192
193/*
194 * clk_disable - inform the system when the clock source is no longer required.
195 * @clk: clock source
196 *
197 * Inform the system that a clock source is no longer required by
198 * a driver and may be shut down.
199 *
200 * Implementation detail: if the clock source is shared between
201 * multiple drivers, clk_enable() calls must be balanced by the
202 * same number of clk_disable() calls for the clock source to be
203 * disabled.
204 */
205void clk_disable(struct clk *clk)
206{
207 unsigned long flags;
208
209 spin_lock_irqsave(&clocks_lock, flags);
210 do_clk_disable(clk);
211 spin_unlock_irqrestore(&clocks_lock, flags);
212}
213EXPORT_SYMBOL(clk_disable);
214
215/**
216 * clk_get_rate - obtain the current clock rate (in Hz) for a clock source.
217 * This is only valid once the clock source has been enabled.
218 * @clk: clock source
219 */
220unsigned long clk_get_rate(struct clk *clk)
221{
222 unsigned long flags, rate;
223
224 spin_lock_irqsave(&clocks_lock, flags);
225 rate = clk->rate;
226 spin_unlock_irqrestore(&clocks_lock, flags);
227
228 return rate;
229}
230EXPORT_SYMBOL(clk_get_rate);
231
232/**
233 * clk_set_parent - set the parent clock source for this clock
234 * @clk: clock source
235 * @parent: parent clock source
236 *
237 * Returns success (0) or negative errno.
238 */
239int clk_set_parent(struct clk *clk, struct clk *parent)
240{
241 int i, found = 0, val = 0;
242 unsigned long flags;
243
244 if (!clk || !parent)
245 return -EFAULT;
246 if (clk->pclk == parent)
247 return 0;
248 if (!clk->pclk_sel)
249 return -EPERM;
250
251 /* check if requested parent is in clk parent list */
252 for (i = 0; i < clk->pclk_sel->pclk_count; i++) {
253 if (clk->pclk_sel->pclk_info[i].pclk == parent) {
254 found = 1;
255 break;
256 }
257 }
258
259 if (!found)
260 return -EINVAL;
261
262 spin_lock_irqsave(&clocks_lock, flags);
263 /* reflect parent change in hardware */
264 val = readl(clk->pclk_sel->pclk_sel_reg);
265 val &= ~(clk->pclk_sel->pclk_sel_mask << clk->pclk_sel_shift);
266 val |= clk->pclk_sel->pclk_info[i].pclk_val << clk->pclk_sel_shift;
267 writel(val, clk->pclk_sel->pclk_sel_reg);
268 spin_unlock_irqrestore(&clocks_lock, flags);
269
270 /* reflect parent change in software */
271 clk_reparent(clk, &clk->pclk_sel->pclk_info[i]);
272
273 propagate_rate(clk, 0);
274 return 0;
275}
276EXPORT_SYMBOL(clk_set_parent);
277
278/**
279 * clk_set_rate - set the clock rate for a clock source
280 * @clk: clock source
281 * @rate: desired clock rate in Hz
282 *
283 * Returns success (0) or negative errno.
284 */
285int clk_set_rate(struct clk *clk, unsigned long rate)
286{
287 unsigned long flags;
288 int ret = -EINVAL;
289
290 if (!clk || !rate)
291 return -EFAULT;
292
293 if (clk->set_rate) {
294 spin_lock_irqsave(&clocks_lock, flags);
295 ret = clk->set_rate(clk, rate);
296 if (!ret)
297 /* if successful -> propagate */
298 propagate_rate(clk, 0);
299 spin_unlock_irqrestore(&clocks_lock, flags);
300 } else if (clk->pclk) {
301 u32 mult = clk->div_factor ? clk->div_factor : 1;
302 ret = clk_set_rate(clk->pclk, mult * rate);
303 }
304
305 return ret;
306}
307EXPORT_SYMBOL(clk_set_rate);
308
/*
 * clk_register - register a clock with the platform clock framework
 * @cl: clock lookup entry wrapping the clock to register
 *
 * Links the clock into the clock tree (as a root clock, as the child of
 * its fixed parent, or under whichever parent is currently selected in
 * hardware for multi-parent clocks), then hands the lookup entry to the
 * common clkdev layer.
 */
void clk_register(struct clk_lookup *cl)
{
	struct clk *clk;
	unsigned long flags;

	if (!cl || !cl->clk)
		return;
	clk = cl->clk;

	spin_lock_irqsave(&clocks_lock, flags);

	INIT_LIST_HEAD(&clk->children);
	/* always-on clocks get no enable/disable ops at all */
	if (clk->flags & ALWAYS_ENABLED)
		clk->ops = NULL;
	else if (!clk->ops)
		clk->ops = &generic_clkops;

	/* root clock don't have any parents */
	if (!clk->pclk && !clk->pclk_sel) {
		list_add(&clk->sibling, &root_clks);
	} else if (clk->pclk && !clk->pclk_sel) {
		/* add clocks with only one parent to parent's children list */
		list_add(&clk->sibling, &clk->pclk->children);
	} else {
		/* clocks with more than one parent */
		struct pclk_info *pclk_info;

		/* resolve which parent is currently selected in hardware */
		pclk_info = pclk_info_get(clk);
		if (!pclk_info) {
			pr_err("CLKDEV: invalid pclk info of clk with"
					" %s dev_id and %s con_id\n",
					cl->dev_id, cl->con_id);
		} else {
			clk->pclk = pclk_info->pclk;
			list_add(&clk->sibling, &pclk_info->pclk->children);
		}
	}

	spin_unlock_irqrestore(&clocks_lock, flags);

	/* debugfs specific */
#ifdef CONFIG_DEBUG_FS
	list_add(&clk->node, &clocks);
	clk->cl = cl;
#endif

	/* add clock to arm clockdev framework */
	clkdev_add(cl);
}
359
/**
 * propagate_rate - recalculate and propagate all clocks to children
 * @pclk: parent clock required to be propagated
 * @on_init: flag for enabling clocks which are ENABLED_ON_INIT.
 *
 * Recalculates all children clocks, recursing depth-first through the
 * whole subtree below @pclk.
 */
void propagate_rate(struct clk *pclk, int on_init)
{
	struct clk *clk, *_temp;
	int ret = 0;

	list_for_each_entry_safe(clk, _temp, &pclk->children, sibling) {
		if (clk->recalc) {
			ret = clk->recalc(clk);
			/*
			 * recalc will return error if clk out is not programmed
			 * In this case configure default rate.
			 */
			if (ret && clk->set_rate)
				clk->set_rate(clk, 0);
		}
		/* recurse into this child's own children */
		propagate_rate(clk, on_init);

		if (!on_init)
			continue;

		/* Enable clks enabled on init, in software view */
		if (clk->flags & ENABLED_ON_INIT)
			do_clk_enable(clk);
	}
}
392
/**
 * round_rate_index - return closest programmable rate index in rate_config tbl
 * @clk: ptr to clock structure
 * @drate: desired rate
 * @rate: final rate will be returned in this variable only.
 *
 * Finds index in rate_config for highest clk rate which is less than
 * requested rate. If there is no clk rate lesser than requested rate then
 * -EINVAL is returned. This routine assumes that rate_config is written
 * in incrementing order of clk rates.
 * If drate passed is zero then default rate is programmed.
 * NOTE(review): despite the sentence above, a zero drate currently
 * returns -EINVAL here - confirm which behavior is intended.
 */
static int
round_rate_index(struct clk *clk, unsigned long drate, unsigned long *rate)
{
	unsigned long tmp = 0, prev_rate = 0;
	int index;

	if (!clk->calc_rate)
		return -EFAULT;

	if (!drate)
		return -EINVAL;

	/*
	 * This loops ends on two conditions:
	 * - as soon as clk is found with rate greater than requested rate.
	 * - if all clks in rate_config are smaller than requested rate.
	 */
	for (index = 0; index < clk->rate_config.count; index++) {
		prev_rate = tmp;
		tmp = clk->calc_rate(clk, index);
		if (drate < tmp) {
			/* step back to the last rate <= drate */
			index--;
			break;
		}
	}
	/* return if can't find suitable clock */
	if (index < 0) {
		/* even the smallest table rate exceeds drate */
		index = -EINVAL;
		*rate = 0;
	} else if (index == clk->rate_config.count) {
		/* program with highest clk rate possible */
		index = clk->rate_config.count - 1;
		*rate = tmp;
	} else
		/* prev_rate holds calc_rate() for the returned index */
		*rate = prev_rate;

	return index;
}
443
444/**
445 * clk_round_rate - adjust a rate to the exact rate a clock can provide
446 * @clk: clock source
447 * @rate: desired clock rate in Hz
448 *
449 * Returns rounded clock rate in Hz, or negative errno.
450 */
451long clk_round_rate(struct clk *clk, unsigned long drate)
452{
453 long rate = 0;
454 int index;
455
456 /*
457 * propagate call to parent who supports calc_rate. Similar approach is
458 * used in clk_set_rate.
459 */
460 if (!clk->calc_rate) {
461 u32 mult;
462 if (!clk->pclk)
463 return clk->rate;
464
465 mult = clk->div_factor ? clk->div_factor : 1;
466 return clk_round_rate(clk->pclk, mult * drate) / mult;
467 }
468
469 index = round_rate_index(clk, drate, &rate);
470 if (index >= 0)
471 return rate;
472 else
473 return index;
474}
475EXPORT_SYMBOL(clk_round_rate);
476
/* All functions below are called with clocks_lock held */
478
479/*
480 * Calculates pll clk rate for specific value of mode, m, n and p
481 *
482 * In normal mode
483 * rate = (2 * M[15:8] * Fin)/(N * 2^P)
484 *
485 * In Dithered mode
486 * rate = (2 * M[15:0] * Fin)/(256 * N * 2^P)
487 */
488unsigned long pll_calc_rate(struct clk *clk, int index)
489{
490 unsigned long rate = clk->pclk->rate;
491 struct pll_rate_tbl *tbls = clk->rate_config.tbls;
492 unsigned int mode;
493
494 mode = tbls[index].mode ? 256 : 1;
495 return (((2 * rate / 10000) * tbls[index].m) /
496 (mode * tbls[index].n * (1 << tbls[index].p))) * 10000;
497}
498
/*
 * calculates current programmed rate of pll1
 *
 * In normal mode
 * rate = (2 * M[15:8] * Fin)/(N * 2^P)
 *
 * In Dithered mode
 * rate = (2 * M[15:0] * Fin)/(256 * N * 2^P)
 */
int pll_clk_recalc(struct clk *clk)
{
	struct pll_clk_config *config = clk->private_data;
	unsigned int num = 2, den = 0, val, mode = 0;

	mode = (readl(config->mode_reg) >> config->masks->mode_shift) &
		config->masks->mode_mask;

	val = readl(config->cfg_reg);
	/* calculate denominator: N * 2^P */
	den = (val >> config->masks->div_p_shift) & config->masks->div_p_mask;
	den = 1 << den;
	den *= (val >> config->masks->div_n_shift) & config->masks->div_n_mask;

	/* calculate numerator & denominator */
	if (!mode) {
		/* Normal mode: feedback divider M from its normal field */
		num *= (val >> config->masks->norm_fdbk_m_shift) &
			config->masks->norm_fdbk_m_mask;
	} else {
		/* Dithered mode: wider M field, extra factor of 256 */
		num *= (val >> config->masks->dith_fdbk_m_shift) &
			config->masks->dith_fdbk_m_mask;
		den *= 256;
	}

	/* guard against an unprogrammed (zero) divider */
	if (!den)
		return -EINVAL;

	/* compute in units of 10 kHz to avoid 32-bit overflow */
	clk->rate = (((clk->pclk->rate/10000) * num) / den) * 10000;
	return 0;
}
540
/*
 * Configures new clock rate of pll
 */
int pll_clk_set_rate(struct clk *clk, unsigned long desired_rate)
{
	struct pll_rate_tbl *tbls = clk->rate_config.tbls;
	struct pll_clk_config *config = clk->private_data;
	unsigned long val, rate;
	int i;

	/* pick the highest supported rate not above desired_rate */
	i = round_rate_index(clk, desired_rate, &rate);
	if (i < 0)
		return i;

	/* program normal/dithered mode select */
	val = readl(config->mode_reg) &
		~(config->masks->mode_mask << config->masks->mode_shift);
	val |= (tbls[i].mode & config->masks->mode_mask) <<
		config->masks->mode_shift;
	writel(val, config->mode_reg);

	/* read-modify-write the P, N and M divider fields */
	val = readl(config->cfg_reg) &
		~(config->masks->div_p_mask << config->masks->div_p_shift);
	val |= (tbls[i].p & config->masks->div_p_mask) <<
		config->masks->div_p_shift;
	val &= ~(config->masks->div_n_mask << config->masks->div_n_shift);
	val |= (tbls[i].n & config->masks->div_n_mask) <<
		config->masks->div_n_shift;
	val &= ~(config->masks->dith_fdbk_m_mask <<
			config->masks->dith_fdbk_m_shift);
	/* the feedback divider M lives in a different field per mode */
	if (tbls[i].mode)
		val |= (tbls[i].m & config->masks->dith_fdbk_m_mask) <<
			config->masks->dith_fdbk_m_shift;
	else
		val |= (tbls[i].m & config->masks->norm_fdbk_m_mask) <<
			config->masks->norm_fdbk_m_shift;

	writel(val, config->cfg_reg);

	/* cache the rate actually programmed */
	clk->rate = rate;

	return 0;
}
583
584/*
585 * Calculates ahb, apb clk rate for specific value of div
586 */
587unsigned long bus_calc_rate(struct clk *clk, int index)
588{
589 unsigned long rate = clk->pclk->rate;
590 struct bus_rate_tbl *tbls = clk->rate_config.tbls;
591
592 return rate / (tbls[index].div + 1);
593}
594
595/* calculates current programmed rate of ahb or apb bus */
596int bus_clk_recalc(struct clk *clk)
597{
598 struct bus_clk_config *config = clk->private_data;
599 unsigned int div;
600
601 div = ((readl(config->reg) >> config->masks->shift) &
602 config->masks->mask) + 1;
603
604 if (!div)
605 return -EINVAL;
606
607 clk->rate = (unsigned long)clk->pclk->rate / div;
608 return 0;
609}
610
611/* Configures new clock rate of AHB OR APB bus */
612int bus_clk_set_rate(struct clk *clk, unsigned long desired_rate)
613{
614 struct bus_rate_tbl *tbls = clk->rate_config.tbls;
615 struct bus_clk_config *config = clk->private_data;
616 unsigned long val, rate;
617 int i;
618
619 i = round_rate_index(clk, desired_rate, &rate);
620 if (i < 0)
621 return i;
622
623 val = readl(config->reg) &
624 ~(config->masks->mask << config->masks->shift);
625 val |= (tbls[i].div & config->masks->mask) << config->masks->shift;
626 writel(val, config->reg);
627
628 clk->rate = rate;
629
630 return 0;
631}
632
633/*
634 * gives rate for different values of eq, x and y
635 *
636 * Fout from synthesizer can be given from two equations:
637 * Fout1 = (Fin * X/Y)/2 EQ1
638 * Fout2 = Fin * X/Y EQ2
639 */
640unsigned long aux_calc_rate(struct clk *clk, int index)
641{
642 unsigned long rate = clk->pclk->rate;
643 struct aux_rate_tbl *tbls = clk->rate_config.tbls;
644 u8 eq = tbls[index].eq ? 1 : 2;
645
646 return (((rate/10000) * tbls[index].xscale) /
647 (tbls[index].yscale * eq)) * 10000;
648}
649
/*
 * calculates current programmed rate of auxiliary synthesizers
 * used by: UART, FIRDA
 *
 * Fout from synthesizer can be given from two equations:
 * Fout1 = (Fin * X/Y)/2
 * Fout2 = Fin * X/Y
 *
 * Selection of eqn 1 or 2 is programmed in register
 */
int aux_clk_recalc(struct clk *clk)
{
	struct aux_clk_config *config = clk->private_data;
	unsigned int num = 1, den = 1, val, eqn;

	val = readl(config->synth_reg);

	/* equation 1 halves the output: fold the /2 into the denominator */
	eqn = (val >> config->masks->eq_sel_shift) &
		config->masks->eq_sel_mask;
	if (eqn == config->masks->eq1_mask)
		den *= 2;

	/* calculate numerator */
	num = (val >> config->masks->xscale_sel_shift) &
		config->masks->xscale_sel_mask;

	/* calculate denominator */
	den *= (val >> config->masks->yscale_sel_shift) &
		config->masks->yscale_sel_mask;

	/* an unprogrammed Y scale would divide by zero below */
	if (!den)
		return -EINVAL;

	/* compute in units of 10 kHz to avoid 32-bit overflow */
	clk->rate = (((clk->pclk->rate/10000) * num) / den) * 10000;
	return 0;
}
686
/* Configures new clock rate of auxiliary synthesizers used by: UART, FIRDA*/
int aux_clk_set_rate(struct clk *clk, unsigned long desired_rate)
{
	struct aux_rate_tbl *tbls = clk->rate_config.tbls;
	struct aux_clk_config *config = clk->private_data;
	unsigned long val, rate;
	int i;

	/* pick the highest supported rate not above desired_rate */
	i = round_rate_index(clk, desired_rate, &rate);
	if (i < 0)
		return i;

	/* read-modify-write equation select, X scale and Y scale fields */
	val = readl(config->synth_reg) &
		~(config->masks->eq_sel_mask << config->masks->eq_sel_shift);
	val |= (tbls[i].eq & config->masks->eq_sel_mask) <<
		config->masks->eq_sel_shift;
	val &= ~(config->masks->xscale_sel_mask <<
			config->masks->xscale_sel_shift);
	val |= (tbls[i].xscale & config->masks->xscale_sel_mask) <<
		config->masks->xscale_sel_shift;
	val &= ~(config->masks->yscale_sel_mask <<
			config->masks->yscale_sel_shift);
	val |= (tbls[i].yscale & config->masks->yscale_sel_mask) <<
		config->masks->yscale_sel_shift;
	writel(val, config->synth_reg);

	/* cache the rate actually programmed */
	clk->rate = rate;

	return 0;
}
717
718/*
719 * Calculates gpt clk rate for different values of mscale and nscale
720 *
721 * Fout= Fin/((2 ^ (N+1)) * (M+1))
722 */
723unsigned long gpt_calc_rate(struct clk *clk, int index)
724{
725 unsigned long rate = clk->pclk->rate;
726 struct gpt_rate_tbl *tbls = clk->rate_config.tbls;
727
728 return rate / ((1 << (tbls[index].nscale + 1)) *
729 (tbls[index].mscale + 1));
730}
731
/*
 * calculates current programmed rate of gpt synthesizers
 * Fout from synthesizer can be given from below equations:
 * Fout= Fin/((2 ^ (N+1)) * (M+1))
 */
int gpt_clk_recalc(struct clk *clk)
{
	struct gpt_clk_config *config = clk->private_data;
	unsigned int div = 1, val;

	val = readl(config->synth_reg);
	/* div = (M + 1) ... */
	div += (val >> config->masks->mscale_sel_shift) &
		config->masks->mscale_sel_mask;
	/* ... scaled by 2^(N + 1) */
	div *= 1 << (((val >> config->masks->nscale_sel_shift) &
				config->masks->nscale_sel_mask) + 1);

	/* guard the division below */
	if (!div)
		return -EINVAL;

	clk->rate = (unsigned long)clk->pclk->rate / div;
	return 0;
}
754
/*
 * Configures new clock rate of the gpt synthesizer.
 * (The previous comment read "gptiliary synthesizers used by: UART,
 * FIRDA" - a copy/paste slip from aux_clk_set_rate.)
 */
int gpt_clk_set_rate(struct clk *clk, unsigned long desired_rate)
{
	struct gpt_rate_tbl *tbls = clk->rate_config.tbls;
	struct gpt_clk_config *config = clk->private_data;
	unsigned long val, rate;
	int i;

	/* pick the highest supported rate not above desired_rate */
	i = round_rate_index(clk, desired_rate, &rate);
	if (i < 0)
		return i;

	/* read-modify-write the M and N scale fields */
	val = readl(config->synth_reg) & ~(config->masks->mscale_sel_mask <<
			config->masks->mscale_sel_shift);
	val |= (tbls[i].mscale & config->masks->mscale_sel_mask) <<
		config->masks->mscale_sel_shift;
	val &= ~(config->masks->nscale_sel_mask <<
			config->masks->nscale_sel_shift);
	val |= (tbls[i].nscale & config->masks->nscale_sel_mask) <<
		config->masks->nscale_sel_shift;
	writel(val, config->synth_reg);

	/* cache the rate actually programmed */
	clk->rate = rate;

	return 0;
}
781
/*
 * Calculates clcd clk rate for different values of div
 *
 * Fout from synthesizer can be given from below equation:
 * Fout= Fin/2*div (division factor)
 * div is 17 bits:-
 * 0-13 (fractional part)
 * 14-16 (integer part)
 * To calculate Fout, Fin is scaled up, divided by the complete div
 * (including the fractional part) and then scaled back down.
 * NOTE(review): the field layout above implies a 14-bit fractional
 * shift, but the code below shifts by 12 - confirm which matches the
 * hardware.
 */
unsigned long clcd_calc_rate(struct clk *clk, int index)
{
	unsigned long rate = clk->pclk->rate;
	struct clcd_rate_tbl *tbls = clk->rate_config.tbls;

	rate /= 1000;			/* work in kHz for headroom */
	rate <<= 12;			/* fixed-point scale up */
	rate /= (2 * tbls[index].div);
	rate >>= 12;			/* scale back down */
	rate *= 1000;			/* back to Hz */

	return rate;
}
807
/*
 * calculates current programmed rate of clcd synthesizer
 * Fout from synthesizer can be given from below equation:
 * Fout= Fin/2*div (division factor)
 * div is 17 bits:-
 * 0-13 (fractional part)
 * 14-16 (integer part)
 * NOTE(review): the field layout above implies a 14-bit fractional
 * shift, but the code below shifts by 12 - confirm which matches the
 * hardware.
 */
int clcd_clk_recalc(struct clk *clk)
{
	struct clcd_clk_config *config = clk->private_data;
	unsigned int div = 1;
	unsigned long prate;
	unsigned int val;

	val = readl(config->synth_reg);
	div = (val >> config->masks->div_factor_shift) &
		config->masks->div_factor_mask;

	/* a zero divisor means the synthesizer is not programmed */
	if (!div)
		return -EINVAL;

	prate = clk->pclk->rate / 1000; /* first level division, make it KHz */

	/* fixed-point divide by (2 * div), then convert back to Hz */
	clk->rate = (((unsigned long)prate << 12) / (2 * div)) >> 12;
	clk->rate *= 1000;
	return 0;
}
839
/*
 * Configures new clock rate of the clcd synthesizer.
 * (The previous comment read "auxiliary synthesizers used by: UART,
 * FIRDA" - a copy/paste slip from aux_clk_set_rate.)
 */
int clcd_clk_set_rate(struct clk *clk, unsigned long desired_rate)
{
	struct clcd_rate_tbl *tbls = clk->rate_config.tbls;
	struct clcd_clk_config *config = clk->private_data;
	unsigned long val, rate;
	int i;

	/* pick the highest supported rate not above desired_rate */
	i = round_rate_index(clk, desired_rate, &rate);
	if (i < 0)
		return i;

	/* read-modify-write the division factor field */
	val = readl(config->synth_reg) & ~(config->masks->div_factor_mask <<
			config->masks->div_factor_shift);
	val |= (tbls[i].div & config->masks->div_factor_mask) <<
		config->masks->div_factor_shift;
	writel(val, config->synth_reg);

	/* cache the rate actually programmed */
	clk->rate = rate;

	return 0;
}
862
863/*
864 * Used for clocks that always have value as the parent clock divided by a
865 * fixed divisor
866 */
867int follow_parent(struct clk *clk)
868{
869 unsigned int div_factor = (clk->div_factor < 1) ? 1 : clk->div_factor;
870
871 clk->rate = clk->pclk->rate/div_factor;
872 return 0;
873}
874
/**
 * recalc_root_clocks - recalculate and propagate all root clocks
 *
 * Recalculates all root clocks (clocks with no parent), which if the
 * clock's .recalc is set correctly, should also propagate their rates.
 */
void recalc_root_clocks(void)
{
	struct clk *pclk;
	unsigned long flags;
	int ret = 0;

	spin_lock_irqsave(&clocks_lock, flags);
	list_for_each_entry(pclk, &root_clks, sibling) {
		if (pclk->recalc) {
			ret = pclk->recalc(pclk);
			/*
			 * recalc will return error if clk out is not programmed
			 * In this case configure default clock.
			 */
			if (ret && pclk->set_rate)
				pclk->set_rate(pclk, 0);
		}
		/* recurse into the subtree; on_init=1 also enables
		 * ENABLED_ON_INIT descendants */
		propagate_rate(pclk, 1);
		/* Enable clks enabled on init, in software view */
		if (pclk->flags & ENABLED_ON_INIT)
			do_clk_enable(pclk);
	}
	spin_unlock_irqrestore(&clocks_lock, flags);
}
905
/* Platform clock framework init: recalculate all root clocks (and,
 * through propagate_rate(), the whole tree below them). */
void __init clk_init(void)
{
	recalc_root_clocks();
}
910
911#ifdef CONFIG_DEBUG_FS
912/*
913 * debugfs support to trace clock tree hierarchy and attributes
914 */
915static struct dentry *clk_debugfs_root;
916static int clk_debugfs_register_one(struct clk *c)
917{
918 int err;
919 struct dentry *d;
920 struct clk *pa = c->pclk;
921 char s[255];
922 char *p = s;
923
924 if (c) {
925 if (c->cl->con_id)
926 p += sprintf(p, "%s", c->cl->con_id);
927 if (c->cl->dev_id)
928 p += sprintf(p, "%s", c->cl->dev_id);
929 }
930 d = debugfs_create_dir(s, pa ? pa->dent : clk_debugfs_root);
931 if (!d)
932 return -ENOMEM;
933 c->dent = d;
934
935 d = debugfs_create_u32("usage_count", S_IRUGO, c->dent,
936 (u32 *)&c->usage_count);
937 if (!d) {
938 err = -ENOMEM;
939 goto err_out;
940 }
941 d = debugfs_create_u32("rate", S_IRUGO, c->dent, (u32 *)&c->rate);
942 if (!d) {
943 err = -ENOMEM;
944 goto err_out;
945 }
946 d = debugfs_create_x32("flags", S_IRUGO, c->dent, (u32 *)&c->flags);
947 if (!d) {
948 err = -ENOMEM;
949 goto err_out;
950 }
951 return 0;
952
953err_out:
954 debugfs_remove_recursive(c->dent);
955 return err;
956}
957
958static int clk_debugfs_register(struct clk *c)
959{
960 int err;
961 struct clk *pa = c->pclk;
962
963 if (pa && !pa->dent) {
964 err = clk_debugfs_register(pa);
965 if (err)
966 return err;
967 }
968
969 if (!c->dent) {
970 err = clk_debugfs_register_one(c);
971 if (err)
972 return err;
973 }
974 return 0;
975}
976
/* Create the debugfs "clock" tree for every clock on the global list. */
static int __init clk_debugfs_init(void)
{
	struct clk *c;
	struct dentry *d;
	int err;

	d = debugfs_create_dir("clock", NULL);
	if (!d)
		return -ENOMEM;
	clk_debugfs_root = d;

	list_for_each_entry(c, &clocks, node) {
		err = clk_debugfs_register(c);
		if (err)
			goto err_out;
	}
	return 0;
err_out:
	/* tear down everything created so far */
	debugfs_remove_recursive(clk_debugfs_root);
	return err;
}
late_initcall(clk_debugfs_init);
999
/* Recreate a clock's debugfs entry after its parent has changed. */
static int clk_debugfs_reparent(struct clk *c)
{
	/* drop the old entry, then recreate it under the new parent */
	debugfs_remove(c->dent);
	return clk_debugfs_register_one(c);
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/arch/arm/plat-spear/include/plat/clock.h b/arch/arm/plat-spear/include/plat/clock.h
deleted file mode 100644
index 0062bafef12d..000000000000
--- a/arch/arm/plat-spear/include/plat/clock.h
+++ /dev/null
@@ -1,249 +0,0 @@
1/*
2 * arch/arm/plat-spear/include/plat/clock.h
3 *
4 * Clock framework definitions for SPEAr platform
5 *
6 * Copyright (C) 2009 ST Microelectronics
7 * Viresh Kumar<viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#ifndef __PLAT_CLOCK_H
15#define __PLAT_CLOCK_H
16
17#include <linux/list.h>
18#include <linux/clkdev.h>
19#include <linux/types.h>
20
21/* clk structure flags */
22#define ALWAYS_ENABLED (1 << 0) /* clock always enabled */
23#define RESET_TO_ENABLE (1 << 1) /* reset register bit to enable clk */
24#define ENABLED_ON_INIT (1 << 2) /* clocks enabled at init */
25
26/**
27 * struct clkops - clock operations
28 * @enable: pointer to clock enable function
29 * @disable: pointer to clock disable function
30 */
31struct clkops {
32 int (*enable) (struct clk *);
33 void (*disable) (struct clk *);
34};
35
36/**
37 * struct pclk_info - parents info
38 * @pclk: pointer to parent clk
39 * @pclk_val: value to be written for selecting this parent
40 */
41struct pclk_info {
42 struct clk *pclk;
43 u8 pclk_val;
44};
45
46/**
47 * struct pclk_sel - parents selection configuration
48 * @pclk_info: pointer to array of parent clock info
49 * @pclk_count: number of parents
50 * @pclk_sel_reg: register for selecting a parent
51 * @pclk_sel_mask: mask for selecting parent (can be used to clear bits also)
52 */
53struct pclk_sel {
54 struct pclk_info *pclk_info;
55 u8 pclk_count;
56 void __iomem *pclk_sel_reg;
57 unsigned int pclk_sel_mask;
58};
59
60/**
61 * struct rate_config - clk rate configurations
62 * @tbls: array of device specific clk rate tables, in ascending order of rates
63 * @count: size of tbls array
64 * @default_index: default setting when originally disabled
65 */
66struct rate_config {
67 void *tbls;
68 u8 count;
69 u8 default_index;
70};
71
72/**
73 * struct clk - clock structure
74 * @usage_count: num of users who enabled this clock
75 * @flags: flags for clock properties
76 * @rate: programmed clock rate in Hz
77 * @en_reg: clk enable/disable reg
78 * @en_reg_bit: clk enable/disable bit
79 * @ops: clk enable/disable ops - generic_clkops selected if NULL
80 * @recalc: pointer to clock rate recalculate function
81 * @set_rate: pointer to clock set rate function
82 * @calc_rate: pointer to clock get rate function for index
83 * @rate_config: rate configuration information, used by set_rate
84 * @div_factor: division factor to parent clock.
85 * @pclk: current parent clk
86 * @pclk_sel: pointer to parent selection structure
87 * @pclk_sel_shift: register shift for selecting parent of this clock
 * @children: list of children of this clock
89 * @sibling: node for list of clocks having same parents
90 * @private_data: clock specific private data
91 * @node: list to maintain clocks linearly
 * @cl: clock lookup associated with this clock
93 * @dent: object for debugfs
94 */
95struct clk {
96 unsigned int usage_count;
97 unsigned int flags;
98 unsigned long rate;
99 void __iomem *en_reg;
100 u8 en_reg_bit;
101 const struct clkops *ops;
102 int (*recalc) (struct clk *);
103 int (*set_rate) (struct clk *, unsigned long rate);
104 unsigned long (*calc_rate)(struct clk *, int index);
105 struct rate_config rate_config;
106 unsigned int div_factor;
107
108 struct clk *pclk;
109 struct pclk_sel *pclk_sel;
110 unsigned int pclk_sel_shift;
111
112 struct list_head children;
113 struct list_head sibling;
114 void *private_data;
115#ifdef CONFIG_DEBUG_FS
116 struct list_head node;
117 struct clk_lookup *cl;
118 struct dentry *dent;
119#endif
120};
121
122/* pll configuration structure */
123struct pll_clk_masks {
124 u32 mode_mask;
125 u32 mode_shift;
126
127 u32 norm_fdbk_m_mask;
128 u32 norm_fdbk_m_shift;
129 u32 dith_fdbk_m_mask;
130 u32 dith_fdbk_m_shift;
131 u32 div_p_mask;
132 u32 div_p_shift;
133 u32 div_n_mask;
134 u32 div_n_shift;
135};
136
137struct pll_clk_config {
138 void __iomem *mode_reg;
139 void __iomem *cfg_reg;
140 struct pll_clk_masks *masks;
141};
142
143/* pll clk rate config structure */
144struct pll_rate_tbl {
145 u8 mode;
146 u16 m;
147 u8 n;
148 u8 p;
149};
150
151/* ahb and apb bus configuration structure */
152struct bus_clk_masks {
153 u32 mask;
154 u32 shift;
155};
156
157struct bus_clk_config {
158 void __iomem *reg;
159 struct bus_clk_masks *masks;
160};
161
162/* ahb and apb clk bus rate config structure */
163struct bus_rate_tbl {
164 u8 div;
165};
166
167/* Aux clk configuration structure: applicable to UART and FIRDA */
168struct aux_clk_masks {
169 u32 eq_sel_mask;
170 u32 eq_sel_shift;
171 u32 eq1_mask;
172 u32 eq2_mask;
173 u32 xscale_sel_mask;
174 u32 xscale_sel_shift;
175 u32 yscale_sel_mask;
176 u32 yscale_sel_shift;
177};
178
179struct aux_clk_config {
180 void __iomem *synth_reg;
181 struct aux_clk_masks *masks;
182};
183
184/* aux clk rate config structure */
185struct aux_rate_tbl {
186 u16 xscale;
187 u16 yscale;
188 u8 eq;
189};
190
191/* GPT clk configuration structure */
192struct gpt_clk_masks {
193 u32 mscale_sel_mask;
194 u32 mscale_sel_shift;
195 u32 nscale_sel_mask;
196 u32 nscale_sel_shift;
197};
198
199struct gpt_clk_config {
200 void __iomem *synth_reg;
201 struct gpt_clk_masks *masks;
202};
203
204/* gpt clk rate config structure */
205struct gpt_rate_tbl {
206 u16 mscale;
207 u16 nscale;
208};
209
210/* clcd clk configuration structure */
211struct clcd_synth_masks {
212 u32 div_factor_mask;
213 u32 div_factor_shift;
214};
215
216struct clcd_clk_config {
217 void __iomem *synth_reg;
218 struct clcd_synth_masks *masks;
219};
220
221/* clcd clk rate config structure */
222struct clcd_rate_tbl {
223 u16 div;
224};
225
226/* platform specific clock functions */
227void __init clk_init(void);
228void clk_register(struct clk_lookup *cl);
229void recalc_root_clocks(void);
230
231/* clock recalc & set rate functions */
232int follow_parent(struct clk *clk);
233unsigned long pll_calc_rate(struct clk *clk, int index);
234int pll_clk_recalc(struct clk *clk);
235int pll_clk_set_rate(struct clk *clk, unsigned long desired_rate);
236unsigned long bus_calc_rate(struct clk *clk, int index);
237int bus_clk_recalc(struct clk *clk);
238int bus_clk_set_rate(struct clk *clk, unsigned long desired_rate);
239unsigned long gpt_calc_rate(struct clk *clk, int index);
240int gpt_clk_recalc(struct clk *clk);
241int gpt_clk_set_rate(struct clk *clk, unsigned long desired_rate);
242unsigned long aux_calc_rate(struct clk *clk, int index);
243int aux_clk_recalc(struct clk *clk);
244int aux_clk_set_rate(struct clk *clk, unsigned long desired_rate);
245unsigned long clcd_calc_rate(struct clk *clk, int index);
246int clcd_clk_recalc(struct clk *clk);
247int clcd_clk_set_rate(struct clk *clk, unsigned long desired_rate);
248
249#endif /* __PLAT_CLOCK_H */
diff --git a/arch/arm/plat-spear/restart.c b/arch/arm/plat-spear/restart.c
index 4471a232713a..ea0a61302b7e 100644
--- a/arch/arm/plat-spear/restart.c
+++ b/arch/arm/plat-spear/restart.c
@@ -16,6 +16,7 @@
16#include <mach/spear.h> 16#include <mach/spear.h>
17#include <mach/generic.h> 17#include <mach/generic.h>
18 18
19#define SPEAR13XX_SYS_SW_RES (VA_MISC_BASE + 0x204)
19void spear_restart(char mode, const char *cmd) 20void spear_restart(char mode, const char *cmd)
20{ 21{
21 if (mode == 's') { 22 if (mode == 's') {
@@ -23,6 +24,10 @@ void spear_restart(char mode, const char *cmd)
23 soft_restart(0); 24 soft_restart(0);
24 } else { 25 } else {
25 /* hardware reset, Use on-chip reset capability */ 26 /* hardware reset, Use on-chip reset capability */
27#ifdef CONFIG_ARCH_SPEAR13XX
28 writel_relaxed(0x01, SPEAR13XX_SYS_SW_RES);
29#else
26 sysctl_soft_reset((void __iomem *)VA_SPEAR_SYS_CTRL_BASE); 30 sysctl_soft_reset((void __iomem *)VA_SPEAR_SYS_CTRL_BASE);
31#endif
27 } 32 }
28} 33}
diff --git a/arch/arm/plat-spear/time.c b/arch/arm/plat-spear/time.c
index a3164d1647fd..03321af5de9f 100644
--- a/arch/arm/plat-spear/time.c
+++ b/arch/arm/plat-spear/time.c
@@ -18,6 +18,8 @@
18#include <linux/ioport.h> 18#include <linux/ioport.h>
19#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/kernel.h> 20#include <linux/kernel.h>
21#include <linux/of_irq.h>
22#include <linux/of_address.h>
21#include <linux/time.h> 23#include <linux/time.h>
22#include <linux/irq.h> 24#include <linux/irq.h>
23#include <asm/mach/time.h> 25#include <asm/mach/time.h>
@@ -197,19 +199,32 @@ static void __init spear_clockevent_init(int irq)
197 setup_irq(irq, &spear_timer_irq); 199 setup_irq(irq, &spear_timer_irq);
198} 200}
199 201
200void __init spear_setup_timer(resource_size_t base, int irq) 202const static struct of_device_id timer_of_match[] __initconst = {
203 { .compatible = "st,spear-timer", },
204 { },
205};
206
207void __init spear_setup_of_timer(void)
201{ 208{
202 int ret; 209 struct device_node *np;
210 int irq, ret;
211
212 np = of_find_matching_node(NULL, timer_of_match);
213 if (!np) {
214 pr_err("%s: No timer passed via DT\n", __func__);
215 return;
216 }
203 217
204 if (!request_mem_region(base, SZ_1K, "gpt0")) { 218 irq = irq_of_parse_and_map(np, 0);
205 pr_err("%s:cannot get IO addr\n", __func__); 219 if (!irq) {
220 pr_err("%s: No irq passed for timer via DT\n", __func__);
206 return; 221 return;
207 } 222 }
208 223
209 gpt_base = ioremap(base, SZ_1K); 224 gpt_base = of_iomap(np, 0);
210 if (!gpt_base) { 225 if (!gpt_base) {
211 pr_err("%s:ioremap failed for gpt\n", __func__); 226 pr_err("%s: of iomap failed\n", __func__);
212 goto err_mem; 227 return;
213 } 228 }
214 229
215 gpt_clk = clk_get_sys("gpt0", NULL); 230 gpt_clk = clk_get_sys("gpt0", NULL);
@@ -218,10 +233,10 @@ void __init spear_setup_timer(resource_size_t base, int irq)
218 goto err_iomap; 233 goto err_iomap;
219 } 234 }
220 235
221 ret = clk_enable(gpt_clk); 236 ret = clk_prepare_enable(gpt_clk);
222 if (ret < 0) { 237 if (ret < 0) {
223 pr_err("%s:couldn't enable gpt clock\n", __func__); 238 pr_err("%s:couldn't prepare-enable gpt clock\n", __func__);
224 goto err_clk; 239 goto err_prepare_enable_clk;
225 } 240 }
226 241
227 spear_clockevent_init(irq); 242 spear_clockevent_init(irq);
@@ -229,10 +244,8 @@ void __init spear_setup_timer(resource_size_t base, int irq)
229 244
230 return; 245 return;
231 246
232err_clk: 247err_prepare_enable_clk:
233 clk_put(gpt_clk); 248 clk_put(gpt_clk);
234err_iomap: 249err_iomap:
235 iounmap(gpt_base); 250 iounmap(gpt_base);
236err_mem:
237 release_mem_region(base, SZ_1K);
238} 251}
diff --git a/arch/avr32/include/asm/kvm_para.h b/arch/avr32/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/avr32/include/asm/kvm_para.h
@@ -0,0 +1 @@
#include <asm-generic/kvm_para.h>
diff --git a/arch/blackfin/include/asm/kvm_para.h b/arch/blackfin/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/blackfin/include/asm/kvm_para.h
@@ -0,0 +1 @@
#include <asm-generic/kvm_para.h>
diff --git a/arch/c6x/include/asm/kvm_para.h b/arch/c6x/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/c6x/include/asm/kvm_para.h
@@ -0,0 +1 @@
#include <asm-generic/kvm_para.h>
diff --git a/arch/cris/Kconfig b/arch/cris/Kconfig
index 22d34d64cc81..bb344650a14f 100644
--- a/arch/cris/Kconfig
+++ b/arch/cris/Kconfig
@@ -40,6 +40,7 @@ config CRIS
40 bool 40 bool
41 default y 41 default y
42 select HAVE_IDE 42 select HAVE_IDE
43 select GENERIC_ATOMIC64
43 select HAVE_GENERIC_HARDIRQS 44 select HAVE_GENERIC_HARDIRQS
44 select GENERIC_IRQ_SHOW 45 select GENERIC_IRQ_SHOW
45 select GENERIC_IOMAP 46 select GENERIC_IOMAP
diff --git a/arch/cris/arch-v10/drivers/ds1302.c b/arch/cris/arch-v10/drivers/ds1302.c
deleted file mode 100644
index 74f99c688c8d..000000000000
--- a/arch/cris/arch-v10/drivers/ds1302.c
+++ /dev/null
@@ -1,515 +0,0 @@
1/*!***************************************************************************
2*!
3*! FILE NAME : ds1302.c
4*!
5*! DESCRIPTION: Implements an interface for the DS1302 RTC through Etrax I/O
6*!
7*! Functions exported: ds1302_readreg, ds1302_writereg, ds1302_init
8*!
9*! ---------------------------------------------------------------------------
10*!
11*! (C) Copyright 1999-2007 Axis Communications AB, LUND, SWEDEN
12*!
13*!***************************************************************************/
14
15
16#include <linux/fs.h>
17#include <linux/init.h>
18#include <linux/mm.h>
19#include <linux/module.h>
20#include <linux/miscdevice.h>
21#include <linux/delay.h>
22#include <linux/mutex.h>
23#include <linux/bcd.h>
24#include <linux/capability.h>
25
26#include <asm/uaccess.h>
27#include <arch/svinto.h>
28#include <asm/io.h>
29#include <asm/rtc.h>
30#include <arch/io_interface_mux.h>
31
32#include "i2c.h"
33
34#define RTC_MAJOR_NR 121 /* local major, change later */
35
36static DEFINE_MUTEX(ds1302_mutex);
37static const char ds1302_name[] = "ds1302";
38
39/* The DS1302 might be connected to different bits on different products.
40 * It has three signals - SDA, SCL and RST. RST and SCL are always outputs,
41 * but SDA can have a selected direction.
42 * For now, only PORT_PB is hardcoded.
43 */
44
45/* The RST bit may be on either the Generic Port or Port PB. */
46#ifdef CONFIG_ETRAX_DS1302_RST_ON_GENERIC_PORT
47#define TK_RST_OUT(x) REG_SHADOW_SET(R_PORT_G_DATA, port_g_data_shadow, CONFIG_ETRAX_DS1302_RSTBIT, x)
48#define TK_RST_DIR(x)
49#else
50#define TK_RST_OUT(x) REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_DS1302_RSTBIT, x)
51#define TK_RST_DIR(x) REG_SHADOW_SET(R_PORT_PB_DIR, port_pb_dir_shadow, CONFIG_ETRAX_DS1302_RSTBIT, x)
52#endif
53
54
55#define TK_SDA_OUT(x) REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_DS1302_SDABIT, x)
56#define TK_SCL_OUT(x) REG_SHADOW_SET(R_PORT_PB_DATA, port_pb_data_shadow, CONFIG_ETRAX_DS1302_SCLBIT, x)
57
58#define TK_SDA_IN() ((*R_PORT_PB_READ >> CONFIG_ETRAX_DS1302_SDABIT) & 1)
59/* 1 is out, 0 is in */
60#define TK_SDA_DIR(x) REG_SHADOW_SET(R_PORT_PB_DIR, port_pb_dir_shadow, CONFIG_ETRAX_DS1302_SDABIT, x)
61#define TK_SCL_DIR(x) REG_SHADOW_SET(R_PORT_PB_DIR, port_pb_dir_shadow, CONFIG_ETRAX_DS1302_SCLBIT, x)
62
63
64/*
65 * The reason for tempudelay and not udelay is that loops_per_usec
66 * (used in udelay) is not set when functions here are called from time.c
67 */
68
69static void tempudelay(int usecs)
70{
71 volatile int loops;
72
73 for(loops = usecs * 12; loops > 0; loops--)
74 /* nothing */;
75}
76
77
78/* Send 8 bits. */
79static void
80out_byte(unsigned char x)
81{
82 int i;
83 TK_SDA_DIR(1);
84 for (i = 8; i--;) {
85 /* The chip latches incoming bits on the rising edge of SCL. */
86 TK_SCL_OUT(0);
87 TK_SDA_OUT(x & 1);
88 tempudelay(1);
89 TK_SCL_OUT(1);
90 tempudelay(1);
91 x >>= 1;
92 }
93 TK_SDA_DIR(0);
94}
95
96static unsigned char
97in_byte(void)
98{
99 unsigned char x = 0;
100 int i;
101
102 /* Read byte. Bits come LSB first, on the falling edge of SCL.
103 * Assume SDA is in input direction already.
104 */
105 TK_SDA_DIR(0);
106
107 for (i = 8; i--;) {
108 TK_SCL_OUT(0);
109 tempudelay(1);
110 x >>= 1;
111 x |= (TK_SDA_IN() << 7);
112 TK_SCL_OUT(1);
113 tempudelay(1);
114 }
115
116 return x;
117}
118
119/* Prepares for a transaction by de-activating RST (active-low). */
120
121static void
122start(void)
123{
124 TK_SCL_OUT(0);
125 tempudelay(1);
126 TK_RST_OUT(0);
127 tempudelay(5);
128 TK_RST_OUT(1);
129}
130
131/* Ends a transaction by taking RST active again. */
132
133static void
134stop(void)
135{
136 tempudelay(2);
137 TK_RST_OUT(0);
138}
139
140/* Enable writing. */
141
142static void
143ds1302_wenable(void)
144{
145 start();
146 out_byte(0x8e); /* Write control register */
147 out_byte(0x00); /* Disable write protect bit 7 = 0 */
148 stop();
149}
150
151/* Disable writing. */
152
153static void
154ds1302_wdisable(void)
155{
156 start();
157 out_byte(0x8e); /* Write control register */
158 out_byte(0x80); /* Disable write protect bit 7 = 0 */
159 stop();
160}
161
162
163
164/* Read a byte from the selected register in the DS1302. */
165
166unsigned char
167ds1302_readreg(int reg)
168{
169 unsigned char x;
170
171 start();
172 out_byte(0x81 | (reg << 1)); /* read register */
173 x = in_byte();
174 stop();
175
176 return x;
177}
178
179/* Write a byte to the selected register. */
180
181void
182ds1302_writereg(int reg, unsigned char val)
183{
184#ifndef CONFIG_ETRAX_RTC_READONLY
185 int do_writereg = 1;
186#else
187 int do_writereg = 0;
188
189 if (reg == RTC_TRICKLECHARGER)
190 do_writereg = 1;
191#endif
192
193 if (do_writereg) {
194 ds1302_wenable();
195 start();
196 out_byte(0x80 | (reg << 1)); /* write register */
197 out_byte(val);
198 stop();
199 ds1302_wdisable();
200 }
201}
202
203void
204get_rtc_time(struct rtc_time *rtc_tm)
205{
206 unsigned long flags;
207
208 local_irq_save(flags);
209
210 rtc_tm->tm_sec = CMOS_READ(RTC_SECONDS);
211 rtc_tm->tm_min = CMOS_READ(RTC_MINUTES);
212 rtc_tm->tm_hour = CMOS_READ(RTC_HOURS);
213 rtc_tm->tm_mday = CMOS_READ(RTC_DAY_OF_MONTH);
214 rtc_tm->tm_mon = CMOS_READ(RTC_MONTH);
215 rtc_tm->tm_year = CMOS_READ(RTC_YEAR);
216
217 local_irq_restore(flags);
218
219 rtc_tm->tm_sec = bcd2bin(rtc_tm->tm_sec);
220 rtc_tm->tm_min = bcd2bin(rtc_tm->tm_min);
221 rtc_tm->tm_hour = bcd2bin(rtc_tm->tm_hour);
222 rtc_tm->tm_mday = bcd2bin(rtc_tm->tm_mday);
223 rtc_tm->tm_mon = bcd2bin(rtc_tm->tm_mon);
224 rtc_tm->tm_year = bcd2bin(rtc_tm->tm_year);
225
226 /*
227 * Account for differences between how the RTC uses the values
228 * and how they are defined in a struct rtc_time;
229 */
230
231 if (rtc_tm->tm_year <= 69)
232 rtc_tm->tm_year += 100;
233
234 rtc_tm->tm_mon--;
235}
236
237static unsigned char days_in_mo[] =
238 {0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31};
239
240/* ioctl that supports RTC_RD_TIME and RTC_SET_TIME (read and set time/date). */
241
242static int rtc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
243{
244 unsigned long flags;
245
246 switch(cmd) {
247 case RTC_RD_TIME: /* read the time/date from RTC */
248 {
249 struct rtc_time rtc_tm;
250
251 memset(&rtc_tm, 0, sizeof (struct rtc_time));
252 get_rtc_time(&rtc_tm);
253 if (copy_to_user((struct rtc_time*)arg, &rtc_tm, sizeof(struct rtc_time)))
254 return -EFAULT;
255 return 0;
256 }
257
258 case RTC_SET_TIME: /* set the RTC */
259 {
260 struct rtc_time rtc_tm;
261 unsigned char mon, day, hrs, min, sec, leap_yr;
262 unsigned int yrs;
263
264 if (!capable(CAP_SYS_TIME))
265 return -EPERM;
266
267 if (copy_from_user(&rtc_tm, (struct rtc_time*)arg, sizeof(struct rtc_time)))
268 return -EFAULT;
269
270 yrs = rtc_tm.tm_year + 1900;
271 mon = rtc_tm.tm_mon + 1; /* tm_mon starts at zero */
272 day = rtc_tm.tm_mday;
273 hrs = rtc_tm.tm_hour;
274 min = rtc_tm.tm_min;
275 sec = rtc_tm.tm_sec;
276
277
278 if ((yrs < 1970) || (yrs > 2069))
279 return -EINVAL;
280
281 leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));
282
283 if ((mon > 12) || (day == 0))
284 return -EINVAL;
285
286 if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
287 return -EINVAL;
288
289 if ((hrs >= 24) || (min >= 60) || (sec >= 60))
290 return -EINVAL;
291
292 if (yrs >= 2000)
293 yrs -= 2000; /* RTC (0, 1, ... 69) */
294 else
295 yrs -= 1900; /* RTC (70, 71, ... 99) */
296
297 sec = bin2bcd(sec);
298 min = bin2bcd(min);
299 hrs = bin2bcd(hrs);
300 day = bin2bcd(day);
301 mon = bin2bcd(mon);
302 yrs = bin2bcd(yrs);
303
304 local_irq_save(flags);
305 CMOS_WRITE(yrs, RTC_YEAR);
306 CMOS_WRITE(mon, RTC_MONTH);
307 CMOS_WRITE(day, RTC_DAY_OF_MONTH);
308 CMOS_WRITE(hrs, RTC_HOURS);
309 CMOS_WRITE(min, RTC_MINUTES);
310 CMOS_WRITE(sec, RTC_SECONDS);
311 local_irq_restore(flags);
312
313 /* Notice that at this point, the RTC is updated but
314 * the kernel is still running with the old time.
315 * You need to set that separately with settimeofday
316 * or adjtimex.
317 */
318 return 0;
319 }
320
321 case RTC_SET_CHARGE: /* set the RTC TRICKLE CHARGE register */
322 {
323 int tcs_val;
324
325 if (!capable(CAP_SYS_TIME))
326 return -EPERM;
327
328 if(copy_from_user(&tcs_val, (int*)arg, sizeof(int)))
329 return -EFAULT;
330
331 tcs_val = RTC_TCR_PATTERN | (tcs_val & 0x0F);
332 ds1302_writereg(RTC_TRICKLECHARGER, tcs_val);
333 return 0;
334 }
335 case RTC_VL_READ:
336 {
337 /* TODO:
338 * Implement voltage low detection support
339 */
340 printk(KERN_WARNING "DS1302: RTC Voltage Low detection"
341 " is not supported\n");
342 return 0;
343 }
344 case RTC_VL_CLR:
345 {
346 /* TODO:
347 * Nothing to do since Voltage Low detection is not supported
348 */
349 return 0;
350 }
351 default:
352 return -ENOIOCTLCMD;
353 }
354}
355
356static long rtc_unlocked_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
357{
358 int ret;
359
360 mutex_lock(&ds1302_mutex);
361 ret = rtc_ioctl(file, cmd, arg);
362 mutex_unlock(&ds1302_mutex);
363
364 return ret;
365}
366
367static void
368print_rtc_status(void)
369{
370 struct rtc_time tm;
371
372 get_rtc_time(&tm);
373
374 /*
375 * There is no way to tell if the luser has the RTC set for local
376 * time or for Universal Standard Time (GMT). Probably local though.
377 */
378
379 printk(KERN_INFO "rtc_time\t: %02d:%02d:%02d\n",
380 tm.tm_hour, tm.tm_min, tm.tm_sec);
381 printk(KERN_INFO "rtc_date\t: %04d-%02d-%02d\n",
382 tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday);
383}
384
385/* The various file operations we support. */
386
387static const struct file_operations rtc_fops = {
388 .owner = THIS_MODULE,
389 .unlocked_ioctl = rtc_unlocked_ioctl,
390 .llseek = noop_llseek,
391};
392
393/* Probe for the chip by writing something to its RAM and try reading it back. */
394
395#define MAGIC_PATTERN 0x42
396
397static int __init
398ds1302_probe(void)
399{
400 int retval, res;
401
402 TK_RST_DIR(1);
403 TK_SCL_DIR(1);
404 TK_SDA_DIR(0);
405
406 /* Try to talk to timekeeper. */
407
408 ds1302_wenable();
409 start();
410 out_byte(0xc0); /* write RAM byte 0 */
411 out_byte(MAGIC_PATTERN); /* write something magic */
412 start();
413 out_byte(0xc1); /* read RAM byte 0 */
414
415 if((res = in_byte()) == MAGIC_PATTERN) {
416 stop();
417 ds1302_wdisable();
418 printk(KERN_INFO "%s: RTC found.\n", ds1302_name);
419 printk(KERN_INFO "%s: SDA, SCL, RST on PB%i, PB%i, %s%i\n",
420 ds1302_name,
421 CONFIG_ETRAX_DS1302_SDABIT,
422 CONFIG_ETRAX_DS1302_SCLBIT,
423#ifdef CONFIG_ETRAX_DS1302_RST_ON_GENERIC_PORT
424 "GENIO",
425#else
426 "PB",
427#endif
428 CONFIG_ETRAX_DS1302_RSTBIT);
429 print_rtc_status();
430 retval = 1;
431 } else {
432 stop();
433 retval = 0;
434 }
435
436 return retval;
437}
438
439
440/* Just probe for the RTC and register the device to handle the ioctl needed. */
441
442int __init
443ds1302_init(void)
444{
445#ifdef CONFIG_ETRAX_I2C
446 i2c_init();
447#endif
448
449 if (!ds1302_probe()) {
450#ifdef CONFIG_ETRAX_DS1302_RST_ON_GENERIC_PORT
451#if CONFIG_ETRAX_DS1302_RSTBIT == 27
452 /*
453 * The only way to set g27 to output is to enable ATA.
454 *
455 * Make sure that R_GEN_CONFIG is setup correct.
456 */
457 /* Allocating the ATA interface will grab almost all
458 * pins in I/O groups a, b, c and d. A consequence of
459 * allocating the ATA interface is that the fixed
460 * interfaces shared RAM, parallel port 0, parallel
461 * port 1, parallel port W, SCSI-8 port 0, SCSI-8 port
462 * 1, SCSI-W, serial port 2, serial port 3,
463 * synchronous serial port 3 and USB port 2 and almost
464 * all GPIO pins on port g cannot be used.
465 */
466 if (cris_request_io_interface(if_ata, "ds1302/ATA")) {
467 printk(KERN_WARNING "ds1302: Failed to get IO interface\n");
468 return -1;
469 }
470
471#elif CONFIG_ETRAX_DS1302_RSTBIT == 0
472 if (cris_io_interface_allocate_pins(if_gpio_grp_a,
473 'g',
474 CONFIG_ETRAX_DS1302_RSTBIT,
475 CONFIG_ETRAX_DS1302_RSTBIT)) {
476 printk(KERN_WARNING "ds1302: Failed to get IO interface\n");
477 return -1;
478 }
479
480 /* Set the direction of this bit to out. */
481 genconfig_shadow = ((genconfig_shadow &
482 ~IO_MASK(R_GEN_CONFIG, g0dir)) |
483 (IO_STATE(R_GEN_CONFIG, g0dir, out)));
484 *R_GEN_CONFIG = genconfig_shadow;
485#endif
486 if (!ds1302_probe()) {
487 printk(KERN_WARNING "%s: RTC not found.\n", ds1302_name);
488 return -1;
489 }
490#else
491 printk(KERN_WARNING "%s: RTC not found.\n", ds1302_name);
492 return -1;
493#endif
494 }
495 /* Initialise trickle charger */
496 ds1302_writereg(RTC_TRICKLECHARGER,
497 RTC_TCR_PATTERN |(CONFIG_ETRAX_DS1302_TRICKLE_CHARGE & 0x0F));
498 /* Start clock by resetting CLOCK_HALT */
499 ds1302_writereg(RTC_SECONDS, (ds1302_readreg(RTC_SECONDS) & 0x7F));
500 return 0;
501}
502
503static int __init ds1302_register(void)
504{
505 ds1302_init();
506 if (register_chrdev(RTC_MAJOR_NR, ds1302_name, &rtc_fops)) {
507 printk(KERN_INFO "%s: unable to get major %d for rtc\n",
508 ds1302_name, RTC_MAJOR_NR);
509 return -1;
510 }
511 return 0;
512
513}
514
515module_init(ds1302_register);
diff --git a/arch/cris/arch-v10/drivers/pcf8563.c b/arch/cris/arch-v10/drivers/pcf8563.c
deleted file mode 100644
index 9da056860c92..000000000000
--- a/arch/cris/arch-v10/drivers/pcf8563.c
+++ /dev/null
@@ -1,380 +0,0 @@
1/*
2 * PCF8563 RTC
3 *
4 * From Phillips' datasheet:
5 *
6 * The PCF8563 is a CMOS real-time clock/calendar optimized for low power
7 * consumption. A programmable clock output, interrupt output and voltage
8 * low detector are also provided. All address and data are transferred
9 * serially via two-line bidirectional I2C-bus. Maximum bus speed is
10 * 400 kbits/s. The built-in word address register is incremented
11 * automatically after each written or read byte.
12 *
13 * Copyright (c) 2002-2007, Axis Communications AB
14 * All rights reserved.
15 *
16 * Author: Tobias Anderberg <tobiasa@axis.com>.
17 *
18 */
19
20#include <linux/module.h>
21#include <linux/kernel.h>
22#include <linux/types.h>
23#include <linux/sched.h>
24#include <linux/init.h>
25#include <linux/fs.h>
26#include <linux/ioctl.h>
27#include <linux/delay.h>
28#include <linux/bcd.h>
29#include <linux/mutex.h>
30
31#include <asm/uaccess.h>
32#include <asm/io.h>
33#include <asm/rtc.h>
34
35#include "i2c.h"
36
37#define PCF8563_MAJOR 121 /* Local major number. */
38#define DEVICE_NAME "rtc" /* Name which is registered in /proc/devices. */
39#define PCF8563_NAME "PCF8563"
40#define DRIVER_VERSION "$Revision: 1.24 $"
41
42/* I2C bus slave registers. */
43#define RTC_I2C_READ 0xa3
44#define RTC_I2C_WRITE 0xa2
45
46/* Two simple wrapper macros, saves a few keystrokes. */
47#define rtc_read(x) i2c_readreg(RTC_I2C_READ, x)
48#define rtc_write(x,y) i2c_writereg(RTC_I2C_WRITE, x, y)
49
50static DEFINE_MUTEX(pcf8563_mutex);
51static DEFINE_MUTEX(rtc_lock); /* Protect state etc */
52
53static const unsigned char days_in_month[] =
54 { 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };
55
56static long pcf8563_unlocked_ioctl(struct file *, unsigned int, unsigned long);
57
58/* Cache VL bit value read at driver init since writing the RTC_SECOND
59 * register clears the VL status.
60 */
61static int voltage_low;
62
63static const struct file_operations pcf8563_fops = {
64 .owner = THIS_MODULE,
65 .unlocked_ioctl = pcf8563_unlocked_ioctl,
66 .llseek = noop_llseek,
67};
68
69unsigned char
70pcf8563_readreg(int reg)
71{
72 unsigned char res = rtc_read(reg);
73
74 /* The PCF8563 does not return 0 for unimplemented bits. */
75 switch (reg) {
76 case RTC_SECONDS:
77 case RTC_MINUTES:
78 res &= 0x7F;
79 break;
80 case RTC_HOURS:
81 case RTC_DAY_OF_MONTH:
82 res &= 0x3F;
83 break;
84 case RTC_WEEKDAY:
85 res &= 0x07;
86 break;
87 case RTC_MONTH:
88 res &= 0x1F;
89 break;
90 case RTC_CONTROL1:
91 res &= 0xA8;
92 break;
93 case RTC_CONTROL2:
94 res &= 0x1F;
95 break;
96 case RTC_CLOCKOUT_FREQ:
97 case RTC_TIMER_CONTROL:
98 res &= 0x83;
99 break;
100 }
101 return res;
102}
103
104void
105pcf8563_writereg(int reg, unsigned char val)
106{
107 rtc_write(reg, val);
108}
109
110void
111get_rtc_time(struct rtc_time *tm)
112{
113 tm->tm_sec = rtc_read(RTC_SECONDS);
114 tm->tm_min = rtc_read(RTC_MINUTES);
115 tm->tm_hour = rtc_read(RTC_HOURS);
116 tm->tm_mday = rtc_read(RTC_DAY_OF_MONTH);
117 tm->tm_wday = rtc_read(RTC_WEEKDAY);
118 tm->tm_mon = rtc_read(RTC_MONTH);
119 tm->tm_year = rtc_read(RTC_YEAR);
120
121 if (tm->tm_sec & 0x80) {
122 printk(KERN_ERR "%s: RTC Voltage Low - reliable date/time "
123 "information is no longer guaranteed!\n", PCF8563_NAME);
124 }
125
126 tm->tm_year = bcd2bin(tm->tm_year) +
127 ((tm->tm_mon & 0x80) ? 100 : 0);
128 tm->tm_sec &= 0x7F;
129 tm->tm_min &= 0x7F;
130 tm->tm_hour &= 0x3F;
131 tm->tm_mday &= 0x3F;
132 tm->tm_wday &= 0x07; /* Not coded in BCD. */
133 tm->tm_mon &= 0x1F;
134
135 tm->tm_sec = bcd2bin(tm->tm_sec);
136 tm->tm_min = bcd2bin(tm->tm_min);
137 tm->tm_hour = bcd2bin(tm->tm_hour);
138 tm->tm_mday = bcd2bin(tm->tm_mday);
139 tm->tm_mon = bcd2bin(tm->tm_mon);
140 tm->tm_mon--; /* Month is 1..12 in RTC but 0..11 in linux */
141}
142
143int __init
144pcf8563_init(void)
145{
146 static int res;
147 static int first = 1;
148
149 if (!first)
150 return res;
151 first = 0;
152
153 /* Initiate the i2c protocol. */
154 res = i2c_init();
155 if (res < 0) {
156 printk(KERN_CRIT "pcf8563_init: Failed to init i2c.\n");
157 return res;
158 }
159
160 /*
161 * First of all we need to reset the chip. This is done by
162 * clearing control1, control2 and clk freq and resetting
163 * all alarms.
164 */
165 if (rtc_write(RTC_CONTROL1, 0x00) < 0)
166 goto err;
167
168 if (rtc_write(RTC_CONTROL2, 0x00) < 0)
169 goto err;
170
171 if (rtc_write(RTC_CLOCKOUT_FREQ, 0x00) < 0)
172 goto err;
173
174 if (rtc_write(RTC_TIMER_CONTROL, 0x03) < 0)
175 goto err;
176
177 /* Reset the alarms. */
178 if (rtc_write(RTC_MINUTE_ALARM, 0x80) < 0)
179 goto err;
180
181 if (rtc_write(RTC_HOUR_ALARM, 0x80) < 0)
182 goto err;
183
184 if (rtc_write(RTC_DAY_ALARM, 0x80) < 0)
185 goto err;
186
187 if (rtc_write(RTC_WEEKDAY_ALARM, 0x80) < 0)
188 goto err;
189
190 /* Check for low voltage, and warn about it. */
191 if (rtc_read(RTC_SECONDS) & 0x80) {
192 voltage_low = 1;
193 printk(KERN_WARNING "%s: RTC Voltage Low - reliable "
194 "date/time information is no longer guaranteed!\n",
195 PCF8563_NAME);
196 }
197
198 return res;
199
200err:
201 printk(KERN_INFO "%s: Error initializing chip.\n", PCF8563_NAME);
202 res = -1;
203 return res;
204}
205
206void __exit
207pcf8563_exit(void)
208{
209 unregister_chrdev(PCF8563_MAJOR, DEVICE_NAME);
210}
211
212/*
213 * ioctl calls for this driver. Why return -ENOTTY upon error? Because
214 * POSIX says so!
215 */
216static int pcf8563_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
217{
218 /* Some sanity checks. */
219 if (_IOC_TYPE(cmd) != RTC_MAGIC)
220 return -ENOTTY;
221
222 if (_IOC_NR(cmd) > RTC_MAX_IOCTL)
223 return -ENOTTY;
224
225 switch (cmd) {
226 case RTC_RD_TIME:
227 {
228 struct rtc_time tm;
229
230 mutex_lock(&rtc_lock);
231 memset(&tm, 0, sizeof tm);
232 get_rtc_time(&tm);
233
234 if (copy_to_user((struct rtc_time *) arg, &tm,
235 sizeof tm)) {
236 mutex_unlock(&rtc_lock);
237 return -EFAULT;
238 }
239
240 mutex_unlock(&rtc_lock);
241
242 return 0;
243 }
244 case RTC_SET_TIME:
245 {
246 int leap;
247 int year;
248 int century;
249 struct rtc_time tm;
250
251 memset(&tm, 0, sizeof tm);
252 if (!capable(CAP_SYS_TIME))
253 return -EPERM;
254
255 if (copy_from_user(&tm, (struct rtc_time *) arg, sizeof tm))
256 return -EFAULT;
257
258 /* Convert from struct tm to struct rtc_time. */
259 tm.tm_year += 1900;
260 tm.tm_mon += 1;
261
262 /*
263 * Check if tm.tm_year is a leap year. A year is a leap
264 * year if it is divisible by 4 but not 100, except
265 * that years divisible by 400 _are_ leap years.
266 */
267 year = tm.tm_year;
268 leap = (tm.tm_mon == 2) &&
269 ((year % 4 == 0 && year % 100 != 0) || year % 400 == 0);
270
271 /* Perform some sanity checks. */
272 if ((tm.tm_year < 1970) ||
273 (tm.tm_mon > 12) ||
274 (tm.tm_mday == 0) ||
275 (tm.tm_mday > days_in_month[tm.tm_mon] + leap) ||
276 (tm.tm_wday >= 7) ||
277 (tm.tm_hour >= 24) ||
278 (tm.tm_min >= 60) ||
279 (tm.tm_sec >= 60))
280 return -EINVAL;
281
282 century = (tm.tm_year >= 2000) ? 0x80 : 0;
283 tm.tm_year = tm.tm_year % 100;
284
285 tm.tm_year = bin2bcd(tm.tm_year);
286 tm.tm_mon = bin2bcd(tm.tm_mon);
287 tm.tm_mday = bin2bcd(tm.tm_mday);
288 tm.tm_hour = bin2bcd(tm.tm_hour);
289 tm.tm_min = bin2bcd(tm.tm_min);
290 tm.tm_sec = bin2bcd(tm.tm_sec);
291 tm.tm_mon |= century;
292
293 mutex_lock(&rtc_lock);
294
295 rtc_write(RTC_YEAR, tm.tm_year);
296 rtc_write(RTC_MONTH, tm.tm_mon);
297 rtc_write(RTC_WEEKDAY, tm.tm_wday); /* Not coded in BCD. */
298 rtc_write(RTC_DAY_OF_MONTH, tm.tm_mday);
299 rtc_write(RTC_HOURS, tm.tm_hour);
300 rtc_write(RTC_MINUTES, tm.tm_min);
301 rtc_write(RTC_SECONDS, tm.tm_sec);
302
303 mutex_unlock(&rtc_lock);
304
305 return 0;
306 }
307 case RTC_VL_READ:
308 if (voltage_low) {
309 printk(KERN_ERR "%s: RTC Voltage Low - "
310 "reliable date/time information is no "
311 "longer guaranteed!\n", PCF8563_NAME);
312 }
313
314 if (copy_to_user((int *) arg, &voltage_low, sizeof(int)))
315 return -EFAULT;
316 return 0;
317
318 case RTC_VL_CLR:
319 {
320 /* Clear the VL bit in the seconds register in case
321 * the time has not been set already (which would
322 * have cleared it). This does not really matter
323 * because of the cached voltage_low value but do it
324 * anyway for consistency. */
325
326 int ret = rtc_read(RTC_SECONDS);
327
328 rtc_write(RTC_SECONDS, (ret & 0x7F));
329
330 /* Clear the cached value. */
331 voltage_low = 0;
332
333 return 0;
334 }
335 default:
336 return -ENOTTY;
337 }
338
339 return 0;
340}
341
342static long pcf8563_unlocked_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
343{
344 int ret;
345
346 mutex_lock(&pcf8563_mutex);
347 ret = pcf8563_ioctl(filp, cmd, arg);
348 mutex_unlock(&pcf8563_mutex);
349
350 return ret;
351}
352
353static int __init pcf8563_register(void)
354{
355 if (pcf8563_init() < 0) {
356 printk(KERN_INFO "%s: Unable to initialize Real-Time Clock "
357 "Driver, %s\n", PCF8563_NAME, DRIVER_VERSION);
358 return -1;
359 }
360
361 if (register_chrdev(PCF8563_MAJOR, DEVICE_NAME, &pcf8563_fops) < 0) {
362 printk(KERN_INFO "%s: Unable to get major number %d for RTC device.\n",
363 PCF8563_NAME, PCF8563_MAJOR);
364 return -1;
365 }
366
367 printk(KERN_INFO "%s Real-Time Clock Driver, %s\n", PCF8563_NAME,
368 DRIVER_VERSION);
369
370 /* Check for low voltage, and warn about it. */
371 if (voltage_low) {
372 printk(KERN_WARNING "%s: RTC Voltage Low - reliable date/time "
373 "information is no longer guaranteed!\n", PCF8563_NAME);
374 }
375
376 return 0;
377}
378
379module_init(pcf8563_register);
380module_exit(pcf8563_exit);
diff --git a/arch/cris/arch-v10/kernel/fasttimer.c b/arch/cris/arch-v10/kernel/fasttimer.c
index 8a8196ee8ce8..082f1890bacb 100644
--- a/arch/cris/arch-v10/kernel/fasttimer.c
+++ b/arch/cris/arch-v10/kernel/fasttimer.c
@@ -21,8 +21,6 @@
21#include <asm/io.h> 21#include <asm/io.h>
22#include <asm/irq.h> 22#include <asm/irq.h>
23#include <asm/delay.h> 23#include <asm/delay.h>
24#include <asm/rtc.h>
25
26 24
27#include <arch/svinto.h> 25#include <arch/svinto.h>
28#include <asm/fasttimer.h> 26#include <asm/fasttimer.h>
diff --git a/arch/cris/arch-v10/kernel/kgdb.c b/arch/cris/arch-v10/kernel/kgdb.c
index b579dd02e098..37e6d2c50b76 100644
--- a/arch/cris/arch-v10/kernel/kgdb.c
+++ b/arch/cris/arch-v10/kernel/kgdb.c
@@ -264,7 +264,7 @@ static int write_register (int regno, char *val);
264 264
265/* Write a value to a specified register in the stack of a thread other 265/* Write a value to a specified register in the stack of a thread other
266 than the current thread. */ 266 than the current thread. */
267static write_stack_register (int thread_id, int regno, char *valptr); 267static int write_stack_register(int thread_id, int regno, char *valptr);
268 268
269/* Read a value from a specified register in the register image. Returns the 269/* Read a value from a specified register in the register image. Returns the
270 status of the read operation. The register value is returned in valptr. */ 270 status of the read operation. The register value is returned in valptr. */
diff --git a/arch/cris/arch-v10/kernel/time.c b/arch/cris/arch-v10/kernel/time.c
index 20c85b5dc7d0..bcffcb6a9415 100644
--- a/arch/cris/arch-v10/kernel/time.c
+++ b/arch/cris/arch-v10/kernel/time.c
@@ -19,16 +19,12 @@
19#include <asm/signal.h> 19#include <asm/signal.h>
20#include <asm/io.h> 20#include <asm/io.h>
21#include <asm/delay.h> 21#include <asm/delay.h>
22#include <asm/rtc.h>
23#include <asm/irq_regs.h> 22#include <asm/irq_regs.h>
24 23
25/* define this if you need to use print_timestamp */ 24/* define this if you need to use print_timestamp */
26/* it will make jiffies at 96 hz instead of 100 hz though */ 25/* it will make jiffies at 96 hz instead of 100 hz though */
27#undef USE_CASCADE_TIMERS 26#undef USE_CASCADE_TIMERS
28 27
29extern int set_rtc_mmss(unsigned long nowtime);
30extern int have_rtc;
31
32unsigned long get_ns_in_jiffie(void) 28unsigned long get_ns_in_jiffie(void)
33{ 29{
34 unsigned char timer_count, t1; 30 unsigned char timer_count, t1;
@@ -203,11 +199,6 @@ time_init(void)
203 */ 199 */
204 loops_per_usec = 50; 200 loops_per_usec = 50;
205 201
206 if(RTC_INIT() < 0)
207 have_rtc = 0;
208 else
209 have_rtc = 1;
210
211 /* Setup the etrax timers 202 /* Setup the etrax timers
212 * Base frequency is 25000 hz, divider 250 -> 100 HZ 203 * Base frequency is 25000 hz, divider 250 -> 100 HZ
213 * In normal mode, we use timer0, so timer1 is free. In cascade 204 * In normal mode, we use timer0, so timer1 is free. In cascade
diff --git a/arch/cris/arch-v10/lib/Makefile b/arch/cris/arch-v10/lib/Makefile
index 36e9a9c5239b..725153edb764 100644
--- a/arch/cris/arch-v10/lib/Makefile
+++ b/arch/cris/arch-v10/lib/Makefile
@@ -2,8 +2,5 @@
2# Makefile for Etrax-specific library files.. 2# Makefile for Etrax-specific library files..
3# 3#
4 4
5
6EXTRA_AFLAGS := -traditional
7
8lib-y = checksum.o checksumcopy.o string.o usercopy.o memset.o csumcpfruser.o 5lib-y = checksum.o checksumcopy.o string.o usercopy.o memset.o csumcpfruser.o
9 6
diff --git a/arch/cris/arch-v32/drivers/cryptocop.c b/arch/cris/arch-v32/drivers/cryptocop.c
index 642c6fed43d7..f8476d9e856b 100644
--- a/arch/cris/arch-v32/drivers/cryptocop.c
+++ b/arch/cris/arch-v32/drivers/cryptocop.c
@@ -1394,11 +1394,10 @@ static int create_md5_pad(int alloc_flag, unsigned long long hashed_length, char
1394 1394
1395 if (padlen < MD5_MIN_PAD_LENGTH) padlen += MD5_BLOCK_LENGTH; 1395 if (padlen < MD5_MIN_PAD_LENGTH) padlen += MD5_BLOCK_LENGTH;
1396 1396
1397 p = kmalloc(padlen, alloc_flag); 1397 p = kzalloc(padlen, alloc_flag);
1398 if (!p) return -ENOMEM; 1398 if (!p) return -ENOMEM;
1399 1399
1400 *p = 0x80; 1400 *p = 0x80;
1401 memset(p+1, 0, padlen - 1);
1402 1401
1403 DEBUG(printk("create_md5_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length)); 1402 DEBUG(printk("create_md5_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length));
1404 1403
@@ -1426,11 +1425,10 @@ static int create_sha1_pad(int alloc_flag, unsigned long long hashed_length, cha
1426 1425
1427 if (padlen < SHA1_MIN_PAD_LENGTH) padlen += SHA1_BLOCK_LENGTH; 1426 if (padlen < SHA1_MIN_PAD_LENGTH) padlen += SHA1_BLOCK_LENGTH;
1428 1427
1429 p = kmalloc(padlen, alloc_flag); 1428 p = kzalloc(padlen, alloc_flag);
1430 if (!p) return -ENOMEM; 1429 if (!p) return -ENOMEM;
1431 1430
1432 *p = 0x80; 1431 *p = 0x80;
1433 memset(p+1, 0, padlen - 1);
1434 1432
1435 DEBUG(printk("create_sha1_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length)); 1433 DEBUG(printk("create_sha1_pad: hashed_length=%lld bits == %lld bytes\n", bit_length, hashed_length));
1436 1434
diff --git a/arch/cris/arch-v32/kernel/ptrace.c b/arch/cris/arch-v32/kernel/ptrace.c
index f7ad9e8637df..f085229cf870 100644
--- a/arch/cris/arch-v32/kernel/ptrace.c
+++ b/arch/cris/arch-v32/kernel/ptrace.c
@@ -114,8 +114,6 @@ void user_disable_single_step(struct task_struct *child)
114void 114void
115ptrace_disable(struct task_struct *child) 115ptrace_disable(struct task_struct *child)
116{ 116{
117 unsigned long tmp;
118
119 /* Deconfigure SPC and S-bit. */ 117 /* Deconfigure SPC and S-bit. */
120 user_disable_single_step(child); 118 user_disable_single_step(child);
121 put_reg(child, PT_SPC, 0); 119 put_reg(child, PT_SPC, 0);
diff --git a/arch/cris/arch-v32/kernel/time.c b/arch/cris/arch-v32/kernel/time.c
index 6773fc83a670..8c4b45efd7b6 100644
--- a/arch/cris/arch-v32/kernel/time.c
+++ b/arch/cris/arch-v32/kernel/time.c
@@ -18,7 +18,6 @@
18#include <asm/signal.h> 18#include <asm/signal.h>
19#include <asm/io.h> 19#include <asm/io.h>
20#include <asm/delay.h> 20#include <asm/delay.h>
21#include <asm/rtc.h>
22#include <asm/irq.h> 21#include <asm/irq.h>
23#include <asm/irq_regs.h> 22#include <asm/irq_regs.h>
24 23
@@ -67,7 +66,6 @@ unsigned long timer_regs[NR_CPUS] =
67}; 66};
68 67
69extern int set_rtc_mmss(unsigned long nowtime); 68extern int set_rtc_mmss(unsigned long nowtime);
70extern int have_rtc;
71 69
72#ifdef CONFIG_CPU_FREQ 70#ifdef CONFIG_CPU_FREQ
73static int 71static int
@@ -265,11 +263,6 @@ void __init time_init(void)
265 */ 263 */
266 loops_per_usec = 50; 264 loops_per_usec = 50;
267 265
268 if(RTC_INIT() < 0)
269 have_rtc = 0;
270 else
271 have_rtc = 1;
272
273 /* Start CPU local timer. */ 266 /* Start CPU local timer. */
274 cris_timer_init(); 267 cris_timer_init();
275 268
diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
index 1de779f4f240..7caf25d58e6b 100644
--- a/arch/cris/include/arch-v32/arch/cache.h
+++ b/arch/cris/include/arch-v32/arch/cache.h
@@ -7,7 +7,7 @@
7#define L1_CACHE_BYTES 32 7#define L1_CACHE_BYTES 32
8#define L1_CACHE_SHIFT 5 8#define L1_CACHE_SHIFT 5
9 9
10#define __read_mostly __attribute__((__section__(".data.read_mostly"))) 10#define __read_mostly __attribute__((__section__(".data..read_mostly")))
11 11
12void flush_dma_list(dma_descr_data *descr); 12void flush_dma_list(dma_descr_data *descr);
13void flush_dma_descr(dma_descr_data *descr, int flush_buf); 13void flush_dma_descr(dma_descr_data *descr, int flush_buf);
diff --git a/arch/cris/include/asm/Kbuild b/arch/cris/include/asm/Kbuild
index 956eea246b97..04d02a51c5e9 100644
--- a/arch/cris/include/asm/Kbuild
+++ b/arch/cris/include/asm/Kbuild
@@ -6,5 +6,4 @@ header-y += arch-v32/
6header-y += ethernet.h 6header-y += ethernet.h
7header-y += etraxgpio.h 7header-y += etraxgpio.h
8header-y += rs485.h 8header-y += rs485.h
9header-y += rtc.h
10header-y += sync_serial.h 9header-y += sync_serial.h
diff --git a/arch/cris/include/asm/posix_types.h b/arch/cris/include/asm/posix_types.h
index 72b3cd6eda0b..234891c74e2b 100644
--- a/arch/cris/include/asm/posix_types.h
+++ b/arch/cris/include/asm/posix_types.h
@@ -33,4 +33,6 @@ typedef int __kernel_ptrdiff_t;
33typedef unsigned short __kernel_old_dev_t; 33typedef unsigned short __kernel_old_dev_t;
34#define __kernel_old_dev_t __kernel_old_dev_t 34#define __kernel_old_dev_t __kernel_old_dev_t
35 35
36#include <asm-generic/posix_types.h>
37
36#endif /* __ARCH_CRIS_POSIX_TYPES_H */ 38#endif /* __ARCH_CRIS_POSIX_TYPES_H */
diff --git a/arch/cris/include/asm/rtc.h b/arch/cris/include/asm/rtc.h
deleted file mode 100644
index 17d3019529e1..000000000000
--- a/arch/cris/include/asm/rtc.h
+++ /dev/null
@@ -1,107 +0,0 @@
1
2#ifndef __RTC_H__
3#define __RTC_H__
4
5#ifdef CONFIG_ETRAX_DS1302
6 /* Dallas DS1302 clock/calendar register numbers. */
7# define RTC_SECONDS 0
8# define RTC_MINUTES 1
9# define RTC_HOURS 2
10# define RTC_DAY_OF_MONTH 3
11# define RTC_MONTH 4
12# define RTC_WEEKDAY 5
13# define RTC_YEAR 6
14# define RTC_CONTROL 7
15
16 /* Bits in CONTROL register. */
17# define RTC_CONTROL_WRITEPROTECT 0x80
18# define RTC_TRICKLECHARGER 8
19
20 /* Bits in TRICKLECHARGER register TCS TCS TCS TCS DS DS RS RS. */
21# define RTC_TCR_PATTERN 0xA0 /* 1010xxxx */
22# define RTC_TCR_1DIOD 0x04 /* xxxx01xx */
23# define RTC_TCR_2DIOD 0x08 /* xxxx10xx */
24# define RTC_TCR_DISABLED 0x00 /* xxxxxx00 Disabled */
25# define RTC_TCR_2KOHM 0x01 /* xxxxxx01 2KOhm */
26# define RTC_TCR_4KOHM 0x02 /* xxxxxx10 4kOhm */
27# define RTC_TCR_8KOHM 0x03 /* xxxxxx11 8kOhm */
28
29#elif defined(CONFIG_ETRAX_PCF8563)
30 /* I2C bus slave registers. */
31# define RTC_I2C_READ 0xa3
32# define RTC_I2C_WRITE 0xa2
33
34 /* Phillips PCF8563 registers. */
35# define RTC_CONTROL1 0x00 /* Control/Status register 1. */
36# define RTC_CONTROL2 0x01 /* Control/Status register 2. */
37# define RTC_CLOCKOUT_FREQ 0x0d /* CLKOUT frequency. */
38# define RTC_TIMER_CONTROL 0x0e /* Timer control. */
39# define RTC_TIMER_CNTDOWN 0x0f /* Timer countdown. */
40
41 /* BCD encoded clock registers. */
42# define RTC_SECONDS 0x02
43# define RTC_MINUTES 0x03
44# define RTC_HOURS 0x04
45# define RTC_DAY_OF_MONTH 0x05
46# define RTC_WEEKDAY 0x06 /* Not coded in BCD! */
47# define RTC_MONTH 0x07
48# define RTC_YEAR 0x08
49# define RTC_MINUTE_ALARM 0x09
50# define RTC_HOUR_ALARM 0x0a
51# define RTC_DAY_ALARM 0x0b
52# define RTC_WEEKDAY_ALARM 0x0c
53
54#endif
55
56#ifdef CONFIG_ETRAX_DS1302
57extern unsigned char ds1302_readreg(int reg);
58extern void ds1302_writereg(int reg, unsigned char val);
59extern int ds1302_init(void);
60# define CMOS_READ(x) ds1302_readreg(x)
61# define CMOS_WRITE(val,reg) ds1302_writereg(reg,val)
62# define RTC_INIT() ds1302_init()
63#elif defined(CONFIG_ETRAX_PCF8563)
64extern unsigned char pcf8563_readreg(int reg);
65extern void pcf8563_writereg(int reg, unsigned char val);
66extern int pcf8563_init(void);
67# define CMOS_READ(x) pcf8563_readreg(x)
68# define CMOS_WRITE(val,reg) pcf8563_writereg(reg,val)
69# define RTC_INIT() pcf8563_init()
70#else
71 /* No RTC configured so we shouldn't try to access any. */
72# define CMOS_READ(x) 42
73# define CMOS_WRITE(x,y)
74# define RTC_INIT() (-1)
75#endif
76
77/*
78 * The struct used to pass data via the following ioctl. Similar to the
79 * struct tm in <time.h>, but it needs to be here so that the kernel
80 * source is self contained, allowing cross-compiles, etc. etc.
81 */
82struct rtc_time {
83 int tm_sec;
84 int tm_min;
85 int tm_hour;
86 int tm_mday;
87 int tm_mon;
88 int tm_year;
89 int tm_wday;
90 int tm_yday;
91 int tm_isdst;
92};
93
94/* ioctl() calls that are permitted to the /dev/rtc interface. */
95#define RTC_MAGIC 'p'
96/* Read RTC time. */
97#define RTC_RD_TIME _IOR(RTC_MAGIC, 0x09, struct rtc_time)
98/* Set RTC time. */
99#define RTC_SET_TIME _IOW(RTC_MAGIC, 0x0a, struct rtc_time)
100#define RTC_SET_CHARGE _IOW(RTC_MAGIC, 0x0b, int)
101/* Voltage low detector */
102#define RTC_VL_READ _IOR(RTC_MAGIC, 0x13, int)
103/* Clear voltage low information */
104#define RTC_VL_CLR _IO(RTC_MAGIC, 0x14)
105#define RTC_MAX_IOCTL 0x14
106
107#endif /* __RTC_H__ */
diff --git a/arch/cris/kernel/time.c b/arch/cris/kernel/time.c
index 4e73092e85c0..277ffc459e4b 100644
--- a/arch/cris/kernel/time.c
+++ b/arch/cris/kernel/time.c
@@ -21,7 +21,6 @@
21 * 21 *
22 */ 22 */
23 23
24#include <asm/rtc.h>
25#include <linux/errno.h> 24#include <linux/errno.h>
26#include <linux/module.h> 25#include <linux/module.h>
27#include <linux/param.h> 26#include <linux/param.h>
@@ -32,7 +31,8 @@
32#include <linux/profile.h> 31#include <linux/profile.h>
33#include <linux/sched.h> /* just for sched_clock() - funny that */ 32#include <linux/sched.h> /* just for sched_clock() - funny that */
34 33
35int have_rtc; /* used to remember if we have an RTC or not */; 34
35#define D(x)
36 36
37#define TICK_SIZE tick 37#define TICK_SIZE tick
38 38
@@ -50,78 +50,16 @@ u32 arch_gettimeoffset(void)
50} 50}
51#endif 51#endif
52 52
53/*
54 * BUG: This routine does not handle hour overflow properly; it just
55 * sets the minutes. Usually you'll only notice that after reboot!
56 */
57
58int set_rtc_mmss(unsigned long nowtime) 53int set_rtc_mmss(unsigned long nowtime)
59{ 54{
60 int retval = 0; 55 D(printk(KERN_DEBUG "set_rtc_mmss(%lu)\n", nowtime));
61 int real_seconds, real_minutes, cmos_minutes; 56 return 0;
62
63 printk(KERN_DEBUG "set_rtc_mmss(%lu)\n", nowtime);
64
65 if(!have_rtc)
66 return 0;
67
68 cmos_minutes = CMOS_READ(RTC_MINUTES);
69 cmos_minutes = bcd2bin(cmos_minutes);
70
71 /*
72 * since we're only adjusting minutes and seconds,
73 * don't interfere with hour overflow. This avoids
74 * messing with unknown time zones but requires your
75 * RTC not to be off by more than 15 minutes
76 */
77 real_seconds = nowtime % 60;
78 real_minutes = nowtime / 60;
79 if (((abs(real_minutes - cmos_minutes) + 15)/30) & 1)
80 real_minutes += 30; /* correct for half hour time zone */
81 real_minutes %= 60;
82
83 if (abs(real_minutes - cmos_minutes) < 30) {
84 real_seconds = bin2bcd(real_seconds);
85 real_minutes = bin2bcd(real_minutes);
86 CMOS_WRITE(real_seconds,RTC_SECONDS);
87 CMOS_WRITE(real_minutes,RTC_MINUTES);
88 } else {
89 printk_once(KERN_NOTICE
90 "set_rtc_mmss: can't update from %d to %d\n",
91 cmos_minutes, real_minutes);
92 retval = -1;
93 }
94
95 return retval;
96} 57}
97 58
98/* grab the time from the RTC chip */ 59/* grab the time from the RTC chip */
99 60unsigned long get_cmos_time(void)
100unsigned long
101get_cmos_time(void)
102{ 61{
103 unsigned int year, mon, day, hour, min, sec; 62 return 0;
104 if(!have_rtc)
105 return 0;
106
107 sec = CMOS_READ(RTC_SECONDS);
108 min = CMOS_READ(RTC_MINUTES);
109 hour = CMOS_READ(RTC_HOURS);
110 day = CMOS_READ(RTC_DAY_OF_MONTH);
111 mon = CMOS_READ(RTC_MONTH);
112 year = CMOS_READ(RTC_YEAR);
113
114 sec = bcd2bin(sec);
115 min = bcd2bin(min);
116 hour = bcd2bin(hour);
117 day = bcd2bin(day);
118 mon = bcd2bin(mon);
119 year = bcd2bin(year);
120
121 if ((year += 1900) < 1970)
122 year += 100;
123
124 return mktime(year, mon, day, hour, min, sec);
125} 63}
126 64
127 65
@@ -132,7 +70,7 @@ int update_persistent_clock(struct timespec now)
132 70
133void read_persistent_clock(struct timespec *ts) 71void read_persistent_clock(struct timespec *ts)
134{ 72{
135 ts->tv_sec = get_cmos_time(); 73 ts->tv_sec = 0;
136 ts->tv_nsec = 0; 74 ts->tv_nsec = 0;
137} 75}
138 76
diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S
index a6990cb0f098..a68b983dcea1 100644
--- a/arch/cris/kernel/vmlinux.lds.S
+++ b/arch/cris/kernel/vmlinux.lds.S
@@ -52,6 +52,7 @@ SECTIONS
52 52
53 EXCEPTION_TABLE(4) 53 EXCEPTION_TABLE(4)
54 54
55 _sdata = .;
55 RODATA 56 RODATA
56 57
57 . = ALIGN (4); 58 . = ALIGN (4);
diff --git a/arch/cris/mm/fault.c b/arch/cris/mm/fault.c
index b4760d86e1bb..45fd542cf173 100644
--- a/arch/cris/mm/fault.c
+++ b/arch/cris/mm/fault.c
@@ -58,6 +58,8 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
58 struct vm_area_struct * vma; 58 struct vm_area_struct * vma;
59 siginfo_t info; 59 siginfo_t info;
60 int fault; 60 int fault;
61 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
62 ((writeaccess & 1) ? FAULT_FLAG_WRITE : 0);
61 63
62 D(printk(KERN_DEBUG 64 D(printk(KERN_DEBUG
63 "Page fault for %lX on %X at %lX, prot %d write %d\n", 65 "Page fault for %lX on %X at %lX, prot %d write %d\n",
@@ -115,6 +117,7 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
115 if (in_atomic() || !mm) 117 if (in_atomic() || !mm)
116 goto no_context; 118 goto no_context;
117 119
120retry:
118 down_read(&mm->mmap_sem); 121 down_read(&mm->mmap_sem);
119 vma = find_vma(mm, address); 122 vma = find_vma(mm, address);
120 if (!vma) 123 if (!vma)
@@ -163,7 +166,11 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
163 * the fault. 166 * the fault.
164 */ 167 */
165 168
166 fault = handle_mm_fault(mm, vma, address, (writeaccess & 1) ? FAULT_FLAG_WRITE : 0); 169 fault = handle_mm_fault(mm, vma, address, flags);
170
171 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
172 return;
173
167 if (unlikely(fault & VM_FAULT_ERROR)) { 174 if (unlikely(fault & VM_FAULT_ERROR)) {
168 if (fault & VM_FAULT_OOM) 175 if (fault & VM_FAULT_OOM)
169 goto out_of_memory; 176 goto out_of_memory;
@@ -171,10 +178,24 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
171 goto do_sigbus; 178 goto do_sigbus;
172 BUG(); 179 BUG();
173 } 180 }
174 if (fault & VM_FAULT_MAJOR) 181
175 tsk->maj_flt++; 182 if (flags & FAULT_FLAG_ALLOW_RETRY) {
176 else 183 if (fault & VM_FAULT_MAJOR)
177 tsk->min_flt++; 184 tsk->maj_flt++;
185 else
186 tsk->min_flt++;
187 if (fault & VM_FAULT_RETRY) {
188 flags &= ~FAULT_FLAG_ALLOW_RETRY;
189
190 /*
191 * No need to up_read(&mm->mmap_sem) as we would
192 * have already released it in __lock_page_or_retry
193 * in mm/filemap.c.
194 */
195
196 goto retry;
197 }
198 }
178 199
179 up_read(&mm->mmap_sem); 200 up_read(&mm->mmap_sem);
180 return; 201 return;
diff --git a/arch/frv/include/asm/kvm_para.h b/arch/frv/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/frv/include/asm/kvm_para.h
@@ -0,0 +1 @@
#include <asm-generic/kvm_para.h>
diff --git a/arch/h8300/include/asm/kvm_para.h b/arch/h8300/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/h8300/include/asm/kvm_para.h
@@ -0,0 +1 @@
#include <asm-generic/kvm_para.h>
diff --git a/arch/hexagon/include/asm/kvm_para.h b/arch/hexagon/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/hexagon/include/asm/kvm_para.h
@@ -0,0 +1 @@
#include <asm-generic/kvm_para.h>
diff --git a/arch/ia64/include/asm/kvm_host.h b/arch/ia64/include/asm/kvm_host.h
index e35b3a84a40b..6d6a5ac48d85 100644
--- a/arch/ia64/include/asm/kvm_host.h
+++ b/arch/ia64/include/asm/kvm_host.h
@@ -365,6 +365,7 @@ struct thash_cb {
365}; 365};
366 366
367struct kvm_vcpu_stat { 367struct kvm_vcpu_stat {
368 u32 halt_wakeup;
368}; 369};
369 370
370struct kvm_vcpu_arch { 371struct kvm_vcpu_arch {
@@ -448,6 +449,8 @@ struct kvm_vcpu_arch {
448 char log_buf[VMM_LOG_LEN]; 449 char log_buf[VMM_LOG_LEN];
449 union context host; 450 union context host;
450 union context guest; 451 union context guest;
452
453 char mmio_data[8];
451}; 454};
452 455
453struct kvm_vm_stat { 456struct kvm_vm_stat {
diff --git a/arch/ia64/include/asm/kvm_para.h b/arch/ia64/include/asm/kvm_para.h
index 1588aee781a2..2019cb99335e 100644
--- a/arch/ia64/include/asm/kvm_para.h
+++ b/arch/ia64/include/asm/kvm_para.h
@@ -26,6 +26,11 @@ static inline unsigned int kvm_arch_para_features(void)
26 return 0; 26 return 0;
27} 27}
28 28
29static inline bool kvm_check_and_clear_guest_paused(void)
30{
31 return false;
32}
33
29#endif 34#endif
30 35
31#endif 36#endif
diff --git a/arch/ia64/kvm/kvm-ia64.c b/arch/ia64/kvm/kvm-ia64.c
index 463fb3bbe11e..bd77cb507c1c 100644
--- a/arch/ia64/kvm/kvm-ia64.c
+++ b/arch/ia64/kvm/kvm-ia64.c
@@ -232,12 +232,12 @@ static int handle_mmio(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
232 if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS) 232 if ((p->addr & PAGE_MASK) == IOAPIC_DEFAULT_BASE_ADDRESS)
233 goto mmio; 233 goto mmio;
234 vcpu->mmio_needed = 1; 234 vcpu->mmio_needed = 1;
235 vcpu->mmio_phys_addr = kvm_run->mmio.phys_addr = p->addr; 235 vcpu->mmio_fragments[0].gpa = kvm_run->mmio.phys_addr = p->addr;
236 vcpu->mmio_size = kvm_run->mmio.len = p->size; 236 vcpu->mmio_fragments[0].len = kvm_run->mmio.len = p->size;
237 vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir; 237 vcpu->mmio_is_write = kvm_run->mmio.is_write = !p->dir;
238 238
239 if (vcpu->mmio_is_write) 239 if (vcpu->mmio_is_write)
240 memcpy(vcpu->mmio_data, &p->data, p->size); 240 memcpy(vcpu->arch.mmio_data, &p->data, p->size);
241 memcpy(kvm_run->mmio.data, &p->data, p->size); 241 memcpy(kvm_run->mmio.data, &p->data, p->size);
242 kvm_run->exit_reason = KVM_EXIT_MMIO; 242 kvm_run->exit_reason = KVM_EXIT_MMIO;
243 return 0; 243 return 0;
@@ -719,7 +719,7 @@ static void kvm_set_mmio_data(struct kvm_vcpu *vcpu)
719 struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu); 719 struct kvm_mmio_req *p = kvm_get_vcpu_ioreq(vcpu);
720 720
721 if (!vcpu->mmio_is_write) 721 if (!vcpu->mmio_is_write)
722 memcpy(&p->data, vcpu->mmio_data, 8); 722 memcpy(&p->data, vcpu->arch.mmio_data, 8);
723 p->state = STATE_IORESP_READY; 723 p->state = STATE_IORESP_READY;
724} 724}
725 725
@@ -739,7 +739,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
739 } 739 }
740 740
741 if (vcpu->mmio_needed) { 741 if (vcpu->mmio_needed) {
742 memcpy(vcpu->mmio_data, kvm_run->mmio.data, 8); 742 memcpy(vcpu->arch.mmio_data, kvm_run->mmio.data, 8);
743 kvm_set_mmio_data(vcpu); 743 kvm_set_mmio_data(vcpu);
744 vcpu->mmio_read_completed = 1; 744 vcpu->mmio_read_completed = 1;
745 vcpu->mmio_needed = 0; 745 vcpu->mmio_needed = 0;
@@ -1872,21 +1872,6 @@ void kvm_arch_hardware_unsetup(void)
1872{ 1872{
1873} 1873}
1874 1874
1875void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
1876{
1877 int me;
1878 int cpu = vcpu->cpu;
1879
1880 if (waitqueue_active(&vcpu->wq))
1881 wake_up_interruptible(&vcpu->wq);
1882
1883 me = get_cpu();
1884 if (cpu != me && (unsigned) cpu < nr_cpu_ids && cpu_online(cpu))
1885 if (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests))
1886 smp_send_reschedule(cpu);
1887 put_cpu();
1888}
1889
1890int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq) 1875int kvm_apic_set_irq(struct kvm_vcpu *vcpu, struct kvm_lapic_irq *irq)
1891{ 1876{
1892 return __apic_accept_irq(vcpu, irq->vector); 1877 return __apic_accept_irq(vcpu, irq->vector);
@@ -1956,6 +1941,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1956 (kvm_highest_pending_irq(vcpu) != -1); 1941 (kvm_highest_pending_irq(vcpu) != -1);
1957} 1942}
1958 1943
1944int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
1945{
1946 return (!test_and_set_bit(KVM_REQ_KICK, &vcpu->requests));
1947}
1948
1959int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu, 1949int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
1960 struct kvm_mp_state *mp_state) 1950 struct kvm_mp_state *mp_state)
1961{ 1951{
diff --git a/arch/m68k/include/asm/kvm_para.h b/arch/m68k/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/m68k/include/asm/kvm_para.h
@@ -0,0 +1 @@
#include <asm-generic/kvm_para.h>
diff --git a/arch/microblaze/Kconfig b/arch/microblaze/Kconfig
index 83460468998d..0bf44231aaf9 100644
--- a/arch/microblaze/Kconfig
+++ b/arch/microblaze/Kconfig
@@ -52,7 +52,7 @@ config GENERIC_CALIBRATE_DELAY
52 def_bool y 52 def_bool y
53 53
54config GENERIC_GPIO 54config GENERIC_GPIO
55 def_bool y 55 bool
56 56
57config GENERIC_CSUM 57config GENERIC_CSUM
58 def_bool y 58 def_bool y
diff --git a/arch/microblaze/include/asm/kvm_para.h b/arch/microblaze/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/microblaze/include/asm/kvm_para.h
@@ -0,0 +1 @@
#include <asm-generic/kvm_para.h>
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
index daff9e5e4a1f..03f7b8ce6b6b 100644
--- a/arch/microblaze/kernel/entry.S
+++ b/arch/microblaze/kernel/entry.S
@@ -492,10 +492,11 @@ C_ENTRY(sys_clone):
492 bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */ 492 bnei r6, 1f; /* See if child SP arg (arg 1) is 0. */
493 lwi r6, r1, PT_R1; /* If so, use paret's stack ptr */ 493 lwi r6, r1, PT_R1; /* If so, use paret's stack ptr */
4941: addik r7, r1, 0; /* Arg 2: parent context */ 4941: addik r7, r1, 0; /* Arg 2: parent context */
495 add r8, r0, r0; /* Arg 3: (unused) */ 495 lwi r9, r1, PT_R8; /* parent tid. */
496 add r9, r0, r0; /* Arg 4: (unused) */ 496 lwi r10, r1, PT_R9; /* child tid. */
497 /* do_fork will pick up TLS from regs->r10. */
497 brid do_fork /* Do real work (tail-call) */ 498 brid do_fork /* Do real work (tail-call) */
498 add r10, r0, r0; /* Arg 5: (unused) */ 499 add r8, r0, r0; /* Arg 3: (unused) */
499 500
500C_ENTRY(sys_execve): 501C_ENTRY(sys_execve):
501 brid microblaze_execve; /* Do real work (tail-call).*/ 502 brid microblaze_execve; /* Do real work (tail-call).*/
diff --git a/arch/microblaze/kernel/mcount.S b/arch/microblaze/kernel/mcount.S
index e7eaa7a8cbd3..fc1e1322ce4c 100644
--- a/arch/microblaze/kernel/mcount.S
+++ b/arch/microblaze/kernel/mcount.S
@@ -138,7 +138,7 @@ NOALIGN_ENTRY(ftrace_call)
138#endif /* CONFIG_DYNAMIC_FTRACE */ 138#endif /* CONFIG_DYNAMIC_FTRACE */
139/* static normal trace */ 139/* static normal trace */
140 lwi r6, r1, 120; /* MS: load parent addr */ 140 lwi r6, r1, 120; /* MS: load parent addr */
141 addik r5, r15, 0; /* MS: load current function addr */ 141 addik r5, r15, -4; /* MS: load current function addr */
142 /* MS: here is dependency on previous code */ 142 /* MS: here is dependency on previous code */
143 brald r15, r20; /* MS: jump to ftrace handler */ 143 brald r15, r20; /* MS: jump to ftrace handler */
144 nop; 144 nop;
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
index 883b92789cdf..1944e00f07e1 100644
--- a/arch/microblaze/kernel/process.c
+++ b/arch/microblaze/kernel/process.c
@@ -182,8 +182,12 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
182#endif 182#endif
183 ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8; 183 ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8;
184 184
185 /*
186 * r21 is the thread reg, r10 is 6th arg to clone
187 * which contains TLS area
188 */
185 if (clone_flags & CLONE_SETTLS) 189 if (clone_flags & CLONE_SETTLS)
186 ; 190 childregs->r21 = childregs->r10;
187 191
188 return 0; 192 return 0;
189} 193}
diff --git a/arch/microblaze/mm/fault.c b/arch/microblaze/mm/fault.c
index c38a265846de..eb365d6795fa 100644
--- a/arch/microblaze/mm/fault.c
+++ b/arch/microblaze/mm/fault.c
@@ -92,6 +92,8 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
92 int code = SEGV_MAPERR; 92 int code = SEGV_MAPERR;
93 int is_write = error_code & ESR_S; 93 int is_write = error_code & ESR_S;
94 int fault; 94 int fault;
95 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
96 (is_write ? FAULT_FLAG_WRITE : 0);
95 97
96 regs->ear = address; 98 regs->ear = address;
97 regs->esr = error_code; 99 regs->esr = error_code;
@@ -138,6 +140,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
138 if (kernel_mode(regs) && !search_exception_tables(regs->pc)) 140 if (kernel_mode(regs) && !search_exception_tables(regs->pc))
139 goto bad_area_nosemaphore; 141 goto bad_area_nosemaphore;
140 142
143retry:
141 down_read(&mm->mmap_sem); 144 down_read(&mm->mmap_sem);
142 } 145 }
143 146
@@ -210,7 +213,11 @@ good_area:
210 * make sure we exit gracefully rather than endlessly redo 213 * make sure we exit gracefully rather than endlessly redo
211 * the fault. 214 * the fault.
212 */ 215 */
213 fault = handle_mm_fault(mm, vma, address, is_write ? FAULT_FLAG_WRITE : 0); 216 fault = handle_mm_fault(mm, vma, address, flags);
217
218 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
219 return;
220
214 if (unlikely(fault & VM_FAULT_ERROR)) { 221 if (unlikely(fault & VM_FAULT_ERROR)) {
215 if (fault & VM_FAULT_OOM) 222 if (fault & VM_FAULT_OOM)
216 goto out_of_memory; 223 goto out_of_memory;
@@ -218,11 +225,27 @@ good_area:
218 goto do_sigbus; 225 goto do_sigbus;
219 BUG(); 226 BUG();
220 } 227 }
221 if (unlikely(fault & VM_FAULT_MAJOR)) 228
222 current->maj_flt++; 229 if (flags & FAULT_FLAG_ALLOW_RETRY) {
223 else 230 if (unlikely(fault & VM_FAULT_MAJOR))
224 current->min_flt++; 231 current->maj_flt++;
232 else
233 current->min_flt++;
234 if (fault & VM_FAULT_RETRY) {
235 flags &= ~FAULT_FLAG_ALLOW_RETRY;
236
237 /*
238 * No need to up_read(&mm->mmap_sem) as we would
239 * have already released it in __lock_page_or_retry
240 * in mm/filemap.c.
241 */
242
243 goto retry;
244 }
245 }
246
225 up_read(&mm->mmap_sem); 247 up_read(&mm->mmap_sem);
248
226 /* 249 /*
227 * keep track of tlb+htab misses that are good addrs but 250 * keep track of tlb+htab misses that are good addrs but
228 * just need pte's created via handle_mm_fault() 251 * just need pte's created via handle_mm_fault()
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 77050671eeef..09ab87ee6fef 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -233,8 +233,9 @@ config LANTIQ
233 select ARCH_REQUIRE_GPIOLIB 233 select ARCH_REQUIRE_GPIOLIB
234 select SWAP_IO_SPACE 234 select SWAP_IO_SPACE
235 select BOOT_RAW 235 select BOOT_RAW
236 select HAVE_CLK 236 select HAVE_MACH_CLKDEV
237 select MIPS_MACHINE 237 select CLKDEV_LOOKUP
238 select USE_OF
238 239
239config LASAT 240config LASAT
240 bool "LASAT Networks platforms" 241 bool "LASAT Networks platforms"
@@ -1783,10 +1784,12 @@ endchoice
1783 1784
1784config FORCE_MAX_ZONEORDER 1785config FORCE_MAX_ZONEORDER
1785 int "Maximum zone order" 1786 int "Maximum zone order"
1786 range 13 64 if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_32KB 1787 range 14 64 if HUGETLB_PAGE && PAGE_SIZE_64KB
1787 default "13" if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_32KB 1788 default "14" if HUGETLB_PAGE && PAGE_SIZE_64KB
1788 range 12 64 if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_16KB 1789 range 13 64 if HUGETLB_PAGE && PAGE_SIZE_32KB
1789 default "12" if SYS_SUPPORTS_HUGETLBFS && PAGE_SIZE_16KB 1790 default "13" if HUGETLB_PAGE && PAGE_SIZE_32KB
1791 range 12 64 if HUGETLB_PAGE && PAGE_SIZE_16KB
1792 default "12" if HUGETLB_PAGE && PAGE_SIZE_16KB
1790 range 11 64 1793 range 11 64
1791 default "11" 1794 default "11"
1792 help 1795 help
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index 76017c25a9e6..764e37a9dbb3 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -219,8 +219,8 @@ endif
219 219
220KBUILD_AFLAGS += $(cflags-y) 220KBUILD_AFLAGS += $(cflags-y)
221KBUILD_CFLAGS += $(cflags-y) 221KBUILD_CFLAGS += $(cflags-y)
222KBUILD_CPPFLAGS += -D"VMLINUX_LOAD_ADDRESS=$(load-y)" 222KBUILD_CPPFLAGS += -DVMLINUX_LOAD_ADDRESS=$(load-y)
223KBUILD_CPPFLAGS += -D"DATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)" 223KBUILD_CPPFLAGS += -DDATAOFFSET=$(if $(dataoffset-y),$(dataoffset-y),0)
224 224
225LDFLAGS += -m $(ld-emul) 225LDFLAGS += -m $(ld-emul)
226 226
diff --git a/arch/mips/alchemy/devboards/db1200.c b/arch/mips/alchemy/devboards/db1200.c
index a83302b96c01..7dde01642d6b 100644
--- a/arch/mips/alchemy/devboards/db1200.c
+++ b/arch/mips/alchemy/devboards/db1200.c
@@ -22,6 +22,7 @@
22#include <linux/gpio.h> 22#include <linux/gpio.h>
23#include <linux/i2c.h> 23#include <linux/i2c.h>
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/module.h>
25#include <linux/interrupt.h> 26#include <linux/interrupt.h>
26#include <linux/io.h> 27#include <linux/io.h>
27#include <linux/leds.h> 28#include <linux/leds.h>
diff --git a/arch/mips/ath79/Kconfig b/arch/mips/ath79/Kconfig
index e0fae8f4442b..f44feee2d67f 100644
--- a/arch/mips/ath79/Kconfig
+++ b/arch/mips/ath79/Kconfig
@@ -26,6 +26,18 @@ config ATH79_MACH_AP81
26 Say 'Y' here if you want your kernel to support the 26 Say 'Y' here if you want your kernel to support the
27 Atheros AP81 reference board. 27 Atheros AP81 reference board.
28 28
29config ATH79_MACH_DB120
30 bool "Atheros DB120 reference board"
31 select SOC_AR934X
32 select ATH79_DEV_GPIO_BUTTONS
33 select ATH79_DEV_LEDS_GPIO
34 select ATH79_DEV_SPI
35 select ATH79_DEV_USB
36 select ATH79_DEV_WMAC
37 help
38 Say 'Y' here if you want your kernel to support the
39 Atheros DB120 reference board.
40
29config ATH79_MACH_PB44 41config ATH79_MACH_PB44
30 bool "Atheros PB44 reference board" 42 bool "Atheros PB44 reference board"
31 select SOC_AR71XX 43 select SOC_AR71XX
@@ -52,12 +64,14 @@ endmenu
52config SOC_AR71XX 64config SOC_AR71XX
53 select USB_ARCH_HAS_EHCI 65 select USB_ARCH_HAS_EHCI
54 select USB_ARCH_HAS_OHCI 66 select USB_ARCH_HAS_OHCI
67 select HW_HAS_PCI
55 def_bool n 68 def_bool n
56 69
57config SOC_AR724X 70config SOC_AR724X
58 select USB_ARCH_HAS_EHCI 71 select USB_ARCH_HAS_EHCI
59 select USB_ARCH_HAS_OHCI 72 select USB_ARCH_HAS_OHCI
60 select HW_HAS_PCI 73 select HW_HAS_PCI
74 select PCI_AR724X if PCI
61 def_bool n 75 def_bool n
62 76
63config SOC_AR913X 77config SOC_AR913X
@@ -68,6 +82,15 @@ config SOC_AR933X
68 select USB_ARCH_HAS_EHCI 82 select USB_ARCH_HAS_EHCI
69 def_bool n 83 def_bool n
70 84
85config SOC_AR934X
86 select USB_ARCH_HAS_EHCI
87 select HW_HAS_PCI
88 select PCI_AR724X if PCI
89 def_bool n
90
91config PCI_AR724X
92 def_bool n
93
71config ATH79_DEV_GPIO_BUTTONS 94config ATH79_DEV_GPIO_BUTTONS
72 def_bool n 95 def_bool n
73 96
@@ -81,7 +104,7 @@ config ATH79_DEV_USB
81 def_bool n 104 def_bool n
82 105
83config ATH79_DEV_WMAC 106config ATH79_DEV_WMAC
84 depends on (SOC_AR913X || SOC_AR933X) 107 depends on (SOC_AR913X || SOC_AR933X || SOC_AR934X)
85 def_bool n 108 def_bool n
86 109
87endif 110endif
diff --git a/arch/mips/ath79/Makefile b/arch/mips/ath79/Makefile
index 3b911e09dbec..2b54d98263f3 100644
--- a/arch/mips/ath79/Makefile
+++ b/arch/mips/ath79/Makefile
@@ -11,6 +11,7 @@
11obj-y := prom.o setup.o irq.o common.o clock.o gpio.o 11obj-y := prom.o setup.o irq.o common.o clock.o gpio.o
12 12
13obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 13obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
14obj-$(CONFIG_PCI) += pci.o
14 15
15# 16#
16# Devices 17# Devices
@@ -27,5 +28,6 @@ obj-$(CONFIG_ATH79_DEV_WMAC) += dev-wmac.o
27# 28#
28obj-$(CONFIG_ATH79_MACH_AP121) += mach-ap121.o 29obj-$(CONFIG_ATH79_MACH_AP121) += mach-ap121.o
29obj-$(CONFIG_ATH79_MACH_AP81) += mach-ap81.o 30obj-$(CONFIG_ATH79_MACH_AP81) += mach-ap81.o
31obj-$(CONFIG_ATH79_MACH_DB120) += mach-db120.o
30obj-$(CONFIG_ATH79_MACH_PB44) += mach-pb44.o 32obj-$(CONFIG_ATH79_MACH_PB44) += mach-pb44.o
31obj-$(CONFIG_ATH79_MACH_UBNT_XM) += mach-ubnt-xm.o 33obj-$(CONFIG_ATH79_MACH_UBNT_XM) += mach-ubnt-xm.o
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index 54d0eb4db987..b91ad3efe29e 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -1,8 +1,11 @@
1/* 1/*
2 * Atheros AR71XX/AR724X/AR913X common routines 2 * Atheros AR71XX/AR724X/AR913X common routines
3 * 3 *
4 * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
4 * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org> 5 * Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
5 * 6 *
7 * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
8 *
6 * This program is free software; you can redistribute it and/or modify it 9 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published 10 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation. 11 * by the Free Software Foundation.
@@ -163,6 +166,82 @@ static void __init ar933x_clocks_init(void)
163 ath79_uart_clk.rate = ath79_ref_clk.rate; 166 ath79_uart_clk.rate = ath79_ref_clk.rate;
164} 167}
165 168
169static void __init ar934x_clocks_init(void)
170{
171 u32 pll, out_div, ref_div, nint, frac, clk_ctrl, postdiv;
172 u32 cpu_pll, ddr_pll;
173 u32 bootstrap;
174
175 bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
176 if (bootstrap & AR934X_BOOTSTRAP_REF_CLK_40)
177 ath79_ref_clk.rate = 40 * 1000 * 1000;
178 else
179 ath79_ref_clk.rate = 25 * 1000 * 1000;
180
181 pll = ath79_pll_rr(AR934X_PLL_CPU_CONFIG_REG);
182 out_div = (pll >> AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
183 AR934X_PLL_CPU_CONFIG_OUTDIV_MASK;
184 ref_div = (pll >> AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
185 AR934X_PLL_CPU_CONFIG_REFDIV_MASK;
186 nint = (pll >> AR934X_PLL_CPU_CONFIG_NINT_SHIFT) &
187 AR934X_PLL_CPU_CONFIG_NINT_MASK;
188 frac = (pll >> AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT) &
189 AR934X_PLL_CPU_CONFIG_NFRAC_MASK;
190
191 cpu_pll = nint * ath79_ref_clk.rate / ref_div;
192 cpu_pll += frac * ath79_ref_clk.rate / (ref_div * (2 << 6));
193 cpu_pll /= (1 << out_div);
194
195 pll = ath79_pll_rr(AR934X_PLL_DDR_CONFIG_REG);
196 out_div = (pll >> AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT) &
197 AR934X_PLL_DDR_CONFIG_OUTDIV_MASK;
198 ref_div = (pll >> AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT) &
199 AR934X_PLL_DDR_CONFIG_REFDIV_MASK;
200 nint = (pll >> AR934X_PLL_DDR_CONFIG_NINT_SHIFT) &
201 AR934X_PLL_DDR_CONFIG_NINT_MASK;
202 frac = (pll >> AR934X_PLL_DDR_CONFIG_NFRAC_SHIFT) &
203 AR934X_PLL_DDR_CONFIG_NFRAC_MASK;
204
205 ddr_pll = nint * ath79_ref_clk.rate / ref_div;
206 ddr_pll += frac * ath79_ref_clk.rate / (ref_div * (2 << 10));
207 ddr_pll /= (1 << out_div);
208
209 clk_ctrl = ath79_pll_rr(AR934X_PLL_CPU_DDR_CLK_CTRL_REG);
210
211 postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_SHIFT) &
212 AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_MASK;
213
214 if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_PLL_BYPASS)
215 ath79_cpu_clk.rate = ath79_ref_clk.rate;
216 else if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_CPUCLK_FROM_CPUPLL)
217 ath79_cpu_clk.rate = cpu_pll / (postdiv + 1);
218 else
219 ath79_cpu_clk.rate = ddr_pll / (postdiv + 1);
220
221 postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_SHIFT) &
222 AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_MASK;
223
224 if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_PLL_BYPASS)
225 ath79_ddr_clk.rate = ath79_ref_clk.rate;
226 else if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL)
227 ath79_ddr_clk.rate = ddr_pll / (postdiv + 1);
228 else
229 ath79_ddr_clk.rate = cpu_pll / (postdiv + 1);
230
231 postdiv = (clk_ctrl >> AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_SHIFT) &
232 AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_MASK;
233
234 if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_PLL_BYPASS)
235 ath79_ahb_clk.rate = ath79_ref_clk.rate;
236 else if (clk_ctrl & AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL)
237 ath79_ahb_clk.rate = ddr_pll / (postdiv + 1);
238 else
239 ath79_ahb_clk.rate = cpu_pll / (postdiv + 1);
240
241 ath79_wdt_clk.rate = ath79_ref_clk.rate;
242 ath79_uart_clk.rate = ath79_ref_clk.rate;
243}
244
166void __init ath79_clocks_init(void) 245void __init ath79_clocks_init(void)
167{ 246{
168 if (soc_is_ar71xx()) 247 if (soc_is_ar71xx())
@@ -173,6 +252,8 @@ void __init ath79_clocks_init(void)
173 ar913x_clocks_init(); 252 ar913x_clocks_init();
174 else if (soc_is_ar933x()) 253 else if (soc_is_ar933x())
175 ar933x_clocks_init(); 254 ar933x_clocks_init();
255 else if (soc_is_ar934x())
256 ar934x_clocks_init();
176 else 257 else
177 BUG(); 258 BUG();
178 259
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
index f0fda982b965..5a4adfc9d79d 100644
--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -1,9 +1,12 @@
1/* 1/*
2 * Atheros AR71XX/AR724X/AR913X common routines 2 * Atheros AR71XX/AR724X/AR913X common routines
3 * 3 *
4 * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> 4 * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
5 * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
5 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> 6 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
6 * 7 *
8 * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
9 *
7 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published 11 * under the terms of the GNU General Public License version 2 as published
9 * by the Free Software Foundation. 12 * by the Free Software Foundation.
@@ -67,6 +70,8 @@ void ath79_device_reset_set(u32 mask)
67 reg = AR913X_RESET_REG_RESET_MODULE; 70 reg = AR913X_RESET_REG_RESET_MODULE;
68 else if (soc_is_ar933x()) 71 else if (soc_is_ar933x())
69 reg = AR933X_RESET_REG_RESET_MODULE; 72 reg = AR933X_RESET_REG_RESET_MODULE;
73 else if (soc_is_ar934x())
74 reg = AR934X_RESET_REG_RESET_MODULE;
70 else 75 else
71 BUG(); 76 BUG();
72 77
@@ -91,6 +96,8 @@ void ath79_device_reset_clear(u32 mask)
91 reg = AR913X_RESET_REG_RESET_MODULE; 96 reg = AR913X_RESET_REG_RESET_MODULE;
92 else if (soc_is_ar933x()) 97 else if (soc_is_ar933x())
93 reg = AR933X_RESET_REG_RESET_MODULE; 98 reg = AR933X_RESET_REG_RESET_MODULE;
99 else if (soc_is_ar934x())
100 reg = AR934X_RESET_REG_RESET_MODULE;
94 else 101 else
95 BUG(); 102 BUG();
96 103
diff --git a/arch/mips/ath79/dev-common.c b/arch/mips/ath79/dev-common.c
index f4956f809072..45efc63b08b6 100644
--- a/arch/mips/ath79/dev-common.c
+++ b/arch/mips/ath79/dev-common.c
@@ -89,7 +89,8 @@ void __init ath79_register_uart(void)
89 89
90 if (soc_is_ar71xx() || 90 if (soc_is_ar71xx() ||
91 soc_is_ar724x() || 91 soc_is_ar724x() ||
92 soc_is_ar913x()) { 92 soc_is_ar913x() ||
93 soc_is_ar934x()) {
93 ath79_uart_data[0].uartclk = clk_get_rate(clk); 94 ath79_uart_data[0].uartclk = clk_get_rate(clk);
94 platform_device_register(&ath79_uart_device); 95 platform_device_register(&ath79_uart_device);
95 } else if (soc_is_ar933x()) { 96 } else if (soc_is_ar933x()) {
diff --git a/arch/mips/ath79/dev-gpio-buttons.c b/arch/mips/ath79/dev-gpio-buttons.c
index 4b0168a11c01..366b35fb164d 100644
--- a/arch/mips/ath79/dev-gpio-buttons.c
+++ b/arch/mips/ath79/dev-gpio-buttons.c
@@ -25,12 +25,10 @@ void __init ath79_register_gpio_keys_polled(int id,
25 struct gpio_keys_button *p; 25 struct gpio_keys_button *p;
26 int err; 26 int err;
27 27
28 p = kmalloc(nbuttons * sizeof(*p), GFP_KERNEL); 28 p = kmemdup(buttons, nbuttons * sizeof(*p), GFP_KERNEL);
29 if (!p) 29 if (!p)
30 return; 30 return;
31 31
32 memcpy(p, buttons, nbuttons * sizeof(*p));
33
34 pdev = platform_device_alloc("gpio-keys-polled", id); 32 pdev = platform_device_alloc("gpio-keys-polled", id);
35 if (!pdev) 33 if (!pdev)
36 goto err_free_buttons; 34 goto err_free_buttons;
diff --git a/arch/mips/ath79/dev-leds-gpio.c b/arch/mips/ath79/dev-leds-gpio.c
index cdade68dcd17..dcb1debcefb8 100644
--- a/arch/mips/ath79/dev-leds-gpio.c
+++ b/arch/mips/ath79/dev-leds-gpio.c
@@ -24,12 +24,10 @@ void __init ath79_register_leds_gpio(int id,
24 struct gpio_led *p; 24 struct gpio_led *p;
25 int err; 25 int err;
26 26
27 p = kmalloc(num_leds * sizeof(*p), GFP_KERNEL); 27 p = kmemdup(leds, num_leds * sizeof(*p), GFP_KERNEL);
28 if (!p) 28 if (!p)
29 return; 29 return;
30 30
31 memcpy(p, leds, num_leds * sizeof(*p));
32
33 pdev = platform_device_alloc("leds-gpio", id); 31 pdev = platform_device_alloc("leds-gpio", id);
34 if (!pdev) 32 if (!pdev)
35 goto err_free_leds; 33 goto err_free_leds;
diff --git a/arch/mips/ath79/dev-wmac.c b/arch/mips/ath79/dev-wmac.c
index 9c717bf98ffe..d6d893c16ad4 100644
--- a/arch/mips/ath79/dev-wmac.c
+++ b/arch/mips/ath79/dev-wmac.c
@@ -1,9 +1,12 @@
1/* 1/*
2 * Atheros AR913X/AR933X SoC built-in WMAC device support 2 * Atheros AR913X/AR933X SoC built-in WMAC device support
3 * 3 *
4 * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
4 * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> 5 * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
5 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> 6 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
6 * 7 *
8 * Parts of this file are based on Atheros 2.6.15/2.6.31 BSP
9 *
7 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published 11 * under the terms of the GNU General Public License version 2 as published
9 * by the Free Software Foundation. 12 * by the Free Software Foundation.
@@ -26,8 +29,7 @@ static struct resource ath79_wmac_resources[] = {
26 /* .start and .end fields are filled dynamically */ 29 /* .start and .end fields are filled dynamically */
27 .flags = IORESOURCE_MEM, 30 .flags = IORESOURCE_MEM,
28 }, { 31 }, {
29 .start = ATH79_CPU_IRQ_IP2, 32 /* .start and .end fields are filled dynamically */
30 .end = ATH79_CPU_IRQ_IP2,
31 .flags = IORESOURCE_IRQ, 33 .flags = IORESOURCE_IRQ,
32 }, 34 },
33}; 35};
@@ -53,6 +55,8 @@ static void __init ar913x_wmac_setup(void)
53 55
54 ath79_wmac_resources[0].start = AR913X_WMAC_BASE; 56 ath79_wmac_resources[0].start = AR913X_WMAC_BASE;
55 ath79_wmac_resources[0].end = AR913X_WMAC_BASE + AR913X_WMAC_SIZE - 1; 57 ath79_wmac_resources[0].end = AR913X_WMAC_BASE + AR913X_WMAC_SIZE - 1;
58 ath79_wmac_resources[1].start = ATH79_CPU_IRQ_IP2;
59 ath79_wmac_resources[1].end = ATH79_CPU_IRQ_IP2;
56} 60}
57 61
58 62
@@ -79,6 +83,8 @@ static void __init ar933x_wmac_setup(void)
79 83
80 ath79_wmac_resources[0].start = AR933X_WMAC_BASE; 84 ath79_wmac_resources[0].start = AR933X_WMAC_BASE;
81 ath79_wmac_resources[0].end = AR933X_WMAC_BASE + AR933X_WMAC_SIZE - 1; 85 ath79_wmac_resources[0].end = AR933X_WMAC_BASE + AR933X_WMAC_SIZE - 1;
86 ath79_wmac_resources[1].start = ATH79_CPU_IRQ_IP2;
87 ath79_wmac_resources[1].end = ATH79_CPU_IRQ_IP2;
82 88
83 t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP); 89 t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP);
84 if (t & AR933X_BOOTSTRAP_REF_CLK_40) 90 if (t & AR933X_BOOTSTRAP_REF_CLK_40)
@@ -92,12 +98,32 @@ static void __init ar933x_wmac_setup(void)
92 ath79_wmac_data.external_reset = ar933x_wmac_reset; 98 ath79_wmac_data.external_reset = ar933x_wmac_reset;
93} 99}
94 100
101static void ar934x_wmac_setup(void)
102{
103 u32 t;
104
105 ath79_wmac_device.name = "ar934x_wmac";
106
107 ath79_wmac_resources[0].start = AR934X_WMAC_BASE;
108 ath79_wmac_resources[0].end = AR934X_WMAC_BASE + AR934X_WMAC_SIZE - 1;
109 ath79_wmac_resources[1].start = ATH79_IP2_IRQ(1);
 110	ath79_wmac_resources[1].end = ATH79_IP2_IRQ(1);
111
112 t = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
113 if (t & AR934X_BOOTSTRAP_REF_CLK_40)
114 ath79_wmac_data.is_clk_25mhz = false;
115 else
116 ath79_wmac_data.is_clk_25mhz = true;
117}
118
95void __init ath79_register_wmac(u8 *cal_data) 119void __init ath79_register_wmac(u8 *cal_data)
96{ 120{
97 if (soc_is_ar913x()) 121 if (soc_is_ar913x())
98 ar913x_wmac_setup(); 122 ar913x_wmac_setup();
99 else if (soc_is_ar933x()) 123 else if (soc_is_ar933x())
100 ar933x_wmac_setup(); 124 ar933x_wmac_setup();
125 else if (soc_is_ar934x())
126 ar934x_wmac_setup();
101 else 127 else
102 BUG(); 128 BUG();
103 129
diff --git a/arch/mips/ath79/early_printk.c b/arch/mips/ath79/early_printk.c
index 6a51ced7a293..dc938cb2ba58 100644
--- a/arch/mips/ath79/early_printk.c
+++ b/arch/mips/ath79/early_printk.c
@@ -71,6 +71,9 @@ static void prom_putchar_init(void)
71 case REV_ID_MAJOR_AR7241: 71 case REV_ID_MAJOR_AR7241:
72 case REV_ID_MAJOR_AR7242: 72 case REV_ID_MAJOR_AR7242:
73 case REV_ID_MAJOR_AR913X: 73 case REV_ID_MAJOR_AR913X:
74 case REV_ID_MAJOR_AR9341:
75 case REV_ID_MAJOR_AR9342:
76 case REV_ID_MAJOR_AR9344:
74 _prom_putchar = prom_putchar_ar71xx; 77 _prom_putchar = prom_putchar_ar71xx;
75 break; 78 break;
76 79
diff --git a/arch/mips/ath79/gpio.c b/arch/mips/ath79/gpio.c
index a2f8ca630ed6..29054f211832 100644
--- a/arch/mips/ath79/gpio.c
+++ b/arch/mips/ath79/gpio.c
@@ -1,9 +1,12 @@
1/* 1/*
2 * Atheros AR71XX/AR724X/AR913X GPIO API support 2 * Atheros AR71XX/AR724X/AR913X GPIO API support
3 * 3 *
4 * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> 4 * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
5 * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
5 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> 6 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
6 * 7 *
8 * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
9 *
7 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published 11 * under the terms of the GNU General Public License version 2 as published
9 * by the Free Software Foundation. 12 * by the Free Software Foundation.
@@ -89,6 +92,42 @@ static int ath79_gpio_direction_output(struct gpio_chip *chip,
89 return 0; 92 return 0;
90} 93}
91 94
95static int ar934x_gpio_direction_input(struct gpio_chip *chip, unsigned offset)
96{
97 void __iomem *base = ath79_gpio_base;
98 unsigned long flags;
99
100 spin_lock_irqsave(&ath79_gpio_lock, flags);
101
102 __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) | (1 << offset),
103 base + AR71XX_GPIO_REG_OE);
104
105 spin_unlock_irqrestore(&ath79_gpio_lock, flags);
106
107 return 0;
108}
109
110static int ar934x_gpio_direction_output(struct gpio_chip *chip, unsigned offset,
111 int value)
112{
113 void __iomem *base = ath79_gpio_base;
114 unsigned long flags;
115
116 spin_lock_irqsave(&ath79_gpio_lock, flags);
117
118 if (value)
119 __raw_writel(1 << offset, base + AR71XX_GPIO_REG_SET);
120 else
121 __raw_writel(1 << offset, base + AR71XX_GPIO_REG_CLEAR);
122
123 __raw_writel(__raw_readl(base + AR71XX_GPIO_REG_OE) & ~(1 << offset),
124 base + AR71XX_GPIO_REG_OE);
125
126 spin_unlock_irqrestore(&ath79_gpio_lock, flags);
127
128 return 0;
129}
130
92static struct gpio_chip ath79_gpio_chip = { 131static struct gpio_chip ath79_gpio_chip = {
93 .label = "ath79", 132 .label = "ath79",
94 .get = ath79_gpio_get_value, 133 .get = ath79_gpio_get_value,
@@ -155,11 +194,17 @@ void __init ath79_gpio_init(void)
155 ath79_gpio_count = AR913X_GPIO_COUNT; 194 ath79_gpio_count = AR913X_GPIO_COUNT;
156 else if (soc_is_ar933x()) 195 else if (soc_is_ar933x())
157 ath79_gpio_count = AR933X_GPIO_COUNT; 196 ath79_gpio_count = AR933X_GPIO_COUNT;
197 else if (soc_is_ar934x())
198 ath79_gpio_count = AR934X_GPIO_COUNT;
158 else 199 else
159 BUG(); 200 BUG();
160 201
161 ath79_gpio_base = ioremap_nocache(AR71XX_GPIO_BASE, AR71XX_GPIO_SIZE); 202 ath79_gpio_base = ioremap_nocache(AR71XX_GPIO_BASE, AR71XX_GPIO_SIZE);
162 ath79_gpio_chip.ngpio = ath79_gpio_count; 203 ath79_gpio_chip.ngpio = ath79_gpio_count;
204 if (soc_is_ar934x()) {
205 ath79_gpio_chip.direction_input = ar934x_gpio_direction_input;
206 ath79_gpio_chip.direction_output = ar934x_gpio_direction_output;
207 }
163 208
164 err = gpiochip_add(&ath79_gpio_chip); 209 err = gpiochip_add(&ath79_gpio_chip);
165 if (err) 210 if (err)
diff --git a/arch/mips/ath79/irq.c b/arch/mips/ath79/irq.c
index 1b073de44680..90d09fc15398 100644
--- a/arch/mips/ath79/irq.c
+++ b/arch/mips/ath79/irq.c
@@ -1,10 +1,11 @@
1/* 1/*
2 * Atheros AR71xx/AR724x/AR913x specific interrupt handling 2 * Atheros AR71xx/AR724x/AR913x specific interrupt handling
3 * 3 *
4 * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> 4 * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
5 * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
5 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> 6 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
6 * 7 *
7 * Parts of this file are based on Atheros' 2.6.15 BSP 8 * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
8 * 9 *
9 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as published 11 * under the terms of the GNU General Public License version 2 as published
@@ -23,8 +24,8 @@
23#include <asm/mach-ath79/ar71xx_regs.h> 24#include <asm/mach-ath79/ar71xx_regs.h>
24#include "common.h" 25#include "common.h"
25 26
26static unsigned int ath79_ip2_flush_reg; 27static void (*ath79_ip2_handler)(void);
27static unsigned int ath79_ip3_flush_reg; 28static void (*ath79_ip3_handler)(void);
28 29
29static void ath79_misc_irq_handler(unsigned int irq, struct irq_desc *desc) 30static void ath79_misc_irq_handler(unsigned int irq, struct irq_desc *desc)
30{ 31{
@@ -129,7 +130,7 @@ static void __init ath79_misc_irq_init(void)
129 130
130 if (soc_is_ar71xx() || soc_is_ar913x()) 131 if (soc_is_ar71xx() || soc_is_ar913x())
131 ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask; 132 ath79_misc_irq_chip.irq_mask_ack = ar71xx_misc_irq_mask;
132 else if (soc_is_ar724x() || soc_is_ar933x()) 133 else if (soc_is_ar724x() || soc_is_ar933x() || soc_is_ar934x())
133 ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack; 134 ath79_misc_irq_chip.irq_ack = ar724x_misc_irq_ack;
134 else 135 else
135 BUG(); 136 BUG();
@@ -143,6 +144,39 @@ static void __init ath79_misc_irq_init(void)
143 irq_set_chained_handler(ATH79_CPU_IRQ_MISC, ath79_misc_irq_handler); 144 irq_set_chained_handler(ATH79_CPU_IRQ_MISC, ath79_misc_irq_handler);
144} 145}
145 146
147static void ar934x_ip2_irq_dispatch(unsigned int irq, struct irq_desc *desc)
148{
149 u32 status;
150
151 disable_irq_nosync(irq);
152
153 status = ath79_reset_rr(AR934X_RESET_REG_PCIE_WMAC_INT_STATUS);
154
155 if (status & AR934X_PCIE_WMAC_INT_PCIE_ALL) {
156 ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_PCIE);
157 generic_handle_irq(ATH79_IP2_IRQ(0));
158 } else if (status & AR934X_PCIE_WMAC_INT_WMAC_ALL) {
159 ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_WMAC);
160 generic_handle_irq(ATH79_IP2_IRQ(1));
161 } else {
162 spurious_interrupt();
163 }
164
165 enable_irq(irq);
166}
167
168static void ar934x_ip2_irq_init(void)
169{
170 int i;
171
172 for (i = ATH79_IP2_IRQ_BASE;
173 i < ATH79_IP2_IRQ_BASE + ATH79_IP2_IRQ_COUNT; i++)
174 irq_set_chip_and_handler(i, &dummy_irq_chip,
175 handle_level_irq);
176
177 irq_set_chained_handler(ATH79_CPU_IRQ_IP2, ar934x_ip2_irq_dispatch);
178}
179
146asmlinkage void plat_irq_dispatch(void) 180asmlinkage void plat_irq_dispatch(void)
147{ 181{
148 unsigned long pending; 182 unsigned long pending;
@@ -152,10 +186,8 @@ asmlinkage void plat_irq_dispatch(void)
152 if (pending & STATUSF_IP7) 186 if (pending & STATUSF_IP7)
153 do_IRQ(ATH79_CPU_IRQ_TIMER); 187 do_IRQ(ATH79_CPU_IRQ_TIMER);
154 188
155 else if (pending & STATUSF_IP2) { 189 else if (pending & STATUSF_IP2)
156 ath79_ddr_wb_flush(ath79_ip2_flush_reg); 190 ath79_ip2_handler();
157 do_IRQ(ATH79_CPU_IRQ_IP2);
158 }
159 191
160 else if (pending & STATUSF_IP4) 192 else if (pending & STATUSF_IP4)
161 do_IRQ(ATH79_CPU_IRQ_GE0); 193 do_IRQ(ATH79_CPU_IRQ_GE0);
@@ -163,10 +195,8 @@ asmlinkage void plat_irq_dispatch(void)
163 else if (pending & STATUSF_IP5) 195 else if (pending & STATUSF_IP5)
164 do_IRQ(ATH79_CPU_IRQ_GE1); 196 do_IRQ(ATH79_CPU_IRQ_GE1);
165 197
166 else if (pending & STATUSF_IP3) { 198 else if (pending & STATUSF_IP3)
167 ath79_ddr_wb_flush(ath79_ip3_flush_reg); 199 ath79_ip3_handler();
168 do_IRQ(ATH79_CPU_IRQ_USB);
169 }
170 200
171 else if (pending & STATUSF_IP6) 201 else if (pending & STATUSF_IP6)
172 do_IRQ(ATH79_CPU_IRQ_MISC); 202 do_IRQ(ATH79_CPU_IRQ_MISC);
@@ -175,24 +205,97 @@ asmlinkage void plat_irq_dispatch(void)
175 spurious_interrupt(); 205 spurious_interrupt();
176} 206}
177 207
208/*
209 * The IP2/IP3 lines are tied to a PCI/WMAC/USB device. Drivers for
210 * these devices typically allocate coherent DMA memory, however the
211 * DMA controller may still have some unsynchronized data in the FIFO.
212 * Issue a flush in the handlers to ensure that the driver sees
213 * the update.
214 */
215static void ar71xx_ip2_handler(void)
216{
217 ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_PCI);
218 do_IRQ(ATH79_CPU_IRQ_IP2);
219}
220
221static void ar724x_ip2_handler(void)
222{
223 ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_PCIE);
224 do_IRQ(ATH79_CPU_IRQ_IP2);
225}
226
227static void ar913x_ip2_handler(void)
228{
229 ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_WMAC);
230 do_IRQ(ATH79_CPU_IRQ_IP2);
231}
232
233static void ar933x_ip2_handler(void)
234{
235 ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_WMAC);
236 do_IRQ(ATH79_CPU_IRQ_IP2);
237}
238
239static void ar934x_ip2_handler(void)
240{
241 do_IRQ(ATH79_CPU_IRQ_IP2);
242}
243
244static void ar71xx_ip3_handler(void)
245{
246 ath79_ddr_wb_flush(AR71XX_DDR_REG_FLUSH_USB);
247 do_IRQ(ATH79_CPU_IRQ_USB);
248}
249
250static void ar724x_ip3_handler(void)
251{
252 ath79_ddr_wb_flush(AR724X_DDR_REG_FLUSH_USB);
253 do_IRQ(ATH79_CPU_IRQ_USB);
254}
255
256static void ar913x_ip3_handler(void)
257{
258 ath79_ddr_wb_flush(AR913X_DDR_REG_FLUSH_USB);
259 do_IRQ(ATH79_CPU_IRQ_USB);
260}
261
262static void ar933x_ip3_handler(void)
263{
264 ath79_ddr_wb_flush(AR933X_DDR_REG_FLUSH_USB);
265 do_IRQ(ATH79_CPU_IRQ_USB);
266}
267
268static void ar934x_ip3_handler(void)
269{
270 ath79_ddr_wb_flush(AR934X_DDR_REG_FLUSH_USB);
271 do_IRQ(ATH79_CPU_IRQ_USB);
272}
273
178void __init arch_init_irq(void) 274void __init arch_init_irq(void)
179{ 275{
180 if (soc_is_ar71xx()) { 276 if (soc_is_ar71xx()) {
181 ath79_ip2_flush_reg = AR71XX_DDR_REG_FLUSH_PCI; 277 ath79_ip2_handler = ar71xx_ip2_handler;
182 ath79_ip3_flush_reg = AR71XX_DDR_REG_FLUSH_USB; 278 ath79_ip3_handler = ar71xx_ip3_handler;
183 } else if (soc_is_ar724x()) { 279 } else if (soc_is_ar724x()) {
184 ath79_ip2_flush_reg = AR724X_DDR_REG_FLUSH_PCIE; 280 ath79_ip2_handler = ar724x_ip2_handler;
185 ath79_ip3_flush_reg = AR724X_DDR_REG_FLUSH_USB; 281 ath79_ip3_handler = ar724x_ip3_handler;
186 } else if (soc_is_ar913x()) { 282 } else if (soc_is_ar913x()) {
187 ath79_ip2_flush_reg = AR913X_DDR_REG_FLUSH_WMAC; 283 ath79_ip2_handler = ar913x_ip2_handler;
188 ath79_ip3_flush_reg = AR913X_DDR_REG_FLUSH_USB; 284 ath79_ip3_handler = ar913x_ip3_handler;
189 } else if (soc_is_ar933x()) { 285 } else if (soc_is_ar933x()) {
190 ath79_ip2_flush_reg = AR933X_DDR_REG_FLUSH_WMAC; 286 ath79_ip2_handler = ar933x_ip2_handler;
191 ath79_ip3_flush_reg = AR933X_DDR_REG_FLUSH_USB; 287 ath79_ip3_handler = ar933x_ip3_handler;
192 } else 288 } else if (soc_is_ar934x()) {
289 ath79_ip2_handler = ar934x_ip2_handler;
290 ath79_ip3_handler = ar934x_ip3_handler;
291 } else {
193 BUG(); 292 BUG();
293 }
194 294
195 cp0_perfcount_irq = ATH79_MISC_IRQ_PERFC; 295 cp0_perfcount_irq = ATH79_MISC_IRQ_PERFC;
196 mips_cpu_irq_init(); 296 mips_cpu_irq_init();
197 ath79_misc_irq_init(); 297 ath79_misc_irq_init();
298
299 if (soc_is_ar934x())
300 ar934x_ip2_irq_init();
198} 301}
diff --git a/arch/mips/ath79/mach-db120.c b/arch/mips/ath79/mach-db120.c
new file mode 100644
index 000000000000..1983e4d2af4b
--- /dev/null
+++ b/arch/mips/ath79/mach-db120.c
@@ -0,0 +1,134 @@
1/*
2 * Atheros DB120 reference board support
3 *
4 * Copyright (c) 2011 Qualcomm Atheros
5 * Copyright (c) 2011 Gabor Juhos <juhosg@openwrt.org>
6 *
7 * Permission to use, copy, modify, and/or distribute this software for any
8 * purpose with or without fee is hereby granted, provided that the above
9 * copyright notice and this permission notice appear in all copies.
10 *
11 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
12 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
13 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
14 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
15 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
16 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
17 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
18 *
19 */
20
21#include <linux/pci.h>
22#include <linux/ath9k_platform.h>
23
24#include "machtypes.h"
25#include "dev-gpio-buttons.h"
26#include "dev-leds-gpio.h"
27#include "dev-spi.h"
28#include "dev-wmac.h"
29#include "pci.h"
30
31#define DB120_GPIO_LED_WLAN_5G 12
32#define DB120_GPIO_LED_WLAN_2G 13
33#define DB120_GPIO_LED_STATUS 14
34#define DB120_GPIO_LED_WPS 15
35
36#define DB120_GPIO_BTN_WPS 16
37
38#define DB120_KEYS_POLL_INTERVAL 20 /* msecs */
39#define DB120_KEYS_DEBOUNCE_INTERVAL (3 * DB120_KEYS_POLL_INTERVAL)
40
41#define DB120_WMAC_CALDATA_OFFSET 0x1000
42#define DB120_PCIE_CALDATA_OFFSET 0x5000
43
44static struct gpio_led db120_leds_gpio[] __initdata = {
45 {
46 .name = "db120:green:status",
47 .gpio = DB120_GPIO_LED_STATUS,
48 .active_low = 1,
49 },
50 {
51 .name = "db120:green:wps",
52 .gpio = DB120_GPIO_LED_WPS,
53 .active_low = 1,
54 },
55 {
56 .name = "db120:green:wlan-5g",
57 .gpio = DB120_GPIO_LED_WLAN_5G,
58 .active_low = 1,
59 },
60 {
61 .name = "db120:green:wlan-2g",
62 .gpio = DB120_GPIO_LED_WLAN_2G,
63 .active_low = 1,
64 },
65};
66
67static struct gpio_keys_button db120_gpio_keys[] __initdata = {
68 {
69 .desc = "WPS button",
70 .type = EV_KEY,
71 .code = KEY_WPS_BUTTON,
72 .debounce_interval = DB120_KEYS_DEBOUNCE_INTERVAL,
73 .gpio = DB120_GPIO_BTN_WPS,
74 .active_low = 1,
75 },
76};
77
78static struct spi_board_info db120_spi_info[] = {
79 {
80 .bus_num = 0,
81 .chip_select = 0,
82 .max_speed_hz = 25000000,
83 .modalias = "s25sl064a",
84 }
85};
86
87static struct ath79_spi_platform_data db120_spi_data = {
88 .bus_num = 0,
89 .num_chipselect = 1,
90};
91
92#ifdef CONFIG_PCI
93static struct ath9k_platform_data db120_ath9k_data;
94
95static int db120_pci_plat_dev_init(struct pci_dev *dev)
96{
97 switch (PCI_SLOT(dev->devfn)) {
98 case 0:
99 dev->dev.platform_data = &db120_ath9k_data;
100 break;
101 }
102
103 return 0;
104}
105
106static void __init db120_pci_init(u8 *eeprom)
107{
108 memcpy(db120_ath9k_data.eeprom_data, eeprom,
109 sizeof(db120_ath9k_data.eeprom_data));
110
111 ath79_pci_set_plat_dev_init(db120_pci_plat_dev_init);
112 ath79_register_pci();
113}
114#else
 115static inline void db120_pci_init(u8 *eeprom) {}
116#endif /* CONFIG_PCI */
117
118static void __init db120_setup(void)
119{
120 u8 *art = (u8 *) KSEG1ADDR(0x1fff0000);
121
122 ath79_register_leds_gpio(-1, ARRAY_SIZE(db120_leds_gpio),
123 db120_leds_gpio);
124 ath79_register_gpio_keys_polled(-1, DB120_KEYS_POLL_INTERVAL,
125 ARRAY_SIZE(db120_gpio_keys),
126 db120_gpio_keys);
127 ath79_register_spi(&db120_spi_data, db120_spi_info,
128 ARRAY_SIZE(db120_spi_info));
129 ath79_register_wmac(art + DB120_WMAC_CALDATA_OFFSET);
130 db120_pci_init(art + DB120_PCIE_CALDATA_OFFSET);
131}
132
133MIPS_MACHINE(ATH79_MACH_DB120, "DB120", "Atheros DB120 reference board",
134 db120_setup);
diff --git a/arch/mips/ath79/mach-pb44.c b/arch/mips/ath79/mach-pb44.c
index fe9701a32291..c5f0ea5e00c3 100644
--- a/arch/mips/ath79/mach-pb44.c
+++ b/arch/mips/ath79/mach-pb44.c
@@ -19,6 +19,7 @@
19#include "dev-leds-gpio.h" 19#include "dev-leds-gpio.h"
20#include "dev-spi.h" 20#include "dev-spi.h"
21#include "dev-usb.h" 21#include "dev-usb.h"
22#include "pci.h"
22 23
23#define PB44_GPIO_I2C_SCL 0 24#define PB44_GPIO_I2C_SCL 0
24#define PB44_GPIO_I2C_SDA 1 25#define PB44_GPIO_I2C_SDA 1
@@ -114,6 +115,7 @@ static void __init pb44_init(void)
114 ath79_register_spi(&pb44_spi_data, pb44_spi_info, 115 ath79_register_spi(&pb44_spi_data, pb44_spi_info,
115 ARRAY_SIZE(pb44_spi_info)); 116 ARRAY_SIZE(pb44_spi_info));
116 ath79_register_usb(); 117 ath79_register_usb();
118 ath79_register_pci();
117} 119}
118 120
119MIPS_MACHINE(ATH79_MACH_PB44, "PB44", "Atheros PB44 reference board", 121MIPS_MACHINE(ATH79_MACH_PB44, "PB44", "Atheros PB44 reference board",
diff --git a/arch/mips/ath79/mach-ubnt-xm.c b/arch/mips/ath79/mach-ubnt-xm.c
index 3c311a539347..4a3c60694c75 100644
--- a/arch/mips/ath79/mach-ubnt-xm.c
+++ b/arch/mips/ath79/mach-ubnt-xm.c
@@ -12,16 +12,15 @@
12 12
13#include <linux/init.h> 13#include <linux/init.h>
14#include <linux/pci.h> 14#include <linux/pci.h>
15
16#ifdef CONFIG_PCI
17#include <linux/ath9k_platform.h> 15#include <linux/ath9k_platform.h>
18#include <asm/mach-ath79/pci-ath724x.h> 16
19#endif /* CONFIG_PCI */ 17#include <asm/mach-ath79/irq.h>
20 18
21#include "machtypes.h" 19#include "machtypes.h"
22#include "dev-gpio-buttons.h" 20#include "dev-gpio-buttons.h"
23#include "dev-leds-gpio.h" 21#include "dev-leds-gpio.h"
24#include "dev-spi.h" 22#include "dev-spi.h"
23#include "pci.h"
25 24
26#define UBNT_XM_GPIO_LED_L1 0 25#define UBNT_XM_GPIO_LED_L1 0
27#define UBNT_XM_GPIO_LED_L2 1 26#define UBNT_XM_GPIO_LED_L2 1
@@ -33,7 +32,6 @@
33#define UBNT_XM_KEYS_POLL_INTERVAL 20 32#define UBNT_XM_KEYS_POLL_INTERVAL 20
34#define UBNT_XM_KEYS_DEBOUNCE_INTERVAL (3 * UBNT_XM_KEYS_POLL_INTERVAL) 33#define UBNT_XM_KEYS_DEBOUNCE_INTERVAL (3 * UBNT_XM_KEYS_POLL_INTERVAL)
35 34
36#define UBNT_XM_PCI_IRQ 48
37#define UBNT_XM_EEPROM_ADDR (u8 *) KSEG1ADDR(0x1fff1000) 35#define UBNT_XM_EEPROM_ADDR (u8 *) KSEG1ADDR(0x1fff1000)
38 36
39static struct gpio_led ubnt_xm_leds_gpio[] __initdata = { 37static struct gpio_led ubnt_xm_leds_gpio[] __initdata = {
@@ -84,12 +82,27 @@ static struct ath79_spi_platform_data ubnt_xm_spi_data = {
84#ifdef CONFIG_PCI 82#ifdef CONFIG_PCI
85static struct ath9k_platform_data ubnt_xm_eeprom_data; 83static struct ath9k_platform_data ubnt_xm_eeprom_data;
86 84
87static struct ath724x_pci_data ubnt_xm_pci_data[] = { 85static int ubnt_xm_pci_plat_dev_init(struct pci_dev *dev)
88 { 86{
89 .irq = UBNT_XM_PCI_IRQ, 87 switch (PCI_SLOT(dev->devfn)) {
90 .pdata = &ubnt_xm_eeprom_data, 88 case 0:
91 }, 89 dev->dev.platform_data = &ubnt_xm_eeprom_data;
92}; 90 break;
91 }
92
93 return 0;
94}
95
96static void __init ubnt_xm_pci_init(void)
97{
98 memcpy(ubnt_xm_eeprom_data.eeprom_data, UBNT_XM_EEPROM_ADDR,
99 sizeof(ubnt_xm_eeprom_data.eeprom_data));
100
101 ath79_pci_set_plat_dev_init(ubnt_xm_pci_plat_dev_init);
102 ath79_register_pci();
103}
104#else
105static inline void ubnt_xm_pci_init(void) {}
93#endif /* CONFIG_PCI */ 106#endif /* CONFIG_PCI */
94 107
95static void __init ubnt_xm_init(void) 108static void __init ubnt_xm_init(void)
@@ -104,13 +117,7 @@ static void __init ubnt_xm_init(void)
104 ath79_register_spi(&ubnt_xm_spi_data, ubnt_xm_spi_info, 117 ath79_register_spi(&ubnt_xm_spi_data, ubnt_xm_spi_info,
105 ARRAY_SIZE(ubnt_xm_spi_info)); 118 ARRAY_SIZE(ubnt_xm_spi_info));
106 119
107#ifdef CONFIG_PCI 120 ubnt_xm_pci_init();
108 memcpy(ubnt_xm_eeprom_data.eeprom_data, UBNT_XM_EEPROM_ADDR,
109 sizeof(ubnt_xm_eeprom_data.eeprom_data));
110
111 ath724x_pci_add_data(ubnt_xm_pci_data, ARRAY_SIZE(ubnt_xm_pci_data));
112#endif /* CONFIG_PCI */
113
114} 121}
115 122
116MIPS_MACHINE(ATH79_MACH_UBNT_XM, 123MIPS_MACHINE(ATH79_MACH_UBNT_XM,
diff --git a/arch/mips/ath79/machtypes.h b/arch/mips/ath79/machtypes.h
index 9a1f3826626e..af92e5c30d66 100644
--- a/arch/mips/ath79/machtypes.h
+++ b/arch/mips/ath79/machtypes.h
@@ -18,6 +18,7 @@ enum ath79_mach_type {
18 ATH79_MACH_GENERIC = 0, 18 ATH79_MACH_GENERIC = 0,
19 ATH79_MACH_AP121, /* Atheros AP121 reference board */ 19 ATH79_MACH_AP121, /* Atheros AP121 reference board */
20 ATH79_MACH_AP81, /* Atheros AP81 reference board */ 20 ATH79_MACH_AP81, /* Atheros AP81 reference board */
21 ATH79_MACH_DB120, /* Atheros DB120 reference board */
21 ATH79_MACH_PB44, /* Atheros PB44 reference board */ 22 ATH79_MACH_PB44, /* Atheros PB44 reference board */
22 ATH79_MACH_UBNT_XM, /* Ubiquiti Networks XM board rev 1.0 */ 23 ATH79_MACH_UBNT_XM, /* Ubiquiti Networks XM board rev 1.0 */
23}; 24};
diff --git a/arch/mips/ath79/pci.c b/arch/mips/ath79/pci.c
new file mode 100644
index 000000000000..ca83abd9d31e
--- /dev/null
+++ b/arch/mips/ath79/pci.c
@@ -0,0 +1,130 @@
1/*
2 * Atheros AR71XX/AR724X specific PCI setup code
3 *
4 * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
5 * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
6 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
7 *
8 * Parts of this file are based on Atheros' 2.6.15 BSP
9 *
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published
12 * by the Free Software Foundation.
13 */
14
15#include <linux/init.h>
16#include <linux/pci.h>
17#include <asm/mach-ath79/ar71xx_regs.h>
18#include <asm/mach-ath79/ath79.h>
19#include <asm/mach-ath79/irq.h>
20#include <asm/mach-ath79/pci.h>
21#include "pci.h"
22
23static int (*ath79_pci_plat_dev_init)(struct pci_dev *dev);
24static const struct ath79_pci_irq *ath79_pci_irq_map __initdata;
25static unsigned ath79_pci_nr_irqs __initdata;
26
27static const struct ath79_pci_irq ar71xx_pci_irq_map[] __initconst = {
28 {
29 .slot = 17,
30 .pin = 1,
31 .irq = ATH79_PCI_IRQ(0),
32 }, {
33 .slot = 18,
34 .pin = 1,
35 .irq = ATH79_PCI_IRQ(1),
36 }, {
37 .slot = 19,
38 .pin = 1,
39 .irq = ATH79_PCI_IRQ(2),
40 }
41};
42
43static const struct ath79_pci_irq ar724x_pci_irq_map[] __initconst = {
44 {
45 .slot = 0,
46 .pin = 1,
47 .irq = ATH79_PCI_IRQ(0),
48 }
49};
50
51int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
52{
53 int irq = -1;
54 int i;
55
56 if (ath79_pci_nr_irqs == 0 ||
57 ath79_pci_irq_map == NULL) {
58 if (soc_is_ar71xx()) {
59 ath79_pci_irq_map = ar71xx_pci_irq_map;
60 ath79_pci_nr_irqs = ARRAY_SIZE(ar71xx_pci_irq_map);
61 } else if (soc_is_ar724x() ||
62 soc_is_ar9342() ||
63 soc_is_ar9344()) {
64 ath79_pci_irq_map = ar724x_pci_irq_map;
65 ath79_pci_nr_irqs = ARRAY_SIZE(ar724x_pci_irq_map);
66 } else {
67 pr_crit("pci %s: invalid irq map\n",
68 pci_name((struct pci_dev *) dev));
69 return irq;
70 }
71 }
72
73 for (i = 0; i < ath79_pci_nr_irqs; i++) {
74 const struct ath79_pci_irq *entry;
75
76 entry = &ath79_pci_irq_map[i];
77 if (entry->slot == slot && entry->pin == pin) {
78 irq = entry->irq;
79 break;
80 }
81 }
82
83 if (irq < 0)
84 pr_crit("pci %s: no irq found for pin %u\n",
85 pci_name((struct pci_dev *) dev), pin);
86 else
87 pr_info("pci %s: using irq %d for pin %u\n",
88 pci_name((struct pci_dev *) dev), irq, pin);
89
90 return irq;
91}
92
93int pcibios_plat_dev_init(struct pci_dev *dev)
94{
95 if (ath79_pci_plat_dev_init)
96 return ath79_pci_plat_dev_init(dev);
97
98 return 0;
99}
100
101void __init ath79_pci_set_irq_map(unsigned nr_irqs,
102 const struct ath79_pci_irq *map)
103{
104 ath79_pci_nr_irqs = nr_irqs;
105 ath79_pci_irq_map = map;
106}
107
108void __init ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *dev))
109{
110 ath79_pci_plat_dev_init = func;
111}
112
113int __init ath79_register_pci(void)
114{
115 if (soc_is_ar71xx())
116 return ar71xx_pcibios_init();
117
118 if (soc_is_ar724x())
119 return ar724x_pcibios_init(ATH79_CPU_IRQ_IP2);
120
121 if (soc_is_ar9342() || soc_is_ar9344()) {
122 u32 bootstrap;
123
124 bootstrap = ath79_reset_rr(AR934X_RESET_REG_BOOTSTRAP);
125 if (bootstrap & AR934X_BOOTSTRAP_PCIE_RC)
126 return ar724x_pcibios_init(ATH79_IP2_IRQ(0));
127 }
128
129 return -ENODEV;
130}
diff --git a/arch/mips/ath79/pci.h b/arch/mips/ath79/pci.h
new file mode 100644
index 000000000000..51c6625dcc6d
--- /dev/null
+++ b/arch/mips/ath79/pci.h
@@ -0,0 +1,34 @@
 1/*
 2 * Atheros AR71XX/AR724X PCI support
 3 *
 4 * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
 5 * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
 6 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
 7 *
 8 * This program is free software; you can redistribute it and/or modify it
 9 * under the terms of the GNU General Public License version 2 as published
10 * by the Free Software Foundation.
11 */
12
13#ifndef _ATH79_PCI_H
14#define _ATH79_PCI_H
15
/* One entry of the board-supplied PCI interrupt routing table:
 * maps a device (slot) and interrupt pin to a Linux IRQ number. */
16struct ath79_pci_irq {
17	u8 slot;
18	u8 pin;
19	int irq;
20};
21
/* With CONFIG_PCI disabled the setters become empty inlines and
 * registration trivially succeeds, so board code can call these
 * unconditionally. */
22#ifdef CONFIG_PCI
23void ath79_pci_set_irq_map(unsigned nr_irqs, const struct ath79_pci_irq *map);
24void ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *dev));
25int ath79_register_pci(void);
26#else
27static inline void
28ath79_pci_set_irq_map(unsigned nr_irqs, const struct ath79_pci_irq *map) {}
29static inline void
30ath79_pci_set_plat_dev_init(int (*func)(struct pci_dev *)) {}
31static inline int ath79_register_pci(void) { return 0; }
32#endif
33
34#endif /* _ATH79_PCI_H */
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index 80a7d4023d7f..60d212ef8629 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -1,10 +1,11 @@
1/* 1/*
2 * Atheros AR71XX/AR724X/AR913X specific setup 2 * Atheros AR71XX/AR724X/AR913X specific setup
3 * 3 *
4 * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
4 * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org> 5 * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
5 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> 6 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
6 * 7 *
7 * Parts of this file are based on Atheros' 2.6.15 BSP 8 * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
8 * 9 *
9 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as published 11 * under the terms of the GNU General Public License version 2 as published
@@ -116,18 +117,6 @@ static void __init ath79_detect_sys_type(void)
116 rev = id & AR724X_REV_ID_REVISION_MASK; 117 rev = id & AR724X_REV_ID_REVISION_MASK;
117 break; 118 break;
118 119
119 case REV_ID_MAJOR_AR9330:
120 ath79_soc = ATH79_SOC_AR9330;
121 chip = "9330";
122 rev = id & AR933X_REV_ID_REVISION_MASK;
123 break;
124
125 case REV_ID_MAJOR_AR9331:
126 ath79_soc = ATH79_SOC_AR9331;
127 chip = "9331";
128 rev = id & AR933X_REV_ID_REVISION_MASK;
129 break;
130
131 case REV_ID_MAJOR_AR913X: 120 case REV_ID_MAJOR_AR913X:
132 minor = id & AR913X_REV_ID_MINOR_MASK; 121 minor = id & AR913X_REV_ID_MINOR_MASK;
133 rev = id >> AR913X_REV_ID_REVISION_SHIFT; 122 rev = id >> AR913X_REV_ID_REVISION_SHIFT;
@@ -145,6 +134,36 @@ static void __init ath79_detect_sys_type(void)
145 } 134 }
146 break; 135 break;
147 136
137 case REV_ID_MAJOR_AR9330:
138 ath79_soc = ATH79_SOC_AR9330;
139 chip = "9330";
140 rev = id & AR933X_REV_ID_REVISION_MASK;
141 break;
142
143 case REV_ID_MAJOR_AR9331:
144 ath79_soc = ATH79_SOC_AR9331;
145 chip = "9331";
146 rev = id & AR933X_REV_ID_REVISION_MASK;
147 break;
148
149 case REV_ID_MAJOR_AR9341:
150 ath79_soc = ATH79_SOC_AR9341;
151 chip = "9341";
152 rev = id & AR934X_REV_ID_REVISION_MASK;
153 break;
154
155 case REV_ID_MAJOR_AR9342:
156 ath79_soc = ATH79_SOC_AR9342;
157 chip = "9342";
158 rev = id & AR934X_REV_ID_REVISION_MASK;
159 break;
160
161 case REV_ID_MAJOR_AR9344:
162 ath79_soc = ATH79_SOC_AR9344;
163 chip = "9344";
164 rev = id & AR934X_REV_ID_REVISION_MASK;
165 break;
166
148 default: 167 default:
149 panic("ath79: unknown SoC, id:0x%08x", id); 168 panic("ath79: unknown SoC, id:0x%08x", id);
150 } 169 }
diff --git a/arch/mips/bcm63xx/boards/Makefile b/arch/mips/bcm63xx/boards/Makefile
index 9f64fb414077..af07c1aa202f 100644
--- a/arch/mips/bcm63xx/boards/Makefile
+++ b/arch/mips/bcm63xx/boards/Makefile
@@ -1,3 +1 @@
1obj-$(CONFIG_BOARD_BCM963XX) += board_bcm963xx.o obj-$(CONFIG_BOARD_BCM963XX) += board_bcm963xx.o
2
3ccflags-y := -Werror
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index d3a9f012aa0a..260dc247c052 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -9,6 +9,7 @@
9#include <linux/init.h> 9#include <linux/init.h>
10#include <linux/console.h> 10#include <linux/console.h>
11#include <linux/delay.h> 11#include <linux/delay.h>
12#include <linux/export.h>
12#include <linux/interrupt.h> 13#include <linux/interrupt.h>
13#include <linux/io.h> 14#include <linux/io.h>
14#include <linux/serial.h> 15#include <linux/serial.h>
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index 97e7ce9b50ed..4b93048044eb 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -257,8 +257,6 @@ DEFINE_PER_CPU(int, cpu_state);
257 257
258extern void fixup_irqs(void); 258extern void fixup_irqs(void);
259 259
260static DEFINE_SPINLOCK(smp_reserve_lock);
261
262static int octeon_cpu_disable(void) 260static int octeon_cpu_disable(void)
263{ 261{
264 unsigned int cpu = smp_processor_id(); 262 unsigned int cpu = smp_processor_id();
@@ -266,8 +264,6 @@ static int octeon_cpu_disable(void)
266 if (cpu == 0) 264 if (cpu == 0)
267 return -EBUSY; 265 return -EBUSY;
268 266
269 spin_lock(&smp_reserve_lock);
270
271 set_cpu_online(cpu, false); 267 set_cpu_online(cpu, false);
272 cpu_clear(cpu, cpu_callin_map); 268 cpu_clear(cpu, cpu_callin_map);
273 local_irq_disable(); 269 local_irq_disable();
@@ -277,8 +273,6 @@ static int octeon_cpu_disable(void)
277 flush_cache_all(); 273 flush_cache_all();
278 local_flush_tlb_all(); 274 local_flush_tlb_all();
279 275
280 spin_unlock(&smp_reserve_lock);
281
282 return 0; 276 return 0;
283} 277}
284 278
diff --git a/arch/mips/fw/arc/Makefile b/arch/mips/fw/arc/Makefile
index 5314b37aff2c..4f349ec1ea2d 100644
--- a/arch/mips/fw/arc/Makefile
+++ b/arch/mips/fw/arc/Makefile
@@ -8,5 +8,3 @@ lib-y += cmdline.o env.o file.o identify.o init.o \
8lib-$(CONFIG_ARC_MEMORY) += memory.o 8lib-$(CONFIG_ARC_MEMORY) += memory.o
9lib-$(CONFIG_ARC_CONSOLE) += arc_con.o 9lib-$(CONFIG_ARC_CONSOLE) += arc_con.o
10lib-$(CONFIG_ARC_PROMLIB) += promlib.o 10lib-$(CONFIG_ARC_PROMLIB) += promlib.o
11
12ccflags-y := -Werror
diff --git a/arch/mips/include/asm/clkdev.h b/arch/mips/include/asm/clkdev.h
new file mode 100644
index 000000000000..262475414e5f
--- /dev/null
+++ b/arch/mips/include/asm/clkdev.h
@@ -0,0 +1,25 @@
 1/*
 2 * based on arch/arm/include/asm/clkdev.h
 3 *
 4 * Copyright (C) 2008 Russell King.
 5 *
 6 * This program is free software; you can redistribute it and/or modify
 7 * it under the terms of the GNU General Public License version 2 as
 8 * published by the Free Software Foundation.
 9 *
10 * Helper for the clk API to assist looking up a struct clk.
11 */
12#ifndef __ASM_CLKDEV_H
13#define __ASM_CLKDEV_H
14
15#include <linux/slab.h>
16
/* MIPS needs no per-clock refcounting: "get" always succeeds (1)
 * and "put" is a no-op. */
17#define __clk_get(clk)	({ 1; })
18#define __clk_put(clk)	do { } while (0)
19
/* Allocate a zero-initialized clk_lookup_alloc for the clkdev core. */
20static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
21{
22	return kzalloc(size, GFP_KERNEL);
23}
24
25#endif
diff --git a/arch/mips/include/asm/kvm_para.h b/arch/mips/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/mips/include/asm/kvm_para.h
@@ -0,0 +1 @@
#include <asm-generic/kvm_para.h>
diff --git a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
index 2f0becb4ec8f..1caa78ad06d5 100644
--- a/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
+++ b/arch/mips/include/asm/mach-ath79/ar71xx_regs.h
@@ -1,10 +1,11 @@
1/* 1/*
2 * Atheros AR71XX/AR724X/AR913X SoC register definitions 2 * Atheros AR71XX/AR724X/AR913X SoC register definitions
3 * 3 *
4 * Copyright (C) 2010-2011 Jaiganesh Narayanan <jnarayanan@atheros.com>
4 * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org> 5 * Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
5 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org> 6 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
6 * 7 *
7 * Parts of this file are based on Atheros' 2.6.15 BSP 8 * Parts of this file are based on Atheros' 2.6.15/2.6.31 BSP
8 * 9 *
9 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as published 11 * under the terms of the GNU General Public License version 2 as published
@@ -60,6 +61,9 @@
60#define AR933X_EHCI_BASE 0x1b000000 61#define AR933X_EHCI_BASE 0x1b000000
61#define AR933X_EHCI_SIZE 0x1000 62#define AR933X_EHCI_SIZE 0x1000
62 63
64#define AR934X_WMAC_BASE (AR71XX_APB_BASE + 0x00100000)
65#define AR934X_WMAC_SIZE 0x20000
66
63/* 67/*
64 * DDR_CTRL block 68 * DDR_CTRL block
65 */ 69 */
@@ -91,6 +95,12 @@
91#define AR933X_DDR_REG_FLUSH_USB 0x84 95#define AR933X_DDR_REG_FLUSH_USB 0x84
92#define AR933X_DDR_REG_FLUSH_WMAC 0x88 96#define AR933X_DDR_REG_FLUSH_WMAC 0x88
93 97
98#define AR934X_DDR_REG_FLUSH_GE0 0x9c
99#define AR934X_DDR_REG_FLUSH_GE1 0xa0
100#define AR934X_DDR_REG_FLUSH_USB 0xa4
101#define AR934X_DDR_REG_FLUSH_PCIE 0xa8
102#define AR934X_DDR_REG_FLUSH_WMAC 0xac
103
94/* 104/*
95 * PLL block 105 * PLL block
96 */ 106 */
@@ -150,6 +160,41 @@
150#define AR933X_PLL_CLOCK_CTRL_AHB_DIV_SHIFT 15 160#define AR933X_PLL_CLOCK_CTRL_AHB_DIV_SHIFT 15
151#define AR933X_PLL_CLOCK_CTRL_AHB_DIV_MASK 0x7 161#define AR933X_PLL_CLOCK_CTRL_AHB_DIV_MASK 0x7
152 162
163#define AR934X_PLL_CPU_CONFIG_REG 0x00
164#define AR934X_PLL_DDR_CONFIG_REG 0x04
165#define AR934X_PLL_CPU_DDR_CLK_CTRL_REG 0x08
166
167#define AR934X_PLL_CPU_CONFIG_NFRAC_SHIFT 0
168#define AR934X_PLL_CPU_CONFIG_NFRAC_MASK 0x3f
169#define AR934X_PLL_CPU_CONFIG_NINT_SHIFT 6
170#define AR934X_PLL_CPU_CONFIG_NINT_MASK 0x3f
171#define AR934X_PLL_CPU_CONFIG_REFDIV_SHIFT 12
172#define AR934X_PLL_CPU_CONFIG_REFDIV_MASK 0x1f
173#define AR934X_PLL_CPU_CONFIG_OUTDIV_SHIFT 19
174#define AR934X_PLL_CPU_CONFIG_OUTDIV_MASK 0x3
175
176#define AR934X_PLL_DDR_CONFIG_NFRAC_SHIFT 0
177#define AR934X_PLL_DDR_CONFIG_NFRAC_MASK 0x3ff
178#define AR934X_PLL_DDR_CONFIG_NINT_SHIFT 10
179#define AR934X_PLL_DDR_CONFIG_NINT_MASK 0x3f
180#define AR934X_PLL_DDR_CONFIG_REFDIV_SHIFT 16
181#define AR934X_PLL_DDR_CONFIG_REFDIV_MASK 0x1f
182#define AR934X_PLL_DDR_CONFIG_OUTDIV_SHIFT 23
183#define AR934X_PLL_DDR_CONFIG_OUTDIV_MASK 0x7
184
185#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_PLL_BYPASS BIT(2)
186#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_PLL_BYPASS BIT(3)
187#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_PLL_BYPASS BIT(4)
188#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_SHIFT 5
189#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPU_POST_DIV_MASK 0x1f
190#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_SHIFT 10
191#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDR_POST_DIV_MASK 0x1f
192#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_SHIFT 15
193#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHB_POST_DIV_MASK 0x1f
194#define AR934X_PLL_CPU_DDR_CLK_CTRL_CPUCLK_FROM_CPUPLL BIT(20)
195#define AR934X_PLL_CPU_DDR_CLK_CTRL_DDRCLK_FROM_DDRPLL BIT(21)
196#define AR934X_PLL_CPU_DDR_CLK_CTRL_AHBCLK_FROM_DDRPLL BIT(24)
197
153/* 198/*
154 * USB_CONFIG block 199 * USB_CONFIG block
155 */ 200 */
@@ -185,6 +230,10 @@
185#define AR933X_RESET_REG_RESET_MODULE 0x1c 230#define AR933X_RESET_REG_RESET_MODULE 0x1c
186#define AR933X_RESET_REG_BOOTSTRAP 0xac 231#define AR933X_RESET_REG_BOOTSTRAP 0xac
187 232
233#define AR934X_RESET_REG_RESET_MODULE 0x1c
234#define AR934X_RESET_REG_BOOTSTRAP 0xb0
235#define AR934X_RESET_REG_PCIE_WMAC_INT_STATUS 0xac
236
188#define MISC_INT_ETHSW BIT(12) 237#define MISC_INT_ETHSW BIT(12)
189#define MISC_INT_TIMER4 BIT(10) 238#define MISC_INT_TIMER4 BIT(10)
190#define MISC_INT_TIMER3 BIT(9) 239#define MISC_INT_TIMER3 BIT(9)
@@ -241,6 +290,40 @@
241 290
242#define AR933X_BOOTSTRAP_REF_CLK_40 BIT(0) 291#define AR933X_BOOTSTRAP_REF_CLK_40 BIT(0)
243 292
293#define AR934X_BOOTSTRAP_SW_OPTION8 BIT(23)
294#define AR934X_BOOTSTRAP_SW_OPTION7 BIT(22)
295#define AR934X_BOOTSTRAP_SW_OPTION6 BIT(21)
296#define AR934X_BOOTSTRAP_SW_OPTION5 BIT(20)
297#define AR934X_BOOTSTRAP_SW_OPTION4 BIT(19)
298#define AR934X_BOOTSTRAP_SW_OPTION3 BIT(18)
299#define AR934X_BOOTSTRAP_SW_OPTION2 BIT(17)
300#define AR934X_BOOTSTRAP_SW_OPTION1 BIT(16)
301#define AR934X_BOOTSTRAP_USB_MODE_DEVICE BIT(7)
302#define AR934X_BOOTSTRAP_PCIE_RC BIT(6)
303#define AR934X_BOOTSTRAP_EJTAG_MODE BIT(5)
304#define AR934X_BOOTSTRAP_REF_CLK_40 BIT(4)
305#define AR934X_BOOTSTRAP_BOOT_FROM_SPI BIT(2)
306#define AR934X_BOOTSTRAP_SDRAM_DISABLED BIT(1)
307#define AR934X_BOOTSTRAP_DDR1 BIT(0)
308
309#define AR934X_PCIE_WMAC_INT_WMAC_MISC BIT(0)
310#define AR934X_PCIE_WMAC_INT_WMAC_TX BIT(1)
311#define AR934X_PCIE_WMAC_INT_WMAC_RXLP BIT(2)
312#define AR934X_PCIE_WMAC_INT_WMAC_RXHP BIT(3)
313#define AR934X_PCIE_WMAC_INT_PCIE_RC BIT(4)
314#define AR934X_PCIE_WMAC_INT_PCIE_RC0 BIT(5)
315#define AR934X_PCIE_WMAC_INT_PCIE_RC1 BIT(6)
316#define AR934X_PCIE_WMAC_INT_PCIE_RC2 BIT(7)
317#define AR934X_PCIE_WMAC_INT_PCIE_RC3 BIT(8)
318#define AR934X_PCIE_WMAC_INT_WMAC_ALL \
319 (AR934X_PCIE_WMAC_INT_WMAC_MISC | AR934X_PCIE_WMAC_INT_WMAC_TX | \
320 AR934X_PCIE_WMAC_INT_WMAC_RXLP | AR934X_PCIE_WMAC_INT_WMAC_RXHP)
321
322#define AR934X_PCIE_WMAC_INT_PCIE_ALL \
323 (AR934X_PCIE_WMAC_INT_PCIE_RC | AR934X_PCIE_WMAC_INT_PCIE_RC0 | \
324 AR934X_PCIE_WMAC_INT_PCIE_RC1 | AR934X_PCIE_WMAC_INT_PCIE_RC2 | \
325 AR934X_PCIE_WMAC_INT_PCIE_RC3)
326
244#define REV_ID_MAJOR_MASK 0xfff0 327#define REV_ID_MAJOR_MASK 0xfff0
245#define REV_ID_MAJOR_AR71XX 0x00a0 328#define REV_ID_MAJOR_AR71XX 0x00a0
246#define REV_ID_MAJOR_AR913X 0x00b0 329#define REV_ID_MAJOR_AR913X 0x00b0
@@ -249,6 +332,9 @@
249#define REV_ID_MAJOR_AR7242 0x1100 332#define REV_ID_MAJOR_AR7242 0x1100
250#define REV_ID_MAJOR_AR9330 0x0110 333#define REV_ID_MAJOR_AR9330 0x0110
251#define REV_ID_MAJOR_AR9331 0x1110 334#define REV_ID_MAJOR_AR9331 0x1110
335#define REV_ID_MAJOR_AR9341 0x0120
336#define REV_ID_MAJOR_AR9342 0x1120
337#define REV_ID_MAJOR_AR9344 0x2120
252 338
253#define AR71XX_REV_ID_MINOR_MASK 0x3 339#define AR71XX_REV_ID_MINOR_MASK 0x3
254#define AR71XX_REV_ID_MINOR_AR7130 0x0 340#define AR71XX_REV_ID_MINOR_AR7130 0x0
@@ -267,6 +353,8 @@
267 353
268#define AR724X_REV_ID_REVISION_MASK 0x3 354#define AR724X_REV_ID_REVISION_MASK 0x3
269 355
356#define AR934X_REV_ID_REVISION_MASK 0xf
357
270/* 358/*
271 * SPI block 359 * SPI block
272 */ 360 */
@@ -308,5 +396,6 @@
308#define AR724X_GPIO_COUNT 18 396#define AR724X_GPIO_COUNT 18
309#define AR913X_GPIO_COUNT 22 397#define AR913X_GPIO_COUNT 22
310#define AR933X_GPIO_COUNT 30 398#define AR933X_GPIO_COUNT 30
399#define AR934X_GPIO_COUNT 23
311 400
312#endif /* __ASM_MACH_AR71XX_REGS_H */ 401#endif /* __ASM_MACH_AR71XX_REGS_H */
diff --git a/arch/mips/include/asm/mach-ath79/ath79.h b/arch/mips/include/asm/mach-ath79/ath79.h
index 6d0c6c9d5622..4f248c3d7b23 100644
--- a/arch/mips/include/asm/mach-ath79/ath79.h
+++ b/arch/mips/include/asm/mach-ath79/ath79.h
@@ -29,6 +29,9 @@ enum ath79_soc_type {
29 ATH79_SOC_AR9132, 29 ATH79_SOC_AR9132,
30 ATH79_SOC_AR9330, 30 ATH79_SOC_AR9330,
31 ATH79_SOC_AR9331, 31 ATH79_SOC_AR9331,
32 ATH79_SOC_AR9341,
33 ATH79_SOC_AR9342,
34 ATH79_SOC_AR9344,
32}; 35};
33 36
34extern enum ath79_soc_type ath79_soc; 37extern enum ath79_soc_type ath79_soc;
@@ -75,6 +78,26 @@ static inline int soc_is_ar933x(void)
75 ath79_soc == ATH79_SOC_AR9331); 78 ath79_soc == ATH79_SOC_AR9331);
76} 79}
77 80
81static inline int soc_is_ar9341(void)
82{
83 return (ath79_soc == ATH79_SOC_AR9341);
84}
85
86static inline int soc_is_ar9342(void)
87{
88 return (ath79_soc == ATH79_SOC_AR9342);
89}
90
91static inline int soc_is_ar9344(void)
92{
93 return (ath79_soc == ATH79_SOC_AR9344);
94}
95
96static inline int soc_is_ar934x(void)
97{
98 return soc_is_ar9341() || soc_is_ar9342() || soc_is_ar9344();
99}
100
78extern void __iomem *ath79_ddr_base; 101extern void __iomem *ath79_ddr_base;
79extern void __iomem *ath79_pll_base; 102extern void __iomem *ath79_pll_base;
80extern void __iomem *ath79_reset_base; 103extern void __iomem *ath79_reset_base;
diff --git a/arch/mips/include/asm/mach-ath79/irq.h b/arch/mips/include/asm/mach-ath79/irq.h
index 519958fe4e3c..0968f69e2018 100644
--- a/arch/mips/include/asm/mach-ath79/irq.h
+++ b/arch/mips/include/asm/mach-ath79/irq.h
@@ -10,11 +10,19 @@
10#define __ASM_MACH_ATH79_IRQ_H 10#define __ASM_MACH_ATH79_IRQ_H
11 11
12#define MIPS_CPU_IRQ_BASE 0 12#define MIPS_CPU_IRQ_BASE 0
13#define NR_IRQS 40 13#define NR_IRQS 48
14 14
15#define ATH79_MISC_IRQ_BASE 8 15#define ATH79_MISC_IRQ_BASE 8
16#define ATH79_MISC_IRQ_COUNT 32 16#define ATH79_MISC_IRQ_COUNT 32
17 17
18#define ATH79_PCI_IRQ_BASE (ATH79_MISC_IRQ_BASE + ATH79_MISC_IRQ_COUNT)
19#define ATH79_PCI_IRQ_COUNT 6
20#define ATH79_PCI_IRQ(_x) (ATH79_PCI_IRQ_BASE + (_x))
21
22#define ATH79_IP2_IRQ_BASE (ATH79_PCI_IRQ_BASE + ATH79_PCI_IRQ_COUNT)
23#define ATH79_IP2_IRQ_COUNT 2
24#define ATH79_IP2_IRQ(_x) (ATH79_IP2_IRQ_BASE + (_x))
25
18#define ATH79_CPU_IRQ_IP2 (MIPS_CPU_IRQ_BASE + 2) 26#define ATH79_CPU_IRQ_IP2 (MIPS_CPU_IRQ_BASE + 2)
19#define ATH79_CPU_IRQ_USB (MIPS_CPU_IRQ_BASE + 3) 27#define ATH79_CPU_IRQ_USB (MIPS_CPU_IRQ_BASE + 3)
20#define ATH79_CPU_IRQ_GE0 (MIPS_CPU_IRQ_BASE + 4) 28#define ATH79_CPU_IRQ_GE0 (MIPS_CPU_IRQ_BASE + 4)
diff --git a/arch/mips/include/asm/mach-ath79/pci-ath724x.h b/arch/mips/include/asm/mach-ath79/pci-ath724x.h
deleted file mode 100644
index 454885fa30c3..000000000000
--- a/arch/mips/include/asm/mach-ath79/pci-ath724x.h
+++ /dev/null
@@ -1,21 +0,0 @@
1/*
2 * Atheros 724x PCI support
3 *
4 * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 */
10
11#ifndef __ASM_MACH_ATH79_PCI_ATH724X_H
12#define __ASM_MACH_ATH79_PCI_ATH724X_H
13
14struct ath724x_pci_data {
15 int irq;
16 void *pdata;
17};
18
19void ath724x_pci_add_data(struct ath724x_pci_data *data, int size);
20
21#endif /* __ASM_MACH_ATH79_PCI_ATH724X_H */
diff --git a/arch/mips/include/asm/mach-ath79/pci.h b/arch/mips/include/asm/mach-ath79/pci.h
new file mode 100644
index 000000000000..7868f7fa028f
--- /dev/null
+++ b/arch/mips/include/asm/mach-ath79/pci.h
@@ -0,0 +1,28 @@
 1/*
 2 * Atheros AR71XX/AR724X PCI support
 3 *
 4 * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
 5 * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
 6 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
 7 *
 8 * This program is free software; you can redistribute it and/or modify it
 9 * under the terms of the GNU General Public License version 2 as published
10 * by the Free Software Foundation.
11 */
12
13#ifndef __ASM_MACH_ATH79_PCI_H
14#define __ASM_MACH_ATH79_PCI_H
15
/* Host-controller init entry points.  Each compiles to a stub
 * returning 0 when the matching controller support is configured out,
 * so ath79_register_pci() can call them unconditionally. */
16#if defined(CONFIG_PCI) && defined(CONFIG_SOC_AR71XX)
17int ar71xx_pcibios_init(void);
18#else
19static inline int ar71xx_pcibios_init(void) { return 0; }
20#endif
21
22#if defined(CONFIG_PCI_AR724X)
23int ar724x_pcibios_init(int irq);
24#else
25static inline int ar724x_pcibios_init(int irq) { return 0; }
26#endif
27
28#endif /* __ASM_MACH_ATH79_PCI_H */
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
index 3d5de96d4036..1d7dd96aa460 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_gpio.h
@@ -2,6 +2,7 @@
2#define BCM63XX_GPIO_H 2#define BCM63XX_GPIO_H
3 3
4#include <linux/init.h> 4#include <linux/init.h>
5#include <bcm63xx_cpu.h>
5 6
6int __init bcm63xx_gpio_init(void); 7int __init bcm63xx_gpio_init(void);
7 8
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
new file mode 100644
index 000000000000..318f982f04ff
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/falcon/falcon_irq.h
@@ -0,0 +1,23 @@
 1/*
 2 * This program is free software; you can redistribute it and/or modify it
 3 * under the terms of the GNU General Public License version 2 as published
 4 * by the Free Software Foundation.
 5 *
 6 * Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
 7 */
 8
 9#ifndef _FALCON_IRQ__
10#define _FALCON_IRQ__
11
/* IRQ number layout for the FALC-ON SoC: the 8 MIPS CPU IRQs come
 * first, followed by five interrupt modules (IM0..IM4) of 32 lines
 * each, then any extra interrupts. */
12#define INT_NUM_IRQ0			8
13#define INT_NUM_IM0_IRL0		(INT_NUM_IRQ0 + 0)
14#define INT_NUM_IM1_IRL0		(INT_NUM_IM0_IRL0 + 32)
15#define INT_NUM_IM2_IRL0		(INT_NUM_IM1_IRL0 + 32)
16#define INT_NUM_IM3_IRL0		(INT_NUM_IM2_IRL0 + 32)
17#define INT_NUM_IM4_IRL0		(INT_NUM_IM3_IRL0 + 32)
18#define INT_NUM_EXTRA_START		(INT_NUM_IM4_IRL0 + 32)
/* Distance between two consecutive interrupt modules (32 lines). */
19#define INT_NUM_IM_OFFSET		(INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
20
21#define MIPS_CPU_TIMER_IRQ		7
22
23#endif /* _FALCON_IRQ__ */
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/irq.h b/arch/mips/include/asm/mach-lantiq/falcon/irq.h
new file mode 100644
index 000000000000..2caccd9f9dbc
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/falcon/irq.h
@@ -0,0 +1,18 @@
 1/*
 2 * This program is free software; you can redistribute it and/or modify it
 3 * under the terms of the GNU General Public License version 2 as published
 4 * by the Free Software Foundation.
 5 *
 6 * Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
 7 */
 8
 9#ifndef __FALCON_IRQ_H
10#define __FALCON_IRQ_H
11
12#include <falcon_irq.h>
13
/* Size of the Linux IRQ number space reserved for the FALC-ON SoC;
 * must be defined before chaining to the generic MIPS <irq.h>. */
14#define NR_IRQS 328
15
16#include_next <irq.h>
17
18#endif /* __FALCON_IRQ_H */
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
new file mode 100644
index 000000000000..b385252584ee
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
@@ -0,0 +1,67 @@
 1/*
 2 * This program is free software; you can redistribute it and/or modify it
 3 * under the terms of the GNU General Public License version 2 as published
 4 * by the Free Software Foundation.
 5 *
 6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
 7 */
 8
 9#ifndef _LTQ_FALCON_H__
10#define _LTQ_FALCON_H__
11
12#ifdef CONFIG_SOC_FALCON
13
14#include <linux/pinctrl/pinctrl.h>
15#include <lantiq.h>
16
17/* Chip IDs */
18#define SOC_ID_FALCON		0x01B8
19
20/* SoC Types */
21#define SOC_TYPE_FALCON		0x01
22
23/*
24 * during early_printk no ioremap possible at this early stage
25 * lets use KSEG1 instead
26 */
27#define LTQ_ASC0_BASE_ADDR	0x1E100C00
28#define LTQ_EARLY_ASC		KSEG1ADDR(LTQ_ASC0_BASE_ADDR)
29
30/* WDT */
31#define LTQ_RST_CAUSE_WDTRST	0x0002
32
33/* CHIP ID */
34#define LTQ_STATUS_BASE_ADDR	0x1E802000
35
/* uncached (KSEG1) pointers into the status register block */
36#define FALCON_CHIPID		((u32 *)(KSEG1 + LTQ_STATUS_BASE_ADDR + 0x0c))
37#define FALCON_CHIPTYPE		((u32 *)(KSEG1 + LTQ_STATUS_BASE_ADDR + 0x38))
38#define FALCON_CHIPCONF		((u32 *)(KSEG1 + LTQ_STATUS_BASE_ADDR + 0x40))
39
40/* SYSCTL - start/stop/restart/configure/... different parts of the Soc */
41#define SYSCTL_SYS1		0
42#define SYSCTL_SYSETH		1
43#define SYSCTL_SYSGPE		2
44
45/* BOOT_SEL - find what boot media we have */
46#define BS_FLASH		0x1
47#define BS_SPI			0x4
48
49/* global register ranges */
50extern __iomem void *ltq_ebu_membase;
51extern __iomem void *ltq_sys1_membase;
52#define ltq_ebu_w32(x, y)	ltq_w32((x), ltq_ebu_membase + (y))
53#define ltq_ebu_r32(x)		ltq_r32(ltq_ebu_membase + (x))
54
55#define ltq_sys1_w32(x, y)	ltq_w32((x), ltq_sys1_membase + (y))
56#define ltq_sys1_r32(x)		ltq_r32(ltq_sys1_membase + (x))
/* read-modify-write helper: clear then set bits in a SYS1 register */
57#define ltq_sys1_w32_mask(clear, set, reg)   \
58	ltq_sys1_w32((ltq_sys1_r32(reg) & ~(clear)) | (set), reg)
59
60/*
61 * to keep the irq code generic we need to define this to 0 as falcon
62 * has no EIU/EBU
63 */
64#define LTQ_EBU_PCC_ISTAT	0
65
66#endif /* CONFIG_SOC_FALCON */
67#endif /* _LTQ_FALCON_H__ */
diff --git a/arch/mips/include/asm/mach-lantiq/gpio.h b/arch/mips/include/asm/mach-lantiq/gpio.h
new file mode 100644
index 000000000000..f79505b43609
--- /dev/null
+++ b/arch/mips/include/asm/mach-lantiq/gpio.h
@@ -0,0 +1,16 @@
 1#ifndef __ASM_MIPS_MACH_LANTIQ_GPIO_H
 2#define __ASM_MIPS_MACH_LANTIQ_GPIO_H
 3
/* Lantiq wires no GPIO-to-IRQ mapping: always report failure (-1). */
 4static inline int gpio_to_irq(unsigned int gpio)
 5{
 6	return -1;
 7}
 8
/* Route the gpio accessors to gpiolib's generic implementations. */
 9#define gpio_get_value __gpio_get_value
10#define gpio_set_value __gpio_set_value
11
12#define gpio_cansleep __gpio_cansleep
13
14#include <asm-generic/gpio.h>
15
16#endif
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq.h b/arch/mips/include/asm/mach-lantiq/lantiq.h
index ce2f02929d22..5e8a6e965756 100644
--- a/arch/mips/include/asm/mach-lantiq/lantiq.h
+++ b/arch/mips/include/asm/mach-lantiq/lantiq.h
@@ -9,6 +9,8 @@
9#define _LANTIQ_H__ 9#define _LANTIQ_H__
10 10
11#include <linux/irq.h> 11#include <linux/irq.h>
12#include <linux/device.h>
13#include <linux/clk.h>
12 14
13/* generic reg access functions */ 15/* generic reg access functions */
14#define ltq_r32(reg) __raw_readl(reg) 16#define ltq_r32(reg) __raw_readl(reg)
@@ -21,25 +23,9 @@
21/* register access macros for EBU and CGU */ 23/* register access macros for EBU and CGU */
22#define ltq_ebu_w32(x, y) ltq_w32((x), ltq_ebu_membase + (y)) 24#define ltq_ebu_w32(x, y) ltq_w32((x), ltq_ebu_membase + (y))
23#define ltq_ebu_r32(x) ltq_r32(ltq_ebu_membase + (x)) 25#define ltq_ebu_r32(x) ltq_r32(ltq_ebu_membase + (x))
24#define ltq_cgu_w32(x, y) ltq_w32((x), ltq_cgu_membase + (y)) 26#define ltq_ebu_w32_mask(x, y, z) \
25#define ltq_cgu_r32(x) ltq_r32(ltq_cgu_membase + (x)) 27 ltq_w32_mask(x, y, ltq_ebu_membase + (z))
26
27extern __iomem void *ltq_ebu_membase; 28extern __iomem void *ltq_ebu_membase;
28extern __iomem void *ltq_cgu_membase;
29
30extern unsigned int ltq_get_cpu_ver(void);
31extern unsigned int ltq_get_soc_type(void);
32
33/* clock speeds */
34#define CLOCK_60M 60000000
35#define CLOCK_83M 83333333
36#define CLOCK_111M 111111111
37#define CLOCK_133M 133333333
38#define CLOCK_167M 166666667
39#define CLOCK_200M 200000000
40#define CLOCK_266M 266666666
41#define CLOCK_333M 333333333
42#define CLOCK_400M 400000000
43 29
44/* spinlock all ebu i/o */ 30/* spinlock all ebu i/o */
45extern spinlock_t ebu_lock; 31extern spinlock_t ebu_lock;
@@ -49,15 +35,21 @@ extern void ltq_disable_irq(struct irq_data *data);
49extern void ltq_mask_and_ack_irq(struct irq_data *data); 35extern void ltq_mask_and_ack_irq(struct irq_data *data);
50extern void ltq_enable_irq(struct irq_data *data); 36extern void ltq_enable_irq(struct irq_data *data);
51 37
38/* clock handling */
39extern int clk_activate(struct clk *clk);
40extern void clk_deactivate(struct clk *clk);
41extern struct clk *clk_get_cpu(void);
42extern struct clk *clk_get_fpi(void);
43extern struct clk *clk_get_io(void);
44
45/* find out what bootsource we have */
46extern unsigned char ltq_boot_select(void);
52/* find out what caused the last cpu reset */ 47/* find out what caused the last cpu reset */
53extern int ltq_reset_cause(void); 48extern int ltq_reset_cause(void);
54#define LTQ_RST_CAUSE_WDTRST 0x20
55 49
56#define IOPORT_RESOURCE_START 0x10000000 50#define IOPORT_RESOURCE_START 0x10000000
57#define IOPORT_RESOURCE_END 0xffffffff 51#define IOPORT_RESOURCE_END 0xffffffff
58#define IOMEM_RESOURCE_START 0x10000000 52#define IOMEM_RESOURCE_START 0x10000000
59#define IOMEM_RESOURCE_END 0xffffffff 53#define IOMEM_RESOURCE_END 0xffffffff
60#define LTQ_FLASH_START 0x10000000
61#define LTQ_FLASH_MAX 0x04000000
62 54
63#endif 55#endif
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq_platform.h b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
index a305f1d0259e..e23bf7c9a2d0 100644
--- a/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
+++ b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
@@ -9,41 +9,8 @@
9#ifndef _LANTIQ_PLATFORM_H__ 9#ifndef _LANTIQ_PLATFORM_H__
10#define _LANTIQ_PLATFORM_H__ 10#define _LANTIQ_PLATFORM_H__
11 11
12#include <linux/mtd/partitions.h>
13#include <linux/socket.h> 12#include <linux/socket.h>
14 13
15/* struct used to pass info to the pci core */
16enum {
17 PCI_CLOCK_INT = 0,
18 PCI_CLOCK_EXT
19};
20
21#define PCI_EXIN0 0x0001
22#define PCI_EXIN1 0x0002
23#define PCI_EXIN2 0x0004
24#define PCI_EXIN3 0x0008
25#define PCI_EXIN4 0x0010
26#define PCI_EXIN5 0x0020
27#define PCI_EXIN_MAX 6
28
29#define PCI_GNT1 0x0040
30#define PCI_GNT2 0x0080
31#define PCI_GNT3 0x0100
32#define PCI_GNT4 0x0200
33
34#define PCI_REQ1 0x0400
35#define PCI_REQ2 0x0800
36#define PCI_REQ3 0x1000
37#define PCI_REQ4 0x2000
38#define PCI_REQ_SHIFT 10
39#define PCI_REQ_MASK 0xf
40
41struct ltq_pci_data {
42 int clock;
43 int gpio;
44 int irq[16];
45};
46
47/* struct used to pass info to network drivers */ 14/* struct used to pass info to network drivers */
48struct ltq_eth_data { 15struct ltq_eth_data {
49 struct sockaddr mac; 16 struct sockaddr mac;
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
index b4465a888e20..aa0b3b866f84 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
@@ -17,50 +17,8 @@
17#define INT_NUM_IM4_IRL0 (INT_NUM_IRQ0 + 128) 17#define INT_NUM_IM4_IRL0 (INT_NUM_IRQ0 + 128)
18#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0) 18#define INT_NUM_IM_OFFSET (INT_NUM_IM1_IRL0 - INT_NUM_IM0_IRL0)
19 19
20#define LTQ_ASC_TIR(x) (INT_NUM_IM3_IRL0 + (x * 8))
21#define LTQ_ASC_RIR(x) (INT_NUM_IM3_IRL0 + (x * 8) + 1)
22#define LTQ_ASC_EIR(x) (INT_NUM_IM3_IRL0 + (x * 8) + 2)
23
24#define LTQ_ASC_ASE_TIR INT_NUM_IM2_IRL0
25#define LTQ_ASC_ASE_RIR (INT_NUM_IM2_IRL0 + 2)
26#define LTQ_ASC_ASE_EIR (INT_NUM_IM2_IRL0 + 3)
27
28#define LTQ_SSC_TIR (INT_NUM_IM0_IRL0 + 15)
29#define LTQ_SSC_RIR (INT_NUM_IM0_IRL0 + 14)
30#define LTQ_SSC_EIR (INT_NUM_IM0_IRL0 + 16)
31
32#define LTQ_MEI_DYING_GASP_INT (INT_NUM_IM1_IRL0 + 21)
33#define LTQ_MEI_INT (INT_NUM_IM1_IRL0 + 23)
34
35#define LTQ_TIMER6_INT (INT_NUM_IM1_IRL0 + 23)
36#define LTQ_USB_INT (INT_NUM_IM1_IRL0 + 22)
37#define LTQ_USB_OC_INT (INT_NUM_IM4_IRL0 + 23)
38
39#define MIPS_CPU_TIMER_IRQ 7
40
41#define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0) 20#define LTQ_DMA_CH0_INT (INT_NUM_IM2_IRL0)
42#define LTQ_DMA_CH1_INT (INT_NUM_IM2_IRL0 + 1)
43#define LTQ_DMA_CH2_INT (INT_NUM_IM2_IRL0 + 2)
44#define LTQ_DMA_CH3_INT (INT_NUM_IM2_IRL0 + 3)
45#define LTQ_DMA_CH4_INT (INT_NUM_IM2_IRL0 + 4)
46#define LTQ_DMA_CH5_INT (INT_NUM_IM2_IRL0 + 5)
47#define LTQ_DMA_CH6_INT (INT_NUM_IM2_IRL0 + 6)
48#define LTQ_DMA_CH7_INT (INT_NUM_IM2_IRL0 + 7)
49#define LTQ_DMA_CH8_INT (INT_NUM_IM2_IRL0 + 8)
50#define LTQ_DMA_CH9_INT (INT_NUM_IM2_IRL0 + 9)
51#define LTQ_DMA_CH10_INT (INT_NUM_IM2_IRL0 + 10)
52#define LTQ_DMA_CH11_INT (INT_NUM_IM2_IRL0 + 11)
53#define LTQ_DMA_CH12_INT (INT_NUM_IM2_IRL0 + 25)
54#define LTQ_DMA_CH13_INT (INT_NUM_IM2_IRL0 + 26)
55#define LTQ_DMA_CH14_INT (INT_NUM_IM2_IRL0 + 27)
56#define LTQ_DMA_CH15_INT (INT_NUM_IM2_IRL0 + 28)
57#define LTQ_DMA_CH16_INT (INT_NUM_IM2_IRL0 + 29)
58#define LTQ_DMA_CH17_INT (INT_NUM_IM2_IRL0 + 30)
59#define LTQ_DMA_CH18_INT (INT_NUM_IM2_IRL0 + 16)
60#define LTQ_DMA_CH19_INT (INT_NUM_IM2_IRL0 + 21)
61
62#define LTQ_PPE_MBOX_INT (INT_NUM_IM2_IRL0 + 24)
63 21
64#define INT_NUM_IM4_IRL14 (INT_NUM_IM4_IRL0 + 14) 22#define MIPS_CPU_TIMER_IRQ 7
65 23
66#endif 24#endif
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
index 8a3c6be669d2..6a2df709c576 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
@@ -17,38 +17,56 @@
17#define SOC_ID_DANUBE1 0x129 17#define SOC_ID_DANUBE1 0x129
18#define SOC_ID_DANUBE2 0x12B 18#define SOC_ID_DANUBE2 0x12B
19#define SOC_ID_TWINPASS 0x12D 19#define SOC_ID_TWINPASS 0x12D
20#define SOC_ID_AMAZON_SE 0x152 20#define SOC_ID_AMAZON_SE_1 0x152 /* 50601 */
21#define SOC_ID_AMAZON_SE_2 0x153 /* 50600 */
21#define SOC_ID_ARX188 0x16C 22#define SOC_ID_ARX188 0x16C
22#define SOC_ID_ARX168 0x16D 23#define SOC_ID_ARX168_1 0x16D
24#define SOC_ID_ARX168_2 0x16E
23#define SOC_ID_ARX182 0x16F 25#define SOC_ID_ARX182 0x16F
24 26#define SOC_ID_GRX188 0x170
25/* SoC Types */ 27#define SOC_ID_GRX168 0x171
28
29#define SOC_ID_VRX288 0x1C0 /* v1.1 */
30#define SOC_ID_VRX282 0x1C1 /* v1.1 */
31#define SOC_ID_VRX268 0x1C2 /* v1.1 */
32#define SOC_ID_GRX268 0x1C8 /* v1.1 */
33#define SOC_ID_GRX288 0x1C9 /* v1.1 */
34#define SOC_ID_VRX288_2 0x00B /* v1.2 */
35#define SOC_ID_VRX268_2 0x00C /* v1.2 */
36#define SOC_ID_GRX288_2 0x00D /* v1.2 */
37#define SOC_ID_GRX282_2 0x00E /* v1.2 */
38
39 /* SoC Types */
26#define SOC_TYPE_DANUBE 0x01 40#define SOC_TYPE_DANUBE 0x01
27#define SOC_TYPE_TWINPASS 0x02 41#define SOC_TYPE_TWINPASS 0x02
28#define SOC_TYPE_AR9 0x03 42#define SOC_TYPE_AR9 0x03
29#define SOC_TYPE_VR9 0x04 43#define SOC_TYPE_VR9 0x04 /* v1.1 */
30#define SOC_TYPE_AMAZON_SE 0x05 44#define SOC_TYPE_VR9_2 0x05 /* v1.2 */
45#define SOC_TYPE_AMAZON_SE 0x06
46
47/* BOOT_SEL - find what boot media we have */
48#define BS_EXT_ROM 0x0
49#define BS_FLASH 0x1
50#define BS_MII0 0x2
51#define BS_PCI 0x3
52#define BS_UART1 0x4
53#define BS_SPI 0x5
54#define BS_NAND 0x6
55#define BS_RMII0 0x7
56
57/* helpers used to access the cgu */
58#define ltq_cgu_w32(x, y) ltq_w32((x), ltq_cgu_membase + (y))
59#define ltq_cgu_r32(x) ltq_r32(ltq_cgu_membase + (x))
60extern __iomem void *ltq_cgu_membase;
31 61
32/* ASC0/1 - serial port */ 62/*
33#define LTQ_ASC0_BASE_ADDR 0x1E100400 63 * during early_printk no ioremap is possible
64 * lets use KSEG1 instead
65 */
34#define LTQ_ASC1_BASE_ADDR 0x1E100C00 66#define LTQ_ASC1_BASE_ADDR 0x1E100C00
35#define LTQ_ASC_SIZE 0x400 67#define LTQ_EARLY_ASC KSEG1ADDR(LTQ_ASC1_BASE_ADDR)
36
37/* RCU - reset control unit */
38#define LTQ_RCU_BASE_ADDR 0x1F203000
39#define LTQ_RCU_SIZE 0x1000
40
41/* GPTU - general purpose timer unit */
42#define LTQ_GPTU_BASE_ADDR 0x18000300
43#define LTQ_GPTU_SIZE 0x100
44 68
45/* EBU - external bus unit */ 69/* EBU - external bus unit */
46#define LTQ_EBU_GPIO_START 0x14000000
47#define LTQ_EBU_GPIO_SIZE 0x1000
48
49#define LTQ_EBU_BASE_ADDR 0x1E105300
50#define LTQ_EBU_SIZE 0x100
51
52#define LTQ_EBU_BUSCON0 0x0060 70#define LTQ_EBU_BUSCON0 0x0060
53#define LTQ_EBU_PCC_CON 0x0090 71#define LTQ_EBU_PCC_CON 0x0090
54#define LTQ_EBU_PCC_IEN 0x00A4 72#define LTQ_EBU_PCC_IEN 0x00A4
@@ -57,85 +75,17 @@
57#define LTQ_EBU_ADDRSEL1 0x0024 75#define LTQ_EBU_ADDRSEL1 0x0024
58#define EBU_WRDIS 0x80000000 76#define EBU_WRDIS 0x80000000
59 77
60/* CGU - clock generation unit */
61#define LTQ_CGU_BASE_ADDR 0x1F103000
62#define LTQ_CGU_SIZE 0x1000
63
64/* ICU - interrupt control unit */
65#define LTQ_ICU_BASE_ADDR 0x1F880200
66#define LTQ_ICU_SIZE 0x100
67
68/* EIU - external interrupt unit */
69#define LTQ_EIU_BASE_ADDR 0x1F101000
70#define LTQ_EIU_SIZE 0x1000
71
72/* PMU - power management unit */
73#define LTQ_PMU_BASE_ADDR 0x1F102000
74#define LTQ_PMU_SIZE 0x1000
75
76#define PMU_DMA 0x0020
77#define PMU_USB 0x8041
78#define PMU_LED 0x0800
79#define PMU_GPT 0x1000
80#define PMU_PPE 0x2000
81#define PMU_FPI 0x4000
82#define PMU_SWITCH 0x10000000
83
84/* ETOP - ethernet */
85#define LTQ_ETOP_BASE_ADDR 0x1E180000
86#define LTQ_ETOP_SIZE 0x40000
87
88/* DMA */
89#define LTQ_DMA_BASE_ADDR 0x1E104100
90#define LTQ_DMA_SIZE 0x800
91
92/* PCI */
93#define PCI_CR_BASE_ADDR 0x1E105400
94#define PCI_CR_SIZE 0x400
95
96/* WDT */ 78/* WDT */
97#define LTQ_WDT_BASE_ADDR 0x1F8803F0 79#define LTQ_RST_CAUSE_WDTRST 0x20
98#define LTQ_WDT_SIZE 0x10
99
100/* STP - serial to parallel conversion unit */
101#define LTQ_STP_BASE_ADDR 0x1E100BB0
102#define LTQ_STP_SIZE 0x40
103
104/* GPIO */
105#define LTQ_GPIO0_BASE_ADDR 0x1E100B10
106#define LTQ_GPIO1_BASE_ADDR 0x1E100B40
107#define LTQ_GPIO2_BASE_ADDR 0x1E100B70
108#define LTQ_GPIO_SIZE 0x30
109
110/* SSC */
111#define LTQ_SSC_BASE_ADDR 0x1e100800
112#define LTQ_SSC_SIZE 0x100
113
114/* MEI - dsl core */
115#define LTQ_MEI_BASE_ADDR 0x1E116000
116
117/* DEU - data encryption unit */
118#define LTQ_DEU_BASE_ADDR 0x1E103100
119 80
120/* MPS - multi processor unit (voice) */ 81/* MPS - multi processor unit (voice) */
121#define LTQ_MPS_BASE_ADDR (KSEG1 + 0x1F107000) 82#define LTQ_MPS_BASE_ADDR (KSEG1 + 0x1F107000)
122#define LTQ_MPS_CHIPID ((u32 *)(LTQ_MPS_BASE_ADDR + 0x0344)) 83#define LTQ_MPS_CHIPID ((u32 *)(LTQ_MPS_BASE_ADDR + 0x0344))
123 84
124/* request a non-gpio and set the PIO config */ 85/* request a non-gpio and set the PIO config */
125extern int ltq_gpio_request(unsigned int pin, unsigned int alt0, 86#define PMU_PPE BIT(13)
126 unsigned int alt1, unsigned int dir, const char *name);
127extern void ltq_pmu_enable(unsigned int module); 87extern void ltq_pmu_enable(unsigned int module);
128extern void ltq_pmu_disable(unsigned int module); 88extern void ltq_pmu_disable(unsigned int module);
129 89
130static inline int ltq_is_ar9(void)
131{
132 return (ltq_get_soc_type() == SOC_TYPE_AR9);
133}
134
135static inline int ltq_is_vr9(void)
136{
137 return (ltq_get_soc_type() == SOC_TYPE_VR9);
138}
139
140#endif /* CONFIG_SOC_TYPE_XWAY */ 90#endif /* CONFIG_SOC_TYPE_XWAY */
141#endif /* _LTQ_XWAY_H__ */ 91#endif /* _LTQ_XWAY_H__ */
diff --git a/arch/mips/include/asm/mips-boards/generic.h b/arch/mips/include/asm/mips-boards/generic.h
index 46c08563e532..6e23ceb0ba8c 100644
--- a/arch/mips/include/asm/mips-boards/generic.h
+++ b/arch/mips/include/asm/mips-boards/generic.h
@@ -93,8 +93,4 @@ extern void mips_pcibios_init(void);
93#define mips_pcibios_init() do { } while (0) 93#define mips_pcibios_init() do { } while (0)
94#endif 94#endif
95 95
96#ifdef CONFIG_KGDB
97extern void kgdb_config(void);
98#endif
99
100#endif /* __ASM_MIPS_BOARDS_GENERIC_H */ 96#endif /* __ASM_MIPS_BOARDS_GENERIC_H */
diff --git a/arch/mips/include/asm/module.h b/arch/mips/include/asm/module.h
index 7467d1d933d5..530008048c62 100644
--- a/arch/mips/include/asm/module.h
+++ b/arch/mips/include/asm/module.h
@@ -2,6 +2,7 @@
2#define _ASM_MODULE_H 2#define _ASM_MODULE_H
3 3
4#include <linux/list.h> 4#include <linux/list.h>
5#include <linux/elf.h>
5#include <asm/uaccess.h> 6#include <asm/uaccess.h>
6 7
7struct mod_arch_specific { 8struct mod_arch_specific {
diff --git a/arch/mips/include/asm/octeon/cvmx-pcieep-defs.h b/arch/mips/include/asm/octeon/cvmx-pcieep-defs.h
deleted file mode 100644
index d553f8e88df6..000000000000
--- a/arch/mips/include/asm/octeon/cvmx-pcieep-defs.h
+++ /dev/null
@@ -1,1365 +0,0 @@
1/***********************license start***************
2 * Author: Cavium Networks
3 *
4 * Contact: support@caviumnetworks.com
5 * This file is part of the OCTEON SDK
6 *
7 * Copyright (c) 2003-2008 Cavium Networks
8 *
9 * This file is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License, Version 2, as
11 * published by the Free Software Foundation.
12 *
13 * This file is distributed in the hope that it will be useful, but
14 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
16 * NONINFRINGEMENT. See the GNU General Public License for more
17 * details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this file; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 * or visit http://www.gnu.org/licenses/.
23 *
24 * This file may also be available under a different license from Cavium.
25 * Contact Cavium Networks for more information
26 ***********************license end**************************************/
27
28#ifndef __CVMX_PCIEEP_DEFS_H__
29#define __CVMX_PCIEEP_DEFS_H__
30
31#define CVMX_PCIEEP_CFG000 \
32 (0x0000000000000000ull)
33#define CVMX_PCIEEP_CFG001 \
34 (0x0000000000000004ull)
35#define CVMX_PCIEEP_CFG002 \
36 (0x0000000000000008ull)
37#define CVMX_PCIEEP_CFG003 \
38 (0x000000000000000Cull)
39#define CVMX_PCIEEP_CFG004 \
40 (0x0000000000000010ull)
41#define CVMX_PCIEEP_CFG004_MASK \
42 (0x0000000080000010ull)
43#define CVMX_PCIEEP_CFG005 \
44 (0x0000000000000014ull)
45#define CVMX_PCIEEP_CFG005_MASK \
46 (0x0000000080000014ull)
47#define CVMX_PCIEEP_CFG006 \
48 (0x0000000000000018ull)
49#define CVMX_PCIEEP_CFG006_MASK \
50 (0x0000000080000018ull)
51#define CVMX_PCIEEP_CFG007 \
52 (0x000000000000001Cull)
53#define CVMX_PCIEEP_CFG007_MASK \
54 (0x000000008000001Cull)
55#define CVMX_PCIEEP_CFG008 \
56 (0x0000000000000020ull)
57#define CVMX_PCIEEP_CFG008_MASK \
58 (0x0000000080000020ull)
59#define CVMX_PCIEEP_CFG009 \
60 (0x0000000000000024ull)
61#define CVMX_PCIEEP_CFG009_MASK \
62 (0x0000000080000024ull)
63#define CVMX_PCIEEP_CFG010 \
64 (0x0000000000000028ull)
65#define CVMX_PCIEEP_CFG011 \
66 (0x000000000000002Cull)
67#define CVMX_PCIEEP_CFG012 \
68 (0x0000000000000030ull)
69#define CVMX_PCIEEP_CFG012_MASK \
70 (0x0000000080000030ull)
71#define CVMX_PCIEEP_CFG013 \
72 (0x0000000000000034ull)
73#define CVMX_PCIEEP_CFG015 \
74 (0x000000000000003Cull)
75#define CVMX_PCIEEP_CFG016 \
76 (0x0000000000000040ull)
77#define CVMX_PCIEEP_CFG017 \
78 (0x0000000000000044ull)
79#define CVMX_PCIEEP_CFG020 \
80 (0x0000000000000050ull)
81#define CVMX_PCIEEP_CFG021 \
82 (0x0000000000000054ull)
83#define CVMX_PCIEEP_CFG022 \
84 (0x0000000000000058ull)
85#define CVMX_PCIEEP_CFG023 \
86 (0x000000000000005Cull)
87#define CVMX_PCIEEP_CFG028 \
88 (0x0000000000000070ull)
89#define CVMX_PCIEEP_CFG029 \
90 (0x0000000000000074ull)
91#define CVMX_PCIEEP_CFG030 \
92 (0x0000000000000078ull)
93#define CVMX_PCIEEP_CFG031 \
94 (0x000000000000007Cull)
95#define CVMX_PCIEEP_CFG032 \
96 (0x0000000000000080ull)
97#define CVMX_PCIEEP_CFG033 \
98 (0x0000000000000084ull)
99#define CVMX_PCIEEP_CFG034 \
100 (0x0000000000000088ull)
101#define CVMX_PCIEEP_CFG037 \
102 (0x0000000000000094ull)
103#define CVMX_PCIEEP_CFG038 \
104 (0x0000000000000098ull)
105#define CVMX_PCIEEP_CFG039 \
106 (0x000000000000009Cull)
107#define CVMX_PCIEEP_CFG040 \
108 (0x00000000000000A0ull)
109#define CVMX_PCIEEP_CFG041 \
110 (0x00000000000000A4ull)
111#define CVMX_PCIEEP_CFG042 \
112 (0x00000000000000A8ull)
113#define CVMX_PCIEEP_CFG064 \
114 (0x0000000000000100ull)
115#define CVMX_PCIEEP_CFG065 \
116 (0x0000000000000104ull)
117#define CVMX_PCIEEP_CFG066 \
118 (0x0000000000000108ull)
119#define CVMX_PCIEEP_CFG067 \
120 (0x000000000000010Cull)
121#define CVMX_PCIEEP_CFG068 \
122 (0x0000000000000110ull)
123#define CVMX_PCIEEP_CFG069 \
124 (0x0000000000000114ull)
125#define CVMX_PCIEEP_CFG070 \
126 (0x0000000000000118ull)
127#define CVMX_PCIEEP_CFG071 \
128 (0x000000000000011Cull)
129#define CVMX_PCIEEP_CFG072 \
130 (0x0000000000000120ull)
131#define CVMX_PCIEEP_CFG073 \
132 (0x0000000000000124ull)
133#define CVMX_PCIEEP_CFG074 \
134 (0x0000000000000128ull)
135#define CVMX_PCIEEP_CFG448 \
136 (0x0000000000000700ull)
137#define CVMX_PCIEEP_CFG449 \
138 (0x0000000000000704ull)
139#define CVMX_PCIEEP_CFG450 \
140 (0x0000000000000708ull)
141#define CVMX_PCIEEP_CFG451 \
142 (0x000000000000070Cull)
143#define CVMX_PCIEEP_CFG452 \
144 (0x0000000000000710ull)
145#define CVMX_PCIEEP_CFG453 \
146 (0x0000000000000714ull)
147#define CVMX_PCIEEP_CFG454 \
148 (0x0000000000000718ull)
149#define CVMX_PCIEEP_CFG455 \
150 (0x000000000000071Cull)
151#define CVMX_PCIEEP_CFG456 \
152 (0x0000000000000720ull)
153#define CVMX_PCIEEP_CFG458 \
154 (0x0000000000000728ull)
155#define CVMX_PCIEEP_CFG459 \
156 (0x000000000000072Cull)
157#define CVMX_PCIEEP_CFG460 \
158 (0x0000000000000730ull)
159#define CVMX_PCIEEP_CFG461 \
160 (0x0000000000000734ull)
161#define CVMX_PCIEEP_CFG462 \
162 (0x0000000000000738ull)
163#define CVMX_PCIEEP_CFG463 \
164 (0x000000000000073Cull)
165#define CVMX_PCIEEP_CFG464 \
166 (0x0000000000000740ull)
167#define CVMX_PCIEEP_CFG465 \
168 (0x0000000000000744ull)
169#define CVMX_PCIEEP_CFG466 \
170 (0x0000000000000748ull)
171#define CVMX_PCIEEP_CFG467 \
172 (0x000000000000074Cull)
173#define CVMX_PCIEEP_CFG468 \
174 (0x0000000000000750ull)
175#define CVMX_PCIEEP_CFG490 \
176 (0x00000000000007A8ull)
177#define CVMX_PCIEEP_CFG491 \
178 (0x00000000000007ACull)
179#define CVMX_PCIEEP_CFG492 \
180 (0x00000000000007B0ull)
181#define CVMX_PCIEEP_CFG516 \
182 (0x0000000000000810ull)
183#define CVMX_PCIEEP_CFG517 \
184 (0x0000000000000814ull)
185
186union cvmx_pcieep_cfg000 {
187 uint32_t u32;
188 struct cvmx_pcieep_cfg000_s {
189 uint32_t devid:16;
190 uint32_t vendid:16;
191 } s;
192 struct cvmx_pcieep_cfg000_s cn52xx;
193 struct cvmx_pcieep_cfg000_s cn52xxp1;
194 struct cvmx_pcieep_cfg000_s cn56xx;
195 struct cvmx_pcieep_cfg000_s cn56xxp1;
196};
197
198union cvmx_pcieep_cfg001 {
199 uint32_t u32;
200 struct cvmx_pcieep_cfg001_s {
201 uint32_t dpe:1;
202 uint32_t sse:1;
203 uint32_t rma:1;
204 uint32_t rta:1;
205 uint32_t sta:1;
206 uint32_t devt:2;
207 uint32_t mdpe:1;
208 uint32_t fbb:1;
209 uint32_t reserved_22_22:1;
210 uint32_t m66:1;
211 uint32_t cl:1;
212 uint32_t i_stat:1;
213 uint32_t reserved_11_18:8;
214 uint32_t i_dis:1;
215 uint32_t fbbe:1;
216 uint32_t see:1;
217 uint32_t ids_wcc:1;
218 uint32_t per:1;
219 uint32_t vps:1;
220 uint32_t mwice:1;
221 uint32_t scse:1;
222 uint32_t me:1;
223 uint32_t msae:1;
224 uint32_t isae:1;
225 } s;
226 struct cvmx_pcieep_cfg001_s cn52xx;
227 struct cvmx_pcieep_cfg001_s cn52xxp1;
228 struct cvmx_pcieep_cfg001_s cn56xx;
229 struct cvmx_pcieep_cfg001_s cn56xxp1;
230};
231
232union cvmx_pcieep_cfg002 {
233 uint32_t u32;
234 struct cvmx_pcieep_cfg002_s {
235 uint32_t bcc:8;
236 uint32_t sc:8;
237 uint32_t pi:8;
238 uint32_t rid:8;
239 } s;
240 struct cvmx_pcieep_cfg002_s cn52xx;
241 struct cvmx_pcieep_cfg002_s cn52xxp1;
242 struct cvmx_pcieep_cfg002_s cn56xx;
243 struct cvmx_pcieep_cfg002_s cn56xxp1;
244};
245
246union cvmx_pcieep_cfg003 {
247 uint32_t u32;
248 struct cvmx_pcieep_cfg003_s {
249 uint32_t bist:8;
250 uint32_t mfd:1;
251 uint32_t chf:7;
252 uint32_t lt:8;
253 uint32_t cls:8;
254 } s;
255 struct cvmx_pcieep_cfg003_s cn52xx;
256 struct cvmx_pcieep_cfg003_s cn52xxp1;
257 struct cvmx_pcieep_cfg003_s cn56xx;
258 struct cvmx_pcieep_cfg003_s cn56xxp1;
259};
260
261union cvmx_pcieep_cfg004 {
262 uint32_t u32;
263 struct cvmx_pcieep_cfg004_s {
264 uint32_t lbab:18;
265 uint32_t reserved_4_13:10;
266 uint32_t pf:1;
267 uint32_t typ:2;
268 uint32_t mspc:1;
269 } s;
270 struct cvmx_pcieep_cfg004_s cn52xx;
271 struct cvmx_pcieep_cfg004_s cn52xxp1;
272 struct cvmx_pcieep_cfg004_s cn56xx;
273 struct cvmx_pcieep_cfg004_s cn56xxp1;
274};
275
276union cvmx_pcieep_cfg004_mask {
277 uint32_t u32;
278 struct cvmx_pcieep_cfg004_mask_s {
279 uint32_t lmask:31;
280 uint32_t enb:1;
281 } s;
282 struct cvmx_pcieep_cfg004_mask_s cn52xx;
283 struct cvmx_pcieep_cfg004_mask_s cn52xxp1;
284 struct cvmx_pcieep_cfg004_mask_s cn56xx;
285 struct cvmx_pcieep_cfg004_mask_s cn56xxp1;
286};
287
288union cvmx_pcieep_cfg005 {
289 uint32_t u32;
290 struct cvmx_pcieep_cfg005_s {
291 uint32_t ubab:32;
292 } s;
293 struct cvmx_pcieep_cfg005_s cn52xx;
294 struct cvmx_pcieep_cfg005_s cn52xxp1;
295 struct cvmx_pcieep_cfg005_s cn56xx;
296 struct cvmx_pcieep_cfg005_s cn56xxp1;
297};
298
299union cvmx_pcieep_cfg005_mask {
300 uint32_t u32;
301 struct cvmx_pcieep_cfg005_mask_s {
302 uint32_t umask:32;
303 } s;
304 struct cvmx_pcieep_cfg005_mask_s cn52xx;
305 struct cvmx_pcieep_cfg005_mask_s cn52xxp1;
306 struct cvmx_pcieep_cfg005_mask_s cn56xx;
307 struct cvmx_pcieep_cfg005_mask_s cn56xxp1;
308};
309
310union cvmx_pcieep_cfg006 {
311 uint32_t u32;
312 struct cvmx_pcieep_cfg006_s {
313 uint32_t lbab:6;
314 uint32_t reserved_4_25:22;
315 uint32_t pf:1;
316 uint32_t typ:2;
317 uint32_t mspc:1;
318 } s;
319 struct cvmx_pcieep_cfg006_s cn52xx;
320 struct cvmx_pcieep_cfg006_s cn52xxp1;
321 struct cvmx_pcieep_cfg006_s cn56xx;
322 struct cvmx_pcieep_cfg006_s cn56xxp1;
323};
324
325union cvmx_pcieep_cfg006_mask {
326 uint32_t u32;
327 struct cvmx_pcieep_cfg006_mask_s {
328 uint32_t lmask:31;
329 uint32_t enb:1;
330 } s;
331 struct cvmx_pcieep_cfg006_mask_s cn52xx;
332 struct cvmx_pcieep_cfg006_mask_s cn52xxp1;
333 struct cvmx_pcieep_cfg006_mask_s cn56xx;
334 struct cvmx_pcieep_cfg006_mask_s cn56xxp1;
335};
336
337union cvmx_pcieep_cfg007 {
338 uint32_t u32;
339 struct cvmx_pcieep_cfg007_s {
340 uint32_t ubab:32;
341 } s;
342 struct cvmx_pcieep_cfg007_s cn52xx;
343 struct cvmx_pcieep_cfg007_s cn52xxp1;
344 struct cvmx_pcieep_cfg007_s cn56xx;
345 struct cvmx_pcieep_cfg007_s cn56xxp1;
346};
347
348union cvmx_pcieep_cfg007_mask {
349 uint32_t u32;
350 struct cvmx_pcieep_cfg007_mask_s {
351 uint32_t umask:32;
352 } s;
353 struct cvmx_pcieep_cfg007_mask_s cn52xx;
354 struct cvmx_pcieep_cfg007_mask_s cn52xxp1;
355 struct cvmx_pcieep_cfg007_mask_s cn56xx;
356 struct cvmx_pcieep_cfg007_mask_s cn56xxp1;
357};
358
359union cvmx_pcieep_cfg008 {
360 uint32_t u32;
361 struct cvmx_pcieep_cfg008_s {
362 uint32_t reserved_4_31:28;
363 uint32_t pf:1;
364 uint32_t typ:2;
365 uint32_t mspc:1;
366 } s;
367 struct cvmx_pcieep_cfg008_s cn52xx;
368 struct cvmx_pcieep_cfg008_s cn52xxp1;
369 struct cvmx_pcieep_cfg008_s cn56xx;
370 struct cvmx_pcieep_cfg008_s cn56xxp1;
371};
372
373union cvmx_pcieep_cfg008_mask {
374 uint32_t u32;
375 struct cvmx_pcieep_cfg008_mask_s {
376 uint32_t lmask:31;
377 uint32_t enb:1;
378 } s;
379 struct cvmx_pcieep_cfg008_mask_s cn52xx;
380 struct cvmx_pcieep_cfg008_mask_s cn52xxp1;
381 struct cvmx_pcieep_cfg008_mask_s cn56xx;
382 struct cvmx_pcieep_cfg008_mask_s cn56xxp1;
383};
384
385union cvmx_pcieep_cfg009 {
386 uint32_t u32;
387 struct cvmx_pcieep_cfg009_s {
388 uint32_t ubab:25;
389 uint32_t reserved_0_6:7;
390 } s;
391 struct cvmx_pcieep_cfg009_s cn52xx;
392 struct cvmx_pcieep_cfg009_s cn52xxp1;
393 struct cvmx_pcieep_cfg009_s cn56xx;
394 struct cvmx_pcieep_cfg009_s cn56xxp1;
395};
396
397union cvmx_pcieep_cfg009_mask {
398 uint32_t u32;
399 struct cvmx_pcieep_cfg009_mask_s {
400 uint32_t umask:32;
401 } s;
402 struct cvmx_pcieep_cfg009_mask_s cn52xx;
403 struct cvmx_pcieep_cfg009_mask_s cn52xxp1;
404 struct cvmx_pcieep_cfg009_mask_s cn56xx;
405 struct cvmx_pcieep_cfg009_mask_s cn56xxp1;
406};
407
408union cvmx_pcieep_cfg010 {
409 uint32_t u32;
410 struct cvmx_pcieep_cfg010_s {
411 uint32_t cisp:32;
412 } s;
413 struct cvmx_pcieep_cfg010_s cn52xx;
414 struct cvmx_pcieep_cfg010_s cn52xxp1;
415 struct cvmx_pcieep_cfg010_s cn56xx;
416 struct cvmx_pcieep_cfg010_s cn56xxp1;
417};
418
419union cvmx_pcieep_cfg011 {
420 uint32_t u32;
421 struct cvmx_pcieep_cfg011_s {
422 uint32_t ssid:16;
423 uint32_t ssvid:16;
424 } s;
425 struct cvmx_pcieep_cfg011_s cn52xx;
426 struct cvmx_pcieep_cfg011_s cn52xxp1;
427 struct cvmx_pcieep_cfg011_s cn56xx;
428 struct cvmx_pcieep_cfg011_s cn56xxp1;
429};
430
431union cvmx_pcieep_cfg012 {
432 uint32_t u32;
433 struct cvmx_pcieep_cfg012_s {
434 uint32_t eraddr:16;
435 uint32_t reserved_1_15:15;
436 uint32_t er_en:1;
437 } s;
438 struct cvmx_pcieep_cfg012_s cn52xx;
439 struct cvmx_pcieep_cfg012_s cn52xxp1;
440 struct cvmx_pcieep_cfg012_s cn56xx;
441 struct cvmx_pcieep_cfg012_s cn56xxp1;
442};
443
444union cvmx_pcieep_cfg012_mask {
445 uint32_t u32;
446 struct cvmx_pcieep_cfg012_mask_s {
447 uint32_t mask:31;
448 uint32_t enb:1;
449 } s;
450 struct cvmx_pcieep_cfg012_mask_s cn52xx;
451 struct cvmx_pcieep_cfg012_mask_s cn52xxp1;
452 struct cvmx_pcieep_cfg012_mask_s cn56xx;
453 struct cvmx_pcieep_cfg012_mask_s cn56xxp1;
454};
455
456union cvmx_pcieep_cfg013 {
457 uint32_t u32;
458 struct cvmx_pcieep_cfg013_s {
459 uint32_t reserved_8_31:24;
460 uint32_t cp:8;
461 } s;
462 struct cvmx_pcieep_cfg013_s cn52xx;
463 struct cvmx_pcieep_cfg013_s cn52xxp1;
464 struct cvmx_pcieep_cfg013_s cn56xx;
465 struct cvmx_pcieep_cfg013_s cn56xxp1;
466};
467
468union cvmx_pcieep_cfg015 {
469 uint32_t u32;
470 struct cvmx_pcieep_cfg015_s {
471 uint32_t ml:8;
472 uint32_t mg:8;
473 uint32_t inta:8;
474 uint32_t il:8;
475 } s;
476 struct cvmx_pcieep_cfg015_s cn52xx;
477 struct cvmx_pcieep_cfg015_s cn52xxp1;
478 struct cvmx_pcieep_cfg015_s cn56xx;
479 struct cvmx_pcieep_cfg015_s cn56xxp1;
480};
481
482union cvmx_pcieep_cfg016 {
483 uint32_t u32;
484 struct cvmx_pcieep_cfg016_s {
485 uint32_t pmes:5;
486 uint32_t d2s:1;
487 uint32_t d1s:1;
488 uint32_t auxc:3;
489 uint32_t dsi:1;
490 uint32_t reserved_20_20:1;
491 uint32_t pme_clock:1;
492 uint32_t pmsv:3;
493 uint32_t ncp:8;
494 uint32_t pmcid:8;
495 } s;
496 struct cvmx_pcieep_cfg016_s cn52xx;
497 struct cvmx_pcieep_cfg016_s cn52xxp1;
498 struct cvmx_pcieep_cfg016_s cn56xx;
499 struct cvmx_pcieep_cfg016_s cn56xxp1;
500};
501
502union cvmx_pcieep_cfg017 {
503 uint32_t u32;
504 struct cvmx_pcieep_cfg017_s {
505 uint32_t pmdia:8;
506 uint32_t bpccee:1;
507 uint32_t bd3h:1;
508 uint32_t reserved_16_21:6;
509 uint32_t pmess:1;
510 uint32_t pmedsia:2;
511 uint32_t pmds:4;
512 uint32_t pmeens:1;
513 uint32_t reserved_4_7:4;
514 uint32_t nsr:1;
515 uint32_t reserved_2_2:1;
516 uint32_t ps:2;
517 } s;
518 struct cvmx_pcieep_cfg017_s cn52xx;
519 struct cvmx_pcieep_cfg017_s cn52xxp1;
520 struct cvmx_pcieep_cfg017_s cn56xx;
521 struct cvmx_pcieep_cfg017_s cn56xxp1;
522};
523
524union cvmx_pcieep_cfg020 {
525 uint32_t u32;
526 struct cvmx_pcieep_cfg020_s {
527 uint32_t reserved_24_31:8;
528 uint32_t m64:1;
529 uint32_t mme:3;
530 uint32_t mmc:3;
531 uint32_t msien:1;
532 uint32_t ncp:8;
533 uint32_t msicid:8;
534 } s;
535 struct cvmx_pcieep_cfg020_s cn52xx;
536 struct cvmx_pcieep_cfg020_s cn52xxp1;
537 struct cvmx_pcieep_cfg020_s cn56xx;
538 struct cvmx_pcieep_cfg020_s cn56xxp1;
539};
540
541union cvmx_pcieep_cfg021 {
542 uint32_t u32;
543 struct cvmx_pcieep_cfg021_s {
544 uint32_t lmsi:30;
545 uint32_t reserved_0_1:2;
546 } s;
547 struct cvmx_pcieep_cfg021_s cn52xx;
548 struct cvmx_pcieep_cfg021_s cn52xxp1;
549 struct cvmx_pcieep_cfg021_s cn56xx;
550 struct cvmx_pcieep_cfg021_s cn56xxp1;
551};
552
553union cvmx_pcieep_cfg022 {
554 uint32_t u32;
555 struct cvmx_pcieep_cfg022_s {
556 uint32_t umsi:32;
557 } s;
558 struct cvmx_pcieep_cfg022_s cn52xx;
559 struct cvmx_pcieep_cfg022_s cn52xxp1;
560 struct cvmx_pcieep_cfg022_s cn56xx;
561 struct cvmx_pcieep_cfg022_s cn56xxp1;
562};
563
564union cvmx_pcieep_cfg023 {
565 uint32_t u32;
566 struct cvmx_pcieep_cfg023_s {
567 uint32_t reserved_16_31:16;
568 uint32_t msimd:16;
569 } s;
570 struct cvmx_pcieep_cfg023_s cn52xx;
571 struct cvmx_pcieep_cfg023_s cn52xxp1;
572 struct cvmx_pcieep_cfg023_s cn56xx;
573 struct cvmx_pcieep_cfg023_s cn56xxp1;
574};
575
576union cvmx_pcieep_cfg028 {
577 uint32_t u32;
578 struct cvmx_pcieep_cfg028_s {
579 uint32_t reserved_30_31:2;
580 uint32_t imn:5;
581 uint32_t si:1;
582 uint32_t dpt:4;
583 uint32_t pciecv:4;
584 uint32_t ncp:8;
585 uint32_t pcieid:8;
586 } s;
587 struct cvmx_pcieep_cfg028_s cn52xx;
588 struct cvmx_pcieep_cfg028_s cn52xxp1;
589 struct cvmx_pcieep_cfg028_s cn56xx;
590 struct cvmx_pcieep_cfg028_s cn56xxp1;
591};
592
593union cvmx_pcieep_cfg029 {
594 uint32_t u32;
595 struct cvmx_pcieep_cfg029_s {
596 uint32_t reserved_28_31:4;
597 uint32_t cspls:2;
598 uint32_t csplv:8;
599 uint32_t reserved_16_17:2;
600 uint32_t rber:1;
601 uint32_t reserved_12_14:3;
602 uint32_t el1al:3;
603 uint32_t el0al:3;
604 uint32_t etfs:1;
605 uint32_t pfs:2;
606 uint32_t mpss:3;
607 } s;
608 struct cvmx_pcieep_cfg029_s cn52xx;
609 struct cvmx_pcieep_cfg029_s cn52xxp1;
610 struct cvmx_pcieep_cfg029_s cn56xx;
611 struct cvmx_pcieep_cfg029_s cn56xxp1;
612};
613
614union cvmx_pcieep_cfg030 {
615 uint32_t u32;
616 struct cvmx_pcieep_cfg030_s {
617 uint32_t reserved_22_31:10;
618 uint32_t tp:1;
619 uint32_t ap_d:1;
620 uint32_t ur_d:1;
621 uint32_t fe_d:1;
622 uint32_t nfe_d:1;
623 uint32_t ce_d:1;
624 uint32_t reserved_15_15:1;
625 uint32_t mrrs:3;
626 uint32_t ns_en:1;
627 uint32_t ap_en:1;
628 uint32_t pf_en:1;
629 uint32_t etf_en:1;
630 uint32_t mps:3;
631 uint32_t ro_en:1;
632 uint32_t ur_en:1;
633 uint32_t fe_en:1;
634 uint32_t nfe_en:1;
635 uint32_t ce_en:1;
636 } s;
637 struct cvmx_pcieep_cfg030_s cn52xx;
638 struct cvmx_pcieep_cfg030_s cn52xxp1;
639 struct cvmx_pcieep_cfg030_s cn56xx;
640 struct cvmx_pcieep_cfg030_s cn56xxp1;
641};
642
643union cvmx_pcieep_cfg031 {
644 uint32_t u32;
645 struct cvmx_pcieep_cfg031_s {
646 uint32_t pnum:8;
647 uint32_t reserved_22_23:2;
648 uint32_t lbnc:1;
649 uint32_t dllarc:1;
650 uint32_t sderc:1;
651 uint32_t cpm:1;
652 uint32_t l1el:3;
653 uint32_t l0el:3;
654 uint32_t aslpms:2;
655 uint32_t mlw:6;
656 uint32_t mls:4;
657 } s;
658 struct cvmx_pcieep_cfg031_s cn52xx;
659 struct cvmx_pcieep_cfg031_s cn52xxp1;
660 struct cvmx_pcieep_cfg031_s cn56xx;
661 struct cvmx_pcieep_cfg031_s cn56xxp1;
662};
663
664union cvmx_pcieep_cfg032 {
665 uint32_t u32;
666 struct cvmx_pcieep_cfg032_s {
667 uint32_t reserved_30_31:2;
668 uint32_t dlla:1;
669 uint32_t scc:1;
670 uint32_t lt:1;
671 uint32_t reserved_26_26:1;
672 uint32_t nlw:6;
673 uint32_t ls:4;
674 uint32_t reserved_10_15:6;
675 uint32_t hawd:1;
676 uint32_t ecpm:1;
677 uint32_t es:1;
678 uint32_t ccc:1;
679 uint32_t rl:1;
680 uint32_t ld:1;
681 uint32_t rcb:1;
682 uint32_t reserved_2_2:1;
683 uint32_t aslpc:2;
684 } s;
685 struct cvmx_pcieep_cfg032_s cn52xx;
686 struct cvmx_pcieep_cfg032_s cn52xxp1;
687 struct cvmx_pcieep_cfg032_s cn56xx;
688 struct cvmx_pcieep_cfg032_s cn56xxp1;
689};
690
691union cvmx_pcieep_cfg033 {
692 uint32_t u32;
693 struct cvmx_pcieep_cfg033_s {
694 uint32_t ps_num:13;
695 uint32_t nccs:1;
696 uint32_t emip:1;
697 uint32_t sp_ls:2;
698 uint32_t sp_lv:8;
699 uint32_t hp_c:1;
700 uint32_t hp_s:1;
701 uint32_t pip:1;
702 uint32_t aip:1;
703 uint32_t mrlsp:1;
704 uint32_t pcp:1;
705 uint32_t abp:1;
706 } s;
707 struct cvmx_pcieep_cfg033_s cn52xx;
708 struct cvmx_pcieep_cfg033_s cn52xxp1;
709 struct cvmx_pcieep_cfg033_s cn56xx;
710 struct cvmx_pcieep_cfg033_s cn56xxp1;
711};
712
713union cvmx_pcieep_cfg034 {
714 uint32_t u32;
715 struct cvmx_pcieep_cfg034_s {
716 uint32_t reserved_25_31:7;
717 uint32_t dlls_c:1;
718 uint32_t emis:1;
719 uint32_t pds:1;
720 uint32_t mrlss:1;
721 uint32_t ccint_d:1;
722 uint32_t pd_c:1;
723 uint32_t mrls_c:1;
724 uint32_t pf_d:1;
725 uint32_t abp_d:1;
726 uint32_t reserved_13_15:3;
727 uint32_t dlls_en:1;
728 uint32_t emic:1;
729 uint32_t pcc:1;
730 uint32_t pic:2;
731 uint32_t aic:2;
732 uint32_t hpint_en:1;
733 uint32_t ccint_en:1;
734 uint32_t pd_en:1;
735 uint32_t mrls_en:1;
736 uint32_t pf_en:1;
737 uint32_t abp_en:1;
738 } s;
739 struct cvmx_pcieep_cfg034_s cn52xx;
740 struct cvmx_pcieep_cfg034_s cn52xxp1;
741 struct cvmx_pcieep_cfg034_s cn56xx;
742 struct cvmx_pcieep_cfg034_s cn56xxp1;
743};
744
745union cvmx_pcieep_cfg037 {
746 uint32_t u32;
747 struct cvmx_pcieep_cfg037_s {
748 uint32_t reserved_5_31:27;
749 uint32_t ctds:1;
750 uint32_t ctrs:4;
751 } s;
752 struct cvmx_pcieep_cfg037_s cn52xx;
753 struct cvmx_pcieep_cfg037_s cn52xxp1;
754 struct cvmx_pcieep_cfg037_s cn56xx;
755 struct cvmx_pcieep_cfg037_s cn56xxp1;
756};
757
758union cvmx_pcieep_cfg038 {
759 uint32_t u32;
760 struct cvmx_pcieep_cfg038_s {
761 uint32_t reserved_5_31:27;
762 uint32_t ctd:1;
763 uint32_t ctv:4;
764 } s;
765 struct cvmx_pcieep_cfg038_s cn52xx;
766 struct cvmx_pcieep_cfg038_s cn52xxp1;
767 struct cvmx_pcieep_cfg038_s cn56xx;
768 struct cvmx_pcieep_cfg038_s cn56xxp1;
769};
770
771union cvmx_pcieep_cfg039 {
772 uint32_t u32;
773 struct cvmx_pcieep_cfg039_s {
774 uint32_t reserved_0_31:32;
775 } s;
776 struct cvmx_pcieep_cfg039_s cn52xx;
777 struct cvmx_pcieep_cfg039_s cn52xxp1;
778 struct cvmx_pcieep_cfg039_s cn56xx;
779 struct cvmx_pcieep_cfg039_s cn56xxp1;
780};
781
782union cvmx_pcieep_cfg040 {
783 uint32_t u32;
784 struct cvmx_pcieep_cfg040_s {
785 uint32_t reserved_0_31:32;
786 } s;
787 struct cvmx_pcieep_cfg040_s cn52xx;
788 struct cvmx_pcieep_cfg040_s cn52xxp1;
789 struct cvmx_pcieep_cfg040_s cn56xx;
790 struct cvmx_pcieep_cfg040_s cn56xxp1;
791};
792
793union cvmx_pcieep_cfg041 {
794 uint32_t u32;
795 struct cvmx_pcieep_cfg041_s {
796 uint32_t reserved_0_31:32;
797 } s;
798 struct cvmx_pcieep_cfg041_s cn52xx;
799 struct cvmx_pcieep_cfg041_s cn52xxp1;
800 struct cvmx_pcieep_cfg041_s cn56xx;
801 struct cvmx_pcieep_cfg041_s cn56xxp1;
802};
803
804union cvmx_pcieep_cfg042 {
805 uint32_t u32;
806 struct cvmx_pcieep_cfg042_s {
807 uint32_t reserved_0_31:32;
808 } s;
809 struct cvmx_pcieep_cfg042_s cn52xx;
810 struct cvmx_pcieep_cfg042_s cn52xxp1;
811 struct cvmx_pcieep_cfg042_s cn56xx;
812 struct cvmx_pcieep_cfg042_s cn56xxp1;
813};
814
815union cvmx_pcieep_cfg064 {
816 uint32_t u32;
817 struct cvmx_pcieep_cfg064_s {
818 uint32_t nco:12;
819 uint32_t cv:4;
820 uint32_t pcieec:16;
821 } s;
822 struct cvmx_pcieep_cfg064_s cn52xx;
823 struct cvmx_pcieep_cfg064_s cn52xxp1;
824 struct cvmx_pcieep_cfg064_s cn56xx;
825 struct cvmx_pcieep_cfg064_s cn56xxp1;
826};
827
828union cvmx_pcieep_cfg065 {
829 uint32_t u32;
830 struct cvmx_pcieep_cfg065_s {
831 uint32_t reserved_21_31:11;
832 uint32_t ures:1;
833 uint32_t ecrces:1;
834 uint32_t mtlps:1;
835 uint32_t ros:1;
836 uint32_t ucs:1;
837 uint32_t cas:1;
838 uint32_t cts:1;
839 uint32_t fcpes:1;
840 uint32_t ptlps:1;
841 uint32_t reserved_6_11:6;
842 uint32_t sdes:1;
843 uint32_t dlpes:1;
844 uint32_t reserved_0_3:4;
845 } s;
846 struct cvmx_pcieep_cfg065_s cn52xx;
847 struct cvmx_pcieep_cfg065_s cn52xxp1;
848 struct cvmx_pcieep_cfg065_s cn56xx;
849 struct cvmx_pcieep_cfg065_s cn56xxp1;
850};
851
852union cvmx_pcieep_cfg066 {
853 uint32_t u32;
854 struct cvmx_pcieep_cfg066_s {
855 uint32_t reserved_21_31:11;
856 uint32_t urem:1;
857 uint32_t ecrcem:1;
858 uint32_t mtlpm:1;
859 uint32_t rom:1;
860 uint32_t ucm:1;
861 uint32_t cam:1;
862 uint32_t ctm:1;
863 uint32_t fcpem:1;
864 uint32_t ptlpm:1;
865 uint32_t reserved_6_11:6;
866 uint32_t sdem:1;
867 uint32_t dlpem:1;
868 uint32_t reserved_0_3:4;
869 } s;
870 struct cvmx_pcieep_cfg066_s cn52xx;
871 struct cvmx_pcieep_cfg066_s cn52xxp1;
872 struct cvmx_pcieep_cfg066_s cn56xx;
873 struct cvmx_pcieep_cfg066_s cn56xxp1;
874};
875
876union cvmx_pcieep_cfg067 {
877 uint32_t u32;
878 struct cvmx_pcieep_cfg067_s {
879 uint32_t reserved_21_31:11;
880 uint32_t ures:1;
881 uint32_t ecrces:1;
882 uint32_t mtlps:1;
883 uint32_t ros:1;
884 uint32_t ucs:1;
885 uint32_t cas:1;
886 uint32_t cts:1;
887 uint32_t fcpes:1;
888 uint32_t ptlps:1;
889 uint32_t reserved_6_11:6;
890 uint32_t sdes:1;
891 uint32_t dlpes:1;
892 uint32_t reserved_0_3:4;
893 } s;
894 struct cvmx_pcieep_cfg067_s cn52xx;
895 struct cvmx_pcieep_cfg067_s cn52xxp1;
896 struct cvmx_pcieep_cfg067_s cn56xx;
897 struct cvmx_pcieep_cfg067_s cn56xxp1;
898};
899
900union cvmx_pcieep_cfg068 {
901 uint32_t u32;
902 struct cvmx_pcieep_cfg068_s {
903 uint32_t reserved_14_31:18;
904 uint32_t anfes:1;
905 uint32_t rtts:1;
906 uint32_t reserved_9_11:3;
907 uint32_t rnrs:1;
908 uint32_t bdllps:1;
909 uint32_t btlps:1;
910 uint32_t reserved_1_5:5;
911 uint32_t res:1;
912 } s;
913 struct cvmx_pcieep_cfg068_s cn52xx;
914 struct cvmx_pcieep_cfg068_s cn52xxp1;
915 struct cvmx_pcieep_cfg068_s cn56xx;
916 struct cvmx_pcieep_cfg068_s cn56xxp1;
917};
918
919union cvmx_pcieep_cfg069 {
920 uint32_t u32;
921 struct cvmx_pcieep_cfg069_s {
922 uint32_t reserved_14_31:18;
923 uint32_t anfem:1;
924 uint32_t rttm:1;
925 uint32_t reserved_9_11:3;
926 uint32_t rnrm:1;
927 uint32_t bdllpm:1;
928 uint32_t btlpm:1;
929 uint32_t reserved_1_5:5;
930 uint32_t rem:1;
931 } s;
932 struct cvmx_pcieep_cfg069_s cn52xx;
933 struct cvmx_pcieep_cfg069_s cn52xxp1;
934 struct cvmx_pcieep_cfg069_s cn56xx;
935 struct cvmx_pcieep_cfg069_s cn56xxp1;
936};
937
938union cvmx_pcieep_cfg070 {
939 uint32_t u32;
940 struct cvmx_pcieep_cfg070_s {
941 uint32_t reserved_9_31:23;
942 uint32_t ce:1;
943 uint32_t cc:1;
944 uint32_t ge:1;
945 uint32_t gc:1;
946 uint32_t fep:5;
947 } s;
948 struct cvmx_pcieep_cfg070_s cn52xx;
949 struct cvmx_pcieep_cfg070_s cn52xxp1;
950 struct cvmx_pcieep_cfg070_s cn56xx;
951 struct cvmx_pcieep_cfg070_s cn56xxp1;
952};
953
954union cvmx_pcieep_cfg071 {
955 uint32_t u32;
956 struct cvmx_pcieep_cfg071_s {
957 uint32_t dword1:32;
958 } s;
959 struct cvmx_pcieep_cfg071_s cn52xx;
960 struct cvmx_pcieep_cfg071_s cn52xxp1;
961 struct cvmx_pcieep_cfg071_s cn56xx;
962 struct cvmx_pcieep_cfg071_s cn56xxp1;
963};
964
965union cvmx_pcieep_cfg072 {
966 uint32_t u32;
967 struct cvmx_pcieep_cfg072_s {
968 uint32_t dword2:32;
969 } s;
970 struct cvmx_pcieep_cfg072_s cn52xx;
971 struct cvmx_pcieep_cfg072_s cn52xxp1;
972 struct cvmx_pcieep_cfg072_s cn56xx;
973 struct cvmx_pcieep_cfg072_s cn56xxp1;
974};
975
976union cvmx_pcieep_cfg073 {
977 uint32_t u32;
978 struct cvmx_pcieep_cfg073_s {
979 uint32_t dword3:32;
980 } s;
981 struct cvmx_pcieep_cfg073_s cn52xx;
982 struct cvmx_pcieep_cfg073_s cn52xxp1;
983 struct cvmx_pcieep_cfg073_s cn56xx;
984 struct cvmx_pcieep_cfg073_s cn56xxp1;
985};
986
987union cvmx_pcieep_cfg074 {
988 uint32_t u32;
989 struct cvmx_pcieep_cfg074_s {
990 uint32_t dword4:32;
991 } s;
992 struct cvmx_pcieep_cfg074_s cn52xx;
993 struct cvmx_pcieep_cfg074_s cn52xxp1;
994 struct cvmx_pcieep_cfg074_s cn56xx;
995 struct cvmx_pcieep_cfg074_s cn56xxp1;
996};
997
998union cvmx_pcieep_cfg448 {
999 uint32_t u32;
1000 struct cvmx_pcieep_cfg448_s {
1001 uint32_t rtl:16;
1002 uint32_t rtltl:16;
1003 } s;
1004 struct cvmx_pcieep_cfg448_s cn52xx;
1005 struct cvmx_pcieep_cfg448_s cn52xxp1;
1006 struct cvmx_pcieep_cfg448_s cn56xx;
1007 struct cvmx_pcieep_cfg448_s cn56xxp1;
1008};
1009
1010union cvmx_pcieep_cfg449 {
1011 uint32_t u32;
1012 struct cvmx_pcieep_cfg449_s {
1013 uint32_t omr:32;
1014 } s;
1015 struct cvmx_pcieep_cfg449_s cn52xx;
1016 struct cvmx_pcieep_cfg449_s cn52xxp1;
1017 struct cvmx_pcieep_cfg449_s cn56xx;
1018 struct cvmx_pcieep_cfg449_s cn56xxp1;
1019};
1020
1021union cvmx_pcieep_cfg450 {
1022 uint32_t u32;
1023 struct cvmx_pcieep_cfg450_s {
1024 uint32_t lpec:8;
1025 uint32_t reserved_22_23:2;
1026 uint32_t link_state:6;
1027 uint32_t force_link:1;
1028 uint32_t reserved_8_14:7;
1029 uint32_t link_num:8;
1030 } s;
1031 struct cvmx_pcieep_cfg450_s cn52xx;
1032 struct cvmx_pcieep_cfg450_s cn52xxp1;
1033 struct cvmx_pcieep_cfg450_s cn56xx;
1034 struct cvmx_pcieep_cfg450_s cn56xxp1;
1035};
1036
1037union cvmx_pcieep_cfg451 {
1038 uint32_t u32;
1039 struct cvmx_pcieep_cfg451_s {
1040 uint32_t reserved_30_31:2;
1041 uint32_t l1el:3;
1042 uint32_t l0el:3;
1043 uint32_t n_fts_cc:8;
1044 uint32_t n_fts:8;
1045 uint32_t ack_freq:8;
1046 } s;
1047 struct cvmx_pcieep_cfg451_s cn52xx;
1048 struct cvmx_pcieep_cfg451_s cn52xxp1;
1049 struct cvmx_pcieep_cfg451_s cn56xx;
1050 struct cvmx_pcieep_cfg451_s cn56xxp1;
1051};
1052
1053union cvmx_pcieep_cfg452 {
1054 uint32_t u32;
1055 struct cvmx_pcieep_cfg452_s {
1056 uint32_t reserved_26_31:6;
1057 uint32_t eccrc:1;
1058 uint32_t reserved_22_24:3;
1059 uint32_t lme:6;
1060 uint32_t reserved_8_15:8;
1061 uint32_t flm:1;
1062 uint32_t reserved_6_6:1;
1063 uint32_t dllle:1;
1064 uint32_t reserved_4_4:1;
1065 uint32_t ra:1;
1066 uint32_t le:1;
1067 uint32_t sd:1;
1068 uint32_t omr:1;
1069 } s;
1070 struct cvmx_pcieep_cfg452_s cn52xx;
1071 struct cvmx_pcieep_cfg452_s cn52xxp1;
1072 struct cvmx_pcieep_cfg452_s cn56xx;
1073 struct cvmx_pcieep_cfg452_s cn56xxp1;
1074};
1075
1076union cvmx_pcieep_cfg453 {
1077 uint32_t u32;
1078 struct cvmx_pcieep_cfg453_s {
1079 uint32_t dlld:1;
1080 uint32_t reserved_26_30:5;
1081 uint32_t ack_nak:1;
1082 uint32_t fcd:1;
1083 uint32_t ilst:24;
1084 } s;
1085 struct cvmx_pcieep_cfg453_s cn52xx;
1086 struct cvmx_pcieep_cfg453_s cn52xxp1;
1087 struct cvmx_pcieep_cfg453_s cn56xx;
1088 struct cvmx_pcieep_cfg453_s cn56xxp1;
1089};
1090
1091union cvmx_pcieep_cfg454 {
1092 uint32_t u32;
1093 struct cvmx_pcieep_cfg454_s {
1094 uint32_t reserved_29_31:3;
1095 uint32_t tmfcwt:5;
1096 uint32_t tmanlt:5;
1097 uint32_t tmrt:5;
1098 uint32_t reserved_11_13:3;
1099 uint32_t nskps:3;
1100 uint32_t reserved_4_7:4;
1101 uint32_t ntss:4;
1102 } s;
1103 struct cvmx_pcieep_cfg454_s cn52xx;
1104 struct cvmx_pcieep_cfg454_s cn52xxp1;
1105 struct cvmx_pcieep_cfg454_s cn56xx;
1106 struct cvmx_pcieep_cfg454_s cn56xxp1;
1107};
1108
1109union cvmx_pcieep_cfg455 {
1110 uint32_t u32;
1111 struct cvmx_pcieep_cfg455_s {
1112 uint32_t m_cfg0_filt:1;
1113 uint32_t m_io_filt:1;
1114 uint32_t msg_ctrl:1;
1115 uint32_t m_cpl_ecrc_filt:1;
1116 uint32_t m_ecrc_filt:1;
1117 uint32_t m_cpl_len_err:1;
1118 uint32_t m_cpl_attr_err:1;
1119 uint32_t m_cpl_tc_err:1;
1120 uint32_t m_cpl_fun_err:1;
1121 uint32_t m_cpl_rid_err:1;
1122 uint32_t m_cpl_tag_err:1;
1123 uint32_t m_lk_filt:1;
1124 uint32_t m_cfg1_filt:1;
1125 uint32_t m_bar_match:1;
1126 uint32_t m_pois_filt:1;
1127 uint32_t m_fun:1;
1128 uint32_t dfcwt:1;
1129 uint32_t reserved_11_14:4;
1130 uint32_t skpiv:11;
1131 } s;
1132 struct cvmx_pcieep_cfg455_s cn52xx;
1133 struct cvmx_pcieep_cfg455_s cn52xxp1;
1134 struct cvmx_pcieep_cfg455_s cn56xx;
1135 struct cvmx_pcieep_cfg455_s cn56xxp1;
1136};
1137
1138union cvmx_pcieep_cfg456 {
1139 uint32_t u32;
1140 struct cvmx_pcieep_cfg456_s {
1141 uint32_t reserved_2_31:30;
1142 uint32_t m_vend1_drp:1;
1143 uint32_t m_vend0_drp:1;
1144 } s;
1145 struct cvmx_pcieep_cfg456_s cn52xx;
1146 struct cvmx_pcieep_cfg456_s cn52xxp1;
1147 struct cvmx_pcieep_cfg456_s cn56xx;
1148 struct cvmx_pcieep_cfg456_s cn56xxp1;
1149};
1150
1151union cvmx_pcieep_cfg458 {
1152 uint32_t u32;
1153 struct cvmx_pcieep_cfg458_s {
1154 uint32_t dbg_info_l32:32;
1155 } s;
1156 struct cvmx_pcieep_cfg458_s cn52xx;
1157 struct cvmx_pcieep_cfg458_s cn52xxp1;
1158 struct cvmx_pcieep_cfg458_s cn56xx;
1159 struct cvmx_pcieep_cfg458_s cn56xxp1;
1160};
1161
1162union cvmx_pcieep_cfg459 {
1163 uint32_t u32;
1164 struct cvmx_pcieep_cfg459_s {
1165 uint32_t dbg_info_u32:32;
1166 } s;
1167 struct cvmx_pcieep_cfg459_s cn52xx;
1168 struct cvmx_pcieep_cfg459_s cn52xxp1;
1169 struct cvmx_pcieep_cfg459_s cn56xx;
1170 struct cvmx_pcieep_cfg459_s cn56xxp1;
1171};
1172
1173union cvmx_pcieep_cfg460 {
1174 uint32_t u32;
1175 struct cvmx_pcieep_cfg460_s {
1176 uint32_t reserved_20_31:12;
1177 uint32_t tphfcc:8;
1178 uint32_t tpdfcc:12;
1179 } s;
1180 struct cvmx_pcieep_cfg460_s cn52xx;
1181 struct cvmx_pcieep_cfg460_s cn52xxp1;
1182 struct cvmx_pcieep_cfg460_s cn56xx;
1183 struct cvmx_pcieep_cfg460_s cn56xxp1;
1184};
1185
1186union cvmx_pcieep_cfg461 {
1187 uint32_t u32;
1188 struct cvmx_pcieep_cfg461_s {
1189 uint32_t reserved_20_31:12;
1190 uint32_t tchfcc:8;
1191 uint32_t tcdfcc:12;
1192 } s;
1193 struct cvmx_pcieep_cfg461_s cn52xx;
1194 struct cvmx_pcieep_cfg461_s cn52xxp1;
1195 struct cvmx_pcieep_cfg461_s cn56xx;
1196 struct cvmx_pcieep_cfg461_s cn56xxp1;
1197};
1198
1199union cvmx_pcieep_cfg462 {
1200 uint32_t u32;
1201 struct cvmx_pcieep_cfg462_s {
1202 uint32_t reserved_20_31:12;
1203 uint32_t tchfcc:8;
1204 uint32_t tcdfcc:12;
1205 } s;
1206 struct cvmx_pcieep_cfg462_s cn52xx;
1207 struct cvmx_pcieep_cfg462_s cn52xxp1;
1208 struct cvmx_pcieep_cfg462_s cn56xx;
1209 struct cvmx_pcieep_cfg462_s cn56xxp1;
1210};
1211
1212union cvmx_pcieep_cfg463 {
1213 uint32_t u32;
1214 struct cvmx_pcieep_cfg463_s {
1215 uint32_t reserved_3_31:29;
1216 uint32_t rqne:1;
1217 uint32_t trbne:1;
1218 uint32_t rtlpfccnr:1;
1219 } s;
1220 struct cvmx_pcieep_cfg463_s cn52xx;
1221 struct cvmx_pcieep_cfg463_s cn52xxp1;
1222 struct cvmx_pcieep_cfg463_s cn56xx;
1223 struct cvmx_pcieep_cfg463_s cn56xxp1;
1224};
1225
1226union cvmx_pcieep_cfg464 {
1227 uint32_t u32;
1228 struct cvmx_pcieep_cfg464_s {
1229 uint32_t wrr_vc3:8;
1230 uint32_t wrr_vc2:8;
1231 uint32_t wrr_vc1:8;
1232 uint32_t wrr_vc0:8;
1233 } s;
1234 struct cvmx_pcieep_cfg464_s cn52xx;
1235 struct cvmx_pcieep_cfg464_s cn52xxp1;
1236 struct cvmx_pcieep_cfg464_s cn56xx;
1237 struct cvmx_pcieep_cfg464_s cn56xxp1;
1238};
1239
1240union cvmx_pcieep_cfg465 {
1241 uint32_t u32;
1242 struct cvmx_pcieep_cfg465_s {
1243 uint32_t wrr_vc7:8;
1244 uint32_t wrr_vc6:8;
1245 uint32_t wrr_vc5:8;
1246 uint32_t wrr_vc4:8;
1247 } s;
1248 struct cvmx_pcieep_cfg465_s cn52xx;
1249 struct cvmx_pcieep_cfg465_s cn52xxp1;
1250 struct cvmx_pcieep_cfg465_s cn56xx;
1251 struct cvmx_pcieep_cfg465_s cn56xxp1;
1252};
1253
1254union cvmx_pcieep_cfg466 {
1255 uint32_t u32;
1256 struct cvmx_pcieep_cfg466_s {
1257 uint32_t rx_queue_order:1;
1258 uint32_t type_ordering:1;
1259 uint32_t reserved_24_29:6;
1260 uint32_t queue_mode:3;
1261 uint32_t reserved_20_20:1;
1262 uint32_t header_credits:8;
1263 uint32_t data_credits:12;
1264 } s;
1265 struct cvmx_pcieep_cfg466_s cn52xx;
1266 struct cvmx_pcieep_cfg466_s cn52xxp1;
1267 struct cvmx_pcieep_cfg466_s cn56xx;
1268 struct cvmx_pcieep_cfg466_s cn56xxp1;
1269};
1270
1271union cvmx_pcieep_cfg467 {
1272 uint32_t u32;
1273 struct cvmx_pcieep_cfg467_s {
1274 uint32_t reserved_24_31:8;
1275 uint32_t queue_mode:3;
1276 uint32_t reserved_20_20:1;
1277 uint32_t header_credits:8;
1278 uint32_t data_credits:12;
1279 } s;
1280 struct cvmx_pcieep_cfg467_s cn52xx;
1281 struct cvmx_pcieep_cfg467_s cn52xxp1;
1282 struct cvmx_pcieep_cfg467_s cn56xx;
1283 struct cvmx_pcieep_cfg467_s cn56xxp1;
1284};
1285
1286union cvmx_pcieep_cfg468 {
1287 uint32_t u32;
1288 struct cvmx_pcieep_cfg468_s {
1289 uint32_t reserved_24_31:8;
1290 uint32_t queue_mode:3;
1291 uint32_t reserved_20_20:1;
1292 uint32_t header_credits:8;
1293 uint32_t data_credits:12;
1294 } s;
1295 struct cvmx_pcieep_cfg468_s cn52xx;
1296 struct cvmx_pcieep_cfg468_s cn52xxp1;
1297 struct cvmx_pcieep_cfg468_s cn56xx;
1298 struct cvmx_pcieep_cfg468_s cn56xxp1;
1299};
1300
1301union cvmx_pcieep_cfg490 {
1302 uint32_t u32;
1303 struct cvmx_pcieep_cfg490_s {
1304 uint32_t reserved_26_31:6;
1305 uint32_t header_depth:10;
1306 uint32_t reserved_14_15:2;
1307 uint32_t data_depth:14;
1308 } s;
1309 struct cvmx_pcieep_cfg490_s cn52xx;
1310 struct cvmx_pcieep_cfg490_s cn52xxp1;
1311 struct cvmx_pcieep_cfg490_s cn56xx;
1312 struct cvmx_pcieep_cfg490_s cn56xxp1;
1313};
1314
1315union cvmx_pcieep_cfg491 {
1316 uint32_t u32;
1317 struct cvmx_pcieep_cfg491_s {
1318 uint32_t reserved_26_31:6;
1319 uint32_t header_depth:10;
1320 uint32_t reserved_14_15:2;
1321 uint32_t data_depth:14;
1322 } s;
1323 struct cvmx_pcieep_cfg491_s cn52xx;
1324 struct cvmx_pcieep_cfg491_s cn52xxp1;
1325 struct cvmx_pcieep_cfg491_s cn56xx;
1326 struct cvmx_pcieep_cfg491_s cn56xxp1;
1327};
1328
1329union cvmx_pcieep_cfg492 {
1330 uint32_t u32;
1331 struct cvmx_pcieep_cfg492_s {
1332 uint32_t reserved_26_31:6;
1333 uint32_t header_depth:10;
1334 uint32_t reserved_14_15:2;
1335 uint32_t data_depth:14;
1336 } s;
1337 struct cvmx_pcieep_cfg492_s cn52xx;
1338 struct cvmx_pcieep_cfg492_s cn52xxp1;
1339 struct cvmx_pcieep_cfg492_s cn56xx;
1340 struct cvmx_pcieep_cfg492_s cn56xxp1;
1341};
1342
1343union cvmx_pcieep_cfg516 {
1344 uint32_t u32;
1345 struct cvmx_pcieep_cfg516_s {
1346 uint32_t phy_stat:32;
1347 } s;
1348 struct cvmx_pcieep_cfg516_s cn52xx;
1349 struct cvmx_pcieep_cfg516_s cn52xxp1;
1350 struct cvmx_pcieep_cfg516_s cn56xx;
1351 struct cvmx_pcieep_cfg516_s cn56xxp1;
1352};
1353
1354union cvmx_pcieep_cfg517 {
1355 uint32_t u32;
1356 struct cvmx_pcieep_cfg517_s {
1357 uint32_t phy_ctrl:32;
1358 } s;
1359 struct cvmx_pcieep_cfg517_s cn52xx;
1360 struct cvmx_pcieep_cfg517_s cn52xxp1;
1361 struct cvmx_pcieep_cfg517_s cn56xx;
1362 struct cvmx_pcieep_cfg517_s cn56xxp1;
1363};
1364
1365#endif
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index fcd4060f6421..90bf3b3fce19 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -17,6 +17,7 @@
17 */ 17 */
18 18
19#include <linux/ioport.h> 19#include <linux/ioport.h>
20#include <linux/of.h>
20 21
21/* 22/*
22 * Each pci channel is a top-level PCI bus seem by CPU. A machine with 23 * Each pci channel is a top-level PCI bus seem by CPU. A machine with
@@ -26,6 +27,7 @@
26struct pci_controller { 27struct pci_controller {
27 struct pci_controller *next; 28 struct pci_controller *next;
28 struct pci_bus *bus; 29 struct pci_bus *bus;
30 struct device_node *of_node;
29 31
30 struct pci_ops *pci_ops; 32 struct pci_ops *pci_ops;
31 struct resource *mem_resource; 33 struct resource *mem_resource;
@@ -142,4 +144,8 @@ static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
142 144
143extern char * (*pcibios_plat_setup)(char *str); 145extern char * (*pcibios_plat_setup)(char *str);
144 146
147/* this function parses memory ranges from a device node */
148extern void __devinit pci_load_of_ranges(struct pci_controller *hose,
149 struct device_node *node);
150
145#endif /* _ASM_PCI_H */ 151#endif /* _ASM_PCI_H */
diff --git a/arch/mips/include/asm/prom.h b/arch/mips/include/asm/prom.h
index 7a6e82ef449b..7206d445bab8 100644
--- a/arch/mips/include/asm/prom.h
+++ b/arch/mips/include/asm/prom.h
@@ -12,6 +12,9 @@
12#define __ASM_PROM_H 12#define __ASM_PROM_H
13 13
14#ifdef CONFIG_OF 14#ifdef CONFIG_OF
15#include <linux/bug.h>
16#include <linux/io.h>
17#include <linux/types.h>
15#include <asm/bootinfo.h> 18#include <asm/bootinfo.h>
16 19
17extern int early_init_dt_scan_memory_arch(unsigned long node, 20extern int early_init_dt_scan_memory_arch(unsigned long node,
@@ -21,6 +24,29 @@ extern int reserve_mem_mach(unsigned long addr, unsigned long size);
21extern void free_mem_mach(unsigned long addr, unsigned long size); 24extern void free_mem_mach(unsigned long addr, unsigned long size);
22 25
23extern void device_tree_init(void); 26extern void device_tree_init(void);
27
28static inline unsigned long pci_address_to_pio(phys_addr_t address)
29{
30 /*
31 * The ioport address can be directly used by inX() / outX()
32 */
33 BUG_ON(address > IO_SPACE_LIMIT);
34
35 return (unsigned long) address;
36}
37#define pci_address_to_pio pci_address_to_pio
38
39struct boot_param_header;
40
41extern void __dt_setup_arch(struct boot_param_header *bph);
42
43#define dt_setup_arch(sym) \
44({ \
45 extern struct boot_param_header __dtb_##sym##_begin; \
46 \
47 __dt_setup_arch(&__dtb_##sym##_begin); \
48})
49
24#else /* CONFIG_OF */ 50#else /* CONFIG_OF */
25static inline void device_tree_init(void) { } 51static inline void device_tree_init(void) { }
26#endif /* CONFIG_OF */ 52#endif /* CONFIG_OF */
diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h
index 6dce6d8d09ab..2560b6b6a7d8 100644
--- a/arch/mips/include/asm/setup.h
+++ b/arch/mips/include/asm/setup.h
@@ -14,7 +14,8 @@ extern void *set_vi_handler(int n, vi_handler_t addr);
14 14
15extern void *set_except_vector(int n, void *addr); 15extern void *set_except_vector(int n, void *addr);
16extern unsigned long ebase; 16extern unsigned long ebase;
17extern void per_cpu_trap_init(void); 17extern void per_cpu_trap_init(bool);
18extern void cpu_cache_init(void);
18 19
19#endif /* __KERNEL__ */ 20#endif /* __KERNEL__ */
20 21
diff --git a/arch/mips/include/asm/sparsemem.h b/arch/mips/include/asm/sparsemem.h
index 7165333ad043..4461198361c9 100644
--- a/arch/mips/include/asm/sparsemem.h
+++ b/arch/mips/include/asm/sparsemem.h
@@ -6,7 +6,11 @@
6 * SECTION_SIZE_BITS 2^N: how big each section will be 6 * SECTION_SIZE_BITS 2^N: how big each section will be
7 * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space 7 * MAX_PHYSMEM_BITS 2^N: how much memory we can have in that space
8 */ 8 */
9#define SECTION_SIZE_BITS 28 9#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_PAGE_SIZE_64KB)
10# define SECTION_SIZE_BITS 29
11#else
12# define SECTION_SIZE_BITS 28
13#endif
10#define MAX_PHYSMEM_BITS 35 14#define MAX_PHYSMEM_BITS 35
11 15
12#endif /* CONFIG_SPARSEMEM */ 16#endif /* CONFIG_SPARSEMEM */
diff --git a/arch/mips/include/asm/termios.h b/arch/mips/include/asm/termios.h
index 8f77f774a2a0..abdd87aaf609 100644
--- a/arch/mips/include/asm/termios.h
+++ b/arch/mips/include/asm/termios.h
@@ -60,7 +60,7 @@ struct termio {
60}; 60};
61 61
62#ifdef __KERNEL__ 62#ifdef __KERNEL__
63#include <linux/module.h> 63#include <asm/uaccess.h>
64 64
65/* 65/*
66 * intr=^C quit=^\ erase=del kill=^U 66 * intr=^C quit=^\ erase=del kill=^U
diff --git a/arch/mips/include/asm/traps.h b/arch/mips/include/asm/traps.h
index ff74aec3561a..420ca06b2f42 100644
--- a/arch/mips/include/asm/traps.h
+++ b/arch/mips/include/asm/traps.h
@@ -25,6 +25,7 @@ extern void (*board_nmi_handler_setup)(void);
25extern void (*board_ejtag_handler_setup)(void); 25extern void (*board_ejtag_handler_setup)(void);
26extern void (*board_bind_eic_interrupt)(int irq, int regset); 26extern void (*board_bind_eic_interrupt)(int irq, int regset);
27extern void (*board_ebase_setup)(void); 27extern void (*board_ebase_setup)(void);
28extern void (*board_cache_error_setup)(void);
28 29
29extern int register_nmi_notifier(struct notifier_block *nb); 30extern int register_nmi_notifier(struct notifier_block *nb);
30 31
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index 504d40aedfae..440a21dab575 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -11,7 +11,7 @@
11#include <linux/types.h> 11#include <linux/types.h>
12 12
13#ifdef CONFIG_EXPORT_UASM 13#ifdef CONFIG_EXPORT_UASM
14#include <linux/module.h> 14#include <linux/export.h>
15#define __uasminit 15#define __uasminit
16#define __uasminitdata 16#define __uasminitdata
17#define UASM_EXPORT_SYMBOL(sym) EXPORT_SYMBOL(sym) 17#define UASM_EXPORT_SYMBOL(sym) EXPORT_SYMBOL(sym)
diff --git a/arch/mips/jz4740/Makefile b/arch/mips/jz4740/Makefile
index a9dff3321251..e44abea9c209 100644
--- a/arch/mips/jz4740/Makefile
+++ b/arch/mips/jz4740/Makefile
@@ -16,5 +16,3 @@ obj-$(CONFIG_JZ4740_QI_LB60) += board-qi_lb60.o
16# PM support 16# PM support
17 17
18obj-$(CONFIG_PM) += pm.o 18obj-$(CONFIG_PM) += pm.o
19
20ccflags-y := -Werror -Wall
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index 5099201fb7bc..6ae7ce4ac63e 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -340,7 +340,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
340 __cpu_name[cpu] = "R2000"; 340 __cpu_name[cpu] = "R2000";
341 c->isa_level = MIPS_CPU_ISA_I; 341 c->isa_level = MIPS_CPU_ISA_I;
342 c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | 342 c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
343 MIPS_CPU_NOFPUEX; 343 MIPS_CPU_NOFPUEX;
344 if (__cpu_has_fpu()) 344 if (__cpu_has_fpu())
345 c->options |= MIPS_CPU_FPU; 345 c->options |= MIPS_CPU_FPU;
346 c->tlbsize = 64; 346 c->tlbsize = 64;
@@ -361,7 +361,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
361 } 361 }
362 c->isa_level = MIPS_CPU_ISA_I; 362 c->isa_level = MIPS_CPU_ISA_I;
363 c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE | 363 c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
364 MIPS_CPU_NOFPUEX; 364 MIPS_CPU_NOFPUEX;
365 if (__cpu_has_fpu()) 365 if (__cpu_has_fpu())
366 c->options |= MIPS_CPU_FPU; 366 c->options |= MIPS_CPU_FPU;
367 c->tlbsize = 64; 367 c->tlbsize = 64;
@@ -387,8 +387,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
387 387
388 c->isa_level = MIPS_CPU_ISA_III; 388 c->isa_level = MIPS_CPU_ISA_III;
389 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | 389 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
390 MIPS_CPU_WATCH | MIPS_CPU_VCE | 390 MIPS_CPU_WATCH | MIPS_CPU_VCE |
391 MIPS_CPU_LLSC; 391 MIPS_CPU_LLSC;
392 c->tlbsize = 48; 392 c->tlbsize = 48;
393 break; 393 break;
394 case PRID_IMP_VR41XX: 394 case PRID_IMP_VR41XX:
@@ -434,7 +434,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
434 __cpu_name[cpu] = "R4300"; 434 __cpu_name[cpu] = "R4300";
435 c->isa_level = MIPS_CPU_ISA_III; 435 c->isa_level = MIPS_CPU_ISA_III;
436 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | 436 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
437 MIPS_CPU_LLSC; 437 MIPS_CPU_LLSC;
438 c->tlbsize = 32; 438 c->tlbsize = 32;
439 break; 439 break;
440 case PRID_IMP_R4600: 440 case PRID_IMP_R4600:
@@ -446,7 +446,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
446 c->tlbsize = 48; 446 c->tlbsize = 48;
447 break; 447 break;
448 #if 0 448 #if 0
449 case PRID_IMP_R4650: 449 case PRID_IMP_R4650:
450 /* 450 /*
451 * This processor doesn't have an MMU, so it's not 451 * This processor doesn't have an MMU, so it's not
452 * "real easy" to run Linux on it. It is left purely 452 * "real easy" to run Linux on it. It is left purely
@@ -455,9 +455,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
455 */ 455 */
456 c->cputype = CPU_R4650; 456 c->cputype = CPU_R4650;
457 __cpu_name[cpu] = "R4650"; 457 __cpu_name[cpu] = "R4650";
458 c->isa_level = MIPS_CPU_ISA_III; 458 c->isa_level = MIPS_CPU_ISA_III;
459 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC; 459 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC;
460 c->tlbsize = 48; 460 c->tlbsize = 48;
461 break; 461 break;
462 #endif 462 #endif
463 case PRID_IMP_TX39: 463 case PRID_IMP_TX39:
@@ -488,7 +488,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
488 __cpu_name[cpu] = "R4700"; 488 __cpu_name[cpu] = "R4700";
489 c->isa_level = MIPS_CPU_ISA_III; 489 c->isa_level = MIPS_CPU_ISA_III;
490 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | 490 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
491 MIPS_CPU_LLSC; 491 MIPS_CPU_LLSC;
492 c->tlbsize = 48; 492 c->tlbsize = 48;
493 break; 493 break;
494 case PRID_IMP_TX49: 494 case PRID_IMP_TX49:
@@ -505,7 +505,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
505 __cpu_name[cpu] = "R5000"; 505 __cpu_name[cpu] = "R5000";
506 c->isa_level = MIPS_CPU_ISA_IV; 506 c->isa_level = MIPS_CPU_ISA_IV;
507 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | 507 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
508 MIPS_CPU_LLSC; 508 MIPS_CPU_LLSC;
509 c->tlbsize = 48; 509 c->tlbsize = 48;
510 break; 510 break;
511 case PRID_IMP_R5432: 511 case PRID_IMP_R5432:
@@ -513,7 +513,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
513 __cpu_name[cpu] = "R5432"; 513 __cpu_name[cpu] = "R5432";
514 c->isa_level = MIPS_CPU_ISA_IV; 514 c->isa_level = MIPS_CPU_ISA_IV;
515 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | 515 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
516 MIPS_CPU_WATCH | MIPS_CPU_LLSC; 516 MIPS_CPU_WATCH | MIPS_CPU_LLSC;
517 c->tlbsize = 48; 517 c->tlbsize = 48;
518 break; 518 break;
519 case PRID_IMP_R5500: 519 case PRID_IMP_R5500:
@@ -521,7 +521,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
521 __cpu_name[cpu] = "R5500"; 521 __cpu_name[cpu] = "R5500";
522 c->isa_level = MIPS_CPU_ISA_IV; 522 c->isa_level = MIPS_CPU_ISA_IV;
523 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | 523 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
524 MIPS_CPU_WATCH | MIPS_CPU_LLSC; 524 MIPS_CPU_WATCH | MIPS_CPU_LLSC;
525 c->tlbsize = 48; 525 c->tlbsize = 48;
526 break; 526 break;
527 case PRID_IMP_NEVADA: 527 case PRID_IMP_NEVADA:
@@ -529,7 +529,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
529 __cpu_name[cpu] = "Nevada"; 529 __cpu_name[cpu] = "Nevada";
530 c->isa_level = MIPS_CPU_ISA_IV; 530 c->isa_level = MIPS_CPU_ISA_IV;
531 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | 531 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
532 MIPS_CPU_DIVEC | MIPS_CPU_LLSC; 532 MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
533 c->tlbsize = 48; 533 c->tlbsize = 48;
534 break; 534 break;
535 case PRID_IMP_R6000: 535 case PRID_IMP_R6000:
@@ -537,7 +537,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
537 __cpu_name[cpu] = "R6000"; 537 __cpu_name[cpu] = "R6000";
538 c->isa_level = MIPS_CPU_ISA_II; 538 c->isa_level = MIPS_CPU_ISA_II;
539 c->options = MIPS_CPU_TLB | MIPS_CPU_FPU | 539 c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
540 MIPS_CPU_LLSC; 540 MIPS_CPU_LLSC;
541 c->tlbsize = 32; 541 c->tlbsize = 32;
542 break; 542 break;
543 case PRID_IMP_R6000A: 543 case PRID_IMP_R6000A:
@@ -545,7 +545,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
545 __cpu_name[cpu] = "R6000A"; 545 __cpu_name[cpu] = "R6000A";
546 c->isa_level = MIPS_CPU_ISA_II; 546 c->isa_level = MIPS_CPU_ISA_II;
547 c->options = MIPS_CPU_TLB | MIPS_CPU_FPU | 547 c->options = MIPS_CPU_TLB | MIPS_CPU_FPU |
548 MIPS_CPU_LLSC; 548 MIPS_CPU_LLSC;
549 c->tlbsize = 32; 549 c->tlbsize = 32;
550 break; 550 break;
551 case PRID_IMP_RM7000: 551 case PRID_IMP_RM7000:
@@ -553,7 +553,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
553 __cpu_name[cpu] = "RM7000"; 553 __cpu_name[cpu] = "RM7000";
554 c->isa_level = MIPS_CPU_ISA_IV; 554 c->isa_level = MIPS_CPU_ISA_IV;
555 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | 555 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
556 MIPS_CPU_LLSC; 556 MIPS_CPU_LLSC;
557 /* 557 /*
558 * Undocumented RM7000: Bit 29 in the info register of 558 * Undocumented RM7000: Bit 29 in the info register of
559 * the RM7000 v2.0 indicates if the TLB has 48 or 64 559 * the RM7000 v2.0 indicates if the TLB has 48 or 64
@@ -569,7 +569,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
569 __cpu_name[cpu] = "RM9000"; 569 __cpu_name[cpu] = "RM9000";
570 c->isa_level = MIPS_CPU_ISA_IV; 570 c->isa_level = MIPS_CPU_ISA_IV;
571 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR | 571 c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
572 MIPS_CPU_LLSC; 572 MIPS_CPU_LLSC;
573 /* 573 /*
574 * Bit 29 in the info register of the RM9000 574 * Bit 29 in the info register of the RM9000
575 * indicates if the TLB has 48 or 64 entries. 575 * indicates if the TLB has 48 or 64 entries.
@@ -584,8 +584,8 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
584 __cpu_name[cpu] = "RM8000"; 584 __cpu_name[cpu] = "RM8000";
585 c->isa_level = MIPS_CPU_ISA_IV; 585 c->isa_level = MIPS_CPU_ISA_IV;
586 c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX | 586 c->options = MIPS_CPU_TLB | MIPS_CPU_4KEX |
587 MIPS_CPU_FPU | MIPS_CPU_32FPR | 587 MIPS_CPU_FPU | MIPS_CPU_32FPR |
588 MIPS_CPU_LLSC; 588 MIPS_CPU_LLSC;
589 c->tlbsize = 384; /* has weird TLB: 3-way x 128 */ 589 c->tlbsize = 384; /* has weird TLB: 3-way x 128 */
590 break; 590 break;
591 case PRID_IMP_R10000: 591 case PRID_IMP_R10000:
@@ -593,9 +593,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
593 __cpu_name[cpu] = "R10000"; 593 __cpu_name[cpu] = "R10000";
594 c->isa_level = MIPS_CPU_ISA_IV; 594 c->isa_level = MIPS_CPU_ISA_IV;
595 c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX | 595 c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
596 MIPS_CPU_FPU | MIPS_CPU_32FPR | 596 MIPS_CPU_FPU | MIPS_CPU_32FPR |
597 MIPS_CPU_COUNTER | MIPS_CPU_WATCH | 597 MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
598 MIPS_CPU_LLSC; 598 MIPS_CPU_LLSC;
599 c->tlbsize = 64; 599 c->tlbsize = 64;
600 break; 600 break;
601 case PRID_IMP_R12000: 601 case PRID_IMP_R12000:
@@ -603,9 +603,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
603 __cpu_name[cpu] = "R12000"; 603 __cpu_name[cpu] = "R12000";
604 c->isa_level = MIPS_CPU_ISA_IV; 604 c->isa_level = MIPS_CPU_ISA_IV;
605 c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX | 605 c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
606 MIPS_CPU_FPU | MIPS_CPU_32FPR | 606 MIPS_CPU_FPU | MIPS_CPU_32FPR |
607 MIPS_CPU_COUNTER | MIPS_CPU_WATCH | 607 MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
608 MIPS_CPU_LLSC; 608 MIPS_CPU_LLSC;
609 c->tlbsize = 64; 609 c->tlbsize = 64;
610 break; 610 break;
611 case PRID_IMP_R14000: 611 case PRID_IMP_R14000:
@@ -613,9 +613,9 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
613 __cpu_name[cpu] = "R14000"; 613 __cpu_name[cpu] = "R14000";
614 c->isa_level = MIPS_CPU_ISA_IV; 614 c->isa_level = MIPS_CPU_ISA_IV;
615 c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX | 615 c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
616 MIPS_CPU_FPU | MIPS_CPU_32FPR | 616 MIPS_CPU_FPU | MIPS_CPU_32FPR |
617 MIPS_CPU_COUNTER | MIPS_CPU_WATCH | 617 MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
618 MIPS_CPU_LLSC; 618 MIPS_CPU_LLSC;
619 c->tlbsize = 64; 619 c->tlbsize = 64;
620 break; 620 break;
621 case PRID_IMP_LOONGSON2: 621 case PRID_IMP_LOONGSON2:
@@ -739,7 +739,7 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
739 if (config3 & MIPS_CONF3_VEIC) 739 if (config3 & MIPS_CONF3_VEIC)
740 c->options |= MIPS_CPU_VEIC; 740 c->options |= MIPS_CPU_VEIC;
741 if (config3 & MIPS_CONF3_MT) 741 if (config3 & MIPS_CONF3_MT)
742 c->ases |= MIPS_ASE_MIPSMT; 742 c->ases |= MIPS_ASE_MIPSMT;
743 if (config3 & MIPS_CONF3_ULRI) 743 if (config3 & MIPS_CONF3_ULRI)
744 c->options |= MIPS_CPU_ULRI; 744 c->options |= MIPS_CPU_ULRI;
745 745
@@ -767,7 +767,7 @@ static void __cpuinit decode_configs(struct cpuinfo_mips *c)
767 767
768 /* MIPS32 or MIPS64 compliant CPU. */ 768 /* MIPS32 or MIPS64 compliant CPU. */
769 c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER | 769 c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
770 MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK; 770 MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
771 771
772 c->scache.flags = MIPS_CACHE_NOT_PRESENT; 772 c->scache.flags = MIPS_CACHE_NOT_PRESENT;
773 773
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index ab73fa2fb9b5..f29099b104c4 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -1532,7 +1532,8 @@ init_hw_perf_events(void)
1532 irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR; 1532 irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
1533 } else { 1533 } else {
1534#endif 1534#endif
1535 if (cp0_perfcount_irq >= 0) 1535 if ((cp0_perfcount_irq >= 0) &&
1536 (cp0_compare_irq != cp0_perfcount_irq))
1536 irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq; 1537 irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
1537 else 1538 else
1538 irq = -1; 1539 irq = -1;
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index f8b2c592514d..5542817c1b49 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -41,27 +41,27 @@ static int show_cpuinfo(struct seq_file *m, void *v)
41 41
42 seq_printf(m, "processor\t\t: %ld\n", n); 42 seq_printf(m, "processor\t\t: %ld\n", n);
43 sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n", 43 sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
44 cpu_data[n].options & MIPS_CPU_FPU ? " FPU V%d.%d" : ""); 44 cpu_data[n].options & MIPS_CPU_FPU ? " FPU V%d.%d" : "");
45 seq_printf(m, fmt, __cpu_name[n], 45 seq_printf(m, fmt, __cpu_name[n],
46 (version >> 4) & 0x0f, version & 0x0f, 46 (version >> 4) & 0x0f, version & 0x0f,
47 (fp_vers >> 4) & 0x0f, fp_vers & 0x0f); 47 (fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
48 seq_printf(m, "BogoMIPS\t\t: %u.%02u\n", 48 seq_printf(m, "BogoMIPS\t\t: %u.%02u\n",
49 cpu_data[n].udelay_val / (500000/HZ), 49 cpu_data[n].udelay_val / (500000/HZ),
50 (cpu_data[n].udelay_val / (5000/HZ)) % 100); 50 (cpu_data[n].udelay_val / (5000/HZ)) % 100);
51 seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no"); 51 seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no");
52 seq_printf(m, "microsecond timers\t: %s\n", 52 seq_printf(m, "microsecond timers\t: %s\n",
53 cpu_has_counter ? "yes" : "no"); 53 cpu_has_counter ? "yes" : "no");
54 seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize); 54 seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize);
55 seq_printf(m, "extra interrupt vector\t: %s\n", 55 seq_printf(m, "extra interrupt vector\t: %s\n",
56 cpu_has_divec ? "yes" : "no"); 56 cpu_has_divec ? "yes" : "no");
57 seq_printf(m, "hardware watchpoint\t: %s", 57 seq_printf(m, "hardware watchpoint\t: %s",
58 cpu_has_watch ? "yes, " : "no\n"); 58 cpu_has_watch ? "yes, " : "no\n");
59 if (cpu_has_watch) { 59 if (cpu_has_watch) {
60 seq_printf(m, "count: %d, address/irw mask: [", 60 seq_printf(m, "count: %d, address/irw mask: [",
61 cpu_data[n].watch_reg_count); 61 cpu_data[n].watch_reg_count);
62 for (i = 0; i < cpu_data[n].watch_reg_count; i++) 62 for (i = 0; i < cpu_data[n].watch_reg_count; i++)
63 seq_printf(m, "%s0x%04x", i ? ", " : "" , 63 seq_printf(m, "%s0x%04x", i ? ", " : "" ,
64 cpu_data[n].watch_reg_masks[i]); 64 cpu_data[n].watch_reg_masks[i]);
65 seq_printf(m, "]\n"); 65 seq_printf(m, "]\n");
66 } 66 }
67 seq_printf(m, "ASEs implemented\t:%s%s%s%s%s%s\n", 67 seq_printf(m, "ASEs implemented\t:%s%s%s%s%s%s\n",
@@ -73,13 +73,13 @@ static int show_cpuinfo(struct seq_file *m, void *v)
73 cpu_has_mipsmt ? " mt" : "" 73 cpu_has_mipsmt ? " mt" : ""
74 ); 74 );
75 seq_printf(m, "shadow register sets\t: %d\n", 75 seq_printf(m, "shadow register sets\t: %d\n",
76 cpu_data[n].srsets); 76 cpu_data[n].srsets);
77 seq_printf(m, "kscratch registers\t: %d\n", 77 seq_printf(m, "kscratch registers\t: %d\n",
78 hweight8(cpu_data[n].kscratch_mask)); 78 hweight8(cpu_data[n].kscratch_mask));
79 seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core); 79 seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
80 80
81 sprintf(fmt, "VCE%%c exceptions\t\t: %s\n", 81 sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
82 cpu_has_vce ? "%u" : "not available"); 82 cpu_has_vce ? "%u" : "not available");
83 seq_printf(m, fmt, 'D', vced_count); 83 seq_printf(m, fmt, 'D', vced_count);
84 seq_printf(m, fmt, 'I', vcei_count); 84 seq_printf(m, fmt, 'I', vcei_count);
85 seq_printf(m, "\n"); 85 seq_printf(m, "\n");
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
index 558b5395795d..f11b2bbb826d 100644
--- a/arch/mips/kernel/prom.c
+++ b/arch/mips/kernel/prom.c
@@ -95,3 +95,16 @@ void __init device_tree_init(void)
95 /* free the space reserved for the dt blob */ 95 /* free the space reserved for the dt blob */
96 free_mem_mach(base, size); 96 free_mem_mach(base, size);
97} 97}
98
99void __init __dt_setup_arch(struct boot_param_header *bph)
100{
101 if (be32_to_cpu(bph->magic) != OF_DT_HEADER) {
102 pr_err("DTB has bad magic, ignoring builtin OF DTB\n");
103
104 return;
105 }
106
107 initial_boot_params = bph;
108
109 early_init_devtree(initial_boot_params);
110}
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index c504b212f8f3..a53f8ec37aac 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -605,6 +605,8 @@ void __init setup_arch(char **cmdline_p)
605 605
606 resource_init(); 606 resource_init();
607 plat_smp_setup(); 607 plat_smp_setup();
608
609 cpu_cache_init();
608} 610}
609 611
610unsigned long kernelsp[NR_CPUS]; 612unsigned long kernelsp[NR_CPUS];
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 71a95f55a649..48650c818040 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -106,7 +106,7 @@ asmlinkage __cpuinit void start_secondary(void)
106#endif /* CONFIG_MIPS_MT_SMTC */ 106#endif /* CONFIG_MIPS_MT_SMTC */
107 cpu_probe(); 107 cpu_probe();
108 cpu_report(); 108 cpu_report();
109 per_cpu_trap_init(); 109 per_cpu_trap_init(false);
110 mips_clockevent_init(); 110 mips_clockevent_init();
111 mp_ops->init_secondary(); 111 mp_ops->init_secondary();
112 112
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index cfdaaa4cffc0..2d0c2a277f52 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -15,6 +15,7 @@
15#include <linux/compiler.h> 15#include <linux/compiler.h>
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/module.h>
18#include <linux/mm.h> 19#include <linux/mm.h>
19#include <linux/sched.h> 20#include <linux/sched.h>
20#include <linux/smp.h> 21#include <linux/smp.h>
@@ -91,7 +92,7 @@ void (*board_nmi_handler_setup)(void);
91void (*board_ejtag_handler_setup)(void); 92void (*board_ejtag_handler_setup)(void);
92void (*board_bind_eic_interrupt)(int irq, int regset); 93void (*board_bind_eic_interrupt)(int irq, int regset);
93void (*board_ebase_setup)(void); 94void (*board_ebase_setup)(void);
94 95void __cpuinitdata(*board_cache_error_setup)(void);
95 96
96static void show_raw_backtrace(unsigned long reg29) 97static void show_raw_backtrace(unsigned long reg29)
97{ 98{
@@ -1490,7 +1491,6 @@ void *set_vi_handler(int n, vi_handler_t addr)
1490 return set_vi_srs_handler(n, addr, 0); 1491 return set_vi_srs_handler(n, addr, 0);
1491} 1492}
1492 1493
1493extern void cpu_cache_init(void);
1494extern void tlb_init(void); 1494extern void tlb_init(void);
1495extern void flush_tlb_handlers(void); 1495extern void flush_tlb_handlers(void);
1496 1496
@@ -1517,7 +1517,7 @@ static int __init ulri_disable(char *s)
1517} 1517}
1518__setup("noulri", ulri_disable); 1518__setup("noulri", ulri_disable);
1519 1519
1520void __cpuinit per_cpu_trap_init(void) 1520void __cpuinit per_cpu_trap_init(bool is_boot_cpu)
1521{ 1521{
1522 unsigned int cpu = smp_processor_id(); 1522 unsigned int cpu = smp_processor_id();
1523 unsigned int status_set = ST0_CU0; 1523 unsigned int status_set = ST0_CU0;
@@ -1616,7 +1616,9 @@ void __cpuinit per_cpu_trap_init(void)
1616#ifdef CONFIG_MIPS_MT_SMTC 1616#ifdef CONFIG_MIPS_MT_SMTC
1617 if (bootTC) { 1617 if (bootTC) {
1618#endif /* CONFIG_MIPS_MT_SMTC */ 1618#endif /* CONFIG_MIPS_MT_SMTC */
1619 cpu_cache_init(); 1619 /* Boot CPU's cache setup in setup_arch(). */
1620 if (!is_boot_cpu)
1621 cpu_cache_init();
1620 tlb_init(); 1622 tlb_init();
1621#ifdef CONFIG_MIPS_MT_SMTC 1623#ifdef CONFIG_MIPS_MT_SMTC
1622 } else if (!secondaryTC) { 1624 } else if (!secondaryTC) {
@@ -1632,7 +1634,7 @@ void __cpuinit per_cpu_trap_init(void)
1632} 1634}
1633 1635
1634/* Install CPU exception handler */ 1636/* Install CPU exception handler */
1635void __init set_handler(unsigned long offset, void *addr, unsigned long size) 1637void __cpuinit set_handler(unsigned long offset, void *addr, unsigned long size)
1636{ 1638{
1637 memcpy((void *)(ebase + offset), addr, size); 1639 memcpy((void *)(ebase + offset), addr, size);
1638 local_flush_icache_range(ebase + offset, ebase + offset + size); 1640 local_flush_icache_range(ebase + offset, ebase + offset + size);
@@ -1693,7 +1695,7 @@ void __init trap_init(void)
1693 1695
1694 if (board_ebase_setup) 1696 if (board_ebase_setup)
1695 board_ebase_setup(); 1697 board_ebase_setup();
1696 per_cpu_trap_init(); 1698 per_cpu_trap_init(true);
1697 1699
1698 /* 1700 /*
1699 * Copy the generic exception handlers to their final destination. 1701 * Copy the generic exception handlers to their final destination.
@@ -1797,6 +1799,9 @@ void __init trap_init(void)
1797 1799
1798 set_except_vector(26, handle_dsp); 1800 set_except_vector(26, handle_dsp);
1799 1801
1802 if (board_cache_error_setup)
1803 board_cache_error_setup();
1804
1800 if (cpu_has_vce) 1805 if (cpu_has_vce)
1801 /* Special exception: R4[04]00 uses also the divec space. */ 1806 /* Special exception: R4[04]00 uses also the divec space. */
1802 memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100); 1807 memcpy((void *)(ebase + 0x180), &except_vec3_r4000, 0x100);
diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig
index 3fccf2104513..20bdf40b3efa 100644
--- a/arch/mips/lantiq/Kconfig
+++ b/arch/mips/lantiq/Kconfig
@@ -16,8 +16,22 @@ config SOC_XWAY
16 bool "XWAY" 16 bool "XWAY"
17 select SOC_TYPE_XWAY 17 select SOC_TYPE_XWAY
18 select HW_HAS_PCI 18 select HW_HAS_PCI
19
20config SOC_FALCON
21 bool "FALCON"
22
23endchoice
24
25choice
26 prompt "Devicetree"
27
28config DT_EASY50712
29 bool "Easy50712"
30 depends on SOC_XWAY
19endchoice 31endchoice
20 32
21source "arch/mips/lantiq/xway/Kconfig" 33config PCI_LANTIQ
34 bool "PCI Support"
35 depends on SOC_XWAY && PCI
22 36
23endif 37endif
diff --git a/arch/mips/lantiq/Makefile b/arch/mips/lantiq/Makefile
index e5dae0e24b00..d6bdc579419f 100644
--- a/arch/mips/lantiq/Makefile
+++ b/arch/mips/lantiq/Makefile
@@ -4,8 +4,11 @@
4# under the terms of the GNU General Public License version 2 as published 4# under the terms of the GNU General Public License version 2 as published
5# by the Free Software Foundation. 5# by the Free Software Foundation.
6 6
7obj-y := irq.o setup.o clk.o prom.o devices.o 7obj-y := irq.o clk.o prom.o
8
9obj-y += dts/
8 10
9obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 11obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
10 12
11obj-$(CONFIG_SOC_TYPE_XWAY) += xway/ 13obj-$(CONFIG_SOC_TYPE_XWAY) += xway/
14obj-$(CONFIG_SOC_FALCON) += falcon/
diff --git a/arch/mips/lantiq/Platform b/arch/mips/lantiq/Platform
index f3dff05722de..b3ec49838fd7 100644
--- a/arch/mips/lantiq/Platform
+++ b/arch/mips/lantiq/Platform
@@ -6,3 +6,4 @@ platform-$(CONFIG_LANTIQ) += lantiq/
6cflags-$(CONFIG_LANTIQ) += -I$(srctree)/arch/mips/include/asm/mach-lantiq 6cflags-$(CONFIG_LANTIQ) += -I$(srctree)/arch/mips/include/asm/mach-lantiq
7load-$(CONFIG_LANTIQ) = 0xffffffff80002000 7load-$(CONFIG_LANTIQ) = 0xffffffff80002000
8cflags-$(CONFIG_SOC_TYPE_XWAY) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/xway 8cflags-$(CONFIG_SOC_TYPE_XWAY) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/xway
9cflags-$(CONFIG_SOC_FALCON) += -I$(srctree)/arch/mips/include/asm/mach-lantiq/falcon
diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
index 412814fdd3ee..d3bcc33f4699 100644
--- a/arch/mips/lantiq/clk.c
+++ b/arch/mips/lantiq/clk.c
@@ -12,6 +12,7 @@
12#include <linux/kernel.h> 12#include <linux/kernel.h>
13#include <linux/types.h> 13#include <linux/types.h>
14#include <linux/clk.h> 14#include <linux/clk.h>
15#include <linux/clkdev.h>
15#include <linux/err.h> 16#include <linux/err.h>
16#include <linux/list.h> 17#include <linux/list.h>
17 18
@@ -22,44 +23,32 @@
22#include <lantiq_soc.h> 23#include <lantiq_soc.h>
23 24
24#include "clk.h" 25#include "clk.h"
26#include "prom.h"
25 27
26struct clk { 28/* lantiq socs have 3 static clocks */
27 const char *name; 29static struct clk cpu_clk_generic[3];
28 unsigned long rate;
29 unsigned long (*get_rate) (void);
30};
31 30
32static struct clk *cpu_clk; 31void clkdev_add_static(unsigned long cpu, unsigned long fpi, unsigned long io)
33static int cpu_clk_cnt; 32{
33 cpu_clk_generic[0].rate = cpu;
34 cpu_clk_generic[1].rate = fpi;
35 cpu_clk_generic[2].rate = io;
36}
34 37
35/* lantiq socs have 3 static clocks */ 38struct clk *clk_get_cpu(void)
36static struct clk cpu_clk_generic[] = { 39{
37 { 40 return &cpu_clk_generic[0];
38 .name = "cpu", 41}
39 .get_rate = ltq_get_cpu_hz, 42
40 }, { 43struct clk *clk_get_fpi(void)
41 .name = "fpi", 44{
42 .get_rate = ltq_get_fpi_hz, 45 return &cpu_clk_generic[1];
43 }, { 46}
44 .name = "io", 47EXPORT_SYMBOL_GPL(clk_get_fpi);
45 .get_rate = ltq_get_io_region_clock, 48
46 }, 49struct clk *clk_get_io(void)
47};
48
49static struct resource ltq_cgu_resource = {
50 .name = "cgu",
51 .start = LTQ_CGU_BASE_ADDR,
52 .end = LTQ_CGU_BASE_ADDR + LTQ_CGU_SIZE - 1,
53 .flags = IORESOURCE_MEM,
54};
55
56/* remapped clock register range */
57void __iomem *ltq_cgu_membase;
58
59void clk_init(void)
60{ 50{
61 cpu_clk = cpu_clk_generic; 51 return &cpu_clk_generic[2];
62 cpu_clk_cnt = ARRAY_SIZE(cpu_clk_generic);
63} 52}
64 53
65static inline int clk_good(struct clk *clk) 54static inline int clk_good(struct clk *clk)
@@ -82,38 +71,71 @@ unsigned long clk_get_rate(struct clk *clk)
82} 71}
83EXPORT_SYMBOL(clk_get_rate); 72EXPORT_SYMBOL(clk_get_rate);
84 73
85struct clk *clk_get(struct device *dev, const char *id) 74int clk_set_rate(struct clk *clk, unsigned long rate)
86{ 75{
87 int i; 76 if (unlikely(!clk_good(clk)))
88 77 return 0;
89 for (i = 0; i < cpu_clk_cnt; i++) 78 if (clk->rates && *clk->rates) {
90 if (!strcmp(id, cpu_clk[i].name)) 79 unsigned long *r = clk->rates;
91 return &cpu_clk[i]; 80
92 BUG(); 81 while (*r && (*r != rate))
93 return ERR_PTR(-ENOENT); 82 r++;
94} 83 if (!*r) {
95EXPORT_SYMBOL(clk_get); 84 pr_err("clk %s.%s: trying to set invalid rate %ld\n",
96 85 clk->cl.dev_id, clk->cl.con_id, rate);
97void clk_put(struct clk *clk) 86 return -1;
98{ 87 }
99 /* not used */ 88 }
89 clk->rate = rate;
90 return 0;
100} 91}
101EXPORT_SYMBOL(clk_put); 92EXPORT_SYMBOL(clk_set_rate);
102 93
103int clk_enable(struct clk *clk) 94int clk_enable(struct clk *clk)
104{ 95{
105 /* not used */ 96 if (unlikely(!clk_good(clk)))
106 return 0; 97 return -1;
98
99 if (clk->enable)
100 return clk->enable(clk);
101
102 return -1;
107} 103}
108EXPORT_SYMBOL(clk_enable); 104EXPORT_SYMBOL(clk_enable);
109 105
110void clk_disable(struct clk *clk) 106void clk_disable(struct clk *clk)
111{ 107{
112 /* not used */ 108 if (unlikely(!clk_good(clk)))
109 return;
110
111 if (clk->disable)
112 clk->disable(clk);
113} 113}
114EXPORT_SYMBOL(clk_disable); 114EXPORT_SYMBOL(clk_disable);
115 115
116static inline u32 ltq_get_counter_resolution(void) 116int clk_activate(struct clk *clk)
117{
118 if (unlikely(!clk_good(clk)))
119 return -1;
120
121 if (clk->activate)
122 return clk->activate(clk);
123
124 return -1;
125}
126EXPORT_SYMBOL(clk_activate);
127
128void clk_deactivate(struct clk *clk)
129{
130 if (unlikely(!clk_good(clk)))
131 return;
132
133 if (clk->deactivate)
134 clk->deactivate(clk);
135}
136EXPORT_SYMBOL(clk_deactivate);
137
138static inline u32 get_counter_resolution(void)
117{ 139{
118 u32 res; 140 u32 res;
119 141
@@ -133,21 +155,11 @@ void __init plat_time_init(void)
133{ 155{
134 struct clk *clk; 156 struct clk *clk;
135 157
136 if (insert_resource(&iomem_resource, &ltq_cgu_resource) < 0) 158 ltq_soc_init();
137 panic("Failed to insert cgu memory");
138 159
139 if (request_mem_region(ltq_cgu_resource.start, 160 clk = clk_get_cpu();
140 resource_size(&ltq_cgu_resource), "cgu") < 0) 161 mips_hpt_frequency = clk_get_rate(clk) / get_counter_resolution();
141 panic("Failed to request cgu memory");
142
143 ltq_cgu_membase = ioremap_nocache(ltq_cgu_resource.start,
144 resource_size(&ltq_cgu_resource));
145 if (!ltq_cgu_membase) {
146 pr_err("Failed to remap cgu memory\n");
147 unreachable();
148 }
149 clk = clk_get(0, "cpu");
150 mips_hpt_frequency = clk_get_rate(clk) / ltq_get_counter_resolution();
151 write_c0_compare(read_c0_count()); 162 write_c0_compare(read_c0_count());
163 pr_info("CPU Clock: %ldMHz\n", clk_get_rate(clk) / 1000000);
152 clk_put(clk); 164 clk_put(clk);
153} 165}
diff --git a/arch/mips/lantiq/clk.h b/arch/mips/lantiq/clk.h
index 3328925f2c3f..fa670602b91b 100644
--- a/arch/mips/lantiq/clk.h
+++ b/arch/mips/lantiq/clk.h
@@ -9,10 +9,70 @@
9#ifndef _LTQ_CLK_H__ 9#ifndef _LTQ_CLK_H__
10#define _LTQ_CLK_H__ 10#define _LTQ_CLK_H__
11 11
12extern void clk_init(void); 12#include <linux/clkdev.h>
13 13
14extern unsigned long ltq_get_cpu_hz(void); 14/* clock speeds */
15extern unsigned long ltq_get_fpi_hz(void); 15#define CLOCK_33M 33333333
16extern unsigned long ltq_get_io_region_clock(void); 16#define CLOCK_60M 60000000
17#define CLOCK_62_5M 62500000
18#define CLOCK_83M 83333333
19#define CLOCK_83_5M 83500000
20#define CLOCK_98_304M 98304000
21#define CLOCK_100M 100000000
22#define CLOCK_111M 111111111
23#define CLOCK_125M 125000000
24#define CLOCK_133M 133333333
25#define CLOCK_150M 150000000
26#define CLOCK_166M 166666666
27#define CLOCK_167M 166666667
28#define CLOCK_196_608M 196608000
29#define CLOCK_200M 200000000
30#define CLOCK_250M 250000000
31#define CLOCK_266M 266666666
32#define CLOCK_300M 300000000
33#define CLOCK_333M 333333333
34#define CLOCK_393M 393215332
35#define CLOCK_400M 400000000
36#define CLOCK_500M 500000000
37#define CLOCK_600M 600000000
38
39/* clock out speeds */
40#define CLOCK_32_768K 32768
41#define CLOCK_1_536M 1536000
42#define CLOCK_2_5M 2500000
43#define CLOCK_12M 12000000
44#define CLOCK_24M 24000000
45#define CLOCK_25M 25000000
46#define CLOCK_30M 30000000
47#define CLOCK_40M 40000000
48#define CLOCK_48M 48000000
49#define CLOCK_50M 50000000
50#define CLOCK_60M 60000000
51
52struct clk {
53 struct clk_lookup cl;
54 unsigned long rate;
55 unsigned long *rates;
56 unsigned int module;
57 unsigned int bits;
58 unsigned long (*get_rate) (void);
59 int (*enable) (struct clk *clk);
60 void (*disable) (struct clk *clk);
61 int (*activate) (struct clk *clk);
62 void (*deactivate) (struct clk *clk);
63 void (*reboot) (struct clk *clk);
64};
65
66extern void clkdev_add_static(unsigned long cpu, unsigned long fpi,
67 unsigned long io);
68
69extern unsigned long ltq_danube_cpu_hz(void);
70extern unsigned long ltq_danube_fpi_hz(void);
71
72extern unsigned long ltq_ar9_cpu_hz(void);
73extern unsigned long ltq_ar9_fpi_hz(void);
74
75extern unsigned long ltq_vr9_cpu_hz(void);
76extern unsigned long ltq_vr9_fpi_hz(void);
17 77
18#endif 78#endif
diff --git a/arch/mips/lantiq/devices.c b/arch/mips/lantiq/devices.c
deleted file mode 100644
index de1cb2bcd79a..000000000000
--- a/arch/mips/lantiq/devices.c
+++ /dev/null
@@ -1,120 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/init.h>
10#include <linux/export.h>
11#include <linux/types.h>
12#include <linux/string.h>
13#include <linux/kernel.h>
14#include <linux/reboot.h>
15#include <linux/platform_device.h>
16#include <linux/leds.h>
17#include <linux/etherdevice.h>
18#include <linux/time.h>
19#include <linux/io.h>
20#include <linux/gpio.h>
21
22#include <asm/bootinfo.h>
23#include <asm/irq.h>
24
25#include <lantiq_soc.h>
26
27#include "devices.h"
28
29/* nor flash */
30static struct resource ltq_nor_resource = {
31 .name = "nor",
32 .start = LTQ_FLASH_START,
33 .end = LTQ_FLASH_START + LTQ_FLASH_MAX - 1,
34 .flags = IORESOURCE_MEM,
35};
36
37static struct platform_device ltq_nor = {
38 .name = "ltq_nor",
39 .resource = &ltq_nor_resource,
40 .num_resources = 1,
41};
42
43void __init ltq_register_nor(struct physmap_flash_data *data)
44{
45 ltq_nor.dev.platform_data = data;
46 platform_device_register(&ltq_nor);
47}
48
49/* watchdog */
50static struct resource ltq_wdt_resource = {
51 .name = "watchdog",
52 .start = LTQ_WDT_BASE_ADDR,
53 .end = LTQ_WDT_BASE_ADDR + LTQ_WDT_SIZE - 1,
54 .flags = IORESOURCE_MEM,
55};
56
57void __init ltq_register_wdt(void)
58{
59 platform_device_register_simple("ltq_wdt", 0, &ltq_wdt_resource, 1);
60}
61
62/* asc ports */
63static struct resource ltq_asc0_resources[] = {
64 {
65 .name = "asc0",
66 .start = LTQ_ASC0_BASE_ADDR,
67 .end = LTQ_ASC0_BASE_ADDR + LTQ_ASC_SIZE - 1,
68 .flags = IORESOURCE_MEM,
69 },
70 IRQ_RES(tx, LTQ_ASC_TIR(0)),
71 IRQ_RES(rx, LTQ_ASC_RIR(0)),
72 IRQ_RES(err, LTQ_ASC_EIR(0)),
73};
74
75static struct resource ltq_asc1_resources[] = {
76 {
77 .name = "asc1",
78 .start = LTQ_ASC1_BASE_ADDR,
79 .end = LTQ_ASC1_BASE_ADDR + LTQ_ASC_SIZE - 1,
80 .flags = IORESOURCE_MEM,
81 },
82 IRQ_RES(tx, LTQ_ASC_TIR(1)),
83 IRQ_RES(rx, LTQ_ASC_RIR(1)),
84 IRQ_RES(err, LTQ_ASC_EIR(1)),
85};
86
87void __init ltq_register_asc(int port)
88{
89 switch (port) {
90 case 0:
91 platform_device_register_simple("ltq_asc", 0,
92 ltq_asc0_resources, ARRAY_SIZE(ltq_asc0_resources));
93 break;
94 case 1:
95 platform_device_register_simple("ltq_asc", 1,
96 ltq_asc1_resources, ARRAY_SIZE(ltq_asc1_resources));
97 break;
98 default:
99 break;
100 }
101}
102
103#ifdef CONFIG_PCI
104/* pci */
105static struct platform_device ltq_pci = {
106 .name = "ltq_pci",
107 .num_resources = 0,
108};
109
110void __init ltq_register_pci(struct ltq_pci_data *data)
111{
112 ltq_pci.dev.platform_data = data;
113 platform_device_register(&ltq_pci);
114}
115#else
116void __init ltq_register_pci(struct ltq_pci_data *data)
117{
118 pr_err("kernel is compiled without PCI support\n");
119}
120#endif
diff --git a/arch/mips/lantiq/devices.h b/arch/mips/lantiq/devices.h
deleted file mode 100644
index 2947bb19a528..000000000000
--- a/arch/mips/lantiq/devices.h
+++ /dev/null
@@ -1,23 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#ifndef _LTQ_DEVICES_H__
10#define _LTQ_DEVICES_H__
11
12#include <lantiq_platform.h>
13#include <linux/mtd/physmap.h>
14
15#define IRQ_RES(resname, irq) \
16 {.name = #resname, .start = (irq), .flags = IORESOURCE_IRQ}
17
18extern void ltq_register_nor(struct physmap_flash_data *data);
19extern void ltq_register_wdt(void);
20extern void ltq_register_asc(int port);
21extern void ltq_register_pci(struct ltq_pci_data *data);
22
23#endif
diff --git a/arch/mips/lantiq/dts/Makefile b/arch/mips/lantiq/dts/Makefile
new file mode 100644
index 000000000000..674fca45f72d
--- /dev/null
+++ b/arch/mips/lantiq/dts/Makefile
@@ -0,0 +1,4 @@
1obj-$(CONFIG_DT_EASY50712) := easy50712.dtb.o
2
3$(obj)/%.dtb: $(obj)/%.dts
4 $(call if_changed,dtc)
diff --git a/arch/mips/lantiq/dts/danube.dtsi b/arch/mips/lantiq/dts/danube.dtsi
new file mode 100644
index 000000000000..3a4520f009cf
--- /dev/null
+++ b/arch/mips/lantiq/dts/danube.dtsi
@@ -0,0 +1,105 @@
1/ {
2 #address-cells = <1>;
3 #size-cells = <1>;
4 compatible = "lantiq,xway", "lantiq,danube";
5
6 cpus {
7 cpu@0 {
8 compatible = "mips,mips24Kc";
9 };
10 };
11
12 biu@1F800000 {
13 #address-cells = <1>;
14 #size-cells = <1>;
15 compatible = "lantiq,biu", "simple-bus";
16 reg = <0x1F800000 0x800000>;
17 ranges = <0x0 0x1F800000 0x7FFFFF>;
18
19 icu0: icu@80200 {
20 #interrupt-cells = <1>;
21 interrupt-controller;
22 compatible = "lantiq,icu";
23 reg = <0x80200 0x120>;
24 };
25
26 watchdog@803F0 {
27 compatible = "lantiq,wdt";
28 reg = <0x803F0 0x10>;
29 };
30 };
31
32 sram@1F000000 {
33 #address-cells = <1>;
34 #size-cells = <1>;
35 compatible = "lantiq,sram";
36 reg = <0x1F000000 0x800000>;
37 ranges = <0x0 0x1F000000 0x7FFFFF>;
38
39 eiu0: eiu@101000 {
40 #interrupt-cells = <1>;
41 interrupt-controller;
42 interrupt-parent;
43 compatible = "lantiq,eiu-xway";
44 reg = <0x101000 0x1000>;
45 };
46
47 pmu0: pmu@102000 {
48 compatible = "lantiq,pmu-xway";
49 reg = <0x102000 0x1000>;
50 };
51
52 cgu0: cgu@103000 {
53 compatible = "lantiq,cgu-xway";
54 reg = <0x103000 0x1000>;
55 #clock-cells = <1>;
56 };
57
58 rcu0: rcu@203000 {
59 compatible = "lantiq,rcu-xway";
60 reg = <0x203000 0x1000>;
61 };
62 };
63
64 fpi@10000000 {
65 #address-cells = <1>;
66 #size-cells = <1>;
67 compatible = "lantiq,fpi", "simple-bus";
68 ranges = <0x0 0x10000000 0xEEFFFFF>;
69 reg = <0x10000000 0xEF00000>;
70
71 gptu@E100A00 {
72 compatible = "lantiq,gptu-xway";
73 reg = <0xE100A00 0x100>;
74 };
75
76 serial@E100C00 {
77 compatible = "lantiq,asc";
78 reg = <0xE100C00 0x400>;
79 interrupt-parent = <&icu0>;
80 interrupts = <112 113 114>;
81 };
82
83 dma0: dma@E104100 {
84 compatible = "lantiq,dma-xway";
85 reg = <0xE104100 0x800>;
86 };
87
88 ebu0: ebu@E105300 {
89 compatible = "lantiq,ebu-xway";
90 reg = <0xE105300 0x100>;
91 };
92
93 pci0: pci@E105400 {
94 #address-cells = <3>;
95 #size-cells = <2>;
96 #interrupt-cells = <1>;
97 compatible = "lantiq,pci-xway";
98 bus-range = <0x0 0x0>;
99 ranges = <0x2000000 0 0x8000000 0x8000000 0 0x2000000 /* pci memory */
100 0x1000000 0 0x00000000 0xAE00000 0 0x200000>; /* io space */
101 reg = <0x7000000 0x8000 /* config space */
102 0xE105400 0x400>; /* pci bridge */
103 };
104 };
105};
diff --git a/arch/mips/lantiq/dts/easy50712.dts b/arch/mips/lantiq/dts/easy50712.dts
new file mode 100644
index 000000000000..68c17310bc82
--- /dev/null
+++ b/arch/mips/lantiq/dts/easy50712.dts
@@ -0,0 +1,113 @@
1/dts-v1/;
2
3/include/ "danube.dtsi"
4
5/ {
6 chosen {
7 bootargs = "console=ttyLTQ0,115200 init=/etc/preinit";
8 };
9
10 memory@0 {
11 reg = <0x0 0x2000000>;
12 };
13
14 fpi@10000000 {
15 #address-cells = <1>;
16 #size-cells = <1>;
17 localbus@0 {
18 #address-cells = <2>;
19 #size-cells = <1>;
20 ranges = <0 0 0x0 0x3ffffff /* addrsel0 */
21 1 0 0x4000000 0x4000010>; /* addsel1 */
22 compatible = "lantiq,localbus", "simple-bus";
23
24 nor-boot@0 {
25 compatible = "lantiq,nor";
26 bank-width = <2>;
27 reg = <0 0x0 0x2000000>;
28 #address-cells = <1>;
29 #size-cells = <1>;
30
31 partition@0 {
32 label = "uboot";
33 reg = <0x00000 0x10000>; /* 64 KB */
34 };
35
36 partition@10000 {
37 label = "uboot_env";
38 reg = <0x10000 0x10000>; /* 64 KB */
39 };
40
41 partition@20000 {
42 label = "linux";
43 reg = <0x20000 0x3d0000>;
44 };
45
46 partition@400000 {
47 label = "rootfs";
48 reg = <0x400000 0x400000>;
49 };
50 };
51 };
52
53 gpio: pinmux@E100B10 {
54 compatible = "lantiq,pinctrl-xway";
55 pinctrl-names = "default";
56 pinctrl-0 = <&state_default>;
57
58 #gpio-cells = <2>;
59 gpio-controller;
60 reg = <0xE100B10 0xA0>;
61
62 state_default: pinmux {
63 stp {
64 lantiq,groups = "stp";
65 lantiq,function = "stp";
66 };
67 exin {
68 lantiq,groups = "exin1";
69 lantiq,function = "exin";
70 };
71 pci {
72 lantiq,groups = "gnt1";
73 lantiq,function = "pci";
74 };
75 conf_out {
76 lantiq,pins = "io4", "io5", "io6"; /* stp */
77 lantiq,open-drain;
78 lantiq,pull = <0>;
79 };
80 };
81 };
82
83 etop@E180000 {
84 compatible = "lantiq,etop-xway";
85 reg = <0xE180000 0x40000>;
86 interrupt-parent = <&icu0>;
87 interrupts = <73 78>;
88 phy-mode = "rmii";
89 mac-address = [ 00 11 22 33 44 55 ];
90 };
91
92 stp0: stp@E100BB0 {
93 #gpio-cells = <2>;
94 compatible = "lantiq,gpio-stp-xway";
95 gpio-controller;
96 reg = <0xE100BB0 0x40>;
97
98 lantiq,shadow = <0xfff>;
99 lantiq,groups = <0x3>;
100 };
101
102 pci@E105400 {
103 lantiq,bus-clock = <33333333>;
104 interrupt-map-mask = <0xf800 0x0 0x0 0x7>;
105 interrupt-map = <
106 0x7000 0 0 1 &icu0 29 1 // slot 14, irq 29
107 >;
108 gpios-reset = <&gpio 21 0>;
109 req-mask = <0x1>; /* GNT1 */
110 };
111
112 };
113};
diff --git a/arch/mips/lantiq/early_printk.c b/arch/mips/lantiq/early_printk.c
index 972e05f87631..9b28d0940ef4 100644
--- a/arch/mips/lantiq/early_printk.c
+++ b/arch/mips/lantiq/early_printk.c
@@ -6,17 +6,16 @@
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org> 6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */ 7 */
8 8
9#include <linux/init.h>
10#include <linux/cpu.h> 9#include <linux/cpu.h>
11
12#include <lantiq.h>
13#include <lantiq_soc.h> 10#include <lantiq_soc.h>
14 11
15/* no ioremap possible at this early stage, lets use KSEG1 instead */
16#define LTQ_ASC_BASE KSEG1ADDR(LTQ_ASC1_BASE_ADDR)
17#define ASC_BUF 1024 12#define ASC_BUF 1024
18#define LTQ_ASC_FSTAT ((u32 *)(LTQ_ASC_BASE + 0x0048)) 13#define LTQ_ASC_FSTAT ((u32 *)(LTQ_EARLY_ASC + 0x0048))
19#define LTQ_ASC_TBUF ((u32 *)(LTQ_ASC_BASE + 0x0020)) 14#ifdef __BIG_ENDIAN
15#define LTQ_ASC_TBUF ((u32 *)(LTQ_EARLY_ASC + 0x0020 + 3))
16#else
17#define LTQ_ASC_TBUF ((u32 *)(LTQ_EARLY_ASC + 0x0020))
18#endif
20#define TXMASK 0x3F00 19#define TXMASK 0x3F00
21#define TXOFFSET 8 20#define TXOFFSET 8
22 21
@@ -27,7 +26,7 @@ void prom_putchar(char c)
27 local_irq_save(flags); 26 local_irq_save(flags);
28 do { } while ((ltq_r32(LTQ_ASC_FSTAT) & TXMASK) >> TXOFFSET); 27 do { } while ((ltq_r32(LTQ_ASC_FSTAT) & TXMASK) >> TXOFFSET);
29 if (c == '\n') 28 if (c == '\n')
30 ltq_w32('\r', LTQ_ASC_TBUF); 29 ltq_w8('\r', LTQ_ASC_TBUF);
31 ltq_w32(c, LTQ_ASC_TBUF); 30 ltq_w8(c, LTQ_ASC_TBUF);
32 local_irq_restore(flags); 31 local_irq_restore(flags);
33} 32}
diff --git a/arch/mips/lantiq/falcon/Makefile b/arch/mips/lantiq/falcon/Makefile
new file mode 100644
index 000000000000..ff220f97693d
--- /dev/null
+++ b/arch/mips/lantiq/falcon/Makefile
@@ -0,0 +1 @@
obj-y := prom.o reset.o sysctrl.o
diff --git a/arch/mips/lantiq/falcon/prom.c b/arch/mips/lantiq/falcon/prom.c
new file mode 100644
index 000000000000..c1d278f05a3a
--- /dev/null
+++ b/arch/mips/lantiq/falcon/prom.c
@@ -0,0 +1,87 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
7 * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
8 */
9
10#include <linux/kernel.h>
11#include <asm/io.h>
12
13#include <lantiq_soc.h>
14
15#include "../prom.h"
16
17#define SOC_FALCON "Falcon"
18#define SOC_FALCON_D "Falcon-D"
19#define SOC_FALCON_V "Falcon-V"
20#define SOC_FALCON_M "Falcon-M"
21
22#define COMP_FALCON "lantiq,falcon"
23
24#define PART_SHIFT 12
25#define PART_MASK 0x0FFFF000
26#define REV_SHIFT 28
27#define REV_MASK 0xF0000000
28#define SREV_SHIFT 22
29#define SREV_MASK 0x03C00000
30#define TYPE_SHIFT 26
31#define TYPE_MASK 0x3C000000
32
33/* reset, nmi and ejtag exception vectors */
34#define BOOT_REG_BASE (KSEG1 | 0x1F200000)
35#define BOOT_RVEC (BOOT_REG_BASE | 0x00)
36#define BOOT_NVEC (BOOT_REG_BASE | 0x04)
37#define BOOT_EVEC (BOOT_REG_BASE | 0x08)
38
39void __init ltq_soc_nmi_setup(void)
40{
41 extern void (*nmi_handler)(void);
42
43 ltq_w32((unsigned long)&nmi_handler, (void *)BOOT_NVEC);
44}
45
46void __init ltq_soc_ejtag_setup(void)
47{
48 extern void (*ejtag_debug_handler)(void);
49
50 ltq_w32((unsigned long)&ejtag_debug_handler, (void *)BOOT_EVEC);
51}
52
53void __init ltq_soc_detect(struct ltq_soc_info *i)
54{
55 u32 type;
56 i->partnum = (ltq_r32(FALCON_CHIPID) & PART_MASK) >> PART_SHIFT;
57 i->rev = (ltq_r32(FALCON_CHIPID) & REV_MASK) >> REV_SHIFT;
58 i->srev = ((ltq_r32(FALCON_CHIPCONF) & SREV_MASK) >> SREV_SHIFT);
59 i->compatible = COMP_FALCON;
60 i->type = SOC_TYPE_FALCON;
61 sprintf(i->rev_type, "%c%d%d", (i->srev & 0x4) ? ('B') : ('A'),
62 i->rev & 0x7, (i->srev & 0x3) + 1);
63
64 switch (i->partnum) {
65 case SOC_ID_FALCON:
66 type = (ltq_r32(FALCON_CHIPTYPE) & TYPE_MASK) >> TYPE_SHIFT;
67 switch (type) {
68 case 0:
69 i->name = SOC_FALCON_D;
70 break;
71 case 1:
72 i->name = SOC_FALCON_V;
73 break;
74 case 2:
75 i->name = SOC_FALCON_M;
76 break;
77 default:
78 i->name = SOC_FALCON;
79 break;
80 }
81 break;
82
83 default:
84 unreachable();
85 break;
86 }
87}
diff --git a/arch/mips/lantiq/falcon/reset.c b/arch/mips/lantiq/falcon/reset.c
new file mode 100644
index 000000000000..568248253426
--- /dev/null
+++ b/arch/mips/lantiq/falcon/reset.c
@@ -0,0 +1,90 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
7 * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
8 */
9
10#include <linux/init.h>
11#include <linux/io.h>
12#include <linux/pm.h>
13#include <asm/reboot.h>
14#include <linux/export.h>
15
16#include <lantiq_soc.h>
17
18/* CPU0 Reset Source Register */
19#define SYS1_CPU0RS 0x0040
20/* reset cause mask */
21#define CPU0RS_MASK 0x0003
22/* CPU0 Boot Mode Register */
23#define SYS1_BM 0x00a0
24/* boot mode mask */
25#define BM_MASK 0x0005
26
27/* allow platform code to find out what surce we booted from */
28unsigned char ltq_boot_select(void)
29{
30 return ltq_sys1_r32(SYS1_BM) & BM_MASK;
31}
32
33/* allow the watchdog driver to find out what the boot reason was */
34int ltq_reset_cause(void)
35{
36 return ltq_sys1_r32(SYS1_CPU0RS) & CPU0RS_MASK;
37}
38EXPORT_SYMBOL_GPL(ltq_reset_cause);
39
40#define BOOT_REG_BASE (KSEG1 | 0x1F200000)
41#define BOOT_PW1_REG (BOOT_REG_BASE | 0x20)
42#define BOOT_PW2_REG (BOOT_REG_BASE | 0x24)
43#define BOOT_PW1 0x4C545100
44#define BOOT_PW2 0x0051544C
45
46#define WDT_REG_BASE (KSEG1 | 0x1F8803F0)
47#define WDT_PW1 0x00BE0000
48#define WDT_PW2 0x00DC0000
49
50static void machine_restart(char *command)
51{
52 local_irq_disable();
53
54 /* reboot magic */
55 ltq_w32(BOOT_PW1, (void *)BOOT_PW1_REG); /* 'LTQ\0' */
56 ltq_w32(BOOT_PW2, (void *)BOOT_PW2_REG); /* '\0QTL' */
57 ltq_w32(0, (void *)BOOT_REG_BASE); /* reset Bootreg RVEC */
58
59 /* watchdog magic */
60 ltq_w32(WDT_PW1, (void *)WDT_REG_BASE);
61 ltq_w32(WDT_PW2 |
62 (0x3 << 26) | /* PWL */
63 (0x2 << 24) | /* CLKDIV */
64 (0x1 << 31) | /* enable */
65 (1), /* reload */
66 (void *)WDT_REG_BASE);
67 unreachable();
68}
69
70static void machine_halt(void)
71{
72 local_irq_disable();
73 unreachable();
74}
75
76static void machine_power_off(void)
77{
78 local_irq_disable();
79 unreachable();
80}
81
82static int __init mips_reboot_setup(void)
83{
84 _machine_restart = machine_restart;
85 _machine_halt = machine_halt;
86 pm_power_off = machine_power_off;
87 return 0;
88}
89
90arch_initcall(mips_reboot_setup);
diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c
new file mode 100644
index 000000000000..ba0123d13d40
--- /dev/null
+++ b/arch/mips/lantiq/falcon/sysctrl.c
@@ -0,0 +1,260 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
7 * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
8 */
9
10#include <linux/ioport.h>
11#include <linux/export.h>
12#include <linux/clkdev.h>
13#include <linux/of_address.h>
14#include <asm/delay.h>
15
16#include <lantiq_soc.h>
17
18#include "../clk.h"
19
20/* infrastructure control register */
21#define SYS1_INFRAC 0x00bc
22/* Configuration fuses for drivers and pll */
23#define STATUS_CONFIG 0x0040
24
25/* GPE frequency selection */
26#define GPPC_OFFSET 24
27#define GPEFREQ_MASK 0x00000C0
28#define GPEFREQ_OFFSET 10
29/* Clock status register */
30#define SYSCTL_CLKS 0x0000
31/* Clock enable register */
32#define SYSCTL_CLKEN 0x0004
33/* Clock clear register */
34#define SYSCTL_CLKCLR 0x0008
35/* Activation Status Register */
36#define SYSCTL_ACTS 0x0020
37/* Activation Register */
38#define SYSCTL_ACT 0x0024
39/* Deactivation Register */
40#define SYSCTL_DEACT 0x0028
41/* reboot Register */
42#define SYSCTL_RBT 0x002c
43/* CPU0 Clock Control Register */
44#define SYS1_CPU0CC 0x0040
45/* HRST_OUT_N Control Register */
46#define SYS1_HRSTOUTC 0x00c0
47/* clock divider bit */
48#define CPU0CC_CPUDIV 0x0001
49
50/* Activation Status Register */
51#define ACTS_ASC1_ACT 0x00000800
52#define ACTS_I2C_ACT 0x00004000
53#define ACTS_P0 0x00010000
54#define ACTS_P1 0x00010000
55#define ACTS_P2 0x00020000
56#define ACTS_P3 0x00020000
57#define ACTS_P4 0x00040000
58#define ACTS_PADCTRL0 0x00100000
59#define ACTS_PADCTRL1 0x00100000
60#define ACTS_PADCTRL2 0x00200000
61#define ACTS_PADCTRL3 0x00200000
62#define ACTS_PADCTRL4 0x00400000
63
64#define sysctl_w32(m, x, y) ltq_w32((x), sysctl_membase[m] + (y))
65#define sysctl_r32(m, x) ltq_r32(sysctl_membase[m] + (x))
66#define sysctl_w32_mask(m, clear, set, reg) \
67 sysctl_w32(m, (sysctl_r32(m, reg) & ~(clear)) | (set), reg)
68
69#define status_w32(x, y) ltq_w32((x), status_membase + (y))
70#define status_r32(x) ltq_r32(status_membase + (x))
71
72static void __iomem *sysctl_membase[3], *status_membase;
73void __iomem *ltq_sys1_membase, *ltq_ebu_membase;
74
75void falcon_trigger_hrst(int level)
76{
77 sysctl_w32(SYSCTL_SYS1, level & 1, SYS1_HRSTOUTC);
78}
79
80static inline void sysctl_wait(struct clk *clk,
81 unsigned int test, unsigned int reg)
82{
83 int err = 1000000;
84
85 do {} while (--err && ((sysctl_r32(clk->module, reg)
86 & clk->bits) != test));
87 if (!err)
88 pr_err("module de/activation failed %d %08X %08X %08X\n",
89 clk->module, clk->bits, test,
90 sysctl_r32(clk->module, reg) & clk->bits);
91}
92
93static int sysctl_activate(struct clk *clk)
94{
95 sysctl_w32(clk->module, clk->bits, SYSCTL_CLKEN);
96 sysctl_w32(clk->module, clk->bits, SYSCTL_ACT);
97 sysctl_wait(clk, clk->bits, SYSCTL_ACTS);
98 return 0;
99}
100
101static void sysctl_deactivate(struct clk *clk)
102{
103 sysctl_w32(clk->module, clk->bits, SYSCTL_CLKCLR);
104 sysctl_w32(clk->module, clk->bits, SYSCTL_DEACT);
105 sysctl_wait(clk, 0, SYSCTL_ACTS);
106}
107
108static int sysctl_clken(struct clk *clk)
109{
110 sysctl_w32(clk->module, clk->bits, SYSCTL_CLKEN);
111 sysctl_wait(clk, clk->bits, SYSCTL_CLKS);
112 return 0;
113}
114
115static void sysctl_clkdis(struct clk *clk)
116{
117 sysctl_w32(clk->module, clk->bits, SYSCTL_CLKCLR);
118 sysctl_wait(clk, 0, SYSCTL_CLKS);
119}
120
121static void sysctl_reboot(struct clk *clk)
122{
123 unsigned int act;
124 unsigned int bits;
125
126 act = sysctl_r32(clk->module, SYSCTL_ACT);
127 bits = ~act & clk->bits;
128 if (bits != 0) {
129 sysctl_w32(clk->module, bits, SYSCTL_CLKEN);
130 sysctl_w32(clk->module, bits, SYSCTL_ACT);
131 sysctl_wait(clk, bits, SYSCTL_ACTS);
132 }
133 sysctl_w32(clk->module, act & clk->bits, SYSCTL_RBT);
134 sysctl_wait(clk, clk->bits, SYSCTL_ACTS);
135}
136
137/* enable the ONU core */
138static void falcon_gpe_enable(void)
139{
140 unsigned int freq;
141 unsigned int status;
142
143 /* if if the clock is already enabled */
144 status = sysctl_r32(SYSCTL_SYS1, SYS1_INFRAC);
145 if (status & (1 << (GPPC_OFFSET + 1)))
146 return;
147
148 if (status_r32(STATUS_CONFIG) == 0)
149 freq = 1; /* use 625MHz on unfused chip */
150 else
151 freq = (status_r32(STATUS_CONFIG) &
152 GPEFREQ_MASK) >>
153 GPEFREQ_OFFSET;
154
155 /* apply new frequency */
156 sysctl_w32_mask(SYSCTL_SYS1, 7 << (GPPC_OFFSET + 1),
157 freq << (GPPC_OFFSET + 2) , SYS1_INFRAC);
158 udelay(1);
159
160 /* enable new frequency */
161 sysctl_w32_mask(SYSCTL_SYS1, 0, 1 << (GPPC_OFFSET + 1), SYS1_INFRAC);
162 udelay(1);
163}
164
165static inline void clkdev_add_sys(const char *dev, unsigned int module,
166 unsigned int bits)
167{
168 struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
169
170 clk->cl.dev_id = dev;
171 clk->cl.con_id = NULL;
172 clk->cl.clk = clk;
173 clk->module = module;
174 clk->activate = sysctl_activate;
175 clk->deactivate = sysctl_deactivate;
176 clk->enable = sysctl_clken;
177 clk->disable = sysctl_clkdis;
178 clk->reboot = sysctl_reboot;
179 clkdev_add(&clk->cl);
180}
181
182void __init ltq_soc_init(void)
183{
184 struct device_node *np_status =
185 of_find_compatible_node(NULL, NULL, "lantiq,status-falcon");
186 struct device_node *np_ebu =
187 of_find_compatible_node(NULL, NULL, "lantiq,ebu-falcon");
188 struct device_node *np_sys1 =
189 of_find_compatible_node(NULL, NULL, "lantiq,sys1-falcon");
190 struct device_node *np_syseth =
191 of_find_compatible_node(NULL, NULL, "lantiq,syseth-falcon");
192 struct device_node *np_sysgpe =
193 of_find_compatible_node(NULL, NULL, "lantiq,sysgpe-falcon");
194 struct resource res_status, res_ebu, res_sys[3];
195 int i;
196
197 /* check if all the core register ranges are available */
198 if (!np_status || !np_ebu || !np_sys1 || !np_syseth || !np_sysgpe)
199 panic("Failed to load core nodes from devicetree");
200
201 if (of_address_to_resource(np_status, 0, &res_status) ||
202 of_address_to_resource(np_ebu, 0, &res_ebu) ||
203 of_address_to_resource(np_sys1, 0, &res_sys[0]) ||
204 of_address_to_resource(np_syseth, 0, &res_sys[1]) ||
205 of_address_to_resource(np_sysgpe, 0, &res_sys[2]))
206 panic("Failed to get core resources");
207
208 if ((request_mem_region(res_status.start, resource_size(&res_status),
209 res_status.name) < 0) ||
210 (request_mem_region(res_ebu.start, resource_size(&res_ebu),
211 res_ebu.name) < 0) ||
212 (request_mem_region(res_sys[0].start,
213 resource_size(&res_sys[0]),
214 res_sys[0].name) < 0) ||
215 (request_mem_region(res_sys[1].start,
216 resource_size(&res_sys[1]),
217 res_sys[1].name) < 0) ||
218 (request_mem_region(res_sys[2].start,
219 resource_size(&res_sys[2]),
220 res_sys[2].name) < 0))
221 pr_err("Failed to request core reources");
222
223 status_membase = ioremap_nocache(res_status.start,
224 resource_size(&res_status));
225 ltq_ebu_membase = ioremap_nocache(res_ebu.start,
226 resource_size(&res_ebu));
227
228 if (!status_membase || !ltq_ebu_membase)
229 panic("Failed to remap core resources");
230
231 for (i = 0; i < 3; i++) {
232 sysctl_membase[i] = ioremap_nocache(res_sys[i].start,
233 resource_size(&res_sys[i]));
234 if (!sysctl_membase[i])
235 panic("Failed to remap sysctrl resources");
236 }
237 ltq_sys1_membase = sysctl_membase[0];
238
239 falcon_gpe_enable();
240
241 /* get our 3 static rates for cpu, fpi and io clocks */
242 if (ltq_sys1_r32(SYS1_CPU0CC) & CPU0CC_CPUDIV)
243 clkdev_add_static(CLOCK_200M, CLOCK_100M, CLOCK_200M);
244 else
245 clkdev_add_static(CLOCK_400M, CLOCK_100M, CLOCK_200M);
246
247 /* add our clock domains */
248 clkdev_add_sys("1d810000.gpio", SYSCTL_SYSETH, ACTS_P0);
249 clkdev_add_sys("1d810100.gpio", SYSCTL_SYSETH, ACTS_P2);
250 clkdev_add_sys("1e800100.gpio", SYSCTL_SYS1, ACTS_P1);
251 clkdev_add_sys("1e800200.gpio", SYSCTL_SYS1, ACTS_P3);
252 clkdev_add_sys("1e800300.gpio", SYSCTL_SYS1, ACTS_P4);
253 clkdev_add_sys("1db01000.pad", SYSCTL_SYSETH, ACTS_PADCTRL0);
254 clkdev_add_sys("1db02000.pad", SYSCTL_SYSETH, ACTS_PADCTRL2);
255 clkdev_add_sys("1e800400.pad", SYSCTL_SYS1, ACTS_PADCTRL1);
256 clkdev_add_sys("1e800500.pad", SYSCTL_SYS1, ACTS_PADCTRL3);
257 clkdev_add_sys("1e800600.pad", SYSCTL_SYS1, ACTS_PADCTRL4);
258 clkdev_add_sys("1e100C00.serial", SYSCTL_SYS1, ACTS_ASC1_ACT);
259 clkdev_add_sys("1e200000.i2c", SYSCTL_SYS1, ACTS_I2C_ACT);
260}
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index d673731c538a..57c1a4e51408 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -9,6 +9,11 @@
9 9
10#include <linux/interrupt.h> 10#include <linux/interrupt.h>
11#include <linux/ioport.h> 11#include <linux/ioport.h>
12#include <linux/sched.h>
13#include <linux/irqdomain.h>
14#include <linux/of_platform.h>
15#include <linux/of_address.h>
16#include <linux/of_irq.h>
12 17
13#include <asm/bootinfo.h> 18#include <asm/bootinfo.h>
14#include <asm/irq_cpu.h> 19#include <asm/irq_cpu.h>
@@ -16,7 +21,7 @@
16#include <lantiq_soc.h> 21#include <lantiq_soc.h>
17#include <irq.h> 22#include <irq.h>
18 23
19/* register definitions */ 24/* register definitions - internal irqs */
20#define LTQ_ICU_IM0_ISR 0x0000 25#define LTQ_ICU_IM0_ISR 0x0000
21#define LTQ_ICU_IM0_IER 0x0008 26#define LTQ_ICU_IM0_IER 0x0008
22#define LTQ_ICU_IM0_IOSR 0x0010 27#define LTQ_ICU_IM0_IOSR 0x0010
@@ -25,6 +30,7 @@
25#define LTQ_ICU_IM1_ISR 0x0028 30#define LTQ_ICU_IM1_ISR 0x0028
26#define LTQ_ICU_OFFSET (LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR) 31#define LTQ_ICU_OFFSET (LTQ_ICU_IM1_ISR - LTQ_ICU_IM0_ISR)
27 32
33/* register definitions - external irqs */
28#define LTQ_EIU_EXIN_C 0x0000 34#define LTQ_EIU_EXIN_C 0x0000
29#define LTQ_EIU_EXIN_INIC 0x0004 35#define LTQ_EIU_EXIN_INIC 0x0004
30#define LTQ_EIU_EXIN_INEN 0x000C 36#define LTQ_EIU_EXIN_INEN 0x000C
@@ -37,10 +43,14 @@
37#define LTQ_EIU_IR4 (INT_NUM_IM1_IRL0 + 1) 43#define LTQ_EIU_IR4 (INT_NUM_IM1_IRL0 + 1)
38#define LTQ_EIU_IR5 (INT_NUM_IM1_IRL0 + 2) 44#define LTQ_EIU_IR5 (INT_NUM_IM1_IRL0 + 2)
39#define LTQ_EIU_IR6 (INT_NUM_IM2_IRL0 + 30) 45#define LTQ_EIU_IR6 (INT_NUM_IM2_IRL0 + 30)
40 46#define XWAY_EXIN_COUNT 3
41#define MAX_EIU 6 47#define MAX_EIU 6
42 48
43/* irqs generated by device attached to the EBU need to be acked in 49/* the performance counter */
50#define LTQ_PERF_IRQ (INT_NUM_IM4_IRL0 + 31)
51
52/*
53 * irqs generated by devices attached to the EBU need to be acked in
44 * a special manner 54 * a special manner
45 */ 55 */
46#define LTQ_ICU_EBU_IRQ 22 56#define LTQ_ICU_EBU_IRQ 22
@@ -51,6 +61,17 @@
51#define ltq_eiu_w32(x, y) ltq_w32((x), ltq_eiu_membase + (y)) 61#define ltq_eiu_w32(x, y) ltq_w32((x), ltq_eiu_membase + (y))
52#define ltq_eiu_r32(x) ltq_r32(ltq_eiu_membase + (x)) 62#define ltq_eiu_r32(x) ltq_r32(ltq_eiu_membase + (x))
53 63
64/* our 2 ipi interrupts for VSMP */
65#define MIPS_CPU_IPI_RESCHED_IRQ 0
66#define MIPS_CPU_IPI_CALL_IRQ 1
67
68/* we have a cascade of 8 irqs */
69#define MIPS_CPU_IRQ_CASCADE 8
70
71#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
72int gic_present;
73#endif
74
54static unsigned short ltq_eiu_irq[MAX_EIU] = { 75static unsigned short ltq_eiu_irq[MAX_EIU] = {
55 LTQ_EIU_IR0, 76 LTQ_EIU_IR0,
56 LTQ_EIU_IR1, 77 LTQ_EIU_IR1,
@@ -60,64 +81,51 @@ static unsigned short ltq_eiu_irq[MAX_EIU] = {
60 LTQ_EIU_IR5, 81 LTQ_EIU_IR5,
61}; 82};
62 83
63static struct resource ltq_icu_resource = { 84static int exin_avail;
64 .name = "icu",
65 .start = LTQ_ICU_BASE_ADDR,
66 .end = LTQ_ICU_BASE_ADDR + LTQ_ICU_SIZE - 1,
67 .flags = IORESOURCE_MEM,
68};
69
70static struct resource ltq_eiu_resource = {
71 .name = "eiu",
72 .start = LTQ_EIU_BASE_ADDR,
73 .end = LTQ_EIU_BASE_ADDR + LTQ_ICU_SIZE - 1,
74 .flags = IORESOURCE_MEM,
75};
76
77static void __iomem *ltq_icu_membase; 85static void __iomem *ltq_icu_membase;
78static void __iomem *ltq_eiu_membase; 86static void __iomem *ltq_eiu_membase;
79 87
80void ltq_disable_irq(struct irq_data *d) 88void ltq_disable_irq(struct irq_data *d)
81{ 89{
82 u32 ier = LTQ_ICU_IM0_IER; 90 u32 ier = LTQ_ICU_IM0_IER;
83 int irq_nr = d->irq - INT_NUM_IRQ0; 91 int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
84 92
85 ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); 93 ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
86 irq_nr %= INT_NUM_IM_OFFSET; 94 offset %= INT_NUM_IM_OFFSET;
87 ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier); 95 ltq_icu_w32(ltq_icu_r32(ier) & ~BIT(offset), ier);
88} 96}
89 97
90void ltq_mask_and_ack_irq(struct irq_data *d) 98void ltq_mask_and_ack_irq(struct irq_data *d)
91{ 99{
92 u32 ier = LTQ_ICU_IM0_IER; 100 u32 ier = LTQ_ICU_IM0_IER;
93 u32 isr = LTQ_ICU_IM0_ISR; 101 u32 isr = LTQ_ICU_IM0_ISR;
94 int irq_nr = d->irq - INT_NUM_IRQ0; 102 int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
95 103
96 ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); 104 ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
97 isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); 105 isr += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
98 irq_nr %= INT_NUM_IM_OFFSET; 106 offset %= INT_NUM_IM_OFFSET;
99 ltq_icu_w32(ltq_icu_r32(ier) & ~(1 << irq_nr), ier); 107 ltq_icu_w32(ltq_icu_r32(ier) & ~BIT(offset), ier);
100 ltq_icu_w32((1 << irq_nr), isr); 108 ltq_icu_w32(BIT(offset), isr);
101} 109}
102 110
103static void ltq_ack_irq(struct irq_data *d) 111static void ltq_ack_irq(struct irq_data *d)
104{ 112{
105 u32 isr = LTQ_ICU_IM0_ISR; 113 u32 isr = LTQ_ICU_IM0_ISR;
106 int irq_nr = d->irq - INT_NUM_IRQ0; 114 int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
107 115
108 isr += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); 116 isr += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
109 irq_nr %= INT_NUM_IM_OFFSET; 117 offset %= INT_NUM_IM_OFFSET;
110 ltq_icu_w32((1 << irq_nr), isr); 118 ltq_icu_w32(BIT(offset), isr);
111} 119}
112 120
113void ltq_enable_irq(struct irq_data *d) 121void ltq_enable_irq(struct irq_data *d)
114{ 122{
115 u32 ier = LTQ_ICU_IM0_IER; 123 u32 ier = LTQ_ICU_IM0_IER;
116 int irq_nr = d->irq - INT_NUM_IRQ0; 124 int offset = d->hwirq - MIPS_CPU_IRQ_CASCADE;
117 125
118 ier += LTQ_ICU_OFFSET * (irq_nr / INT_NUM_IM_OFFSET); 126 ier += LTQ_ICU_OFFSET * (offset / INT_NUM_IM_OFFSET);
119 irq_nr %= INT_NUM_IM_OFFSET; 127 offset %= INT_NUM_IM_OFFSET;
120 ltq_icu_w32(ltq_icu_r32(ier) | (1 << irq_nr), ier); 128 ltq_icu_w32(ltq_icu_r32(ier) | BIT(offset), ier);
121} 129}
122 130
123static unsigned int ltq_startup_eiu_irq(struct irq_data *d) 131static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
@@ -126,15 +134,15 @@ static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
126 134
127 ltq_enable_irq(d); 135 ltq_enable_irq(d);
128 for (i = 0; i < MAX_EIU; i++) { 136 for (i = 0; i < MAX_EIU; i++) {
129 if (d->irq == ltq_eiu_irq[i]) { 137 if (d->hwirq == ltq_eiu_irq[i]) {
130 /* low level - we should really handle set_type */ 138 /* low level - we should really handle set_type */
131 ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) | 139 ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_C) |
132 (0x6 << (i * 4)), LTQ_EIU_EXIN_C); 140 (0x6 << (i * 4)), LTQ_EIU_EXIN_C);
133 /* clear all pending */ 141 /* clear all pending */
134 ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~(1 << i), 142 ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INIC) & ~BIT(i),
135 LTQ_EIU_EXIN_INIC); 143 LTQ_EIU_EXIN_INIC);
136 /* enable */ 144 /* enable */
137 ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | (1 << i), 145 ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) | BIT(i),
138 LTQ_EIU_EXIN_INEN); 146 LTQ_EIU_EXIN_INEN);
139 break; 147 break;
140 } 148 }
@@ -149,9 +157,9 @@ static void ltq_shutdown_eiu_irq(struct irq_data *d)
149 157
150 ltq_disable_irq(d); 158 ltq_disable_irq(d);
151 for (i = 0; i < MAX_EIU; i++) { 159 for (i = 0; i < MAX_EIU; i++) {
152 if (d->irq == ltq_eiu_irq[i]) { 160 if (d->hwirq == ltq_eiu_irq[i]) {
153 /* disable */ 161 /* disable */
154 ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~(1 << i), 162 ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
155 LTQ_EIU_EXIN_INEN); 163 LTQ_EIU_EXIN_INEN);
156 break; 164 break;
157 } 165 }
@@ -188,14 +196,15 @@ static void ltq_hw_irqdispatch(int module)
188 if (irq == 0) 196 if (irq == 0)
189 return; 197 return;
190 198
191 /* silicon bug causes only the msb set to 1 to be valid. all 199 /*
200 * silicon bug causes only the msb set to 1 to be valid. all
192 * other bits might be bogus 201 * other bits might be bogus
193 */ 202 */
194 irq = __fls(irq); 203 irq = __fls(irq);
195 do_IRQ((int)irq + INT_NUM_IM0_IRL0 + (INT_NUM_IM_OFFSET * module)); 204 do_IRQ((int)irq + MIPS_CPU_IRQ_CASCADE + (INT_NUM_IM_OFFSET * module));
196 205
197 /* if this is a EBU irq, we need to ack it or get a deadlock */ 206 /* if this is a EBU irq, we need to ack it or get a deadlock */
198 if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0)) 207 if ((irq == LTQ_ICU_EBU_IRQ) && (module == 0) && LTQ_EBU_PCC_ISTAT)
199 ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10, 208 ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_ISTAT) | 0x10,
200 LTQ_EBU_PCC_ISTAT); 209 LTQ_EBU_PCC_ISTAT);
201} 210}
@@ -216,6 +225,47 @@ static void ltq_hw5_irqdispatch(void)
216 do_IRQ(MIPS_CPU_TIMER_IRQ); 225 do_IRQ(MIPS_CPU_TIMER_IRQ);
217} 226}
218 227
228#ifdef CONFIG_MIPS_MT_SMP
229void __init arch_init_ipiirq(int irq, struct irqaction *action)
230{
231 setup_irq(irq, action);
232 irq_set_handler(irq, handle_percpu_irq);
233}
234
235static void ltq_sw0_irqdispatch(void)
236{
237 do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ);
238}
239
240static void ltq_sw1_irqdispatch(void)
241{
242 do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ);
243}
244static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
245{
246 scheduler_ipi();
247 return IRQ_HANDLED;
248}
249
250static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
251{
252 smp_call_function_interrupt();
253 return IRQ_HANDLED;
254}
255
256static struct irqaction irq_resched = {
257 .handler = ipi_resched_interrupt,
258 .flags = IRQF_PERCPU,
259 .name = "IPI_resched"
260};
261
262static struct irqaction irq_call = {
263 .handler = ipi_call_interrupt,
264 .flags = IRQF_PERCPU,
265 .name = "IPI_call"
266};
267#endif
268
219asmlinkage void plat_irq_dispatch(void) 269asmlinkage void plat_irq_dispatch(void)
220{ 270{
221 unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM; 271 unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
@@ -238,45 +288,75 @@ out:
238 return; 288 return;
239} 289}
240 290
291static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
292{
293 struct irq_chip *chip = &ltq_irq_type;
294 int i;
295
296 for (i = 0; i < exin_avail; i++)
297 if (hw == ltq_eiu_irq[i])
298 chip = &ltq_eiu_type;
299
300 irq_set_chip_and_handler(hw, chip, handle_level_irq);
301
302 return 0;
303}
304
305static const struct irq_domain_ops irq_domain_ops = {
306 .xlate = irq_domain_xlate_onetwocell,
307 .map = icu_map,
308};
309
241static struct irqaction cascade = { 310static struct irqaction cascade = {
242 .handler = no_action, 311 .handler = no_action,
243 .name = "cascade", 312 .name = "cascade",
244}; 313};
245 314
246void __init arch_init_irq(void) 315int __init icu_of_init(struct device_node *node, struct device_node *parent)
247{ 316{
317 struct device_node *eiu_node;
318 struct resource res;
248 int i; 319 int i;
249 320
250 if (insert_resource(&iomem_resource, &ltq_icu_resource) < 0) 321 if (of_address_to_resource(node, 0, &res))
251 panic("Failed to insert icu memory"); 322 panic("Failed to get icu memory range");
252 323
253 if (request_mem_region(ltq_icu_resource.start, 324 if (request_mem_region(res.start, resource_size(&res), res.name) < 0)
254 resource_size(&ltq_icu_resource), "icu") < 0) 325 pr_err("Failed to request icu memory");
255 panic("Failed to request icu memory");
256 326
257 ltq_icu_membase = ioremap_nocache(ltq_icu_resource.start, 327 ltq_icu_membase = ioremap_nocache(res.start, resource_size(&res));
258 resource_size(&ltq_icu_resource));
259 if (!ltq_icu_membase) 328 if (!ltq_icu_membase)
260 panic("Failed to remap icu memory"); 329 panic("Failed to remap icu memory");
261 330
262 if (insert_resource(&iomem_resource, &ltq_eiu_resource) < 0) 331 /* the external interrupts are optional and xway only */
263 panic("Failed to insert eiu memory"); 332 eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu");
264 333 if (eiu_node && of_address_to_resource(eiu_node, 0, &res)) {
265 if (request_mem_region(ltq_eiu_resource.start, 334 /* find out how many external irq sources we have */
266 resource_size(&ltq_eiu_resource), "eiu") < 0) 335 const __be32 *count = of_get_property(node,
267 panic("Failed to request eiu memory"); 336 "lantiq,count", NULL);
268 337
269 ltq_eiu_membase = ioremap_nocache(ltq_eiu_resource.start, 338 if (count)
270 resource_size(&ltq_eiu_resource)); 339 exin_avail = *count;
271 if (!ltq_eiu_membase) 340 if (exin_avail > MAX_EIU)
272 panic("Failed to remap eiu memory"); 341 exin_avail = MAX_EIU;
342
343 if (request_mem_region(res.start, resource_size(&res),
344 res.name) < 0)
345 pr_err("Failed to request eiu memory");
346
347 ltq_eiu_membase = ioremap_nocache(res.start,
348 resource_size(&res));
349 if (!ltq_eiu_membase)
350 panic("Failed to remap eiu memory");
351 }
273 352
274 /* make sure all irqs are turned off by default */ 353 /* turn off all irqs by default */
275 for (i = 0; i < 5; i++) 354 for (i = 0; i < 5; i++) {
355 /* make sure all irqs are turned off by default */
276 ltq_icu_w32(0, LTQ_ICU_IM0_IER + (i * LTQ_ICU_OFFSET)); 356 ltq_icu_w32(0, LTQ_ICU_IM0_IER + (i * LTQ_ICU_OFFSET));
277 357 /* clear all possibly pending interrupts */
278 /* clear all possibly pending interrupts */ 358 ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET));
279 ltq_icu_w32(~0, LTQ_ICU_IM0_ISR + (i * LTQ_ICU_OFFSET)); 359 }
280 360
281 mips_cpu_irq_init(); 361 mips_cpu_irq_init();
282 362
@@ -293,20 +373,19 @@ void __init arch_init_irq(void)
293 set_vi_handler(7, ltq_hw5_irqdispatch); 373 set_vi_handler(7, ltq_hw5_irqdispatch);
294 } 374 }
295 375
296 for (i = INT_NUM_IRQ0; 376 irq_domain_add_linear(node, 6 * INT_NUM_IM_OFFSET,
297 i <= (INT_NUM_IRQ0 + (5 * INT_NUM_IM_OFFSET)); i++) 377 &irq_domain_ops, 0);
298 if ((i == LTQ_EIU_IR0) || (i == LTQ_EIU_IR1) || 378
299 (i == LTQ_EIU_IR2)) 379#if defined(CONFIG_MIPS_MT_SMP)
300 irq_set_chip_and_handler(i, &ltq_eiu_type, 380 if (cpu_has_vint) {
301 handle_level_irq); 381 pr_info("Setting up IPI vectored interrupts\n");
302 /* EIU3-5 only exist on ar9 and vr9 */ 382 set_vi_handler(MIPS_CPU_IPI_RESCHED_IRQ, ltq_sw0_irqdispatch);
303 else if (((i == LTQ_EIU_IR3) || (i == LTQ_EIU_IR4) || 383 set_vi_handler(MIPS_CPU_IPI_CALL_IRQ, ltq_sw1_irqdispatch);
304 (i == LTQ_EIU_IR5)) && (ltq_is_ar9() || ltq_is_vr9())) 384 }
305 irq_set_chip_and_handler(i, &ltq_eiu_type, 385 arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_RESCHED_IRQ,
306 handle_level_irq); 386 &irq_resched);
307 else 387 arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call);
308 irq_set_chip_and_handler(i, &ltq_irq_type, 388#endif
309 handle_level_irq);
310 389
311#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) 390#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC)
312 set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | 391 set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
@@ -315,9 +394,23 @@ void __init arch_init_irq(void)
315 set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 | 394 set_c0_status(IE_SW0 | IE_SW1 | IE_IRQ0 | IE_IRQ1 |
316 IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5); 395 IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
317#endif 396#endif
397
398 /* tell oprofile which irq to use */
399 cp0_perfcount_irq = LTQ_PERF_IRQ;
400 return 0;
318} 401}
319 402
320unsigned int __cpuinit get_c0_compare_int(void) 403unsigned int __cpuinit get_c0_compare_int(void)
321{ 404{
322 return CP0_LEGACY_COMPARE_IRQ; 405 return CP0_LEGACY_COMPARE_IRQ;
323} 406}
407
408static struct of_device_id __initdata of_irq_ids[] = {
409 { .compatible = "lantiq,icu", .data = icu_of_init },
410 {},
411};
412
413void __init arch_init_irq(void)
414{
415 of_irq_init(of_irq_ids);
416}
diff --git a/arch/mips/lantiq/machtypes.h b/arch/mips/lantiq/machtypes.h
deleted file mode 100644
index 7e01b8c484eb..000000000000
--- a/arch/mips/lantiq/machtypes.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#ifndef _LANTIQ_MACH_H__
10#define _LANTIQ_MACH_H__
11
12#include <asm/mips_machine.h>
13
14enum lantiq_mach_type {
15 LTQ_MACH_GENERIC = 0,
16 LTQ_MACH_EASY50712, /* Danube evaluation board */
17 LTQ_MACH_EASY50601, /* Amazon SE evaluation board */
18};
19
20#endif
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index e34fcfd0d5ca..d185e8477fdf 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -8,6 +8,7 @@
8 8
9#include <linux/export.h> 9#include <linux/export.h>
10#include <linux/clk.h> 10#include <linux/clk.h>
11#include <linux/of_platform.h>
11#include <asm/bootinfo.h> 12#include <asm/bootinfo.h>
12#include <asm/time.h> 13#include <asm/time.h>
13 14
@@ -16,19 +17,15 @@
16#include "prom.h" 17#include "prom.h"
17#include "clk.h" 18#include "clk.h"
18 19
19static struct ltq_soc_info soc_info; 20/* access to the ebu needs to be locked between different drivers */
20 21DEFINE_SPINLOCK(ebu_lock);
21unsigned int ltq_get_cpu_ver(void) 22EXPORT_SYMBOL_GPL(ebu_lock);
22{
23 return soc_info.rev;
24}
25EXPORT_SYMBOL(ltq_get_cpu_ver);
26 23
27unsigned int ltq_get_soc_type(void) 24/*
28{ 25 * this struct is filled by the soc specific detection code and holds
29 return soc_info.type; 26 * information about the specific soc type, revision and name
30} 27 */
31EXPORT_SYMBOL(ltq_get_soc_type); 28static struct ltq_soc_info soc_info;
32 29
33const char *get_system_type(void) 30const char *get_system_type(void)
34{ 31{
@@ -45,27 +42,62 @@ static void __init prom_init_cmdline(void)
45 char **argv = (char **) KSEG1ADDR(fw_arg1); 42 char **argv = (char **) KSEG1ADDR(fw_arg1);
46 int i; 43 int i;
47 44
45 arcs_cmdline[0] = '\0';
46
48 for (i = 0; i < argc; i++) { 47 for (i = 0; i < argc; i++) {
49 char *p = (char *) KSEG1ADDR(argv[i]); 48 char *p = (char *) KSEG1ADDR(argv[i]);
50 49
51 if (p && *p) { 50 if (CPHYSADDR(p) && *p) {
52 strlcat(arcs_cmdline, p, sizeof(arcs_cmdline)); 51 strlcat(arcs_cmdline, p, sizeof(arcs_cmdline));
53 strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline)); 52 strlcat(arcs_cmdline, " ", sizeof(arcs_cmdline));
54 } 53 }
55 } 54 }
56} 55}
57 56
58void __init prom_init(void) 57void __init plat_mem_setup(void)
59{ 58{
60 struct clk *clk; 59 ioport_resource.start = IOPORT_RESOURCE_START;
60 ioport_resource.end = IOPORT_RESOURCE_END;
61 iomem_resource.start = IOMEM_RESOURCE_START;
62 iomem_resource.end = IOMEM_RESOURCE_END;
63
64 set_io_port_base((unsigned long) KSEG1);
61 65
66 /*
67 * Load the builtin devicetree. This causes the chosen node to be
68 * parsed resulting in our memory appearing
69 */
70 __dt_setup_arch(&__dtb_start);
71}
72
73void __init prom_init(void)
74{
75 /* call the soc specific detetcion code and get it to fill soc_info */
62 ltq_soc_detect(&soc_info); 76 ltq_soc_detect(&soc_info);
63 clk_init(); 77 snprintf(soc_info.sys_type, LTQ_SYS_TYPE_LEN - 1, "%s rev %s",
64 clk = clk_get(0, "cpu"); 78 soc_info.name, soc_info.rev_type);
65 snprintf(soc_info.sys_type, LTQ_SYS_TYPE_LEN - 1, "%s rev1.%d",
66 soc_info.name, soc_info.rev);
67 clk_put(clk);
68 soc_info.sys_type[LTQ_SYS_TYPE_LEN - 1] = '\0'; 79 soc_info.sys_type[LTQ_SYS_TYPE_LEN - 1] = '\0';
69 pr_info("SoC: %s\n", soc_info.sys_type); 80 pr_info("SoC: %s\n", soc_info.sys_type);
70 prom_init_cmdline(); 81 prom_init_cmdline();
82
83#if defined(CONFIG_MIPS_MT_SMP)
84 if (register_vsmp_smp_ops())
85 panic("failed to register_vsmp_smp_ops()");
86#endif
71} 87}
88
89int __init plat_of_setup(void)
90{
91 static struct of_device_id of_ids[3];
92
93 if (!of_have_populated_dt())
94 panic("device tree not present");
95
96 strncpy(of_ids[0].compatible, soc_info.compatible,
97 sizeof(of_ids[0].compatible));
98 strncpy(of_ids[1].compatible, "simple-bus",
99 sizeof(of_ids[1].compatible));
100 return of_platform_bus_probe(NULL, of_ids, NULL);
101}
102
103arch_initcall(plat_of_setup);
diff --git a/arch/mips/lantiq/prom.h b/arch/mips/lantiq/prom.h
index b4229d94280f..a3fa1a2bfaae 100644
--- a/arch/mips/lantiq/prom.h
+++ b/arch/mips/lantiq/prom.h
@@ -10,16 +10,22 @@
10#define _LTQ_PROM_H__ 10#define _LTQ_PROM_H__
11 11
12#define LTQ_SYS_TYPE_LEN 0x100 12#define LTQ_SYS_TYPE_LEN 0x100
13#define LTQ_SYS_REV_LEN 0x10
13 14
14struct ltq_soc_info { 15struct ltq_soc_info {
15 unsigned char *name; 16 unsigned char *name;
16 unsigned int rev; 17 unsigned int rev;
18 unsigned char rev_type[LTQ_SYS_REV_LEN];
19 unsigned int srev;
17 unsigned int partnum; 20 unsigned int partnum;
18 unsigned int type; 21 unsigned int type;
19 unsigned char sys_type[LTQ_SYS_TYPE_LEN]; 22 unsigned char sys_type[LTQ_SYS_TYPE_LEN];
23 unsigned char *compatible;
20}; 24};
21 25
22extern void ltq_soc_detect(struct ltq_soc_info *i); 26extern void ltq_soc_detect(struct ltq_soc_info *i);
23extern void ltq_soc_setup(void); 27extern void ltq_soc_init(void);
28
29extern struct boot_param_header __dtb_start;
24 30
25#endif 31#endif
diff --git a/arch/mips/lantiq/setup.c b/arch/mips/lantiq/setup.c
deleted file mode 100644
index 1ff6c9d6cb93..000000000000
--- a/arch/mips/lantiq/setup.c
+++ /dev/null
@@ -1,66 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/kernel.h>
10#include <linux/export.h>
11#include <linux/io.h>
12#include <linux/ioport.h>
13#include <asm/bootinfo.h>
14
15#include <lantiq_soc.h>
16
17#include "machtypes.h"
18#include "devices.h"
19#include "prom.h"
20
21void __init plat_mem_setup(void)
22{
23 /* assume 16M as default incase uboot fails to pass proper ramsize */
24 unsigned long memsize = 16;
25 char **envp = (char **) KSEG1ADDR(fw_arg2);
26
27 ioport_resource.start = IOPORT_RESOURCE_START;
28 ioport_resource.end = IOPORT_RESOURCE_END;
29 iomem_resource.start = IOMEM_RESOURCE_START;
30 iomem_resource.end = IOMEM_RESOURCE_END;
31
32 set_io_port_base((unsigned long) KSEG1);
33
34 while (*envp) {
35 char *e = (char *)KSEG1ADDR(*envp);
36 if (!strncmp(e, "memsize=", 8)) {
37 e += 8;
38 if (strict_strtoul(e, 0, &memsize))
39 pr_warn("bad memsize specified\n");
40 }
41 envp++;
42 }
43 memsize *= 1024 * 1024;
44 add_memory_region(0x00000000, memsize, BOOT_MEM_RAM);
45}
46
47static int __init
48lantiq_setup(void)
49{
50 ltq_soc_setup();
51 mips_machine_setup();
52 return 0;
53}
54
55arch_initcall(lantiq_setup);
56
57static void __init
58lantiq_generic_init(void)
59{
60 /* Nothing to do */
61}
62
63MIPS_MACHINE(LTQ_MACH_GENERIC,
64 "Generic",
65 "Generic Lantiq based board",
66 lantiq_generic_init);
diff --git a/arch/mips/lantiq/xway/Kconfig b/arch/mips/lantiq/xway/Kconfig
deleted file mode 100644
index 2b857de36620..000000000000
--- a/arch/mips/lantiq/xway/Kconfig
+++ /dev/null
@@ -1,23 +0,0 @@
1if SOC_XWAY
2
3menu "MIPS Machine"
4
5config LANTIQ_MACH_EASY50712
6 bool "Easy50712 - Danube"
7 default y
8
9endmenu
10
11endif
12
13if SOC_AMAZON_SE
14
15menu "MIPS Machine"
16
17config LANTIQ_MACH_EASY50601
18 bool "Easy50601 - Amazon SE"
19 default y
20
21endmenu
22
23endif
diff --git a/arch/mips/lantiq/xway/Makefile b/arch/mips/lantiq/xway/Makefile
index c517f2e77563..dc3194f6ee42 100644
--- a/arch/mips/lantiq/xway/Makefile
+++ b/arch/mips/lantiq/xway/Makefile
@@ -1,7 +1 @@
1obj-y := pmu.o ebu.o reset.o gpio.o gpio_stp.o gpio_ebu.o devices.o dma.o obj-y := prom.o sysctrl.o clk.o reset.o gpio.o dma.o
2
3obj-$(CONFIG_SOC_XWAY) += clk-xway.o prom-xway.o setup-xway.o
4obj-$(CONFIG_SOC_AMAZON_SE) += clk-ase.o prom-ase.o setup-ase.o
5
6obj-$(CONFIG_LANTIQ_MACH_EASY50712) += mach-easy50712.o
7obj-$(CONFIG_LANTIQ_MACH_EASY50601) += mach-easy50601.o
diff --git a/arch/mips/lantiq/xway/clk-ase.c b/arch/mips/lantiq/xway/clk-ase.c
deleted file mode 100644
index 652258309c9c..000000000000
--- a/arch/mips/lantiq/xway/clk-ase.c
+++ /dev/null
@@ -1,48 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/io.h>
10#include <linux/export.h>
11#include <linux/init.h>
12#include <linux/clk.h>
13
14#include <asm/time.h>
15#include <asm/irq.h>
16#include <asm/div64.h>
17
18#include <lantiq_soc.h>
19
20/* cgu registers */
21#define LTQ_CGU_SYS 0x0010
22
23unsigned int ltq_get_io_region_clock(void)
24{
25 return CLOCK_133M;
26}
27EXPORT_SYMBOL(ltq_get_io_region_clock);
28
29unsigned int ltq_get_fpi_bus_clock(int fpi)
30{
31 return CLOCK_133M;
32}
33EXPORT_SYMBOL(ltq_get_fpi_bus_clock);
34
35unsigned int ltq_get_cpu_hz(void)
36{
37 if (ltq_cgu_r32(LTQ_CGU_SYS) & (1 << 5))
38 return CLOCK_266M;
39 else
40 return CLOCK_133M;
41}
42EXPORT_SYMBOL(ltq_get_cpu_hz);
43
44unsigned int ltq_get_fpi_hz(void)
45{
46 return CLOCK_133M;
47}
48EXPORT_SYMBOL(ltq_get_fpi_hz);
diff --git a/arch/mips/lantiq/xway/clk-xway.c b/arch/mips/lantiq/xway/clk-xway.c
deleted file mode 100644
index 696b1a3e0642..000000000000
--- a/arch/mips/lantiq/xway/clk-xway.c
+++ /dev/null
@@ -1,223 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/io.h>
10#include <linux/export.h>
11#include <linux/init.h>
12#include <linux/clk.h>
13
14#include <asm/time.h>
15#include <asm/irq.h>
16#include <asm/div64.h>
17
18#include <lantiq_soc.h>
19
20static unsigned int ltq_ram_clocks[] = {
21 CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M };
22#define DDR_HZ ltq_ram_clocks[ltq_cgu_r32(LTQ_CGU_SYS) & 0x3]
23
24#define BASIC_FREQUENCY_1 35328000
25#define BASIC_FREQUENCY_2 36000000
26#define BASIS_REQUENCY_USB 12000000
27
28#define GET_BITS(x, msb, lsb) \
29 (((x) & ((1 << ((msb) + 1)) - 1)) >> (lsb))
30
31#define LTQ_CGU_PLL0_CFG 0x0004
32#define LTQ_CGU_PLL1_CFG 0x0008
33#define LTQ_CGU_PLL2_CFG 0x000C
34#define LTQ_CGU_SYS 0x0010
35#define LTQ_CGU_UPDATE 0x0014
36#define LTQ_CGU_IF_CLK 0x0018
37#define LTQ_CGU_OSC_CON 0x001C
38#define LTQ_CGU_SMD 0x0020
39#define LTQ_CGU_CT1SR 0x0028
40#define LTQ_CGU_CT2SR 0x002C
41#define LTQ_CGU_PCMCR 0x0030
42#define LTQ_CGU_PCI_CR 0x0034
43#define LTQ_CGU_PD_PC 0x0038
44#define LTQ_CGU_FMR 0x003C
45
46#define CGU_PLL0_PHASE_DIVIDER_ENABLE \
47 (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 31))
48#define CGU_PLL0_BYPASS \
49 (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 30))
50#define CGU_PLL0_CFG_DSMSEL \
51 (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 28))
52#define CGU_PLL0_CFG_FRAC_EN \
53 (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & (1 << 27))
54#define CGU_PLL1_SRC \
55 (ltq_cgu_r32(LTQ_CGU_PLL1_CFG) & (1 << 31))
56#define CGU_PLL2_PHASE_DIVIDER_ENABLE \
57 (ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & (1 << 20))
58#define CGU_SYS_FPI_SEL (1 << 6)
59#define CGU_SYS_DDR_SEL 0x3
60#define CGU_PLL0_SRC (1 << 29)
61
62#define CGU_PLL0_CFG_PLLK GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 26, 17)
63#define CGU_PLL0_CFG_PLLN GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 12, 6)
64#define CGU_PLL0_CFG_PLLM GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL0_CFG), 5, 2)
65#define CGU_PLL2_SRC GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 18, 17)
66#define CGU_PLL2_CFG_INPUT_DIV GET_BITS(ltq_cgu_r32(LTQ_CGU_PLL2_CFG), 16, 13)
67
68static unsigned int ltq_get_pll0_fdiv(void);
69
70static inline unsigned int get_input_clock(int pll)
71{
72 switch (pll) {
73 case 0:
74 if (ltq_cgu_r32(LTQ_CGU_PLL0_CFG) & CGU_PLL0_SRC)
75 return BASIS_REQUENCY_USB;
76 else if (CGU_PLL0_PHASE_DIVIDER_ENABLE)
77 return BASIC_FREQUENCY_1;
78 else
79 return BASIC_FREQUENCY_2;
80 case 1:
81 if (CGU_PLL1_SRC)
82 return BASIS_REQUENCY_USB;
83 else if (CGU_PLL0_PHASE_DIVIDER_ENABLE)
84 return BASIC_FREQUENCY_1;
85 else
86 return BASIC_FREQUENCY_2;
87 case 2:
88 switch (CGU_PLL2_SRC) {
89 case 0:
90 return ltq_get_pll0_fdiv();
91 case 1:
92 return CGU_PLL2_PHASE_DIVIDER_ENABLE ?
93 BASIC_FREQUENCY_1 :
94 BASIC_FREQUENCY_2;
95 case 2:
96 return BASIS_REQUENCY_USB;
97 }
98 default:
99 return 0;
100 }
101}
102
103static inline unsigned int cal_dsm(int pll, unsigned int num, unsigned int den)
104{
105 u64 res, clock = get_input_clock(pll);
106
107 res = num * clock;
108 do_div(res, den);
109 return res;
110}
111
112static inline unsigned int mash_dsm(int pll, unsigned int M, unsigned int N,
113 unsigned int K)
114{
115 unsigned int num = ((N + 1) << 10) + K;
116 unsigned int den = (M + 1) << 10;
117
118 return cal_dsm(pll, num, den);
119}
120
121static inline unsigned int ssff_dsm_1(int pll, unsigned int M, unsigned int N,
122 unsigned int K)
123{
124 unsigned int num = ((N + 1) << 11) + K + 512;
125 unsigned int den = (M + 1) << 11;
126
127 return cal_dsm(pll, num, den);
128}
129
130static inline unsigned int ssff_dsm_2(int pll, unsigned int M, unsigned int N,
131 unsigned int K)
132{
133 unsigned int num = K >= 512 ?
134 ((N + 1) << 12) + K - 512 : ((N + 1) << 12) + K + 3584;
135 unsigned int den = (M + 1) << 12;
136
137 return cal_dsm(pll, num, den);
138}
139
140static inline unsigned int dsm(int pll, unsigned int M, unsigned int N,
141 unsigned int K, unsigned int dsmsel, unsigned int phase_div_en)
142{
143 if (!dsmsel)
144 return mash_dsm(pll, M, N, K);
145 else if (!phase_div_en)
146 return mash_dsm(pll, M, N, K);
147 else
148 return ssff_dsm_2(pll, M, N, K);
149}
150
151static inline unsigned int ltq_get_pll0_fosc(void)
152{
153 if (CGU_PLL0_BYPASS)
154 return get_input_clock(0);
155 else
156 return !CGU_PLL0_CFG_FRAC_EN
157 ? dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN, 0,
158 CGU_PLL0_CFG_DSMSEL,
159 CGU_PLL0_PHASE_DIVIDER_ENABLE)
160 : dsm(0, CGU_PLL0_CFG_PLLM, CGU_PLL0_CFG_PLLN,
161 CGU_PLL0_CFG_PLLK, CGU_PLL0_CFG_DSMSEL,
162 CGU_PLL0_PHASE_DIVIDER_ENABLE);
163}
164
165static unsigned int ltq_get_pll0_fdiv(void)
166{
167 unsigned int div = CGU_PLL2_CFG_INPUT_DIV + 1;
168
169 return (ltq_get_pll0_fosc() + (div >> 1)) / div;
170}
171
172unsigned int ltq_get_io_region_clock(void)
173{
174 unsigned int ret = ltq_get_pll0_fosc();
175
176 switch (ltq_cgu_r32(LTQ_CGU_PLL2_CFG) & CGU_SYS_DDR_SEL) {
177 default:
178 case 0:
179 return (ret + 1) / 2;
180 case 1:
181 return (ret * 2 + 2) / 5;
182 case 2:
183 return (ret + 1) / 3;
184 case 3:
185 return (ret + 2) / 4;
186 }
187}
188EXPORT_SYMBOL(ltq_get_io_region_clock);
189
190unsigned int ltq_get_fpi_bus_clock(int fpi)
191{
192 unsigned int ret = ltq_get_io_region_clock();
193
194 if ((fpi == 2) && (ltq_cgu_r32(LTQ_CGU_SYS) & CGU_SYS_FPI_SEL))
195 ret >>= 1;
196 return ret;
197}
198EXPORT_SYMBOL(ltq_get_fpi_bus_clock);
199
200unsigned int ltq_get_cpu_hz(void)
201{
202 switch (ltq_cgu_r32(LTQ_CGU_SYS) & 0xc) {
203 case 0:
204 return CLOCK_333M;
205 case 4:
206 return DDR_HZ;
207 case 8:
208 return DDR_HZ << 1;
209 default:
210 return DDR_HZ >> 1;
211 }
212}
213EXPORT_SYMBOL(ltq_get_cpu_hz);
214
215unsigned int ltq_get_fpi_hz(void)
216{
217 unsigned int ddr_clock = DDR_HZ;
218
219 if (ltq_cgu_r32(LTQ_CGU_SYS) & 0x40)
220 return ddr_clock >> 1;
221 return ddr_clock;
222}
223EXPORT_SYMBOL(ltq_get_fpi_hz);
diff --git a/arch/mips/lantiq/xway/clk.c b/arch/mips/lantiq/xway/clk.c
new file mode 100644
index 000000000000..9aa17f79a742
--- /dev/null
+++ b/arch/mips/lantiq/xway/clk.c
@@ -0,0 +1,151 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/io.h>
10#include <linux/export.h>
11#include <linux/init.h>
12#include <linux/clk.h>
13
14#include <asm/time.h>
15#include <asm/irq.h>
16#include <asm/div64.h>
17
18#include <lantiq_soc.h>
19
20#include "../clk.h"
21
22static unsigned int ram_clocks[] = {
23 CLOCK_167M, CLOCK_133M, CLOCK_111M, CLOCK_83M };
24#define DDR_HZ ram_clocks[ltq_cgu_r32(CGU_SYS) & 0x3]
25
26/* legacy xway clock */
27#define CGU_SYS 0x10
28
29/* vr9 clock */
30#define CGU_SYS_VR9 0x0c
31#define CGU_IF_CLK_VR9 0x24
32
33unsigned long ltq_danube_fpi_hz(void)
34{
35 unsigned long ddr_clock = DDR_HZ;
36
37 if (ltq_cgu_r32(CGU_SYS) & 0x40)
38 return ddr_clock >> 1;
39 return ddr_clock;
40}
41
42unsigned long ltq_danube_cpu_hz(void)
43{
44 switch (ltq_cgu_r32(CGU_SYS) & 0xc) {
45 case 0:
46 return CLOCK_333M;
47 case 4:
48 return DDR_HZ;
49 case 8:
50 return DDR_HZ << 1;
51 default:
52 return DDR_HZ >> 1;
53 }
54}
55
56unsigned long ltq_ar9_sys_hz(void)
57{
58 if (((ltq_cgu_r32(CGU_SYS) >> 3) & 0x3) == 0x2)
59 return CLOCK_393M;
60 return CLOCK_333M;
61}
62
63unsigned long ltq_ar9_fpi_hz(void)
64{
65 unsigned long sys = ltq_ar9_sys_hz();
66
67 if (ltq_cgu_r32(CGU_SYS) & BIT(0))
68 return sys;
69 return sys >> 1;
70}
71
72unsigned long ltq_ar9_cpu_hz(void)
73{
74 if (ltq_cgu_r32(CGU_SYS) & BIT(2))
75 return ltq_ar9_fpi_hz();
76 else
77 return ltq_ar9_sys_hz();
78}
79
80unsigned long ltq_vr9_cpu_hz(void)
81{
82 unsigned int cpu_sel;
83 unsigned long clk;
84
85 cpu_sel = (ltq_cgu_r32(CGU_SYS_VR9) >> 4) & 0xf;
86
87 switch (cpu_sel) {
88 case 0:
89 clk = CLOCK_600M;
90 break;
91 case 1:
92 clk = CLOCK_500M;
93 break;
94 case 2:
95 clk = CLOCK_393M;
96 break;
97 case 3:
98 clk = CLOCK_333M;
99 break;
100 case 5:
101 case 6:
102 clk = CLOCK_196_608M;
103 break;
104 case 7:
105 clk = CLOCK_167M;
106 break;
107 case 4:
108 case 8:
109 case 9:
110 clk = CLOCK_125M;
111 break;
112 default:
113 clk = 0;
114 break;
115 }
116
117 return clk;
118}
119
120unsigned long ltq_vr9_fpi_hz(void)
121{
122 unsigned int ocp_sel, cpu_clk;
123 unsigned long clk;
124
125 cpu_clk = ltq_vr9_cpu_hz();
126 ocp_sel = ltq_cgu_r32(CGU_SYS_VR9) & 0x3;
127
128 switch (ocp_sel) {
129 case 0:
130 /* OCP ratio 1 */
131 clk = cpu_clk;
132 break;
133 case 2:
134 /* OCP ratio 2 */
135 clk = cpu_clk / 2;
136 break;
137 case 3:
138 /* OCP ratio 2.5 */
139 clk = (cpu_clk * 2) / 5;
140 break;
141 case 4:
142 /* OCP ratio 3 */
143 clk = cpu_clk / 3;
144 break;
145 default:
146 clk = 0;
147 break;
148 }
149
150 return clk;
151}
diff --git a/arch/mips/lantiq/xway/devices.c b/arch/mips/lantiq/xway/devices.c
deleted file mode 100644
index d614aa7ff07f..000000000000
--- a/arch/mips/lantiq/xway/devices.c
+++ /dev/null
@@ -1,119 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/init.h>
10#include <linux/export.h>
11#include <linux/types.h>
12#include <linux/string.h>
13#include <linux/mtd/physmap.h>
14#include <linux/kernel.h>
15#include <linux/reboot.h>
16#include <linux/platform_device.h>
17#include <linux/leds.h>
18#include <linux/etherdevice.h>
19#include <linux/time.h>
20#include <linux/io.h>
21#include <linux/gpio.h>
22
23#include <asm/bootinfo.h>
24#include <asm/irq.h>
25
26#include <lantiq_soc.h>
27#include <lantiq_irq.h>
28#include <lantiq_platform.h>
29
30#include "devices.h"
31
32/* gpio */
33static struct resource ltq_gpio_resource[] = {
34 {
35 .name = "gpio0",
36 .start = LTQ_GPIO0_BASE_ADDR,
37 .end = LTQ_GPIO0_BASE_ADDR + LTQ_GPIO_SIZE - 1,
38 .flags = IORESOURCE_MEM,
39 }, {
40 .name = "gpio1",
41 .start = LTQ_GPIO1_BASE_ADDR,
42 .end = LTQ_GPIO1_BASE_ADDR + LTQ_GPIO_SIZE - 1,
43 .flags = IORESOURCE_MEM,
44 }, {
45 .name = "gpio2",
46 .start = LTQ_GPIO2_BASE_ADDR,
47 .end = LTQ_GPIO2_BASE_ADDR + LTQ_GPIO_SIZE - 1,
48 .flags = IORESOURCE_MEM,
49 }
50};
51
52void __init ltq_register_gpio(void)
53{
54 platform_device_register_simple("ltq_gpio", 0,
55 &ltq_gpio_resource[0], 1);
56 platform_device_register_simple("ltq_gpio", 1,
57 &ltq_gpio_resource[1], 1);
58
59 /* AR9 and VR9 have an extra gpio block */
60 if (ltq_is_ar9() || ltq_is_vr9()) {
61 platform_device_register_simple("ltq_gpio", 2,
62 &ltq_gpio_resource[2], 1);
63 }
64}
65
66/* serial to parallel conversion */
67static struct resource ltq_stp_resource = {
68 .name = "stp",
69 .start = LTQ_STP_BASE_ADDR,
70 .end = LTQ_STP_BASE_ADDR + LTQ_STP_SIZE - 1,
71 .flags = IORESOURCE_MEM,
72};
73
74void __init ltq_register_gpio_stp(void)
75{
76 platform_device_register_simple("ltq_stp", 0, &ltq_stp_resource, 1);
77}
78
79/* asc ports - amazon se has its own serial mapping */
80static struct resource ltq_ase_asc_resources[] = {
81 {
82 .name = "asc0",
83 .start = LTQ_ASC1_BASE_ADDR,
84 .end = LTQ_ASC1_BASE_ADDR + LTQ_ASC_SIZE - 1,
85 .flags = IORESOURCE_MEM,
86 },
87 IRQ_RES(tx, LTQ_ASC_ASE_TIR),
88 IRQ_RES(rx, LTQ_ASC_ASE_RIR),
89 IRQ_RES(err, LTQ_ASC_ASE_EIR),
90};
91
92void __init ltq_register_ase_asc(void)
93{
94 platform_device_register_simple("ltq_asc", 0,
95 ltq_ase_asc_resources, ARRAY_SIZE(ltq_ase_asc_resources));
96}
97
98/* ethernet */
99static struct resource ltq_etop_resources = {
100 .name = "etop",
101 .start = LTQ_ETOP_BASE_ADDR,
102 .end = LTQ_ETOP_BASE_ADDR + LTQ_ETOP_SIZE - 1,
103 .flags = IORESOURCE_MEM,
104};
105
106static struct platform_device ltq_etop = {
107 .name = "ltq_etop",
108 .resource = &ltq_etop_resources,
109 .num_resources = 1,
110};
111
112void __init
113ltq_register_etop(struct ltq_eth_data *eth)
114{
115 if (eth) {
116 ltq_etop.dev.platform_data = eth;
117 platform_device_register(&ltq_etop);
118 }
119}
diff --git a/arch/mips/lantiq/xway/devices.h b/arch/mips/lantiq/xway/devices.h
deleted file mode 100644
index e90493471bc1..000000000000
--- a/arch/mips/lantiq/xway/devices.h
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#ifndef _LTQ_DEVICES_XWAY_H__
10#define _LTQ_DEVICES_XWAY_H__
11
12#include "../devices.h"
13#include <linux/phy.h>
14
15extern void ltq_register_gpio(void);
16extern void ltq_register_gpio_stp(void);
17extern void ltq_register_ase_asc(void);
18extern void ltq_register_etop(struct ltq_eth_data *eth);
19
20#endif
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index b210e936c7c3..55d2c4fa4714 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -19,7 +19,8 @@
19#include <linux/platform_device.h> 19#include <linux/platform_device.h>
20#include <linux/io.h> 20#include <linux/io.h>
21#include <linux/dma-mapping.h> 21#include <linux/dma-mapping.h>
22#include <linux/export.h> 22#include <linux/module.h>
23#include <linux/clk.h>
23 24
24#include <lantiq_soc.h> 25#include <lantiq_soc.h>
25#include <xway_dma.h> 26#include <xway_dma.h>
@@ -55,13 +56,6 @@
55#define ltq_dma_w32_mask(x, y, z) ltq_w32_mask(x, y, \ 56#define ltq_dma_w32_mask(x, y, z) ltq_w32_mask(x, y, \
56 ltq_dma_membase + (z)) 57 ltq_dma_membase + (z))
57 58
58static struct resource ltq_dma_resource = {
59 .name = "dma",
60 .start = LTQ_DMA_BASE_ADDR,
61 .end = LTQ_DMA_BASE_ADDR + LTQ_DMA_SIZE - 1,
62 .flags = IORESOURCE_MEM,
63};
64
65static void __iomem *ltq_dma_membase; 59static void __iomem *ltq_dma_membase;
66 60
67void 61void
@@ -215,27 +209,28 @@ ltq_dma_init_port(int p)
215} 209}
216EXPORT_SYMBOL_GPL(ltq_dma_init_port); 210EXPORT_SYMBOL_GPL(ltq_dma_init_port);
217 211
218int __init 212static int __devinit
219ltq_dma_init(void) 213ltq_dma_init(struct platform_device *pdev)
220{ 214{
215 struct clk *clk;
216 struct resource *res;
221 int i; 217 int i;
222 218
223 /* insert and request the memory region */ 219 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
224 if (insert_resource(&iomem_resource, &ltq_dma_resource) < 0) 220 if (!res)
225 panic("Failed to insert dma memory"); 221 panic("Failed to get dma resource");
226
227 if (request_mem_region(ltq_dma_resource.start,
228 resource_size(&ltq_dma_resource), "dma") < 0)
229 panic("Failed to request dma memory");
230 222
231 /* remap dma register range */ 223 /* remap dma register range */
232 ltq_dma_membase = ioremap_nocache(ltq_dma_resource.start, 224 ltq_dma_membase = devm_request_and_ioremap(&pdev->dev, res);
233 resource_size(&ltq_dma_resource));
234 if (!ltq_dma_membase) 225 if (!ltq_dma_membase)
235 panic("Failed to remap dma memory"); 226 panic("Failed to remap dma resource");
236 227
237 /* power up and reset the dma engine */ 228 /* power up and reset the dma engine */
238 ltq_pmu_enable(PMU_DMA); 229 clk = clk_get(&pdev->dev, NULL);
230 if (IS_ERR(clk))
231 panic("Failed to get dma clock");
232
233 clk_enable(clk);
239 ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL); 234 ltq_dma_w32_mask(0, DMA_RESET, LTQ_DMA_CTRL);
240 235
241 /* disable all interrupts */ 236 /* disable all interrupts */
@@ -248,7 +243,29 @@ ltq_dma_init(void)
248 ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL); 243 ltq_dma_w32(DMA_POLL | DMA_CLK_DIV4, LTQ_DMA_CPOLL);
249 ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL); 244 ltq_dma_w32_mask(DMA_CHAN_ON, 0, LTQ_DMA_CCTRL);
250 } 245 }
246 dev_info(&pdev->dev, "init done\n");
251 return 0; 247 return 0;
252} 248}
253 249
254postcore_initcall(ltq_dma_init); 250static const struct of_device_id dma_match[] = {
251 { .compatible = "lantiq,dma-xway" },
252 {},
253};
254MODULE_DEVICE_TABLE(of, dma_match);
255
256static struct platform_driver dma_driver = {
257 .probe = ltq_dma_init,
258 .driver = {
259 .name = "dma-xway",
260 .owner = THIS_MODULE,
261 .of_match_table = dma_match,
262 },
263};
264
265int __init
266dma_init(void)
267{
268 return platform_driver_register(&dma_driver);
269}
270
271postcore_initcall(dma_init);
diff --git a/arch/mips/lantiq/xway/ebu.c b/arch/mips/lantiq/xway/ebu.c
deleted file mode 100644
index 862e3e830680..000000000000
--- a/arch/mips/lantiq/xway/ebu.c
+++ /dev/null
@@ -1,52 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * EBU - the external bus unit attaches PCI, NOR and NAND
7 *
8 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
9 */
10
11#include <linux/kernel.h>
12#include <linux/module.h>
13#include <linux/ioport.h>
14
15#include <lantiq_soc.h>
16
17/* all access to the ebu must be locked */
18DEFINE_SPINLOCK(ebu_lock);
19EXPORT_SYMBOL_GPL(ebu_lock);
20
21static struct resource ltq_ebu_resource = {
22 .name = "ebu",
23 .start = LTQ_EBU_BASE_ADDR,
24 .end = LTQ_EBU_BASE_ADDR + LTQ_EBU_SIZE - 1,
25 .flags = IORESOURCE_MEM,
26};
27
28/* remapped base addr of the clock unit and external bus unit */
29void __iomem *ltq_ebu_membase;
30
31static int __init lantiq_ebu_init(void)
32{
33 /* insert and request the memory region */
34 if (insert_resource(&iomem_resource, &ltq_ebu_resource) < 0)
35 panic("Failed to insert ebu memory");
36
37 if (request_mem_region(ltq_ebu_resource.start,
38 resource_size(&ltq_ebu_resource), "ebu") < 0)
39 panic("Failed to request ebu memory");
40
41 /* remap ebu register range */
42 ltq_ebu_membase = ioremap_nocache(ltq_ebu_resource.start,
43 resource_size(&ltq_ebu_resource));
44 if (!ltq_ebu_membase)
45 panic("Failed to remap ebu memory");
46
47 /* make sure to unprotect the memory region where flash is located */
48 ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0);
49 return 0;
50}
51
52postcore_initcall(lantiq_ebu_init);
diff --git a/arch/mips/lantiq/xway/gpio.c b/arch/mips/lantiq/xway/gpio.c
index c429a5bc080f..2ab39e93d9be 100644
--- a/arch/mips/lantiq/xway/gpio.c
+++ b/arch/mips/lantiq/xway/gpio.c
@@ -36,18 +36,6 @@ struct ltq_gpio {
36 36
37static struct ltq_gpio ltq_gpio_port[MAX_PORTS]; 37static struct ltq_gpio ltq_gpio_port[MAX_PORTS];
38 38
39int gpio_to_irq(unsigned int gpio)
40{
41 return -EINVAL;
42}
43EXPORT_SYMBOL(gpio_to_irq);
44
45int irq_to_gpio(unsigned int gpio)
46{
47 return -EINVAL;
48}
49EXPORT_SYMBOL(irq_to_gpio);
50
51int ltq_gpio_request(unsigned int pin, unsigned int alt0, 39int ltq_gpio_request(unsigned int pin, unsigned int alt0,
52 unsigned int alt1, unsigned int dir, const char *name) 40 unsigned int alt1, unsigned int dir, const char *name)
53{ 41{
diff --git a/arch/mips/lantiq/xway/gpio_ebu.c b/arch/mips/lantiq/xway/gpio_ebu.c
deleted file mode 100644
index aae17170472f..000000000000
--- a/arch/mips/lantiq/xway/gpio_ebu.c
+++ /dev/null
@@ -1,126 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/init.h>
10#include <linux/export.h>
11#include <linux/types.h>
12#include <linux/platform_device.h>
13#include <linux/mutex.h>
14#include <linux/gpio.h>
15#include <linux/io.h>
16
17#include <lantiq_soc.h>
18
19/*
20 * By attaching hardware latches to the EBU it is possible to create output
21 * only gpios. This driver configures a special memory address, which when
22 * written to outputs 16 bit to the latches.
23 */
24
25#define LTQ_EBU_BUSCON 0x1e7ff /* 16 bit access, slowest timing */
26#define LTQ_EBU_WP 0x80000000 /* write protect bit */
27
28/* we keep a shadow value of the last value written to the ebu */
29static int ltq_ebu_gpio_shadow = 0x0;
30static void __iomem *ltq_ebu_gpio_membase;
31
32static void ltq_ebu_apply(void)
33{
34 unsigned long flags;
35
36 spin_lock_irqsave(&ebu_lock, flags);
37 ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1);
38 *((__u16 *)ltq_ebu_gpio_membase) = ltq_ebu_gpio_shadow;
39 ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
40 spin_unlock_irqrestore(&ebu_lock, flags);
41}
42
43static void ltq_ebu_set(struct gpio_chip *chip, unsigned offset, int value)
44{
45 if (value)
46 ltq_ebu_gpio_shadow |= (1 << offset);
47 else
48 ltq_ebu_gpio_shadow &= ~(1 << offset);
49 ltq_ebu_apply();
50}
51
52static int ltq_ebu_direction_output(struct gpio_chip *chip, unsigned offset,
53 int value)
54{
55 ltq_ebu_set(chip, offset, value);
56
57 return 0;
58}
59
60static struct gpio_chip ltq_ebu_chip = {
61 .label = "ltq_ebu",
62 .direction_output = ltq_ebu_direction_output,
63 .set = ltq_ebu_set,
64 .base = 72,
65 .ngpio = 16,
66 .can_sleep = 1,
67 .owner = THIS_MODULE,
68};
69
70static int ltq_ebu_probe(struct platform_device *pdev)
71{
72 int ret = 0;
73 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
74
75 if (!res) {
76 dev_err(&pdev->dev, "failed to get memory resource\n");
77 return -ENOENT;
78 }
79
80 res = devm_request_mem_region(&pdev->dev, res->start,
81 resource_size(res), dev_name(&pdev->dev));
82 if (!res) {
83 dev_err(&pdev->dev, "failed to request memory resource\n");
84 return -EBUSY;
85 }
86
87 ltq_ebu_gpio_membase = devm_ioremap_nocache(&pdev->dev, res->start,
88 resource_size(res));
89 if (!ltq_ebu_gpio_membase) {
90 dev_err(&pdev->dev, "Failed to ioremap mem region\n");
91 return -ENOMEM;
92 }
93
94 /* grab the default shadow value passed form the platform code */
95 ltq_ebu_gpio_shadow = (unsigned int) pdev->dev.platform_data;
96
97 /* tell the ebu controller which memory address we will be using */
98 ltq_ebu_w32(pdev->resource->start | 0x1, LTQ_EBU_ADDRSEL1);
99
100 /* write protect the region */
101 ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
102
103 ret = gpiochip_add(&ltq_ebu_chip);
104 if (!ret)
105 ltq_ebu_apply();
106 return ret;
107}
108
109static struct platform_driver ltq_ebu_driver = {
110 .probe = ltq_ebu_probe,
111 .driver = {
112 .name = "ltq_ebu",
113 .owner = THIS_MODULE,
114 },
115};
116
117static int __init ltq_ebu_init(void)
118{
119 int ret = platform_driver_register(&ltq_ebu_driver);
120
121 if (ret)
122 pr_info("ltq_ebu : Error registering platform driver!");
123 return ret;
124}
125
126postcore_initcall(ltq_ebu_init);
diff --git a/arch/mips/lantiq/xway/gpio_stp.c b/arch/mips/lantiq/xway/gpio_stp.c
deleted file mode 100644
index fd07d87adaa9..000000000000
--- a/arch/mips/lantiq/xway/gpio_stp.c
+++ /dev/null
@@ -1,157 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2007 John Crispin <blogic@openwrt.org>
7 *
8 */
9
10#include <linux/slab.h>
11#include <linux/init.h>
12#include <linux/export.h>
13#include <linux/types.h>
14#include <linux/platform_device.h>
15#include <linux/mutex.h>
16#include <linux/io.h>
17#include <linux/gpio.h>
18
19#include <lantiq_soc.h>
20
21#define LTQ_STP_CON0 0x00
22#define LTQ_STP_CON1 0x04
23#define LTQ_STP_CPU0 0x08
24#define LTQ_STP_CPU1 0x0C
25#define LTQ_STP_AR 0x10
26
27#define LTQ_STP_CON_SWU (1 << 31)
28#define LTQ_STP_2HZ 0
29#define LTQ_STP_4HZ (1 << 23)
30#define LTQ_STP_8HZ (2 << 23)
31#define LTQ_STP_10HZ (3 << 23)
32#define LTQ_STP_SPEED_MASK (0xf << 23)
33#define LTQ_STP_UPD_FPI (1 << 31)
34#define LTQ_STP_UPD_MASK (3 << 30)
35#define LTQ_STP_ADSL_SRC (3 << 24)
36
37#define LTQ_STP_GROUP0 (1 << 0)
38
39#define LTQ_STP_RISING 0
40#define LTQ_STP_FALLING (1 << 26)
41#define LTQ_STP_EDGE_MASK (1 << 26)
42
43#define ltq_stp_r32(reg) __raw_readl(ltq_stp_membase + reg)
44#define ltq_stp_w32(val, reg) __raw_writel(val, ltq_stp_membase + reg)
45#define ltq_stp_w32_mask(clear, set, reg) \
46 ltq_w32((ltq_r32(ltq_stp_membase + reg) & ~(clear)) | (set), \
47 ltq_stp_membase + (reg))
48
49static int ltq_stp_shadow = 0xffff;
50static void __iomem *ltq_stp_membase;
51
52static void ltq_stp_set(struct gpio_chip *chip, unsigned offset, int value)
53{
54 if (value)
55 ltq_stp_shadow |= (1 << offset);
56 else
57 ltq_stp_shadow &= ~(1 << offset);
58 ltq_stp_w32(ltq_stp_shadow, LTQ_STP_CPU0);
59}
60
61static int ltq_stp_direction_output(struct gpio_chip *chip, unsigned offset,
62 int value)
63{
64 ltq_stp_set(chip, offset, value);
65
66 return 0;
67}
68
69static struct gpio_chip ltq_stp_chip = {
70 .label = "ltq_stp",
71 .direction_output = ltq_stp_direction_output,
72 .set = ltq_stp_set,
73 .base = 48,
74 .ngpio = 24,
75 .can_sleep = 1,
76 .owner = THIS_MODULE,
77};
78
79static int ltq_stp_hw_init(void)
80{
81 /* the 3 pins used to control the external stp */
82 ltq_gpio_request(4, 1, 0, 1, "stp-st");
83 ltq_gpio_request(5, 1, 0, 1, "stp-d");
84 ltq_gpio_request(6, 1, 0, 1, "stp-sh");
85
86 /* sane defaults */
87 ltq_stp_w32(0, LTQ_STP_AR);
88 ltq_stp_w32(0, LTQ_STP_CPU0);
89 ltq_stp_w32(0, LTQ_STP_CPU1);
90 ltq_stp_w32(LTQ_STP_CON_SWU, LTQ_STP_CON0);
91 ltq_stp_w32(0, LTQ_STP_CON1);
92
93 /* rising or falling edge */
94 ltq_stp_w32_mask(LTQ_STP_EDGE_MASK, LTQ_STP_FALLING, LTQ_STP_CON0);
95
96 /* per default stp 15-0 are set */
97 ltq_stp_w32_mask(0, LTQ_STP_GROUP0, LTQ_STP_CON1);
98
99 /* stp are update periodically by the FPI bus */
100 ltq_stp_w32_mask(LTQ_STP_UPD_MASK, LTQ_STP_UPD_FPI, LTQ_STP_CON1);
101
102 /* set stp update speed */
103 ltq_stp_w32_mask(LTQ_STP_SPEED_MASK, LTQ_STP_8HZ, LTQ_STP_CON1);
104
105 /* tell the hardware that pin (led) 0 and 1 are controlled
106 * by the dsl arc
107 */
108 ltq_stp_w32_mask(0, LTQ_STP_ADSL_SRC, LTQ_STP_CON0);
109
110 ltq_pmu_enable(PMU_LED);
111 return 0;
112}
113
114static int __devinit ltq_stp_probe(struct platform_device *pdev)
115{
116 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
117 int ret = 0;
118
119 if (!res)
120 return -ENOENT;
121 res = devm_request_mem_region(&pdev->dev, res->start,
122 resource_size(res), dev_name(&pdev->dev));
123 if (!res) {
124 dev_err(&pdev->dev, "failed to request STP memory\n");
125 return -EBUSY;
126 }
127 ltq_stp_membase = devm_ioremap_nocache(&pdev->dev, res->start,
128 resource_size(res));
129 if (!ltq_stp_membase) {
130 dev_err(&pdev->dev, "failed to remap STP memory\n");
131 return -ENOMEM;
132 }
133 ret = gpiochip_add(&ltq_stp_chip);
134 if (!ret)
135 ret = ltq_stp_hw_init();
136
137 return ret;
138}
139
140static struct platform_driver ltq_stp_driver = {
141 .probe = ltq_stp_probe,
142 .driver = {
143 .name = "ltq_stp",
144 .owner = THIS_MODULE,
145 },
146};
147
148int __init ltq_stp_init(void)
149{
150 int ret = platform_driver_register(&ltq_stp_driver);
151
152 if (ret)
153 pr_info("ltq_stp: error registering platform driver");
154 return ret;
155}
156
157postcore_initcall(ltq_stp_init);
diff --git a/arch/mips/lantiq/xway/mach-easy50601.c b/arch/mips/lantiq/xway/mach-easy50601.c
deleted file mode 100644
index d5aaf637ab19..000000000000
--- a/arch/mips/lantiq/xway/mach-easy50601.c
+++ /dev/null
@@ -1,57 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/init.h>
10#include <linux/platform_device.h>
11#include <linux/mtd/mtd.h>
12#include <linux/mtd/partitions.h>
13#include <linux/mtd/physmap.h>
14#include <linux/input.h>
15
16#include <lantiq.h>
17
18#include "../machtypes.h"
19#include "devices.h"
20
21static struct mtd_partition easy50601_partitions[] = {
22 {
23 .name = "uboot",
24 .offset = 0x0,
25 .size = 0x10000,
26 },
27 {
28 .name = "uboot_env",
29 .offset = 0x10000,
30 .size = 0x10000,
31 },
32 {
33 .name = "linux",
34 .offset = 0x20000,
35 .size = 0xE0000,
36 },
37 {
38 .name = "rootfs",
39 .offset = 0x100000,
40 .size = 0x300000,
41 },
42};
43
44static struct physmap_flash_data easy50601_flash_data = {
45 .nr_parts = ARRAY_SIZE(easy50601_partitions),
46 .parts = easy50601_partitions,
47};
48
49static void __init easy50601_init(void)
50{
51 ltq_register_nor(&easy50601_flash_data);
52}
53
54MIPS_MACHINE(LTQ_MACH_EASY50601,
55 "EASY50601",
56 "EASY50601 Eval Board",
57 easy50601_init);
diff --git a/arch/mips/lantiq/xway/mach-easy50712.c b/arch/mips/lantiq/xway/mach-easy50712.c
deleted file mode 100644
index ea5027b3239d..000000000000
--- a/arch/mips/lantiq/xway/mach-easy50712.c
+++ /dev/null
@@ -1,74 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/init.h>
10#include <linux/platform_device.h>
11#include <linux/mtd/mtd.h>
12#include <linux/mtd/partitions.h>
13#include <linux/mtd/physmap.h>
14#include <linux/input.h>
15#include <linux/phy.h>
16
17#include <lantiq_soc.h>
18#include <irq.h>
19
20#include "../machtypes.h"
21#include "devices.h"
22
23static struct mtd_partition easy50712_partitions[] = {
24 {
25 .name = "uboot",
26 .offset = 0x0,
27 .size = 0x10000,
28 },
29 {
30 .name = "uboot_env",
31 .offset = 0x10000,
32 .size = 0x10000,
33 },
34 {
35 .name = "linux",
36 .offset = 0x20000,
37 .size = 0xe0000,
38 },
39 {
40 .name = "rootfs",
41 .offset = 0x100000,
42 .size = 0x300000,
43 },
44};
45
46static struct physmap_flash_data easy50712_flash_data = {
47 .nr_parts = ARRAY_SIZE(easy50712_partitions),
48 .parts = easy50712_partitions,
49};
50
51static struct ltq_pci_data ltq_pci_data = {
52 .clock = PCI_CLOCK_INT,
53 .gpio = PCI_GNT1 | PCI_REQ1,
54 .irq = {
55 [14] = INT_NUM_IM0_IRL0 + 22,
56 },
57};
58
59static struct ltq_eth_data ltq_eth_data = {
60 .mii_mode = PHY_INTERFACE_MODE_MII,
61};
62
63static void __init easy50712_init(void)
64{
65 ltq_register_gpio_stp();
66 ltq_register_nor(&easy50712_flash_data);
67 ltq_register_pci(&ltq_pci_data);
68 ltq_register_etop(&ltq_eth_data);
69}
70
71MIPS_MACHINE(LTQ_MACH_EASY50712,
72 "EASY50712",
73 "EASY50712 Eval Board",
74 easy50712_init);
diff --git a/arch/mips/lantiq/xway/pmu.c b/arch/mips/lantiq/xway/pmu.c
deleted file mode 100644
index fe85361e032e..000000000000
--- a/arch/mips/lantiq/xway/pmu.c
+++ /dev/null
@@ -1,69 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/ioport.h>
12
13#include <lantiq_soc.h>
14
15/* PMU - the power management unit allows us to turn part of the core
16 * on and off
17 */
18
19/* the enable / disable registers */
20#define LTQ_PMU_PWDCR 0x1C
21#define LTQ_PMU_PWDSR 0x20
22
23#define ltq_pmu_w32(x, y) ltq_w32((x), ltq_pmu_membase + (y))
24#define ltq_pmu_r32(x) ltq_r32(ltq_pmu_membase + (x))
25
26static struct resource ltq_pmu_resource = {
27 .name = "pmu",
28 .start = LTQ_PMU_BASE_ADDR,
29 .end = LTQ_PMU_BASE_ADDR + LTQ_PMU_SIZE - 1,
30 .flags = IORESOURCE_MEM,
31};
32
33static void __iomem *ltq_pmu_membase;
34
35void ltq_pmu_enable(unsigned int module)
36{
37 int err = 1000000;
38
39 ltq_pmu_w32(ltq_pmu_r32(LTQ_PMU_PWDCR) & ~module, LTQ_PMU_PWDCR);
40 do {} while (--err && (ltq_pmu_r32(LTQ_PMU_PWDSR) & module));
41
42 if (!err)
43 panic("activating PMU module failed!");
44}
45EXPORT_SYMBOL(ltq_pmu_enable);
46
47void ltq_pmu_disable(unsigned int module)
48{
49 ltq_pmu_w32(ltq_pmu_r32(LTQ_PMU_PWDCR) | module, LTQ_PMU_PWDCR);
50}
51EXPORT_SYMBOL(ltq_pmu_disable);
52
53int __init ltq_pmu_init(void)
54{
55 if (insert_resource(&iomem_resource, &ltq_pmu_resource) < 0)
56 panic("Failed to insert pmu memory");
57
58 if (request_mem_region(ltq_pmu_resource.start,
59 resource_size(&ltq_pmu_resource), "pmu") < 0)
60 panic("Failed to request pmu memory");
61
62 ltq_pmu_membase = ioremap_nocache(ltq_pmu_resource.start,
63 resource_size(&ltq_pmu_resource));
64 if (!ltq_pmu_membase)
65 panic("Failed to remap pmu memory");
66 return 0;
67}
68
69core_initcall(ltq_pmu_init);
diff --git a/arch/mips/lantiq/xway/prom-ase.c b/arch/mips/lantiq/xway/prom-ase.c
deleted file mode 100644
index ae4959ae865c..000000000000
--- a/arch/mips/lantiq/xway/prom-ase.c
+++ /dev/null
@@ -1,39 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/export.h>
10#include <linux/clk.h>
11#include <asm/bootinfo.h>
12#include <asm/time.h>
13
14#include <lantiq_soc.h>
15
16#include "../prom.h"
17
18#define SOC_AMAZON_SE "Amazon_SE"
19
20#define PART_SHIFT 12
21#define PART_MASK 0x0FFFFFFF
22#define REV_SHIFT 28
23#define REV_MASK 0xF0000000
24
25void __init ltq_soc_detect(struct ltq_soc_info *i)
26{
27 i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
28 i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
29 switch (i->partnum) {
30 case SOC_ID_AMAZON_SE:
31 i->name = SOC_AMAZON_SE;
32 i->type = SOC_TYPE_AMAZON_SE;
33 break;
34
35 default:
36 unreachable();
37 break;
38 }
39}
diff --git a/arch/mips/lantiq/xway/prom-xway.c b/arch/mips/lantiq/xway/prom-xway.c
deleted file mode 100644
index 2228133ca356..000000000000
--- a/arch/mips/lantiq/xway/prom-xway.c
+++ /dev/null
@@ -1,54 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/export.h>
10#include <linux/clk.h>
11#include <asm/bootinfo.h>
12#include <asm/time.h>
13
14#include <lantiq_soc.h>
15
16#include "../prom.h"
17
18#define SOC_DANUBE "Danube"
19#define SOC_TWINPASS "Twinpass"
20#define SOC_AR9 "AR9"
21
22#define PART_SHIFT 12
23#define PART_MASK 0x0FFFFFFF
24#define REV_SHIFT 28
25#define REV_MASK 0xF0000000
26
27void __init ltq_soc_detect(struct ltq_soc_info *i)
28{
29 i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
30 i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
31 switch (i->partnum) {
32 case SOC_ID_DANUBE1:
33 case SOC_ID_DANUBE2:
34 i->name = SOC_DANUBE;
35 i->type = SOC_TYPE_DANUBE;
36 break;
37
38 case SOC_ID_TWINPASS:
39 i->name = SOC_TWINPASS;
40 i->type = SOC_TYPE_DANUBE;
41 break;
42
43 case SOC_ID_ARX188:
44 case SOC_ID_ARX168:
45 case SOC_ID_ARX182:
46 i->name = SOC_AR9;
47 i->type = SOC_TYPE_AR9;
48 break;
49
50 default:
51 unreachable();
52 break;
53 }
54}
diff --git a/arch/mips/lantiq/xway/prom.c b/arch/mips/lantiq/xway/prom.c
new file mode 100644
index 000000000000..248429ab2622
--- /dev/null
+++ b/arch/mips/lantiq/xway/prom.c
@@ -0,0 +1,115 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/export.h>
10#include <linux/clk.h>
11#include <asm/bootinfo.h>
12#include <asm/time.h>
13
14#include <lantiq_soc.h>
15
16#include "../prom.h"
17
18#define SOC_DANUBE "Danube"
19#define SOC_TWINPASS "Twinpass"
20#define SOC_AMAZON_SE "Amazon_SE"
21#define SOC_AR9 "AR9"
22#define SOC_GR9 "GR9"
23#define SOC_VR9 "VR9"
24
25#define COMP_DANUBE "lantiq,danube"
26#define COMP_TWINPASS "lantiq,twinpass"
27#define COMP_AMAZON_SE "lantiq,ase"
28#define COMP_AR9 "lantiq,ar9"
29#define COMP_GR9 "lantiq,gr9"
30#define COMP_VR9 "lantiq,vr9"
31
32#define PART_SHIFT 12
33#define PART_MASK 0x0FFFFFFF
34#define REV_SHIFT 28
35#define REV_MASK 0xF0000000
36
37void __init ltq_soc_detect(struct ltq_soc_info *i)
38{
39 i->partnum = (ltq_r32(LTQ_MPS_CHIPID) & PART_MASK) >> PART_SHIFT;
40 i->rev = (ltq_r32(LTQ_MPS_CHIPID) & REV_MASK) >> REV_SHIFT;
41 sprintf(i->rev_type, "1.%d", i->rev);
42 switch (i->partnum) {
43 case SOC_ID_DANUBE1:
44 case SOC_ID_DANUBE2:
45 i->name = SOC_DANUBE;
46 i->type = SOC_TYPE_DANUBE;
47 i->compatible = COMP_DANUBE;
48 break;
49
50 case SOC_ID_TWINPASS:
51 i->name = SOC_TWINPASS;
52 i->type = SOC_TYPE_DANUBE;
53 i->compatible = COMP_TWINPASS;
54 break;
55
56 case SOC_ID_ARX188:
57 case SOC_ID_ARX168_1:
58 case SOC_ID_ARX168_2:
59 case SOC_ID_ARX182:
60 i->name = SOC_AR9;
61 i->type = SOC_TYPE_AR9;
62 i->compatible = COMP_AR9;
63 break;
64
65 case SOC_ID_GRX188:
66 case SOC_ID_GRX168:
67 i->name = SOC_GR9;
68 i->type = SOC_TYPE_AR9;
69 i->compatible = COMP_GR9;
70 break;
71
72 case SOC_ID_AMAZON_SE_1:
73 case SOC_ID_AMAZON_SE_2:
74#ifdef CONFIG_PCI
75 panic("ase is only supported for non pci kernels");
76#endif
77 i->name = SOC_AMAZON_SE;
78 i->type = SOC_TYPE_AMAZON_SE;
79 i->compatible = COMP_AMAZON_SE;
80 break;
81
82 case SOC_ID_VRX282:
83 case SOC_ID_VRX268:
84 case SOC_ID_VRX288:
85 i->name = SOC_VR9;
86 i->type = SOC_TYPE_VR9;
87 i->compatible = COMP_VR9;
88 break;
89
90 case SOC_ID_GRX268:
91 case SOC_ID_GRX288:
92 i->name = SOC_GR9;
93 i->type = SOC_TYPE_VR9;
94 i->compatible = COMP_GR9;
95 break;
96
97 case SOC_ID_VRX268_2:
98 case SOC_ID_VRX288_2:
99 i->name = SOC_VR9;
100 i->type = SOC_TYPE_VR9_2;
101 i->compatible = COMP_VR9;
102 break;
103
104 case SOC_ID_GRX282_2:
105 case SOC_ID_GRX288_2:
106 i->name = SOC_GR9;
107 i->type = SOC_TYPE_VR9_2;
108 i->compatible = COMP_GR9;
109 break;
110
111 default:
112 unreachable();
113 break;
114 }
115}
diff --git a/arch/mips/lantiq/xway/reset.c b/arch/mips/lantiq/xway/reset.c
index 8b66bd87f0c1..22c55f73aa9d 100644
--- a/arch/mips/lantiq/xway/reset.c
+++ b/arch/mips/lantiq/xway/reset.c
@@ -11,26 +11,31 @@
11#include <linux/ioport.h> 11#include <linux/ioport.h>
12#include <linux/pm.h> 12#include <linux/pm.h>
13#include <linux/export.h> 13#include <linux/export.h>
14#include <linux/delay.h>
15#include <linux/of_address.h>
16#include <linux/of_platform.h>
17
14#include <asm/reboot.h> 18#include <asm/reboot.h>
15 19
16#include <lantiq_soc.h> 20#include <lantiq_soc.h>
17 21
22#include "../prom.h"
23
18#define ltq_rcu_w32(x, y) ltq_w32((x), ltq_rcu_membase + (y)) 24#define ltq_rcu_w32(x, y) ltq_w32((x), ltq_rcu_membase + (y))
19#define ltq_rcu_r32(x) ltq_r32(ltq_rcu_membase + (x)) 25#define ltq_rcu_r32(x) ltq_r32(ltq_rcu_membase + (x))
20 26
21/* register definitions */ 27/* reset request register */
22#define LTQ_RCU_RST 0x0010 28#define RCU_RST_REQ 0x0010
23#define LTQ_RCU_RST_ALL 0x40000000 29/* reset status register */
24 30#define RCU_RST_STAT 0x0014
25#define LTQ_RCU_RST_STAT 0x0014
26#define LTQ_RCU_STAT_SHIFT 26
27 31
28static struct resource ltq_rcu_resource = { 32/* reboot bit */
29 .name = "rcu", 33#define RCU_RD_SRST BIT(30)
30 .start = LTQ_RCU_BASE_ADDR, 34/* reset cause */
31 .end = LTQ_RCU_BASE_ADDR + LTQ_RCU_SIZE - 1, 35#define RCU_STAT_SHIFT 26
32 .flags = IORESOURCE_MEM, 36/* boot selection */
33}; 37#define RCU_BOOT_SEL_SHIFT 26
38#define RCU_BOOT_SEL_MASK 0x7
34 39
35/* remapped base addr of the reset control unit */ 40/* remapped base addr of the reset control unit */
36static void __iomem *ltq_rcu_membase; 41static void __iomem *ltq_rcu_membase;
@@ -38,48 +43,64 @@ static void __iomem *ltq_rcu_membase;
38/* This function is used by the watchdog driver */ 43/* This function is used by the watchdog driver */
39int ltq_reset_cause(void) 44int ltq_reset_cause(void)
40{ 45{
41 u32 val = ltq_rcu_r32(LTQ_RCU_RST_STAT); 46 u32 val = ltq_rcu_r32(RCU_RST_STAT);
42 return val >> LTQ_RCU_STAT_SHIFT; 47 return val >> RCU_STAT_SHIFT;
43} 48}
44EXPORT_SYMBOL_GPL(ltq_reset_cause); 49EXPORT_SYMBOL_GPL(ltq_reset_cause);
45 50
51/* allow platform code to find out what source we booted from */
52unsigned char ltq_boot_select(void)
53{
54 u32 val = ltq_rcu_r32(RCU_RST_STAT);
55 return (val >> RCU_BOOT_SEL_SHIFT) & RCU_BOOT_SEL_MASK;
56}
57
58/* reset a io domain for u micro seconds */
59void ltq_reset_once(unsigned int module, ulong u)
60{
61 ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) | module, RCU_RST_REQ);
62 udelay(u);
63 ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) & ~module, RCU_RST_REQ);
64}
65
46static void ltq_machine_restart(char *command) 66static void ltq_machine_restart(char *command)
47{ 67{
48 pr_notice("System restart\n");
49 local_irq_disable(); 68 local_irq_disable();
50 ltq_rcu_w32(ltq_rcu_r32(LTQ_RCU_RST) | LTQ_RCU_RST_ALL, LTQ_RCU_RST); 69 ltq_rcu_w32(ltq_rcu_r32(RCU_RST_REQ) | RCU_RD_SRST, RCU_RST_REQ);
51 unreachable(); 70 unreachable();
52} 71}
53 72
54static void ltq_machine_halt(void) 73static void ltq_machine_halt(void)
55{ 74{
56 pr_notice("System halted.\n");
57 local_irq_disable(); 75 local_irq_disable();
58 unreachable(); 76 unreachable();
59} 77}
60 78
61static void ltq_machine_power_off(void) 79static void ltq_machine_power_off(void)
62{ 80{
63 pr_notice("Please turn off the power now.\n");
64 local_irq_disable(); 81 local_irq_disable();
65 unreachable(); 82 unreachable();
66} 83}
67 84
68static int __init mips_reboot_setup(void) 85static int __init mips_reboot_setup(void)
69{ 86{
70 /* insert and request the memory region */ 87 struct resource res;
71 if (insert_resource(&iomem_resource, &ltq_rcu_resource) < 0) 88 struct device_node *np =
72 panic("Failed to insert rcu memory"); 89 of_find_compatible_node(NULL, NULL, "lantiq,rcu-xway");
90
91 /* check if all the reset register range is available */
92 if (!np)
93 panic("Failed to load reset resources from devicetree");
94
95 if (of_address_to_resource(np, 0, &res))
96 panic("Failed to get rcu memory range");
73 97
74 if (request_mem_region(ltq_rcu_resource.start, 98 if (request_mem_region(res.start, resource_size(&res), res.name) < 0)
75 resource_size(&ltq_rcu_resource), "rcu") < 0) 99 pr_err("Failed to request rcu memory");
76 panic("Failed to request rcu memory");
77 100
78 /* remap rcu register range */ 101 ltq_rcu_membase = ioremap_nocache(res.start, resource_size(&res));
79 ltq_rcu_membase = ioremap_nocache(ltq_rcu_resource.start,
80 resource_size(&ltq_rcu_resource));
81 if (!ltq_rcu_membase) 102 if (!ltq_rcu_membase)
82 panic("Failed to remap rcu memory"); 103 panic("Failed to remap core memory");
83 104
84 _machine_restart = ltq_machine_restart; 105 _machine_restart = ltq_machine_restart;
85 _machine_halt = ltq_machine_halt; 106 _machine_halt = ltq_machine_halt;
diff --git a/arch/mips/lantiq/xway/setup-ase.c b/arch/mips/lantiq/xway/setup-ase.c
deleted file mode 100644
index f6f326798a39..000000000000
--- a/arch/mips/lantiq/xway/setup-ase.c
+++ /dev/null
@@ -1,19 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
7 */
8
9#include <lantiq_soc.h>
10
11#include "../prom.h"
12#include "devices.h"
13
14void __init ltq_soc_setup(void)
15{
16 ltq_register_ase_asc();
17 ltq_register_gpio();
18 ltq_register_wdt();
19}
diff --git a/arch/mips/lantiq/xway/setup-xway.c b/arch/mips/lantiq/xway/setup-xway.c
deleted file mode 100644
index c292f643a858..000000000000
--- a/arch/mips/lantiq/xway/setup-xway.c
+++ /dev/null
@@ -1,20 +0,0 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
7 */
8
9#include <lantiq_soc.h>
10
11#include "../prom.h"
12#include "devices.h"
13
14void __init ltq_soc_setup(void)
15{
16 ltq_register_asc(0);
17 ltq_register_asc(1);
18 ltq_register_gpio();
19 ltq_register_wdt();
20}
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
new file mode 100644
index 000000000000..83780f7c842b
--- /dev/null
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -0,0 +1,371 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2011-2012 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/ioport.h>
10#include <linux/export.h>
11#include <linux/clkdev.h>
12#include <linux/of.h>
13#include <linux/of_platform.h>
14#include <linux/of_address.h>
15
16#include <lantiq_soc.h>
17
18#include "../clk.h"
19#include "../prom.h"
20
21/* clock control register */
22#define CGU_IFCCR 0x0018
23/* system clock register */
24#define CGU_SYS 0x0010
25/* pci control register */
26#define CGU_PCICR 0x0034
27/* ephy configuration register */
28#define CGU_EPHY 0x10
29/* power control register */
30#define PMU_PWDCR 0x1C
31/* power status register */
32#define PMU_PWDSR 0x20
33/* power control register */
34#define PMU_PWDCR1 0x24
35/* power status register */
36#define PMU_PWDSR1 0x28
37/* power control register */
38#define PWDCR(x) ((x) ? (PMU_PWDCR1) : (PMU_PWDCR))
39/* power status register */
40#define PWDSR(x) ((x) ? (PMU_PWDSR1) : (PMU_PWDSR))
41
42/* clock gates that we can en/disable */
43#define PMU_USB0_P BIT(0)
44#define PMU_PCI BIT(4)
45#define PMU_DMA BIT(5)
46#define PMU_USB0 BIT(6)
47#define PMU_ASC0 BIT(7)
48#define PMU_EPHY BIT(7) /* ase */
49#define PMU_SPI BIT(8)
50#define PMU_DFE BIT(9)
51#define PMU_EBU BIT(10)
52#define PMU_STP BIT(11)
53#define PMU_GPT BIT(12)
54#define PMU_AHBS BIT(13) /* vr9 */
55#define PMU_FPI BIT(14)
56#define PMU_AHBM BIT(15)
57#define PMU_ASC1 BIT(17)
58#define PMU_PPE_QSB BIT(18)
59#define PMU_PPE_SLL01 BIT(19)
60#define PMU_PPE_TC BIT(21)
61#define PMU_PPE_EMA BIT(22)
62#define PMU_PPE_DPLUM BIT(23)
63#define PMU_PPE_DPLUS BIT(24)
64#define PMU_USB1_P BIT(26)
65#define PMU_USB1 BIT(27)
66#define PMU_SWITCH BIT(28)
67#define PMU_PPE_TOP BIT(29)
68#define PMU_GPHY BIT(30)
69#define PMU_PCIE_CLK BIT(31)
70
71#define PMU1_PCIE_PHY BIT(0)
72#define PMU1_PCIE_CTL BIT(1)
73#define PMU1_PCIE_PDI BIT(4)
74#define PMU1_PCIE_MSI BIT(5)
75
76#define pmu_w32(x, y) ltq_w32((x), pmu_membase + (y))
77#define pmu_r32(x) ltq_r32(pmu_membase + (x))
78
79static void __iomem *pmu_membase;
80void __iomem *ltq_cgu_membase;
81void __iomem *ltq_ebu_membase;
82
83/* legacy function kept alive to ease clkdev transition */
84void ltq_pmu_enable(unsigned int module)
85{
86 int err = 1000000;
87
88 pmu_w32(pmu_r32(PMU_PWDCR) & ~module, PMU_PWDCR);
89 do {} while (--err && (pmu_r32(PMU_PWDSR) & module));
90
91 if (!err)
92 panic("activating PMU module failed!");
93}
94EXPORT_SYMBOL(ltq_pmu_enable);
95
96/* legacy function kept alive to ease clkdev transition */
97void ltq_pmu_disable(unsigned int module)
98{
99 pmu_w32(pmu_r32(PMU_PWDCR) | module, PMU_PWDCR);
100}
101EXPORT_SYMBOL(ltq_pmu_disable);
102
103/* enable a hw clock */
104static int cgu_enable(struct clk *clk)
105{
106 ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) | clk->bits, CGU_IFCCR);
107 return 0;
108}
109
110/* disable a hw clock */
111static void cgu_disable(struct clk *clk)
112{
113 ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) & ~clk->bits, CGU_IFCCR);
114}
115
116/* enable a clock gate */
117static int pmu_enable(struct clk *clk)
118{
119 int retry = 1000000;
120
121 pmu_w32(pmu_r32(PWDCR(clk->module)) & ~clk->bits,
122 PWDCR(clk->module));
123 do {} while (--retry && (pmu_r32(PWDSR(clk->module)) & clk->bits));
124
125 if (!retry)
126 panic("activating PMU module failed!\n");
127
128 return 0;
129}
130
131/* disable a clock gate */
132static void pmu_disable(struct clk *clk)
133{
134 pmu_w32(pmu_r32(PWDCR(clk->module)) | clk->bits,
135 PWDCR(clk->module));
136}
137
138/* the pci enable helper */
139static int pci_enable(struct clk *clk)
140{
141 unsigned int ifccr = ltq_cgu_r32(CGU_IFCCR);
142 /* set bus clock speed */
143 if (of_machine_is_compatible("lantiq,ar9")) {
144 ifccr &= ~0x1f00000;
145 if (clk->rate == CLOCK_33M)
146 ifccr |= 0xe00000;
147 else
148 ifccr |= 0x700000; /* 62.5M */
149 } else {
150 ifccr &= ~0xf00000;
151 if (clk->rate == CLOCK_33M)
152 ifccr |= 0x800000;
153 else
154 ifccr |= 0x400000; /* 62.5M */
155 }
156 ltq_cgu_w32(ifccr, CGU_IFCCR);
157 pmu_enable(clk);
158 return 0;
159}
160
161/* enable the external clock as a source */
162static int pci_ext_enable(struct clk *clk)
163{
164 ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) & ~(1 << 16),
165 CGU_IFCCR);
166 ltq_cgu_w32((1 << 30), CGU_PCICR);
167 return 0;
168}
169
170/* disable the external clock as a source */
171static void pci_ext_disable(struct clk *clk)
172{
173 ltq_cgu_w32(ltq_cgu_r32(CGU_IFCCR) | (1 << 16),
174 CGU_IFCCR);
175 ltq_cgu_w32((1 << 31) | (1 << 30), CGU_PCICR);
176}
177
178/* enable a clockout source */
179static int clkout_enable(struct clk *clk)
180{
181 int i;
182
183 /* get the correct rate */
184 for (i = 0; i < 4; i++) {
185 if (clk->rates[i] == clk->rate) {
186 int shift = 14 - (2 * clk->module);
187 unsigned int ifccr = ltq_cgu_r32(CGU_IFCCR);
188
189 ifccr &= ~(3 << shift);
190 ifccr |= i << shift;
191 ltq_cgu_w32(ifccr, CGU_IFCCR);
192 return 0;
193 }
194 }
195 return -1;
196}
197
198/* manage the clock gates via PMU */
199static void clkdev_add_pmu(const char *dev, const char *con,
200 unsigned int module, unsigned int bits)
201{
202 struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
203
204 clk->cl.dev_id = dev;
205 clk->cl.con_id = con;
206 clk->cl.clk = clk;
207 clk->enable = pmu_enable;
208 clk->disable = pmu_disable;
209 clk->module = module;
210 clk->bits = bits;
211 clkdev_add(&clk->cl);
212}
213
214/* manage the clock generator */
215static void clkdev_add_cgu(const char *dev, const char *con,
216 unsigned int bits)
217{
218 struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
219
220 clk->cl.dev_id = dev;
221 clk->cl.con_id = con;
222 clk->cl.clk = clk;
223 clk->enable = cgu_enable;
224 clk->disable = cgu_disable;
225 clk->bits = bits;
226 clkdev_add(&clk->cl);
227}
228
229/* pci needs its own enable function as the setup is a bit more complex */
230static unsigned long valid_pci_rates[] = {CLOCK_33M, CLOCK_62_5M, 0};
231
232static void clkdev_add_pci(void)
233{
234 struct clk *clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
235 struct clk *clk_ext = kzalloc(sizeof(struct clk), GFP_KERNEL);
236
237 /* main pci clock */
238 clk->cl.dev_id = "17000000.pci";
239 clk->cl.con_id = NULL;
240 clk->cl.clk = clk;
241 clk->rate = CLOCK_33M;
242 clk->rates = valid_pci_rates;
243 clk->enable = pci_enable;
244 clk->disable = pmu_disable;
245 clk->module = 0;
246 clk->bits = PMU_PCI;
247 clkdev_add(&clk->cl);
248
249 /* use internal/external bus clock */
250 clk_ext->cl.dev_id = "17000000.pci";
251 clk_ext->cl.con_id = "external";
252 clk_ext->cl.clk = clk_ext;
253 clk_ext->enable = pci_ext_enable;
254 clk_ext->disable = pci_ext_disable;
255 clkdev_add(&clk_ext->cl);
256}
257
258/* xway socs can generate clocks on gpio pins */
259static unsigned long valid_clkout_rates[4][5] = {
260 {CLOCK_32_768K, CLOCK_1_536M, CLOCK_2_5M, CLOCK_12M, 0},
261 {CLOCK_40M, CLOCK_12M, CLOCK_24M, CLOCK_48M, 0},
262 {CLOCK_25M, CLOCK_40M, CLOCK_30M, CLOCK_60M, 0},
263 {CLOCK_12M, CLOCK_50M, CLOCK_32_768K, CLOCK_25M, 0},
264};
265
266static void clkdev_add_clkout(void)
267{
268 int i;
269
270 for (i = 0; i < 4; i++) {
271 struct clk *clk;
272 char *name;
273
274 name = kzalloc(sizeof("clkout0"), GFP_KERNEL);
275 sprintf(name, "clkout%d", i);
276
277 clk = kzalloc(sizeof(struct clk), GFP_KERNEL);
278 clk->cl.dev_id = "1f103000.cgu";
279 clk->cl.con_id = name;
280 clk->cl.clk = clk;
281 clk->rate = 0;
282 clk->rates = valid_clkout_rates[i];
283 clk->enable = clkout_enable;
284 clk->module = i;
285 clkdev_add(&clk->cl);
286 }
287}
288
289/* bring up all register ranges that we need for basic system control */
290void __init ltq_soc_init(void)
291{
292 struct resource res_pmu, res_cgu, res_ebu;
293 struct device_node *np_pmu =
294 of_find_compatible_node(NULL, NULL, "lantiq,pmu-xway");
295 struct device_node *np_cgu =
296 of_find_compatible_node(NULL, NULL, "lantiq,cgu-xway");
297 struct device_node *np_ebu =
298 of_find_compatible_node(NULL, NULL, "lantiq,ebu-xway");
299
300 /* check if all the core register ranges are available */
301 if (!np_pmu || !np_cgu || !np_ebu)
302 panic("Failed to load core nodess from devicetree");
303
304 if (of_address_to_resource(np_pmu, 0, &res_pmu) ||
305 of_address_to_resource(np_cgu, 0, &res_cgu) ||
306 of_address_to_resource(np_ebu, 0, &res_ebu))
307 panic("Failed to get core resources");
308
309 if ((request_mem_region(res_pmu.start, resource_size(&res_pmu),
310 res_pmu.name) < 0) ||
311 (request_mem_region(res_cgu.start, resource_size(&res_cgu),
312 res_cgu.name) < 0) ||
313 (request_mem_region(res_ebu.start, resource_size(&res_ebu),
314 res_ebu.name) < 0))
315 pr_err("Failed to request core reources");
316
317 pmu_membase = ioremap_nocache(res_pmu.start, resource_size(&res_pmu));
318 ltq_cgu_membase = ioremap_nocache(res_cgu.start,
319 resource_size(&res_cgu));
320 ltq_ebu_membase = ioremap_nocache(res_ebu.start,
321 resource_size(&res_ebu));
322 if (!pmu_membase || !ltq_cgu_membase || !ltq_ebu_membase)
323 panic("Failed to remap core resources");
324
325 /* make sure to unprotect the memory region where flash is located */
326 ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_BUSCON0) & ~EBU_WRDIS, LTQ_EBU_BUSCON0);
327
328 /* add our generic xway clocks */
329 clkdev_add_pmu("10000000.fpi", NULL, 0, PMU_FPI);
330 clkdev_add_pmu("1e100400.serial", NULL, 0, PMU_ASC0);
331 clkdev_add_pmu("1e100a00.gptu", NULL, 0, PMU_GPT);
332 clkdev_add_pmu("1e100bb0.stp", NULL, 0, PMU_STP);
333 clkdev_add_pmu("1e104100.dma", NULL, 0, PMU_DMA);
334 clkdev_add_pmu("1e100800.spi", NULL, 0, PMU_SPI);
335 clkdev_add_pmu("1e105300.ebu", NULL, 0, PMU_EBU);
336 clkdev_add_clkout();
337
338 /* add the soc dependent clocks */
339 if (!of_machine_is_compatible("lantiq,vr9"))
340 clkdev_add_pmu("1e180000.etop", NULL, 0, PMU_PPE);
341
342 if (!of_machine_is_compatible("lantiq,ase")) {
343 clkdev_add_pmu("1e100c00.serial", NULL, 0, PMU_ASC1);
344 clkdev_add_pci();
345 }
346
347 if (of_machine_is_compatible("lantiq,ase")) {
348 if (ltq_cgu_r32(CGU_SYS) & (1 << 5))
349 clkdev_add_static(CLOCK_266M, CLOCK_133M, CLOCK_133M);
350 else
351 clkdev_add_static(CLOCK_133M, CLOCK_133M, CLOCK_133M);
352 clkdev_add_cgu("1e180000.etop", "ephycgu", CGU_EPHY),
353 clkdev_add_pmu("1e180000.etop", "ephy", 0, PMU_EPHY);
354 } else if (of_machine_is_compatible("lantiq,vr9")) {
355 clkdev_add_static(ltq_vr9_cpu_hz(), ltq_vr9_fpi_hz(),
356 ltq_vr9_fpi_hz());
357 clkdev_add_pmu("1d900000.pcie", "phy", 1, PMU1_PCIE_PHY);
358 clkdev_add_pmu("1d900000.pcie", "bus", 0, PMU_PCIE_CLK);
359 clkdev_add_pmu("1d900000.pcie", "msi", 1, PMU1_PCIE_MSI);
360 clkdev_add_pmu("1d900000.pcie", "pdi", 1, PMU1_PCIE_PDI);
361 clkdev_add_pmu("1d900000.pcie", "ctl", 1, PMU1_PCIE_CTL);
362 clkdev_add_pmu("1d900000.pcie", "ahb", 0, PMU_AHBM | PMU_AHBS);
363 } else if (of_machine_is_compatible("lantiq,ar9")) {
364 clkdev_add_static(ltq_ar9_cpu_hz(), ltq_ar9_fpi_hz(),
365 ltq_ar9_fpi_hz());
366 clkdev_add_pmu("1e180000.etop", "switch", 0, PMU_SWITCH);
367 } else {
368 clkdev_add_static(ltq_danube_cpu_hz(), ltq_danube_fpi_hz(),
369 ltq_danube_fpi_hz());
370 }
371}
diff --git a/arch/mips/mm/c-octeon.c b/arch/mips/mm/c-octeon.c
index 47037ec5589b..44e69e7a4519 100644
--- a/arch/mips/mm/c-octeon.c
+++ b/arch/mips/mm/c-octeon.c
@@ -21,6 +21,7 @@
21#include <asm/page.h> 21#include <asm/page.h>
22#include <asm/pgtable.h> 22#include <asm/pgtable.h>
23#include <asm/r4kcache.h> 23#include <asm/r4kcache.h>
24#include <asm/traps.h>
24#include <asm/mmu_context.h> 25#include <asm/mmu_context.h>
25#include <asm/war.h> 26#include <asm/war.h>
26 27
@@ -248,6 +249,11 @@ static void __cpuinit probe_octeon(void)
248 } 249 }
249} 250}
250 251
252static void __cpuinit octeon_cache_error_setup(void)
253{
254 extern char except_vec2_octeon;
255 set_handler(0x100, &except_vec2_octeon, 0x80);
256}
251 257
252/** 258/**
253 * Setup the Octeon cache flush routines 259 * Setup the Octeon cache flush routines
@@ -255,12 +261,6 @@ static void __cpuinit probe_octeon(void)
255 */ 261 */
256void __cpuinit octeon_cache_init(void) 262void __cpuinit octeon_cache_init(void)
257{ 263{
258 extern unsigned long ebase;
259 extern char except_vec2_octeon;
260
261 memcpy((void *)(ebase + 0x100), &except_vec2_octeon, 0x80);
262 octeon_flush_cache_sigtramp(ebase + 0x100);
263
264 probe_octeon(); 264 probe_octeon();
265 265
266 shm_align_mask = PAGE_SIZE - 1; 266 shm_align_mask = PAGE_SIZE - 1;
@@ -280,6 +280,8 @@ void __cpuinit octeon_cache_init(void)
280 280
281 build_clear_page(); 281 build_clear_page();
282 build_copy_page(); 282 build_copy_page();
283
284 board_cache_error_setup = octeon_cache_error_setup;
283} 285}
284 286
285/** 287/**
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index bda8eb26ece7..5109be96d98d 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -32,7 +32,7 @@
32#include <asm/mmu_context.h> 32#include <asm/mmu_context.h>
33#include <asm/war.h> 33#include <asm/war.h>
34#include <asm/cacheflush.h> /* for run_uncached() */ 34#include <asm/cacheflush.h> /* for run_uncached() */
35 35#include <asm/traps.h>
36 36
37/* 37/*
38 * Special Variant of smp_call_function for use by cache functions: 38 * Special Variant of smp_call_function for use by cache functions:
@@ -1385,10 +1385,8 @@ static int __init setcoherentio(char *str)
1385__setup("coherentio", setcoherentio); 1385__setup("coherentio", setcoherentio);
1386#endif 1386#endif
1387 1387
1388void __cpuinit r4k_cache_init(void) 1388static void __cpuinit r4k_cache_error_setup(void)
1389{ 1389{
1390 extern void build_clear_page(void);
1391 extern void build_copy_page(void);
1392 extern char __weak except_vec2_generic; 1390 extern char __weak except_vec2_generic;
1393 extern char __weak except_vec2_sb1; 1391 extern char __weak except_vec2_sb1;
1394 struct cpuinfo_mips *c = &current_cpu_data; 1392 struct cpuinfo_mips *c = &current_cpu_data;
@@ -1403,6 +1401,13 @@ void __cpuinit r4k_cache_init(void)
1403 set_uncached_handler(0x100, &except_vec2_generic, 0x80); 1401 set_uncached_handler(0x100, &except_vec2_generic, 0x80);
1404 break; 1402 break;
1405 } 1403 }
1404}
1405
1406void __cpuinit r4k_cache_init(void)
1407{
1408 extern void build_clear_page(void);
1409 extern void build_copy_page(void);
1410 struct cpuinfo_mips *c = &current_cpu_data;
1406 1411
1407 probe_pcache(); 1412 probe_pcache();
1408 setup_scache(); 1413 setup_scache();
@@ -1465,4 +1470,5 @@ void __cpuinit r4k_cache_init(void)
1465 local_r4k___flush_cache_all(NULL); 1470 local_r4k___flush_cache_all(NULL);
1466#endif 1471#endif
1467 coherency_setup(); 1472 coherency_setup();
1473 board_cache_error_setup = r4k_cache_error_setup;
1468} 1474}
diff --git a/arch/mips/oprofile/Makefile b/arch/mips/oprofile/Makefile
index 29f2f13eb31c..1208c280f77d 100644
--- a/arch/mips/oprofile/Makefile
+++ b/arch/mips/oprofile/Makefile
@@ -1,5 +1,3 @@
1ccflags-y := -Werror
2
3obj-$(CONFIG_OPROFILE) += oprofile.o 1obj-$(CONFIG_OPROFILE) += oprofile.o
4 2
5DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \ 3DRIVER_OBJS = $(addprefix ../../../drivers/oprofile/, \
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 54759f1669d3..baba3bcaa3c2 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -298,6 +298,11 @@ static void reset_counters(void *arg)
298 } 298 }
299} 299}
300 300
301static irqreturn_t mipsxx_perfcount_int(int irq, void *dev_id)
302{
303 return mipsxx_perfcount_handler();
304}
305
301static int __init mipsxx_init(void) 306static int __init mipsxx_init(void)
302{ 307{
303 int counters; 308 int counters;
@@ -374,6 +379,10 @@ static int __init mipsxx_init(void)
374 save_perf_irq = perf_irq; 379 save_perf_irq = perf_irq;
375 perf_irq = mipsxx_perfcount_handler; 380 perf_irq = mipsxx_perfcount_handler;
376 381
382 if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq))
383 return request_irq(cp0_perfcount_irq, mipsxx_perfcount_int,
384 0, "Perfcounter", save_perf_irq);
385
377 return 0; 386 return 0;
378} 387}
379 388
@@ -381,6 +390,9 @@ static void mipsxx_exit(void)
381{ 390{
382 int counters = op_model_mipsxx_ops.num_counters; 391 int counters = op_model_mipsxx_ops.num_counters;
383 392
393 if ((cp0_perfcount_irq >= 0) && (cp0_compare_irq != cp0_perfcount_irq))
394 free_irq(cp0_perfcount_irq, save_perf_irq);
395
384 counters = counters_per_cpu_to_total(counters); 396 counters = counters_per_cpu_to_total(counters);
385 on_each_cpu(reset_counters, (void *)(long)counters, 1); 397 on_each_cpu(reset_counters, (void *)(long)counters, 1);
386 398
diff --git a/arch/mips/pci/Makefile b/arch/mips/pci/Makefile
index c3ac4b086eb2..c703f43a9914 100644
--- a/arch/mips/pci/Makefile
+++ b/arch/mips/pci/Makefile
@@ -19,7 +19,8 @@ obj-$(CONFIG_BCM47XX) += pci-bcm47xx.o
19obj-$(CONFIG_BCM63XX) += pci-bcm63xx.o fixup-bcm63xx.o \ 19obj-$(CONFIG_BCM63XX) += pci-bcm63xx.o fixup-bcm63xx.o \
20 ops-bcm63xx.o 20 ops-bcm63xx.o
21obj-$(CONFIG_MIPS_ALCHEMY) += pci-alchemy.o 21obj-$(CONFIG_MIPS_ALCHEMY) += pci-alchemy.o
22obj-$(CONFIG_SOC_AR724X) += pci-ath724x.o 22obj-$(CONFIG_SOC_AR71XX) += pci-ar71xx.o
23obj-$(CONFIG_PCI_AR724X) += pci-ar724x.o
23 24
24# 25#
25# These are still pretty much in the old state, watch, go blind. 26# These are still pretty much in the old state, watch, go blind.
@@ -41,7 +42,8 @@ obj-$(CONFIG_SIBYTE_SB1250) += fixup-sb1250.o pci-sb1250.o
41obj-$(CONFIG_SIBYTE_BCM112X) += fixup-sb1250.o pci-sb1250.o 42obj-$(CONFIG_SIBYTE_BCM112X) += fixup-sb1250.o pci-sb1250.o
42obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o 43obj-$(CONFIG_SIBYTE_BCM1x80) += pci-bcm1480.o pci-bcm1480ht.o
43obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o 44obj-$(CONFIG_SNI_RM) += fixup-sni.o ops-sni.o
44obj-$(CONFIG_SOC_XWAY) += pci-lantiq.o ops-lantiq.o 45obj-$(CONFIG_LANTIQ) += fixup-lantiq.o
46obj-$(CONFIG_PCI_LANTIQ) += pci-lantiq.o ops-lantiq.o
45obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o 47obj-$(CONFIG_TANBAC_TB0219) += fixup-tb0219.o
46obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o 48obj-$(CONFIG_TANBAC_TB0226) += fixup-tb0226.o
47obj-$(CONFIG_TANBAC_TB0287) += fixup-tb0287.o 49obj-$(CONFIG_TANBAC_TB0287) += fixup-tb0287.o
diff --git a/arch/mips/pci/fixup-lantiq.c b/arch/mips/pci/fixup-lantiq.c
new file mode 100644
index 000000000000..6c829df28dc7
--- /dev/null
+++ b/arch/mips/pci/fixup-lantiq.c
@@ -0,0 +1,40 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/of_irq.h>
10#include <linux/of_pci.h>
11
12int (*ltq_pci_plat_arch_init)(struct pci_dev *dev) = NULL;
13int (*ltq_pci_plat_dev_init)(struct pci_dev *dev) = NULL;
14
15int pcibios_plat_dev_init(struct pci_dev *dev)
16{
17 if (ltq_pci_plat_arch_init)
18 return ltq_pci_plat_arch_init(dev);
19
20 if (ltq_pci_plat_dev_init)
21 return ltq_pci_plat_dev_init(dev);
22
23 return 0;
24}
25
26int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
27{
28 struct of_irq dev_irq;
29 int irq;
30
31 if (of_irq_map_pci(dev, &dev_irq)) {
32 dev_err(&dev->dev, "trying to map irq for unknown slot:%d pin:%d\n",
33 slot, pin);
34 return 0;
35 }
36 irq = irq_create_of_mapping(dev_irq.controller, dev_irq.specifier,
37 dev_irq.size);
38 dev_info(&dev->dev, "SLOT:%d PIN:%d IRQ:%d\n", slot, pin, irq);
39 return irq;
40}
diff --git a/arch/mips/pci/ops-loongson2.c b/arch/mips/pci/ops-loongson2.c
index d657ee0bc131..afd221122d22 100644
--- a/arch/mips/pci/ops-loongson2.c
+++ b/arch/mips/pci/ops-loongson2.c
@@ -15,6 +15,7 @@
15#include <linux/pci.h> 15#include <linux/pci.h>
16#include <linux/kernel.h> 16#include <linux/kernel.h>
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/export.h>
18 19
19#include <loongson.h> 20#include <loongson.h>
20 21
diff --git a/arch/mips/pci/pci-ar71xx.c b/arch/mips/pci/pci-ar71xx.c
new file mode 100644
index 000000000000..1552522b8718
--- /dev/null
+++ b/arch/mips/pci/pci-ar71xx.c
@@ -0,0 +1,375 @@
1/*
2 * Atheros AR71xx PCI host controller driver
3 *
4 * Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
5 * Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
6 *
7 * Parts of this file are based on Atheros' 2.6.15 BSP
8 *
9 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License version 2 as published
11 * by the Free Software Foundation.
12 */
13
14#include <linux/resource.h>
15#include <linux/types.h>
16#include <linux/delay.h>
17#include <linux/bitops.h>
18#include <linux/pci.h>
19#include <linux/pci_regs.h>
20#include <linux/interrupt.h>
21
22#include <asm/mach-ath79/ar71xx_regs.h>
23#include <asm/mach-ath79/ath79.h>
24#include <asm/mach-ath79/pci.h>
25
26#define AR71XX_PCI_MEM_BASE 0x10000000
27#define AR71XX_PCI_MEM_SIZE 0x08000000
28
29#define AR71XX_PCI_WIN0_OFFS 0x10000000
30#define AR71XX_PCI_WIN1_OFFS 0x11000000
31#define AR71XX_PCI_WIN2_OFFS 0x12000000
32#define AR71XX_PCI_WIN3_OFFS 0x13000000
33#define AR71XX_PCI_WIN4_OFFS 0x14000000
34#define AR71XX_PCI_WIN5_OFFS 0x15000000
35#define AR71XX_PCI_WIN6_OFFS 0x16000000
36#define AR71XX_PCI_WIN7_OFFS 0x07000000
37
38#define AR71XX_PCI_CFG_BASE \
39 (AR71XX_PCI_MEM_BASE + AR71XX_PCI_WIN7_OFFS + 0x10000)
40#define AR71XX_PCI_CFG_SIZE 0x100
41
42#define AR71XX_PCI_REG_CRP_AD_CBE 0x00
43#define AR71XX_PCI_REG_CRP_WRDATA 0x04
44#define AR71XX_PCI_REG_CRP_RDDATA 0x08
45#define AR71XX_PCI_REG_CFG_AD 0x0c
46#define AR71XX_PCI_REG_CFG_CBE 0x10
47#define AR71XX_PCI_REG_CFG_WRDATA 0x14
48#define AR71XX_PCI_REG_CFG_RDDATA 0x18
49#define AR71XX_PCI_REG_PCI_ERR 0x1c
50#define AR71XX_PCI_REG_PCI_ERR_ADDR 0x20
51#define AR71XX_PCI_REG_AHB_ERR 0x24
52#define AR71XX_PCI_REG_AHB_ERR_ADDR 0x28
53
54#define AR71XX_PCI_CRP_CMD_WRITE 0x00010000
55#define AR71XX_PCI_CRP_CMD_READ 0x00000000
56#define AR71XX_PCI_CFG_CMD_READ 0x0000000a
57#define AR71XX_PCI_CFG_CMD_WRITE 0x0000000b
58
59#define AR71XX_PCI_INT_CORE BIT(4)
60#define AR71XX_PCI_INT_DEV2 BIT(2)
61#define AR71XX_PCI_INT_DEV1 BIT(1)
62#define AR71XX_PCI_INT_DEV0 BIT(0)
63
64#define AR71XX_PCI_IRQ_COUNT 5
65
66static DEFINE_SPINLOCK(ar71xx_pci_lock);
67static void __iomem *ar71xx_pcicfg_base;
68
69/* Byte lane enable bits */
70static const u8 ar71xx_pci_ble_table[4][4] = {
71 {0x0, 0xf, 0xf, 0xf},
72 {0xe, 0xd, 0xb, 0x7},
73 {0xc, 0xf, 0x3, 0xf},
74 {0xf, 0xf, 0xf, 0xf},
75};
76
77static const u32 ar71xx_pci_read_mask[8] = {
78 0, 0xff, 0xffff, 0, 0xffffffff, 0, 0, 0
79};
80
81static inline u32 ar71xx_pci_get_ble(int where, int size, int local)
82{
83 u32 t;
84
85 t = ar71xx_pci_ble_table[size & 3][where & 3];
86 BUG_ON(t == 0xf);
87 t <<= (local) ? 20 : 4;
88
89 return t;
90}
91
92static inline u32 ar71xx_pci_bus_addr(struct pci_bus *bus, unsigned int devfn,
93 int where)
94{
95 u32 ret;
96
97 if (!bus->number) {
98 /* type 0 */
99 ret = (1 << PCI_SLOT(devfn)) | (PCI_FUNC(devfn) << 8) |
100 (where & ~3);
101 } else {
102 /* type 1 */
103 ret = (bus->number << 16) | (PCI_SLOT(devfn) << 11) |
104 (PCI_FUNC(devfn) << 8) | (where & ~3) | 1;
105 }
106
107 return ret;
108}
109
110static int ar71xx_pci_check_error(int quiet)
111{
112 void __iomem *base = ar71xx_pcicfg_base;
113 u32 pci_err;
114 u32 ahb_err;
115
116 pci_err = __raw_readl(base + AR71XX_PCI_REG_PCI_ERR) & 3;
117 if (pci_err) {
118 if (!quiet) {
119 u32 addr;
120
121 addr = __raw_readl(base + AR71XX_PCI_REG_PCI_ERR_ADDR);
122 pr_crit("ar71xx: %s bus error %d at addr 0x%x\n",
123 "PCI", pci_err, addr);
124 }
125
126 /* clear PCI error status */
127 __raw_writel(pci_err, base + AR71XX_PCI_REG_PCI_ERR);
128 }
129
130 ahb_err = __raw_readl(base + AR71XX_PCI_REG_AHB_ERR) & 1;
131 if (ahb_err) {
132 if (!quiet) {
133 u32 addr;
134
135 addr = __raw_readl(base + AR71XX_PCI_REG_AHB_ERR_ADDR);
136 pr_crit("ar71xx: %s bus error %d at addr 0x%x\n",
137 "AHB", ahb_err, addr);
138 }
139
140 /* clear AHB error status */
141 __raw_writel(ahb_err, base + AR71XX_PCI_REG_AHB_ERR);
142 }
143
144 return !!(ahb_err | pci_err);
145}
146
147static inline void ar71xx_pci_local_write(int where, int size, u32 value)
148{
149 void __iomem *base = ar71xx_pcicfg_base;
150 u32 ad_cbe;
151
152 value = value << (8 * (where & 3));
153
154 ad_cbe = AR71XX_PCI_CRP_CMD_WRITE | (where & ~3);
155 ad_cbe |= ar71xx_pci_get_ble(where, size, 1);
156
157 __raw_writel(ad_cbe, base + AR71XX_PCI_REG_CRP_AD_CBE);
158 __raw_writel(value, base + AR71XX_PCI_REG_CRP_WRDATA);
159}
160
161static inline int ar71xx_pci_set_cfgaddr(struct pci_bus *bus,
162 unsigned int devfn,
163 int where, int size, u32 cmd)
164{
165 void __iomem *base = ar71xx_pcicfg_base;
166 u32 addr;
167
168 addr = ar71xx_pci_bus_addr(bus, devfn, where);
169
170 __raw_writel(addr, base + AR71XX_PCI_REG_CFG_AD);
171 __raw_writel(cmd | ar71xx_pci_get_ble(where, size, 0),
172 base + AR71XX_PCI_REG_CFG_CBE);
173
174 return ar71xx_pci_check_error(1);
175}
176
177static int ar71xx_pci_read_config(struct pci_bus *bus, unsigned int devfn,
178 int where, int size, u32 *value)
179{
180 void __iomem *base = ar71xx_pcicfg_base;
181 unsigned long flags;
182 u32 data;
183 int err;
184 int ret;
185
186 ret = PCIBIOS_SUCCESSFUL;
187 data = ~0;
188
189 spin_lock_irqsave(&ar71xx_pci_lock, flags);
190
191 err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
192 AR71XX_PCI_CFG_CMD_READ);
193 if (err)
194 ret = PCIBIOS_DEVICE_NOT_FOUND;
195 else
196 data = __raw_readl(base + AR71XX_PCI_REG_CFG_RDDATA);
197
198 spin_unlock_irqrestore(&ar71xx_pci_lock, flags);
199
200 *value = (data >> (8 * (where & 3))) & ar71xx_pci_read_mask[size & 7];
201
202 return ret;
203}
204
205static int ar71xx_pci_write_config(struct pci_bus *bus, unsigned int devfn,
206 int where, int size, u32 value)
207{
208 void __iomem *base = ar71xx_pcicfg_base;
209 unsigned long flags;
210 int err;
211 int ret;
212
213 value = value << (8 * (where & 3));
214 ret = PCIBIOS_SUCCESSFUL;
215
216 spin_lock_irqsave(&ar71xx_pci_lock, flags);
217
218 err = ar71xx_pci_set_cfgaddr(bus, devfn, where, size,
219 AR71XX_PCI_CFG_CMD_WRITE);
220 if (err)
221 ret = PCIBIOS_DEVICE_NOT_FOUND;
222 else
223 __raw_writel(value, base + AR71XX_PCI_REG_CFG_WRDATA);
224
225 spin_unlock_irqrestore(&ar71xx_pci_lock, flags);
226
227 return ret;
228}
229
230static struct pci_ops ar71xx_pci_ops = {
231 .read = ar71xx_pci_read_config,
232 .write = ar71xx_pci_write_config,
233};
234
235static struct resource ar71xx_pci_io_resource = {
236 .name = "PCI IO space",
237 .start = 0,
238 .end = 0,
239 .flags = IORESOURCE_IO,
240};
241
242static struct resource ar71xx_pci_mem_resource = {
243 .name = "PCI memory space",
244 .start = AR71XX_PCI_MEM_BASE,
245 .end = AR71XX_PCI_MEM_BASE + AR71XX_PCI_MEM_SIZE - 1,
246 .flags = IORESOURCE_MEM
247};
248
249static struct pci_controller ar71xx_pci_controller = {
250 .pci_ops = &ar71xx_pci_ops,
251 .mem_resource = &ar71xx_pci_mem_resource,
252 .io_resource = &ar71xx_pci_io_resource,
253};
254
255static void ar71xx_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
256{
257 void __iomem *base = ath79_reset_base;
258 u32 pending;
259
260 pending = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_STATUS) &
261 __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
262
263 if (pending & AR71XX_PCI_INT_DEV0)
264 generic_handle_irq(ATH79_PCI_IRQ(0));
265
266 else if (pending & AR71XX_PCI_INT_DEV1)
267 generic_handle_irq(ATH79_PCI_IRQ(1));
268
269 else if (pending & AR71XX_PCI_INT_DEV2)
270 generic_handle_irq(ATH79_PCI_IRQ(2));
271
272 else if (pending & AR71XX_PCI_INT_CORE)
273 generic_handle_irq(ATH79_PCI_IRQ(4));
274
275 else
276 spurious_interrupt();
277}
278
279static void ar71xx_pci_irq_unmask(struct irq_data *d)
280{
281 unsigned int irq = d->irq - ATH79_PCI_IRQ_BASE;
282 void __iomem *base = ath79_reset_base;
283 u32 t;
284
285 t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
286 __raw_writel(t | (1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
287
288 /* flush write */
289 __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
290}
291
292static void ar71xx_pci_irq_mask(struct irq_data *d)
293{
294 unsigned int irq = d->irq - ATH79_PCI_IRQ_BASE;
295 void __iomem *base = ath79_reset_base;
296 u32 t;
297
298 t = __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
299 __raw_writel(t & ~(1 << irq), base + AR71XX_RESET_REG_PCI_INT_ENABLE);
300
301 /* flush write */
302 __raw_readl(base + AR71XX_RESET_REG_PCI_INT_ENABLE);
303}
304
305static struct irq_chip ar71xx_pci_irq_chip = {
306 .name = "AR71XX PCI",
307 .irq_mask = ar71xx_pci_irq_mask,
308 .irq_unmask = ar71xx_pci_irq_unmask,
309 .irq_mask_ack = ar71xx_pci_irq_mask,
310};
311
312static __init void ar71xx_pci_irq_init(void)
313{
314 void __iomem *base = ath79_reset_base;
315 int i;
316
317 __raw_writel(0, base + AR71XX_RESET_REG_PCI_INT_ENABLE);
318 __raw_writel(0, base + AR71XX_RESET_REG_PCI_INT_STATUS);
319
320 BUILD_BUG_ON(ATH79_PCI_IRQ_COUNT < AR71XX_PCI_IRQ_COUNT);
321
322 for (i = ATH79_PCI_IRQ_BASE;
323 i < ATH79_PCI_IRQ_BASE + AR71XX_PCI_IRQ_COUNT; i++)
324 irq_set_chip_and_handler(i, &ar71xx_pci_irq_chip,
325 handle_level_irq);
326
327 irq_set_chained_handler(ATH79_CPU_IRQ_IP2, ar71xx_pci_irq_handler);
328}
329
330static __init void ar71xx_pci_reset(void)
331{
332 void __iomem *ddr_base = ath79_ddr_base;
333
334 ath79_device_reset_set(AR71XX_RESET_PCI_BUS | AR71XX_RESET_PCI_CORE);
335 mdelay(100);
336
337 ath79_device_reset_clear(AR71XX_RESET_PCI_BUS | AR71XX_RESET_PCI_CORE);
338 mdelay(100);
339
340 __raw_writel(AR71XX_PCI_WIN0_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN0);
341 __raw_writel(AR71XX_PCI_WIN1_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN1);
342 __raw_writel(AR71XX_PCI_WIN2_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN2);
343 __raw_writel(AR71XX_PCI_WIN3_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN3);
344 __raw_writel(AR71XX_PCI_WIN4_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN4);
345 __raw_writel(AR71XX_PCI_WIN5_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN5);
346 __raw_writel(AR71XX_PCI_WIN6_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN6);
347 __raw_writel(AR71XX_PCI_WIN7_OFFS, ddr_base + AR71XX_DDR_REG_PCI_WIN7);
348
349 mdelay(100);
350}
351
352__init int ar71xx_pcibios_init(void)
353{
354 u32 t;
355
356 ar71xx_pcicfg_base = ioremap(AR71XX_PCI_CFG_BASE, AR71XX_PCI_CFG_SIZE);
357 if (ar71xx_pcicfg_base == NULL)
358 return -ENOMEM;
359
360 ar71xx_pci_reset();
361
362 /* setup COMMAND register */
363 t = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE
364 | PCI_COMMAND_PARITY | PCI_COMMAND_SERR | PCI_COMMAND_FAST_BACK;
365 ar71xx_pci_local_write(PCI_COMMAND, 4, t);
366
367 /* clear bus errors */
368 ar71xx_pci_check_error(1);
369
370 ar71xx_pci_irq_init();
371
372 register_pci_controller(&ar71xx_pci_controller);
373
374 return 0;
375}
diff --git a/arch/mips/pci/pci-ar724x.c b/arch/mips/pci/pci-ar724x.c
new file mode 100644
index 000000000000..414a7459858d
--- /dev/null
+++ b/arch/mips/pci/pci-ar724x.c
@@ -0,0 +1,292 @@
1/*
2 * Atheros AR724X PCI host controller driver
3 *
4 * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
5 * Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
6 *
7 * This program is free software; you can redistribute it and/or modify it
8 * under the terms of the GNU General Public License version 2 as published
9 * by the Free Software Foundation.
10 */
11
12#include <linux/irq.h>
13#include <linux/pci.h>
14#include <asm/mach-ath79/ath79.h>
15#include <asm/mach-ath79/ar71xx_regs.h>
16#include <asm/mach-ath79/pci.h>
17
18#define AR724X_PCI_CFG_BASE 0x14000000
19#define AR724X_PCI_CFG_SIZE 0x1000
20#define AR724X_PCI_CTRL_BASE (AR71XX_APB_BASE + 0x000f0000)
21#define AR724X_PCI_CTRL_SIZE 0x100
22
23#define AR724X_PCI_MEM_BASE 0x10000000
24#define AR724X_PCI_MEM_SIZE 0x08000000
25
26#define AR724X_PCI_REG_INT_STATUS 0x4c
27#define AR724X_PCI_REG_INT_MASK 0x50
28
29#define AR724X_PCI_INT_DEV0 BIT(14)
30
31#define AR724X_PCI_IRQ_COUNT 1
32
33#define AR7240_BAR0_WAR_VALUE 0xffff
34
35static DEFINE_SPINLOCK(ar724x_pci_lock);
36static void __iomem *ar724x_pci_devcfg_base;
37static void __iomem *ar724x_pci_ctrl_base;
38
39static u32 ar724x_pci_bar0_value;
40static bool ar724x_pci_bar0_is_cached;
41
42static int ar724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
43 int size, uint32_t *value)
44{
45 unsigned long flags;
46 void __iomem *base;
47 u32 data;
48
49 if (devfn)
50 return PCIBIOS_DEVICE_NOT_FOUND;
51
52 base = ar724x_pci_devcfg_base;
53
54 spin_lock_irqsave(&ar724x_pci_lock, flags);
55 data = __raw_readl(base + (where & ~3));
56
57 switch (size) {
58 case 1:
59 if (where & 1)
60 data >>= 8;
61 if (where & 2)
62 data >>= 16;
63 data &= 0xff;
64 break;
65 case 2:
66 if (where & 2)
67 data >>= 16;
68 data &= 0xffff;
69 break;
70 case 4:
71 break;
72 default:
73 spin_unlock_irqrestore(&ar724x_pci_lock, flags);
74
75 return PCIBIOS_BAD_REGISTER_NUMBER;
76 }
77
78 spin_unlock_irqrestore(&ar724x_pci_lock, flags);
79
80 if (where == PCI_BASE_ADDRESS_0 && size == 4 &&
81 ar724x_pci_bar0_is_cached) {
82 /* use the cached value */
83 *value = ar724x_pci_bar0_value;
84 } else {
85 *value = data;
86 }
87
88 return PCIBIOS_SUCCESSFUL;
89}
90
91static int ar724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
92 int size, uint32_t value)
93{
94 unsigned long flags;
95 void __iomem *base;
96 u32 data;
97 int s;
98
99 if (devfn)
100 return PCIBIOS_DEVICE_NOT_FOUND;
101
102 if (soc_is_ar7240() && where == PCI_BASE_ADDRESS_0 && size == 4) {
103 if (value != 0xffffffff) {
104 /*
105 * WAR for a hw issue. If the BAR0 register of the
106 * device is set to the proper base address, the
107 * memory space of the device is not accessible.
108 *
109 * Cache the intended value so it can be read back,
110 * and write a SoC specific constant value to the
111 * BAR0 register in order to make the device memory
112 * accessible.
113 */
114 ar724x_pci_bar0_is_cached = true;
115 ar724x_pci_bar0_value = value;
116
117 value = AR7240_BAR0_WAR_VALUE;
118 } else {
119 ar724x_pci_bar0_is_cached = false;
120 }
121 }
122
123 base = ar724x_pci_devcfg_base;
124
125 spin_lock_irqsave(&ar724x_pci_lock, flags);
126 data = __raw_readl(base + (where & ~3));
127
128 switch (size) {
129 case 1:
130 s = ((where & 3) * 8);
131 data &= ~(0xff << s);
132 data |= ((value & 0xff) << s);
133 break;
134 case 2:
135 s = ((where & 2) * 8);
136 data &= ~(0xffff << s);
137 data |= ((value & 0xffff) << s);
138 break;
139 case 4:
140 data = value;
141 break;
142 default:
143 spin_unlock_irqrestore(&ar724x_pci_lock, flags);
144
145 return PCIBIOS_BAD_REGISTER_NUMBER;
146 }
147
148 __raw_writel(data, base + (where & ~3));
149 /* flush write */
150 __raw_readl(base + (where & ~3));
151 spin_unlock_irqrestore(&ar724x_pci_lock, flags);
152
153 return PCIBIOS_SUCCESSFUL;
154}
155
156static struct pci_ops ar724x_pci_ops = {
157 .read = ar724x_pci_read,
158 .write = ar724x_pci_write,
159};
160
161static struct resource ar724x_io_resource = {
162 .name = "PCI IO space",
163 .start = 0,
164 .end = 0,
165 .flags = IORESOURCE_IO,
166};
167
168static struct resource ar724x_mem_resource = {
169 .name = "PCI memory space",
170 .start = AR724X_PCI_MEM_BASE,
171 .end = AR724X_PCI_MEM_BASE + AR724X_PCI_MEM_SIZE - 1,
172 .flags = IORESOURCE_MEM,
173};
174
175static struct pci_controller ar724x_pci_controller = {
176 .pci_ops = &ar724x_pci_ops,
177 .io_resource = &ar724x_io_resource,
178 .mem_resource = &ar724x_mem_resource,
179};
180
181static void ar724x_pci_irq_handler(unsigned int irq, struct irq_desc *desc)
182{
183 void __iomem *base;
184 u32 pending;
185
186 base = ar724x_pci_ctrl_base;
187
188 pending = __raw_readl(base + AR724X_PCI_REG_INT_STATUS) &
189 __raw_readl(base + AR724X_PCI_REG_INT_MASK);
190
191 if (pending & AR724X_PCI_INT_DEV0)
192 generic_handle_irq(ATH79_PCI_IRQ(0));
193
194 else
195 spurious_interrupt();
196}
197
198static void ar724x_pci_irq_unmask(struct irq_data *d)
199{
200 void __iomem *base;
201 u32 t;
202
203 base = ar724x_pci_ctrl_base;
204
205 switch (d->irq) {
206 case ATH79_PCI_IRQ(0):
207 t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
208 __raw_writel(t | AR724X_PCI_INT_DEV0,
209 base + AR724X_PCI_REG_INT_MASK);
210 /* flush write */
211 __raw_readl(base + AR724X_PCI_REG_INT_MASK);
212 }
213}
214
215static void ar724x_pci_irq_mask(struct irq_data *d)
216{
217 void __iomem *base;
218 u32 t;
219
220 base = ar724x_pci_ctrl_base;
221
222 switch (d->irq) {
223 case ATH79_PCI_IRQ(0):
224 t = __raw_readl(base + AR724X_PCI_REG_INT_MASK);
225 __raw_writel(t & ~AR724X_PCI_INT_DEV0,
226 base + AR724X_PCI_REG_INT_MASK);
227
228 /* flush write */
229 __raw_readl(base + AR724X_PCI_REG_INT_MASK);
230
231 t = __raw_readl(base + AR724X_PCI_REG_INT_STATUS);
232 __raw_writel(t | AR724X_PCI_INT_DEV0,
233 base + AR724X_PCI_REG_INT_STATUS);
234
235 /* flush write */
236 __raw_readl(base + AR724X_PCI_REG_INT_STATUS);
237 }
238}
239
240static struct irq_chip ar724x_pci_irq_chip = {
241 .name = "AR724X PCI ",
242 .irq_mask = ar724x_pci_irq_mask,
243 .irq_unmask = ar724x_pci_irq_unmask,
244 .irq_mask_ack = ar724x_pci_irq_mask,
245};
246
247static void __init ar724x_pci_irq_init(int irq)
248{
249 void __iomem *base;
250 int i;
251
252 base = ar724x_pci_ctrl_base;
253
254 __raw_writel(0, base + AR724X_PCI_REG_INT_MASK);
255 __raw_writel(0, base + AR724X_PCI_REG_INT_STATUS);
256
257 BUILD_BUG_ON(ATH79_PCI_IRQ_COUNT < AR724X_PCI_IRQ_COUNT);
258
259 for (i = ATH79_PCI_IRQ_BASE;
260 i < ATH79_PCI_IRQ_BASE + AR724X_PCI_IRQ_COUNT; i++)
261 irq_set_chip_and_handler(i, &ar724x_pci_irq_chip,
262 handle_level_irq);
263
264 irq_set_chained_handler(irq, ar724x_pci_irq_handler);
265}
266
267int __init ar724x_pcibios_init(int irq)
268{
269 int ret;
270
271 ret = -ENOMEM;
272
273 ar724x_pci_devcfg_base = ioremap(AR724X_PCI_CFG_BASE,
274 AR724X_PCI_CFG_SIZE);
275 if (ar724x_pci_devcfg_base == NULL)
276 goto err;
277
278 ar724x_pci_ctrl_base = ioremap(AR724X_PCI_CTRL_BASE,
279 AR724X_PCI_CTRL_SIZE);
280 if (ar724x_pci_ctrl_base == NULL)
281 goto err_unmap_devcfg;
282
283 ar724x_pci_irq_init(irq);
284 register_pci_controller(&ar724x_pci_controller);
285
286 return PCIBIOS_SUCCESSFUL;
287
288err_unmap_devcfg:
289 iounmap(ar724x_pci_devcfg_base);
290err:
291 return ret;
292}
diff --git a/arch/mips/pci/pci-ath724x.c b/arch/mips/pci/pci-ath724x.c
deleted file mode 100644
index a4dd24a4130b..000000000000
--- a/arch/mips/pci/pci-ath724x.c
+++ /dev/null
@@ -1,174 +0,0 @@
1/*
2 * Atheros 724x PCI support
3 *
4 * Copyright (C) 2011 René Bolldorf <xsecute@googlemail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation.
9 */
10
11#include <linux/pci.h>
12#include <asm/mach-ath79/pci-ath724x.h>
13
14#define reg_read(_phys) (*(unsigned int *) KSEG1ADDR(_phys))
15#define reg_write(_phys, _val) ((*(unsigned int *) KSEG1ADDR(_phys)) = (_val))
16
17#define ATH724X_PCI_DEV_BASE 0x14000000
18#define ATH724X_PCI_MEM_BASE 0x10000000
19#define ATH724X_PCI_MEM_SIZE 0x08000000
20
21static DEFINE_SPINLOCK(ath724x_pci_lock);
22static struct ath724x_pci_data *pci_data;
23static int pci_data_size;
24
25static int ath724x_pci_read(struct pci_bus *bus, unsigned int devfn, int where,
26 int size, uint32_t *value)
27{
28 unsigned long flags, addr, tval, mask;
29
30 if (devfn)
31 return PCIBIOS_DEVICE_NOT_FOUND;
32
33 if (where & (size - 1))
34 return PCIBIOS_BAD_REGISTER_NUMBER;
35
36 spin_lock_irqsave(&ath724x_pci_lock, flags);
37
38 switch (size) {
39 case 1:
40 addr = where & ~3;
41 mask = 0xff000000 >> ((where % 4) * 8);
42 tval = reg_read(ATH724X_PCI_DEV_BASE + addr);
43 tval = tval & ~mask;
44 *value = (tval >> ((4 - (where % 4))*8));
45 break;
46 case 2:
47 addr = where & ~3;
48 mask = 0xffff0000 >> ((where % 4)*8);
49 tval = reg_read(ATH724X_PCI_DEV_BASE + addr);
50 tval = tval & ~mask;
51 *value = (tval >> ((4 - (where % 4))*8));
52 break;
53 case 4:
54 *value = reg_read(ATH724X_PCI_DEV_BASE + where);
55 break;
56 default:
57 spin_unlock_irqrestore(&ath724x_pci_lock, flags);
58
59 return PCIBIOS_BAD_REGISTER_NUMBER;
60 }
61
62 spin_unlock_irqrestore(&ath724x_pci_lock, flags);
63
64 return PCIBIOS_SUCCESSFUL;
65}
66
67static int ath724x_pci_write(struct pci_bus *bus, unsigned int devfn, int where,
68 int size, uint32_t value)
69{
70 unsigned long flags, tval, addr, mask;
71
72 if (devfn)
73 return PCIBIOS_DEVICE_NOT_FOUND;
74
75 if (where & (size - 1))
76 return PCIBIOS_BAD_REGISTER_NUMBER;
77
78 spin_lock_irqsave(&ath724x_pci_lock, flags);
79
80 switch (size) {
81 case 1:
82 addr = (ATH724X_PCI_DEV_BASE + where) & ~3;
83 mask = 0xff000000 >> ((where % 4)*8);
84 tval = reg_read(addr);
85 tval = tval & ~mask;
86 tval |= (value << ((4 - (where % 4))*8)) & mask;
87 reg_write(addr, tval);
88 break;
89 case 2:
90 addr = (ATH724X_PCI_DEV_BASE + where) & ~3;
91 mask = 0xffff0000 >> ((where % 4)*8);
92 tval = reg_read(addr);
93 tval = tval & ~mask;
94 tval |= (value << ((4 - (where % 4))*8)) & mask;
95 reg_write(addr, tval);
96 break;
97 case 4:
98 reg_write((ATH724X_PCI_DEV_BASE + where), value);
99 break;
100 default:
101 spin_unlock_irqrestore(&ath724x_pci_lock, flags);
102
103 return PCIBIOS_BAD_REGISTER_NUMBER;
104 }
105
106 spin_unlock_irqrestore(&ath724x_pci_lock, flags);
107
108 return PCIBIOS_SUCCESSFUL;
109}
110
111static struct pci_ops ath724x_pci_ops = {
112 .read = ath724x_pci_read,
113 .write = ath724x_pci_write,
114};
115
116static struct resource ath724x_io_resource = {
117 .name = "PCI IO space",
118 .start = 0,
119 .end = 0,
120 .flags = IORESOURCE_IO,
121};
122
123static struct resource ath724x_mem_resource = {
124 .name = "PCI memory space",
125 .start = ATH724X_PCI_MEM_BASE,
126 .end = ATH724X_PCI_MEM_BASE + ATH724X_PCI_MEM_SIZE - 1,
127 .flags = IORESOURCE_MEM,
128};
129
130static struct pci_controller ath724x_pci_controller = {
131 .pci_ops = &ath724x_pci_ops,
132 .io_resource = &ath724x_io_resource,
133 .mem_resource = &ath724x_mem_resource,
134};
135
136void ath724x_pci_add_data(struct ath724x_pci_data *data, int size)
137{
138 pci_data = data;
139 pci_data_size = size;
140}
141
142int __init pcibios_map_irq(const struct pci_dev *dev, uint8_t slot, uint8_t pin)
143{
144 unsigned int devfn = dev->devfn;
145 int irq = -1;
146
147 if (devfn > pci_data_size - 1)
148 return irq;
149
150 irq = pci_data[devfn].irq;
151
152 return irq;
153}
154
155int pcibios_plat_dev_init(struct pci_dev *dev)
156{
157 unsigned int devfn = dev->devfn;
158
159 if (devfn > pci_data_size - 1)
160 return PCIBIOS_DEVICE_NOT_FOUND;
161
162 dev->dev.platform_data = pci_data[devfn].pdata;
163
164 return PCIBIOS_SUCCESSFUL;
165}
166
167static int __init ath724x_pcibios_init(void)
168{
169 register_pci_controller(&ath724x_pci_controller);
170
171 return PCIBIOS_SUCCESSFUL;
172}
173
174arch_initcall(ath724x_pcibios_init);
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index 030c77e7926e..ea453532a33c 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -13,8 +13,12 @@
13#include <linux/delay.h> 13#include <linux/delay.h>
14#include <linux/mm.h> 14#include <linux/mm.h>
15#include <linux/vmalloc.h> 15#include <linux/vmalloc.h>
16#include <linux/export.h> 16#include <linux/module.h>
17#include <linux/platform_device.h> 17#include <linux/clk.h>
18#include <linux/of_platform.h>
19#include <linux/of_gpio.h>
20#include <linux/of_irq.h>
21#include <linux/of_pci.h>
18 22
19#include <asm/pci.h> 23#include <asm/pci.h>
20#include <asm/gpio.h> 24#include <asm/gpio.h>
@@ -22,17 +26,9 @@
22 26
23#include <lantiq_soc.h> 27#include <lantiq_soc.h>
24#include <lantiq_irq.h> 28#include <lantiq_irq.h>
25#include <lantiq_platform.h>
26 29
27#include "pci-lantiq.h" 30#include "pci-lantiq.h"
28 31
29#define LTQ_PCI_CFG_BASE 0x17000000
30#define LTQ_PCI_CFG_SIZE 0x00008000
31#define LTQ_PCI_MEM_BASE 0x18000000
32#define LTQ_PCI_MEM_SIZE 0x02000000
33#define LTQ_PCI_IO_BASE 0x1AE00000
34#define LTQ_PCI_IO_SIZE 0x00200000
35
36#define PCI_CR_FCI_ADDR_MAP0 0x00C0 32#define PCI_CR_FCI_ADDR_MAP0 0x00C0
37#define PCI_CR_FCI_ADDR_MAP1 0x00C4 33#define PCI_CR_FCI_ADDR_MAP1 0x00C4
38#define PCI_CR_FCI_ADDR_MAP2 0x00C8 34#define PCI_CR_FCI_ADDR_MAP2 0x00C8
@@ -68,79 +64,27 @@
68#define ltq_pci_cfg_w32(x, y) ltq_w32((x), ltq_pci_mapped_cfg + (y)) 64#define ltq_pci_cfg_w32(x, y) ltq_w32((x), ltq_pci_mapped_cfg + (y))
69#define ltq_pci_cfg_r32(x) ltq_r32(ltq_pci_mapped_cfg + (x)) 65#define ltq_pci_cfg_r32(x) ltq_r32(ltq_pci_mapped_cfg + (x))
70 66
71struct ltq_pci_gpio_map {
72 int pin;
73 int alt0;
74 int alt1;
75 int dir;
76 char *name;
77};
78
79/* the pci core can make use of the following gpios */
80static struct ltq_pci_gpio_map ltq_pci_gpio_map[] = {
81 { 0, 1, 0, 0, "pci-exin0" },
82 { 1, 1, 0, 0, "pci-exin1" },
83 { 2, 1, 0, 0, "pci-exin2" },
84 { 39, 1, 0, 0, "pci-exin3" },
85 { 10, 1, 0, 0, "pci-exin4" },
86 { 9, 1, 0, 0, "pci-exin5" },
87 { 30, 1, 0, 1, "pci-gnt1" },
88 { 23, 1, 0, 1, "pci-gnt2" },
89 { 19, 1, 0, 1, "pci-gnt3" },
90 { 38, 1, 0, 1, "pci-gnt4" },
91 { 29, 1, 0, 0, "pci-req1" },
92 { 31, 1, 0, 0, "pci-req2" },
93 { 3, 1, 0, 0, "pci-req3" },
94 { 37, 1, 0, 0, "pci-req4" },
95};
96
97__iomem void *ltq_pci_mapped_cfg; 67__iomem void *ltq_pci_mapped_cfg;
98static __iomem void *ltq_pci_membase; 68static __iomem void *ltq_pci_membase;
99 69
100int (*ltqpci_plat_dev_init)(struct pci_dev *dev) = NULL; 70static int reset_gpio;
101 71static struct clk *clk_pci, *clk_external;
102/* Since the PCI REQ pins can be reused for other functionality, make it 72static struct resource pci_io_resource;
103 possible to exclude those from interpretation by the PCI controller */ 73static struct resource pci_mem_resource;
104static int ltq_pci_req_mask = 0xf; 74static struct pci_ops pci_ops = {
105
106static int *ltq_pci_irq_map;
107
108struct pci_ops ltq_pci_ops = {
109 .read = ltq_pci_read_config_dword, 75 .read = ltq_pci_read_config_dword,
110 .write = ltq_pci_write_config_dword 76 .write = ltq_pci_write_config_dword
111}; 77};
112 78
113static struct resource pci_io_resource = { 79static struct pci_controller pci_controller = {
114 .name = "pci io space", 80 .pci_ops = &pci_ops,
115 .start = LTQ_PCI_IO_BASE,
116 .end = LTQ_PCI_IO_BASE + LTQ_PCI_IO_SIZE - 1,
117 .flags = IORESOURCE_IO
118};
119
120static struct resource pci_mem_resource = {
121 .name = "pci memory space",
122 .start = LTQ_PCI_MEM_BASE,
123 .end = LTQ_PCI_MEM_BASE + LTQ_PCI_MEM_SIZE - 1,
124 .flags = IORESOURCE_MEM
125};
126
127static struct pci_controller ltq_pci_controller = {
128 .pci_ops = &ltq_pci_ops,
129 .mem_resource = &pci_mem_resource, 81 .mem_resource = &pci_mem_resource,
130 .mem_offset = 0x00000000UL, 82 .mem_offset = 0x00000000UL,
131 .io_resource = &pci_io_resource, 83 .io_resource = &pci_io_resource,
132 .io_offset = 0x00000000UL, 84 .io_offset = 0x00000000UL,
133}; 85};
134 86
135int pcibios_plat_dev_init(struct pci_dev *dev) 87static inline u32 ltq_calc_bar11mask(void)
136{
137 if (ltqpci_plat_dev_init)
138 return ltqpci_plat_dev_init(dev);
139
140 return 0;
141}
142
143static u32 ltq_calc_bar11mask(void)
144{ 88{
145 u32 mem, bar11mask; 89 u32 mem, bar11mask;
146 90
@@ -151,48 +95,42 @@ static u32 ltq_calc_bar11mask(void)
151 return bar11mask; 95 return bar11mask;
152} 96}
153 97
154static void ltq_pci_setup_gpio(int gpio) 98static int __devinit ltq_pci_startup(struct platform_device *pdev)
155{
156 int i;
157 for (i = 0; i < ARRAY_SIZE(ltq_pci_gpio_map); i++) {
158 if (gpio & (1 << i)) {
159 ltq_gpio_request(ltq_pci_gpio_map[i].pin,
160 ltq_pci_gpio_map[i].alt0,
161 ltq_pci_gpio_map[i].alt1,
162 ltq_pci_gpio_map[i].dir,
163 ltq_pci_gpio_map[i].name);
164 }
165 }
166 ltq_gpio_request(21, 0, 0, 1, "pci-reset");
167 ltq_pci_req_mask = (gpio >> PCI_REQ_SHIFT) & PCI_REQ_MASK;
168}
169
170static int __devinit ltq_pci_startup(struct ltq_pci_data *conf)
171{ 99{
100 struct device_node *node = pdev->dev.of_node;
101 const __be32 *req_mask, *bus_clk;
172 u32 temp_buffer; 102 u32 temp_buffer;
173 103
174 /* set clock to 33Mhz */ 104 /* get our clocks */
175 if (ltq_is_ar9()) { 105 clk_pci = clk_get(&pdev->dev, NULL);
176 ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0x1f00000, LTQ_CGU_IFCCR); 106 if (IS_ERR(clk_pci)) {
177 ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0xe00000, LTQ_CGU_IFCCR); 107 dev_err(&pdev->dev, "failed to get pci clock\n");
178 } else { 108 return PTR_ERR(clk_pci);
179 ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~0xf00000, LTQ_CGU_IFCCR);
180 ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | 0x800000, LTQ_CGU_IFCCR);
181 } 109 }
182 110
183 /* external or internal clock ? */ 111 clk_external = clk_get(&pdev->dev, "external");
184 if (conf->clock) { 112 if (IS_ERR(clk_external)) {
185 ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) & ~(1 << 16), 113 clk_put(clk_pci);
186 LTQ_CGU_IFCCR); 114 dev_err(&pdev->dev, "failed to get external pci clock\n");
187 ltq_cgu_w32((1 << 30), LTQ_CGU_PCICR); 115 return PTR_ERR(clk_external);
188 } else {
189 ltq_cgu_w32(ltq_cgu_r32(LTQ_CGU_IFCCR) | (1 << 16),
190 LTQ_CGU_IFCCR);
191 ltq_cgu_w32((1 << 31) | (1 << 30), LTQ_CGU_PCICR);
192 } 116 }
193 117
194 /* setup pci clock and gpis used by pci */ 118 /* read the bus speed that we want */
195 ltq_pci_setup_gpio(conf->gpio); 119 bus_clk = of_get_property(node, "lantiq,bus-clock", NULL);
120 if (bus_clk)
121 clk_set_rate(clk_pci, *bus_clk);
122
123 /* and enable the clocks */
124 clk_enable(clk_pci);
125 if (of_find_property(node, "lantiq,external-clock", NULL))
126 clk_enable(clk_external);
127 else
128 clk_disable(clk_external);
129
130 /* setup reset gpio used by pci */
131 reset_gpio = of_get_named_gpio(node, "gpio-reset", 0);
132 if (reset_gpio > 0)
133 devm_gpio_request(&pdev->dev, reset_gpio, "pci-reset");
196 134
197 /* enable auto-switching between PCI and EBU */ 135 /* enable auto-switching between PCI and EBU */
198 ltq_pci_w32(0xa, PCI_CR_CLK_CTRL); 136 ltq_pci_w32(0xa, PCI_CR_CLK_CTRL);
@@ -205,7 +143,12 @@ static int __devinit ltq_pci_startup(struct ltq_pci_data *conf)
205 143
206 /* enable external 2 PCI masters */ 144 /* enable external 2 PCI masters */
207 temp_buffer = ltq_pci_r32(PCI_CR_PC_ARB); 145 temp_buffer = ltq_pci_r32(PCI_CR_PC_ARB);
208 temp_buffer &= (~(ltq_pci_req_mask << 16)); 146 /* setup the request mask */
147 req_mask = of_get_property(node, "req-mask", NULL);
148 if (req_mask)
149 temp_buffer &= ~((*req_mask & 0xf) << 16);
150 else
151 temp_buffer &= ~0xf0000;
209 /* enable internal arbiter */ 152 /* enable internal arbiter */
210 temp_buffer |= (1 << INTERNAL_ARB_ENABLE_BIT); 153 temp_buffer |= (1 << INTERNAL_ARB_ENABLE_BIT);
211 /* enable internal PCI master reqest */ 154 /* enable internal PCI master reqest */
@@ -249,47 +192,55 @@ static int __devinit ltq_pci_startup(struct ltq_pci_data *conf)
249 ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN); 192 ltq_ebu_w32(ltq_ebu_r32(LTQ_EBU_PCC_IEN) | 0x10, LTQ_EBU_PCC_IEN);
250 193
251 /* toggle reset pin */ 194 /* toggle reset pin */
252 __gpio_set_value(21, 0); 195 if (reset_gpio > 0) {
253 wmb(); 196 __gpio_set_value(reset_gpio, 0);
254 mdelay(1); 197 wmb();
255 __gpio_set_value(21, 1); 198 mdelay(1);
256 return 0; 199 __gpio_set_value(reset_gpio, 1);
257} 200 }
258
259int __init pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
260{
261 if (ltq_pci_irq_map[slot])
262 return ltq_pci_irq_map[slot];
263 printk(KERN_ERR "lq_pci: trying to map irq for unknown slot %d\n",
264 slot);
265
266 return 0; 201 return 0;
267} 202}
268 203
269static int __devinit ltq_pci_probe(struct platform_device *pdev) 204static int __devinit ltq_pci_probe(struct platform_device *pdev)
270{ 205{
271 struct ltq_pci_data *ltq_pci_data = 206 struct resource *res_cfg, *res_bridge;
272 (struct ltq_pci_data *) pdev->dev.platform_data;
273 207
274 pci_clear_flags(PCI_PROBE_ONLY); 208 pci_clear_flags(PCI_PROBE_ONLY);
275 ltq_pci_irq_map = ltq_pci_data->irq;
276 ltq_pci_membase = ioremap_nocache(PCI_CR_BASE_ADDR, PCI_CR_SIZE);
277 ltq_pci_mapped_cfg =
278 ioremap_nocache(LTQ_PCI_CFG_BASE, LTQ_PCI_CFG_BASE);
279 ltq_pci_controller.io_map_base =
280 (unsigned long)ioremap(LTQ_PCI_IO_BASE, LTQ_PCI_IO_SIZE - 1);
281 ltq_pci_startup(ltq_pci_data);
282 register_pci_controller(&ltq_pci_controller);
283 209
210 res_cfg = platform_get_resource(pdev, IORESOURCE_MEM, 0);
211 res_bridge = platform_get_resource(pdev, IORESOURCE_MEM, 1);
212 if (!res_cfg || !res_bridge) {
213 dev_err(&pdev->dev, "missing memory reources\n");
214 return -EINVAL;
215 }
216
217 ltq_pci_membase = devm_request_and_ioremap(&pdev->dev, res_bridge);
218 ltq_pci_mapped_cfg = devm_request_and_ioremap(&pdev->dev, res_cfg);
219
220 if (!ltq_pci_membase || !ltq_pci_mapped_cfg) {
221 dev_err(&pdev->dev, "failed to remap resources\n");
222 return -ENOMEM;
223 }
224
225 ltq_pci_startup(pdev);
226
227 pci_load_of_ranges(&pci_controller, pdev->dev.of_node);
228 register_pci_controller(&pci_controller);
284 return 0; 229 return 0;
285} 230}
286 231
287static struct platform_driver 232static const struct of_device_id ltq_pci_match[] = {
288ltq_pci_driver = { 233 { .compatible = "lantiq,pci-xway" },
234 {},
235};
236MODULE_DEVICE_TABLE(of, ltq_pci_match);
237
238static struct platform_driver ltq_pci_driver = {
289 .probe = ltq_pci_probe, 239 .probe = ltq_pci_probe,
290 .driver = { 240 .driver = {
291 .name = "ltq_pci", 241 .name = "pci-xway",
292 .owner = THIS_MODULE, 242 .owner = THIS_MODULE,
243 .of_match_table = ltq_pci_match,
293 }, 244 },
294}; 245};
295 246
@@ -297,7 +248,7 @@ int __init pcibios_init(void)
297{ 248{
298 int ret = platform_driver_register(&ltq_pci_driver); 249 int ret = platform_driver_register(&ltq_pci_driver);
299 if (ret) 250 if (ret)
300 printk(KERN_INFO "ltq_pci: Error registering platfom driver!"); 251 pr_info("pci-xway: Error registering platform driver!");
301 return ret; 252 return ret;
302} 253}
303 254
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index 0514866fa925..271e8c4a54c7 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -16,6 +16,7 @@
16#include <linux/init.h> 16#include <linux/init.h>
17#include <linux/types.h> 17#include <linux/types.h>
18#include <linux/pci.h> 18#include <linux/pci.h>
19#include <linux/of_address.h>
19 20
20#include <asm/cpu-info.h> 21#include <asm/cpu-info.h>
21 22
@@ -114,9 +115,63 @@ static void __devinit pcibios_scanbus(struct pci_controller *hose)
114 pci_bus_assign_resources(bus); 115 pci_bus_assign_resources(bus);
115 pci_enable_bridges(bus); 116 pci_enable_bridges(bus);
116 } 117 }
118 bus->dev.of_node = hose->of_node;
117 } 119 }
118} 120}
119 121
122#ifdef CONFIG_OF
123void __devinit pci_load_of_ranges(struct pci_controller *hose,
124 struct device_node *node)
125{
126 const __be32 *ranges;
127 int rlen;
128 int pna = of_n_addr_cells(node);
129 int np = pna + 5;
130
131 pr_info("PCI host bridge %s ranges:\n", node->full_name);
132 ranges = of_get_property(node, "ranges", &rlen);
133 if (ranges == NULL)
134 return;
135 hose->of_node = node;
136
137 while ((rlen -= np * 4) >= 0) {
138 u32 pci_space;
139 struct resource *res = NULL;
140 u64 addr, size;
141
142 pci_space = be32_to_cpup(&ranges[0]);
143 addr = of_translate_address(node, ranges + 3);
144 size = of_read_number(ranges + pna + 3, 2);
145 ranges += np;
146 switch ((pci_space >> 24) & 0x3) {
147 case 1: /* PCI IO space */
148 pr_info(" IO 0x%016llx..0x%016llx\n",
149 addr, addr + size - 1);
150 hose->io_map_base =
151 (unsigned long)ioremap(addr, size);
152 res = hose->io_resource;
153 res->flags = IORESOURCE_IO;
154 break;
155 case 2: /* PCI Memory space */
156 case 3: /* PCI 64 bits Memory space */
157 pr_info(" MEM 0x%016llx..0x%016llx\n",
158 addr, addr + size - 1);
159 res = hose->mem_resource;
160 res->flags = IORESOURCE_MEM;
161 break;
162 }
163 if (res != NULL) {
164 res->start = addr;
165 res->name = node->full_name;
166 res->end = res->start + size - 1;
167 res->parent = NULL;
168 res->sibling = NULL;
169 res->child = NULL;
170 }
171 }
172}
173#endif
174
120static DEFINE_MUTEX(pci_scan_mutex); 175static DEFINE_MUTEX(pci_scan_mutex);
121 176
122void __devinit register_pci_controller(struct pci_controller *hose) 177void __devinit register_pci_controller(struct pci_controller *hose)
diff --git a/arch/mips/pmc-sierra/yosemite/Makefile b/arch/mips/pmc-sierra/yosemite/Makefile
index 02f5fb94ea28..5af95ec3319d 100644
--- a/arch/mips/pmc-sierra/yosemite/Makefile
+++ b/arch/mips/pmc-sierra/yosemite/Makefile
@@ -5,5 +5,3 @@
5obj-y += irq.o prom.o py-console.o setup.o 5obj-y += irq.o prom.o py-console.o setup.o
6 6
7obj-$(CONFIG_SMP) += smp.o 7obj-$(CONFIG_SMP) += smp.o
8
9ccflags-y := -Werror
diff --git a/arch/mips/pmc-sierra/yosemite/setup.c b/arch/mips/pmc-sierra/yosemite/setup.c
index 3498ac9c35af..b6472fc88a99 100644
--- a/arch/mips/pmc-sierra/yosemite/setup.c
+++ b/arch/mips/pmc-sierra/yosemite/setup.c
@@ -27,6 +27,7 @@
27#include <linux/bcd.h> 27#include <linux/bcd.h>
28#include <linux/init.h> 28#include <linux/init.h>
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/export.h>
30#include <linux/types.h> 31#include <linux/types.h>
31#include <linux/mm.h> 32#include <linux/mm.h>
32#include <linux/bootmem.h> 33#include <linux/bootmem.h>
diff --git a/arch/mips/powertv/Makefile b/arch/mips/powertv/Makefile
index 348d2e850ef5..39ca9f8d63ae 100644
--- a/arch/mips/powertv/Makefile
+++ b/arch/mips/powertv/Makefile
@@ -27,5 +27,3 @@ obj-y += init.o ioremap.o memory.o powertv_setup.o reset.o time.o \
27 asic/ pci/ 27 asic/ pci/
28 28
29obj-$(CONFIG_USB) += powertv-usb.o 29obj-$(CONFIG_USB) += powertv-usb.o
30
31ccflags-y := -Wall
diff --git a/arch/mips/powertv/asic/Makefile b/arch/mips/powertv/asic/Makefile
index d810a33182a4..35dcc53eb25f 100644
--- a/arch/mips/powertv/asic/Makefile
+++ b/arch/mips/powertv/asic/Makefile
@@ -19,5 +19,3 @@
19obj-y += asic-calliope.o asic-cronus.o asic-gaia.o asic-zeus.o \ 19obj-y += asic-calliope.o asic-cronus.o asic-gaia.o asic-zeus.o \
20 asic_devices.o asic_int.o irq_asic.o prealloc-calliope.o \ 20 asic_devices.o asic_int.o irq_asic.o prealloc-calliope.o \
21 prealloc-cronus.o prealloc-cronuslite.o prealloc-gaia.o prealloc-zeus.o 21 prealloc-cronus.o prealloc-cronuslite.o prealloc-gaia.o prealloc-zeus.o
22
23ccflags-y := -Wall -Werror
diff --git a/arch/mips/powertv/pci/Makefile b/arch/mips/powertv/pci/Makefile
index 5783201cd2c8..2610a6af5b2c 100644
--- a/arch/mips/powertv/pci/Makefile
+++ b/arch/mips/powertv/pci/Makefile
@@ -17,5 +17,3 @@
17# 17#
18 18
19obj-$(CONFIG_PCI) += fixup-powertv.o 19obj-$(CONFIG_PCI) += fixup-powertv.o
20
21ccflags-y := -Wall -Werror
diff --git a/arch/mips/rb532/devices.c b/arch/mips/rb532/devices.c
index a969eb826634..ea774285e6c5 100644
--- a/arch/mips/rb532/devices.c
+++ b/arch/mips/rb532/devices.c
@@ -15,6 +15,7 @@
15 * GNU General Public License for more details. 15 * GNU General Public License for more details.
16 */ 16 */
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/export.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/ctype.h> 20#include <linux/ctype.h>
20#include <linux/string.h> 21#include <linux/string.h>
diff --git a/arch/mips/sni/setup.c b/arch/mips/sni/setup.c
index d16b462154c3..413f17f8e892 100644
--- a/arch/mips/sni/setup.c
+++ b/arch/mips/sni/setup.c
@@ -10,6 +10,7 @@
10 */ 10 */
11#include <linux/eisa.h> 11#include <linux/eisa.h>
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/export.h>
13#include <linux/console.h> 14#include <linux/console.h>
14#include <linux/fb.h> 15#include <linux/fb.h>
15#include <linux/screen_info.h> 16#include <linux/screen_info.h>
diff --git a/arch/mn10300/include/asm/kvm_para.h b/arch/mn10300/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/mn10300/include/asm/kvm_para.h
@@ -0,0 +1 @@
#include <asm-generic/kvm_para.h>
diff --git a/arch/openrisc/Kconfig b/arch/openrisc/Kconfig
index 4932247d078a..49765b53f637 100644
--- a/arch/openrisc/Kconfig
+++ b/arch/openrisc/Kconfig
@@ -19,6 +19,8 @@ config OPENRISC
19 select GENERIC_CPU_DEVICES 19 select GENERIC_CPU_DEVICES
20 select GENERIC_ATOMIC64 20 select GENERIC_ATOMIC64
21 select GENERIC_CLOCKEVENTS 21 select GENERIC_CLOCKEVENTS
22 select GENERIC_STRNCPY_FROM_USER
23 select GENERIC_STRNLEN_USER
22 24
23config MMU 25config MMU
24 def_bool y 26 def_bool y
diff --git a/arch/openrisc/include/asm/Kbuild b/arch/openrisc/include/asm/Kbuild
index c936483bc8e2..3f35c38d7b64 100644
--- a/arch/openrisc/include/asm/Kbuild
+++ b/arch/openrisc/include/asm/Kbuild
@@ -66,3 +66,4 @@ generic-y += topology.h
66generic-y += types.h 66generic-y += types.h
67generic-y += ucontext.h 67generic-y += ucontext.h
68generic-y += user.h 68generic-y += user.h
69generic-y += word-at-a-time.h
diff --git a/arch/openrisc/include/asm/kvm_para.h b/arch/openrisc/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/openrisc/include/asm/kvm_para.h
@@ -0,0 +1 @@
#include <asm-generic/kvm_para.h>
diff --git a/arch/openrisc/include/asm/uaccess.h b/arch/openrisc/include/asm/uaccess.h
index f5abaa0ffc38..ab2e7a198a4c 100644
--- a/arch/openrisc/include/asm/uaccess.h
+++ b/arch/openrisc/include/asm/uaccess.h
@@ -313,42 +313,12 @@ clear_user(void *addr, unsigned long size)
313 return size; 313 return size;
314} 314}
315 315
316extern int __strncpy_from_user(char *dst, const char *src, long count); 316#define user_addr_max() \
317 (segment_eq(get_fs(), USER_DS) ? TASK_SIZE : ~0UL)
317 318
318static inline long strncpy_from_user(char *dst, const char *src, long count) 319extern long strncpy_from_user(char *dest, const char __user *src, long count);
319{
320 if (access_ok(VERIFY_READ, src, 1))
321 return __strncpy_from_user(dst, src, count);
322 return -EFAULT;
323}
324
325/*
326 * Return the size of a string (including the ending 0)
327 *
328 * Return 0 for error
329 */
330
331extern int __strnlen_user(const char *str, long len, unsigned long top);
332
333/*
334 * Returns the length of the string at str (including the null byte),
335 * or 0 if we hit a page we can't access,
336 * or something > len if we didn't find a null byte.
337 *
338 * The `top' parameter to __strnlen_user is to make sure that
339 * we can never overflow from the user area into kernel space.
340 */
341static inline long strnlen_user(const char __user *str, long len)
342{
343 unsigned long top = (unsigned long)get_fs();
344 unsigned long res = 0;
345
346 if (__addr_ok(str))
347 res = __strnlen_user(str, len, top);
348
349 return res;
350}
351 320
352#define strlen_user(str) strnlen_user(str, TASK_SIZE-1) 321extern __must_check long strlen_user(const char __user *str);
322extern __must_check long strnlen_user(const char __user *str, long n);
353 323
354#endif /* __ASM_OPENRISC_UACCESS_H */ 324#endif /* __ASM_OPENRISC_UACCESS_H */
diff --git a/arch/openrisc/lib/string.S b/arch/openrisc/lib/string.S
index 465f04bc7deb..c09fee7dec14 100644
--- a/arch/openrisc/lib/string.S
+++ b/arch/openrisc/lib/string.S
@@ -103,102 +103,3 @@ __clear_user:
103 .section __ex_table, "a" 103 .section __ex_table, "a"
104 .long 9b, 99b // write fault 104 .long 9b, 99b // write fault
105 .previous 105 .previous
106
107/*
108 * long strncpy_from_user(char *dst, const char *src, long count)
109 *
110 *
111 */
112 .global __strncpy_from_user
113__strncpy_from_user:
114 l.addi r1,r1,-16
115 l.sw 0(r1),r6
116 l.sw 4(r1),r5
117 l.sw 8(r1),r4
118 l.sw 12(r1),r3
119
120 l.addi r11,r5,0
1212: l.sfeq r5,r0
122 l.bf 1f
123 l.addi r5,r5,-1
1248: l.lbz r6,0(r4)
125 l.sfeq r6,r0
126 l.bf 1f
1279: l.sb 0(r3),r6
128 l.addi r3,r3,1
129 l.j 2b
130 l.addi r4,r4,1
1311:
132 l.lwz r6,0(r1)
133 l.addi r5,r5,1
134 l.sub r11,r11,r5 // r11 holds the return value
135
136 l.lwz r6,0(r1)
137 l.lwz r5,4(r1)
138 l.lwz r4,8(r1)
139 l.lwz r3,12(r1)
140 l.jr r9
141 l.addi r1,r1,16
142
143 .section .fixup, "ax"
14499:
145 l.movhi r11,hi(-EFAULT)
146 l.ori r11,r11,lo(-EFAULT)
147
148 l.lwz r6,0(r1)
149 l.lwz r5,4(r1)
150 l.lwz r4,8(r1)
151 l.lwz r3,12(r1)
152 l.jr r9
153 l.addi r1,r1,16
154 .previous
155
156 .section __ex_table, "a"
157 .long 8b, 99b // read fault
158 .previous
159
160/*
161 * extern int __strnlen_user(const char *str, long len, unsigned long top);
162 *
163 *
164 * RTRN: - length of a string including NUL termination character
165 * - on page fault 0
166 */
167
168 .global __strnlen_user
169__strnlen_user:
170 l.addi r1,r1,-8
171 l.sw 0(r1),r6
172 l.sw 4(r1),r3
173
174 l.addi r11,r0,0
1752: l.sfeq r11,r4
176 l.bf 1f
177 l.addi r11,r11,1
1788: l.lbz r6,0(r3)
179 l.sfeq r6,r0
180 l.bf 1f
181 l.sfgeu r3,r5 // are we over the top ?
182 l.bf 99f
183 l.j 2b
184 l.addi r3,r3,1
185
1861:
187 l.lwz r6,0(r1)
188 l.lwz r3,4(r1)
189 l.jr r9
190 l.addi r1,r1,8
191
192 .section .fixup, "ax"
19399:
194 l.addi r11,r0,0
195
196 l.lwz r6,0(r1)
197 l.lwz r3,4(r1)
198 l.jr r9
199 l.addi r1,r1,8
200 .previous
201
202 .section __ex_table, "a"
203 .long 8b, 99b // read fault
204 .previous
diff --git a/arch/parisc/include/asm/kvm_para.h b/arch/parisc/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/parisc/include/asm/kvm_para.h
@@ -0,0 +1 @@
#include <asm-generic/kvm_para.h>
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig
index 00b9874e2240..050cb371a69e 100644
--- a/arch/powerpc/Kconfig
+++ b/arch/powerpc/Kconfig
@@ -135,6 +135,8 @@ config PPC
135 select GENERIC_CMOS_UPDATE 135 select GENERIC_CMOS_UPDATE
136 select GENERIC_TIME_VSYSCALL 136 select GENERIC_TIME_VSYSCALL
137 select GENERIC_CLOCKEVENTS 137 select GENERIC_CLOCKEVENTS
138 select GENERIC_STRNCPY_FROM_USER
139 select GENERIC_STRNLEN_USER
138 140
139config EARLY_PRINTK 141config EARLY_PRINTK
140 bool 142 bool
diff --git a/arch/powerpc/boot/dts/mpc8569mds.dts b/arch/powerpc/boot/dts/mpc8569mds.dts
index 7e283c891b7f..fe0d60935e9b 100644
--- a/arch/powerpc/boot/dts/mpc8569mds.dts
+++ b/arch/powerpc/boot/dts/mpc8569mds.dts
@@ -119,6 +119,7 @@
119 sdhc@2e000 { 119 sdhc@2e000 {
120 status = "disabled"; 120 status = "disabled";
121 sdhci,1-bit-only; 121 sdhci,1-bit-only;
122 bus-width = <1>;
122 }; 123 };
123 124
124 par_io@e0100 { 125 par_io@e0100 {
diff --git a/arch/powerpc/include/asm/cputable.h b/arch/powerpc/include/asm/cputable.h
index b9219e99bd2a..50d82c8a037f 100644
--- a/arch/powerpc/include/asm/cputable.h
+++ b/arch/powerpc/include/asm/cputable.h
@@ -168,6 +168,7 @@ extern const char *powerpc_base_platform;
168#define CPU_FTR_LWSYNC ASM_CONST(0x0000000008000000) 168#define CPU_FTR_LWSYNC ASM_CONST(0x0000000008000000)
169#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000010000000) 169#define CPU_FTR_NOEXECUTE ASM_CONST(0x0000000010000000)
170#define CPU_FTR_INDEXED_DCR ASM_CONST(0x0000000020000000) 170#define CPU_FTR_INDEXED_DCR ASM_CONST(0x0000000020000000)
171#define CPU_FTR_EMB_HV ASM_CONST(0x0000000040000000)
171 172
172/* 173/*
173 * Add the 64-bit processor unique features in the top half of the word; 174 * Add the 64-bit processor unique features in the top half of the word;
@@ -376,7 +377,8 @@ extern const char *powerpc_base_platform;
376#define CPU_FTRS_47X (CPU_FTRS_440x6) 377#define CPU_FTRS_47X (CPU_FTRS_440x6)
377#define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \ 378#define CPU_FTRS_E200 (CPU_FTR_USE_TB | CPU_FTR_SPE_COMP | \
378 CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \ 379 CPU_FTR_NODSISRALIGN | CPU_FTR_COHERENT_ICACHE | \
379 CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE) 380 CPU_FTR_UNIFIED_ID_CACHE | CPU_FTR_NOEXECUTE | \
381 CPU_FTR_DEBUG_LVL_EXC)
380#define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \ 382#define CPU_FTRS_E500 (CPU_FTR_MAYBE_CAN_DOZE | CPU_FTR_USE_TB | \
381 CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \ 383 CPU_FTR_SPE_COMP | CPU_FTR_MAYBE_CAN_NAP | CPU_FTR_NODSISRALIGN | \
382 CPU_FTR_NOEXECUTE) 384 CPU_FTR_NOEXECUTE)
@@ -385,15 +387,15 @@ extern const char *powerpc_base_platform;
385 CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE) 387 CPU_FTR_NODSISRALIGN | CPU_FTR_NOEXECUTE)
386#define CPU_FTRS_E500MC (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ 388#define CPU_FTRS_E500MC (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
387 CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ 389 CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
388 CPU_FTR_DBELL) 390 CPU_FTR_DBELL | CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
389#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ 391#define CPU_FTRS_E5500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
390 CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ 392 CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
391 CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ 393 CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
392 CPU_FTR_DEBUG_LVL_EXC) 394 CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
393#define CPU_FTRS_E6500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \ 395#define CPU_FTRS_E6500 (CPU_FTR_USE_TB | CPU_FTR_NODSISRALIGN | \
394 CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \ 396 CPU_FTR_L2CSR | CPU_FTR_LWSYNC | CPU_FTR_NOEXECUTE | \
395 CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \ 397 CPU_FTR_DBELL | CPU_FTR_POPCNTB | CPU_FTR_POPCNTD | \
396 CPU_FTR_DEBUG_LVL_EXC) 398 CPU_FTR_DEBUG_LVL_EXC | CPU_FTR_EMB_HV)
397#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN) 399#define CPU_FTRS_GENERIC_32 (CPU_FTR_COMMON | CPU_FTR_NODSISRALIGN)
398 400
399/* 64-bit CPUs */ 401/* 64-bit CPUs */
@@ -486,8 +488,10 @@ enum {
486 CPU_FTRS_E200 | 488 CPU_FTRS_E200 |
487#endif 489#endif
488#ifdef CONFIG_E500 490#ifdef CONFIG_E500
489 CPU_FTRS_E500 | CPU_FTRS_E500_2 | CPU_FTRS_E500MC | 491 CPU_FTRS_E500 | CPU_FTRS_E500_2 |
490 CPU_FTRS_E5500 | CPU_FTRS_E6500 | 492#endif
493#ifdef CONFIG_PPC_E500MC
494 CPU_FTRS_E500MC | CPU_FTRS_E5500 | CPU_FTRS_E6500 |
491#endif 495#endif
492 0, 496 0,
493}; 497};
@@ -531,9 +535,12 @@ enum {
531 CPU_FTRS_E200 & 535 CPU_FTRS_E200 &
532#endif 536#endif
533#ifdef CONFIG_E500 537#ifdef CONFIG_E500
534 CPU_FTRS_E500 & CPU_FTRS_E500_2 & CPU_FTRS_E500MC & 538 CPU_FTRS_E500 & CPU_FTRS_E500_2 &
535 CPU_FTRS_E5500 & CPU_FTRS_E6500 & 539#endif
540#ifdef CONFIG_PPC_E500MC
541 CPU_FTRS_E500MC & CPU_FTRS_E5500 & CPU_FTRS_E6500 &
536#endif 542#endif
543 ~CPU_FTR_EMB_HV & /* can be removed at runtime */
537 CPU_FTRS_POSSIBLE, 544 CPU_FTRS_POSSIBLE,
538}; 545};
539#endif /* __powerpc64__ */ 546#endif /* __powerpc64__ */
diff --git a/arch/powerpc/include/asm/dbell.h b/arch/powerpc/include/asm/dbell.h
index efa74ac44a35..154c067761b1 100644
--- a/arch/powerpc/include/asm/dbell.h
+++ b/arch/powerpc/include/asm/dbell.h
@@ -19,6 +19,9 @@
19 19
20#define PPC_DBELL_MSG_BRDCAST (0x04000000) 20#define PPC_DBELL_MSG_BRDCAST (0x04000000)
21#define PPC_DBELL_TYPE(x) (((x) & 0xf) << (63-36)) 21#define PPC_DBELL_TYPE(x) (((x) & 0xf) << (63-36))
22#define PPC_DBELL_TYPE_MASK PPC_DBELL_TYPE(0xf)
23#define PPC_DBELL_LPID(x) ((x) << (63 - 49))
24#define PPC_DBELL_PIR_MASK 0x3fff
22enum ppc_dbell { 25enum ppc_dbell {
23 PPC_DBELL = 0, /* doorbell */ 26 PPC_DBELL = 0, /* doorbell */
24 PPC_DBELL_CRIT = 1, /* critical doorbell */ 27 PPC_DBELL_CRIT = 1, /* critical doorbell */
diff --git a/arch/powerpc/include/asm/hvcall.h b/arch/powerpc/include/asm/hvcall.h
index 612252388190..423cf9eaf4a4 100644
--- a/arch/powerpc/include/asm/hvcall.h
+++ b/arch/powerpc/include/asm/hvcall.h
@@ -133,6 +133,16 @@
133#define H_PP1 (1UL<<(63-62)) 133#define H_PP1 (1UL<<(63-62))
134#define H_PP2 (1UL<<(63-63)) 134#define H_PP2 (1UL<<(63-63))
135 135
136/* Flags for H_REGISTER_VPA subfunction field */
137#define H_VPA_FUNC_SHIFT (63-18) /* Bit posn of subfunction code */
138#define H_VPA_FUNC_MASK 7UL
139#define H_VPA_REG_VPA 1UL /* Register Virtual Processor Area */
140#define H_VPA_REG_DTL 2UL /* Register Dispatch Trace Log */
141#define H_VPA_REG_SLB 3UL /* Register SLB shadow buffer */
142#define H_VPA_DEREG_VPA 5UL /* Deregister Virtual Processor Area */
143#define H_VPA_DEREG_DTL 6UL /* Deregister Dispatch Trace Log */
144#define H_VPA_DEREG_SLB 7UL /* Deregister SLB shadow buffer */
145
136/* VASI States */ 146/* VASI States */
137#define H_VASI_INVALID 0 147#define H_VASI_INVALID 0
138#define H_VASI_ENABLED 1 148#define H_VASI_ENABLED 1
diff --git a/arch/powerpc/include/asm/hw_irq.h b/arch/powerpc/include/asm/hw_irq.h
index 51010bfc792e..c9aac24b02e2 100644
--- a/arch/powerpc/include/asm/hw_irq.h
+++ b/arch/powerpc/include/asm/hw_irq.h
@@ -33,6 +33,7 @@
33extern void __replay_interrupt(unsigned int vector); 33extern void __replay_interrupt(unsigned int vector);
34 34
35extern void timer_interrupt(struct pt_regs *); 35extern void timer_interrupt(struct pt_regs *);
36extern void performance_monitor_exception(struct pt_regs *regs);
36 37
37#ifdef CONFIG_PPC64 38#ifdef CONFIG_PPC64
38#include <asm/paca.h> 39#include <asm/paca.h>
diff --git a/arch/powerpc/include/asm/kvm.h b/arch/powerpc/include/asm/kvm.h
index b921c3f48928..1bea4d8ea6f4 100644
--- a/arch/powerpc/include/asm/kvm.h
+++ b/arch/powerpc/include/asm/kvm.h
@@ -277,6 +277,7 @@ struct kvm_sync_regs {
277#define KVM_CPU_E500V2 2 277#define KVM_CPU_E500V2 2
278#define KVM_CPU_3S_32 3 278#define KVM_CPU_3S_32 3
279#define KVM_CPU_3S_64 4 279#define KVM_CPU_3S_64 4
280#define KVM_CPU_E500MC 5
280 281
281/* for KVM_CAP_SPAPR_TCE */ 282/* for KVM_CAP_SPAPR_TCE */
282struct kvm_create_spapr_tce { 283struct kvm_create_spapr_tce {
diff --git a/arch/powerpc/include/asm/kvm_asm.h b/arch/powerpc/include/asm/kvm_asm.h
index 7b1f0e0fc653..76fdcfef0889 100644
--- a/arch/powerpc/include/asm/kvm_asm.h
+++ b/arch/powerpc/include/asm/kvm_asm.h
@@ -20,6 +20,16 @@
20#ifndef __POWERPC_KVM_ASM_H__ 20#ifndef __POWERPC_KVM_ASM_H__
21#define __POWERPC_KVM_ASM_H__ 21#define __POWERPC_KVM_ASM_H__
22 22
23#ifdef __ASSEMBLY__
24#ifdef CONFIG_64BIT
25#define PPC_STD(sreg, offset, areg) std sreg, (offset)(areg)
26#define PPC_LD(treg, offset, areg) ld treg, (offset)(areg)
27#else
28#define PPC_STD(sreg, offset, areg) stw sreg, (offset+4)(areg)
29#define PPC_LD(treg, offset, areg) lwz treg, (offset+4)(areg)
30#endif
31#endif
32
23/* IVPR must be 64KiB-aligned. */ 33/* IVPR must be 64KiB-aligned. */
24#define VCPU_SIZE_ORDER 4 34#define VCPU_SIZE_ORDER 4
25#define VCPU_SIZE_LOG (VCPU_SIZE_ORDER + 12) 35#define VCPU_SIZE_LOG (VCPU_SIZE_ORDER + 12)
@@ -48,6 +58,14 @@
48#define BOOKE_INTERRUPT_SPE_FP_DATA 33 58#define BOOKE_INTERRUPT_SPE_FP_DATA 33
49#define BOOKE_INTERRUPT_SPE_FP_ROUND 34 59#define BOOKE_INTERRUPT_SPE_FP_ROUND 34
50#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35 60#define BOOKE_INTERRUPT_PERFORMANCE_MONITOR 35
61#define BOOKE_INTERRUPT_DOORBELL 36
62#define BOOKE_INTERRUPT_DOORBELL_CRITICAL 37
63
64/* booke_hv */
65#define BOOKE_INTERRUPT_GUEST_DBELL 38
66#define BOOKE_INTERRUPT_GUEST_DBELL_CRIT 39
67#define BOOKE_INTERRUPT_HV_SYSCALL 40
68#define BOOKE_INTERRUPT_HV_PRIV 41
51 69
52/* book3s */ 70/* book3s */
53 71
diff --git a/arch/powerpc/include/asm/kvm_book3s.h b/arch/powerpc/include/asm/kvm_book3s.h
index fd07f43d6622..f0e0c6a66d97 100644
--- a/arch/powerpc/include/asm/kvm_book3s.h
+++ b/arch/powerpc/include/asm/kvm_book3s.h
@@ -453,4 +453,7 @@ static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
453 453
454#define INS_DCBZ 0x7c0007ec 454#define INS_DCBZ 0x7c0007ec
455 455
456/* LPIDs we support with this build -- runtime limit may be lower */
457#define KVMPPC_NR_LPIDS (LPID_RSVD + 1)
458
456#endif /* __ASM_KVM_BOOK3S_H__ */ 459#endif /* __ASM_KVM_BOOK3S_H__ */
diff --git a/arch/powerpc/include/asm/kvm_book3s_asm.h b/arch/powerpc/include/asm/kvm_book3s_asm.h
index 1f2f5b6156bd..88609b23b775 100644
--- a/arch/powerpc/include/asm/kvm_book3s_asm.h
+++ b/arch/powerpc/include/asm/kvm_book3s_asm.h
@@ -79,6 +79,9 @@ struct kvmppc_host_state {
79 u8 napping; 79 u8 napping;
80 80
81#ifdef CONFIG_KVM_BOOK3S_64_HV 81#ifdef CONFIG_KVM_BOOK3S_64_HV
82 u8 hwthread_req;
83 u8 hwthread_state;
84
82 struct kvm_vcpu *kvm_vcpu; 85 struct kvm_vcpu *kvm_vcpu;
83 struct kvmppc_vcore *kvm_vcore; 86 struct kvmppc_vcore *kvm_vcore;
84 unsigned long xics_phys; 87 unsigned long xics_phys;
@@ -122,4 +125,9 @@ struct kvmppc_book3s_shadow_vcpu {
122 125
123#endif /*__ASSEMBLY__ */ 126#endif /*__ASSEMBLY__ */
124 127
128/* Values for kvm_state */
129#define KVM_HWTHREAD_IN_KERNEL 0
130#define KVM_HWTHREAD_IN_NAP 1
131#define KVM_HWTHREAD_IN_KVM 2
132
125#endif /* __ASM_KVM_BOOK3S_ASM_H__ */ 133#endif /* __ASM_KVM_BOOK3S_ASM_H__ */
diff --git a/arch/powerpc/include/asm/kvm_booke.h b/arch/powerpc/include/asm/kvm_booke.h
index a90e09188777..b7cd3356a532 100644
--- a/arch/powerpc/include/asm/kvm_booke.h
+++ b/arch/powerpc/include/asm/kvm_booke.h
@@ -23,6 +23,9 @@
23#include <linux/types.h> 23#include <linux/types.h>
24#include <linux/kvm_host.h> 24#include <linux/kvm_host.h>
25 25
26/* LPIDs we support with this build -- runtime limit may be lower */
27#define KVMPPC_NR_LPIDS 64
28
26static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val) 29static inline void kvmppc_set_gpr(struct kvm_vcpu *vcpu, int num, ulong val)
27{ 30{
28 vcpu->arch.gpr[num] = val; 31 vcpu->arch.gpr[num] = val;
diff --git a/arch/powerpc/include/asm/kvm_booke_hv_asm.h b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
new file mode 100644
index 000000000000..30a600fa1b6a
--- /dev/null
+++ b/arch/powerpc/include/asm/kvm_booke_hv_asm.h
@@ -0,0 +1,49 @@
1/*
2 * Copyright 2010-2011 Freescale Semiconductor, Inc.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License, version 2, as
6 * published by the Free Software Foundation.
7 */
8
9#ifndef ASM_KVM_BOOKE_HV_ASM_H
10#define ASM_KVM_BOOKE_HV_ASM_H
11
12#ifdef __ASSEMBLY__
13
14/*
15 * All exceptions from guest state must go through KVM
16 * (except for those which are delivered directly to the guest) --
17 * there are no exceptions for which we fall through directly to
18 * the normal host handler.
19 *
20 * Expected inputs (normal exceptions):
21 * SCRATCH0 = saved r10
22 * r10 = thread struct
23 * r11 = appropriate SRR1 variant (currently used as scratch)
24 * r13 = saved CR
25 * *(r10 + THREAD_NORMSAVE(0)) = saved r11
26 * *(r10 + THREAD_NORMSAVE(2)) = saved r13
27 *
28 * Expected inputs (crit/mcheck/debug exceptions):
29 * appropriate SCRATCH = saved r8
30 * r8 = exception level stack frame
31 * r9 = *(r8 + _CCR) = saved CR
32 * r11 = appropriate SRR1 variant (currently used as scratch)
33 * *(r8 + GPR9) = saved r9
34 * *(r8 + GPR10) = saved r10 (r10 not yet clobbered)
35 * *(r8 + GPR11) = saved r11
36 */
37.macro DO_KVM intno srr1
38#ifdef CONFIG_KVM_BOOKE_HV
39BEGIN_FTR_SECTION
40 mtocrf 0x80, r11 /* check MSR[GS] without clobbering reg */
41 bf 3, kvmppc_resume_\intno\()_\srr1
42 b kvmppc_handler_\intno\()_\srr1
43kvmppc_resume_\intno\()_\srr1:
44END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
45#endif
46.endm
47
48#endif /*__ASSEMBLY__ */
49#endif /* ASM_KVM_BOOKE_HV_ASM_H */
diff --git a/arch/powerpc/include/asm/kvm_e500.h b/arch/powerpc/include/asm/kvm_e500.h
deleted file mode 100644
index 8cd50a514271..000000000000
--- a/arch/powerpc/include/asm/kvm_e500.h
+++ /dev/null
@@ -1,96 +0,0 @@
1/*
2 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
3 *
4 * Author: Yu Liu, <yu.liu@freescale.com>
5 *
6 * Description:
7 * This file is derived from arch/powerpc/include/asm/kvm_44x.h,
8 * by Hollis Blanchard <hollisb@us.ibm.com>.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License, version 2, as
12 * published by the Free Software Foundation.
13 */
14
15#ifndef __ASM_KVM_E500_H__
16#define __ASM_KVM_E500_H__
17
18#include <linux/kvm_host.h>
19
20#define BOOKE_INTERRUPT_SIZE 36
21
22#define E500_PID_NUM 3
23#define E500_TLB_NUM 2
24
25#define E500_TLB_VALID 1
26#define E500_TLB_DIRTY 2
27
28struct tlbe_ref {
29 pfn_t pfn;
30 unsigned int flags; /* E500_TLB_* */
31};
32
33struct tlbe_priv {
34 struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
35};
36
37struct vcpu_id_table;
38
39struct kvmppc_e500_tlb_params {
40 int entries, ways, sets;
41};
42
43struct kvmppc_vcpu_e500 {
44 /* Unmodified copy of the guest's TLB -- shared with host userspace. */
45 struct kvm_book3e_206_tlb_entry *gtlb_arch;
46
47 /* Starting entry number in gtlb_arch[] */
48 int gtlb_offset[E500_TLB_NUM];
49
50 /* KVM internal information associated with each guest TLB entry */
51 struct tlbe_priv *gtlb_priv[E500_TLB_NUM];
52
53 struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];
54
55 unsigned int gtlb_nv[E500_TLB_NUM];
56
57 /*
58 * information associated with each host TLB entry --
59 * TLB1 only for now. If/when guest TLB1 entries can be
60 * mapped with host TLB0, this will be used for that too.
61 *
62 * We don't want to use this for guest TLB0 because then we'd
63 * have the overhead of doing the translation again even if
64 * the entry is still in the guest TLB (e.g. we swapped out
65 * and back, and our host TLB entries got evicted).
66 */
67 struct tlbe_ref *tlb_refs[E500_TLB_NUM];
68 unsigned int host_tlb1_nv;
69
70 u32 host_pid[E500_PID_NUM];
71 u32 pid[E500_PID_NUM];
72 u32 svr;
73
74 /* vcpu id table */
75 struct vcpu_id_table *idt;
76
77 u32 l1csr0;
78 u32 l1csr1;
79 u32 hid0;
80 u32 hid1;
81 u32 tlb0cfg;
82 u32 tlb1cfg;
83 u64 mcar;
84
85 struct page **shared_tlb_pages;
86 int num_shared_tlb_pages;
87
88 struct kvm_vcpu vcpu;
89};
90
91static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
92{
93 return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
94}
95
96#endif /* __ASM_KVM_E500_H__ */
diff --git a/arch/powerpc/include/asm/kvm_host.h b/arch/powerpc/include/asm/kvm_host.h
index 52eb9c1f4fe0..d848cdc49715 100644
--- a/arch/powerpc/include/asm/kvm_host.h
+++ b/arch/powerpc/include/asm/kvm_host.h
@@ -82,7 +82,7 @@ struct kvm_vcpu;
82 82
83struct lppaca; 83struct lppaca;
84struct slb_shadow; 84struct slb_shadow;
85struct dtl; 85struct dtl_entry;
86 86
87struct kvm_vm_stat { 87struct kvm_vm_stat {
88 u32 remote_tlb_flush; 88 u32 remote_tlb_flush;
@@ -106,6 +106,8 @@ struct kvm_vcpu_stat {
106 u32 dec_exits; 106 u32 dec_exits;
107 u32 ext_intr_exits; 107 u32 ext_intr_exits;
108 u32 halt_wakeup; 108 u32 halt_wakeup;
109 u32 dbell_exits;
110 u32 gdbell_exits;
109#ifdef CONFIG_PPC_BOOK3S 111#ifdef CONFIG_PPC_BOOK3S
110 u32 pf_storage; 112 u32 pf_storage;
111 u32 pf_instruc; 113 u32 pf_instruc;
@@ -140,6 +142,7 @@ enum kvm_exit_types {
140 EMULATED_TLBSX_EXITS, 142 EMULATED_TLBSX_EXITS,
141 EMULATED_TLBWE_EXITS, 143 EMULATED_TLBWE_EXITS,
142 EMULATED_RFI_EXITS, 144 EMULATED_RFI_EXITS,
145 EMULATED_RFCI_EXITS,
143 DEC_EXITS, 146 DEC_EXITS,
144 EXT_INTR_EXITS, 147 EXT_INTR_EXITS,
145 HALT_WAKEUP, 148 HALT_WAKEUP,
@@ -147,6 +150,8 @@ enum kvm_exit_types {
147 FP_UNAVAIL, 150 FP_UNAVAIL,
148 DEBUG_EXITS, 151 DEBUG_EXITS,
149 TIMEINGUEST, 152 TIMEINGUEST,
153 DBELL_EXITS,
154 GDBELL_EXITS,
150 __NUMBER_OF_KVM_EXIT_TYPES 155 __NUMBER_OF_KVM_EXIT_TYPES
151}; 156};
152 157
@@ -217,10 +222,10 @@ struct kvm_arch_memory_slot {
217}; 222};
218 223
219struct kvm_arch { 224struct kvm_arch {
225 unsigned int lpid;
220#ifdef CONFIG_KVM_BOOK3S_64_HV 226#ifdef CONFIG_KVM_BOOK3S_64_HV
221 unsigned long hpt_virt; 227 unsigned long hpt_virt;
222 struct revmap_entry *revmap; 228 struct revmap_entry *revmap;
223 unsigned int lpid;
224 unsigned int host_lpid; 229 unsigned int host_lpid;
225 unsigned long host_lpcr; 230 unsigned long host_lpcr;
226 unsigned long sdr1; 231 unsigned long sdr1;
@@ -232,7 +237,6 @@ struct kvm_arch {
232 unsigned long vrma_slb_v; 237 unsigned long vrma_slb_v;
233 int rma_setup_done; 238 int rma_setup_done;
234 int using_mmu_notifiers; 239 int using_mmu_notifiers;
235 struct list_head spapr_tce_tables;
236 spinlock_t slot_phys_lock; 240 spinlock_t slot_phys_lock;
237 unsigned long *slot_phys[KVM_MEM_SLOTS_NUM]; 241 unsigned long *slot_phys[KVM_MEM_SLOTS_NUM];
238 int slot_npages[KVM_MEM_SLOTS_NUM]; 242 int slot_npages[KVM_MEM_SLOTS_NUM];
@@ -240,6 +244,9 @@ struct kvm_arch {
240 struct kvmppc_vcore *vcores[KVM_MAX_VCORES]; 244 struct kvmppc_vcore *vcores[KVM_MAX_VCORES];
241 struct kvmppc_linear_info *hpt_li; 245 struct kvmppc_linear_info *hpt_li;
242#endif /* CONFIG_KVM_BOOK3S_64_HV */ 246#endif /* CONFIG_KVM_BOOK3S_64_HV */
247#ifdef CONFIG_PPC_BOOK3S_64
248 struct list_head spapr_tce_tables;
249#endif
243}; 250};
244 251
245/* 252/*
@@ -263,6 +270,9 @@ struct kvmppc_vcore {
263 struct list_head runnable_threads; 270 struct list_head runnable_threads;
264 spinlock_t lock; 271 spinlock_t lock;
265 wait_queue_head_t wq; 272 wait_queue_head_t wq;
273 u64 stolen_tb;
274 u64 preempt_tb;
275 struct kvm_vcpu *runner;
266}; 276};
267 277
268#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff) 278#define VCORE_ENTRY_COUNT(vc) ((vc)->entry_exit_count & 0xff)
@@ -274,6 +284,19 @@ struct kvmppc_vcore {
274#define VCORE_EXITING 2 284#define VCORE_EXITING 2
275#define VCORE_SLEEPING 3 285#define VCORE_SLEEPING 3
276 286
287/*
288 * Struct used to manage memory for a virtual processor area
289 * registered by a PAPR guest. There are three types of area
290 * that a guest can register.
291 */
292struct kvmppc_vpa {
293 void *pinned_addr; /* Address in kernel linear mapping */
294 void *pinned_end; /* End of region */
295 unsigned long next_gpa; /* Guest phys addr for update */
296 unsigned long len; /* Number of bytes required */
297 u8 update_pending; /* 1 => update pinned_addr from next_gpa */
298};
299
277struct kvmppc_pte { 300struct kvmppc_pte {
278 ulong eaddr; 301 ulong eaddr;
279 u64 vpage; 302 u64 vpage;
@@ -345,6 +368,17 @@ struct kvm_vcpu_arch {
345 u64 vsr[64]; 368 u64 vsr[64];
346#endif 369#endif
347 370
371#ifdef CONFIG_KVM_BOOKE_HV
372 u32 host_mas4;
373 u32 host_mas6;
374 u32 shadow_epcr;
375 u32 epcr;
376 u32 shadow_msrp;
377 u32 eplc;
378 u32 epsc;
379 u32 oldpir;
380#endif
381
348#ifdef CONFIG_PPC_BOOK3S 382#ifdef CONFIG_PPC_BOOK3S
349 /* For Gekko paired singles */ 383 /* For Gekko paired singles */
350 u32 qpr[32]; 384 u32 qpr[32];
@@ -370,6 +404,7 @@ struct kvm_vcpu_arch {
370#endif 404#endif
371 u32 vrsave; /* also USPRG0 */ 405 u32 vrsave; /* also USPRG0 */
372 u32 mmucr; 406 u32 mmucr;
407 /* shadow_msr is unused for BookE HV */
373 ulong shadow_msr; 408 ulong shadow_msr;
374 ulong csrr0; 409 ulong csrr0;
375 ulong csrr1; 410 ulong csrr1;
@@ -426,8 +461,12 @@ struct kvm_vcpu_arch {
426 ulong fault_esr; 461 ulong fault_esr;
427 ulong queued_dear; 462 ulong queued_dear;
428 ulong queued_esr; 463 ulong queued_esr;
464 u32 tlbcfg[4];
465 u32 mmucfg;
466 u32 epr;
429#endif 467#endif
430 gpa_t paddr_accessed; 468 gpa_t paddr_accessed;
469 gva_t vaddr_accessed;
431 470
432 u8 io_gpr; /* GPR used as IO source/target */ 471 u8 io_gpr; /* GPR used as IO source/target */
433 u8 mmio_is_bigendian; 472 u8 mmio_is_bigendian;
@@ -453,11 +492,6 @@ struct kvm_vcpu_arch {
453 u8 prodded; 492 u8 prodded;
454 u32 last_inst; 493 u32 last_inst;
455 494
456 struct lppaca *vpa;
457 struct slb_shadow *slb_shadow;
458 struct dtl *dtl;
459 struct dtl *dtl_end;
460
461 wait_queue_head_t *wqp; 495 wait_queue_head_t *wqp;
462 struct kvmppc_vcore *vcore; 496 struct kvmppc_vcore *vcore;
463 int ret; 497 int ret;
@@ -482,6 +516,14 @@ struct kvm_vcpu_arch {
482 struct task_struct *run_task; 516 struct task_struct *run_task;
483 struct kvm_run *kvm_run; 517 struct kvm_run *kvm_run;
484 pgd_t *pgdir; 518 pgd_t *pgdir;
519
520 spinlock_t vpa_update_lock;
521 struct kvmppc_vpa vpa;
522 struct kvmppc_vpa dtl;
523 struct dtl_entry *dtl_ptr;
524 unsigned long dtl_index;
525 u64 stolen_logged;
526 struct kvmppc_vpa slb_shadow;
485#endif 527#endif
486}; 528};
487 529
@@ -498,4 +540,6 @@ struct kvm_vcpu_arch {
498#define KVM_MMIO_REG_QPR 0x0040 540#define KVM_MMIO_REG_QPR 0x0040
499#define KVM_MMIO_REG_FQPR 0x0060 541#define KVM_MMIO_REG_FQPR 0x0060
500 542
543#define __KVM_HAVE_ARCH_WQP
544
501#endif /* __POWERPC_KVM_HOST_H__ */ 545#endif /* __POWERPC_KVM_HOST_H__ */
diff --git a/arch/powerpc/include/asm/kvm_para.h b/arch/powerpc/include/asm/kvm_para.h
index 7b754e743003..c18916bff689 100644
--- a/arch/powerpc/include/asm/kvm_para.h
+++ b/arch/powerpc/include/asm/kvm_para.h
@@ -206,6 +206,11 @@ static inline unsigned int kvm_arch_para_features(void)
206 return r; 206 return r;
207} 207}
208 208
209static inline bool kvm_check_and_clear_guest_paused(void)
210{
211 return false;
212}
213
209#endif /* __KERNEL__ */ 214#endif /* __KERNEL__ */
210 215
211#endif /* __POWERPC_KVM_PARA_H__ */ 216#endif /* __POWERPC_KVM_PARA_H__ */
diff --git a/arch/powerpc/include/asm/kvm_ppc.h b/arch/powerpc/include/asm/kvm_ppc.h
index 9d6dee0f7d48..f68c22fa2fce 100644
--- a/arch/powerpc/include/asm/kvm_ppc.h
+++ b/arch/powerpc/include/asm/kvm_ppc.h
@@ -95,7 +95,7 @@ extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
95extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu); 95extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
96extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu); 96extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);
97 97
98extern void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu); 98extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
99extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu); 99extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
100extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags); 100extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
101extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu); 101extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
@@ -107,8 +107,10 @@ extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
107 107
108extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, 108extern int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
109 unsigned int op, int *advance); 109 unsigned int op, int *advance);
110extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs); 110extern int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn,
111extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt); 111 ulong val);
112extern int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn,
113 ulong *val);
112 114
113extern int kvmppc_booke_init(void); 115extern int kvmppc_booke_init(void);
114extern void kvmppc_booke_exit(void); 116extern void kvmppc_booke_exit(void);
@@ -126,6 +128,8 @@ extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
126extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu); 128extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
127extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm, 129extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
128 struct kvm_create_spapr_tce *args); 130 struct kvm_create_spapr_tce *args);
131extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
132 unsigned long ioba, unsigned long tce);
129extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, 133extern long kvm_vm_ioctl_allocate_rma(struct kvm *kvm,
130 struct kvm_allocate_rma *rma); 134 struct kvm_allocate_rma *rma);
131extern struct kvmppc_linear_info *kvm_alloc_rma(void); 135extern struct kvmppc_linear_info *kvm_alloc_rma(void);
@@ -138,6 +142,11 @@ extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
138 struct kvm_userspace_memory_region *mem); 142 struct kvm_userspace_memory_region *mem);
139extern void kvmppc_core_commit_memory_region(struct kvm *kvm, 143extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
140 struct kvm_userspace_memory_region *mem); 144 struct kvm_userspace_memory_region *mem);
145extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
146 struct kvm_ppc_smmu_info *info);
147
148extern int kvmppc_bookehv_init(void);
149extern void kvmppc_bookehv_exit(void);
141 150
142/* 151/*
143 * Cuts out inst bits with ordering according to spec. 152 * Cuts out inst bits with ordering according to spec.
@@ -204,4 +213,9 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
204int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu, 213int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
205 struct kvm_dirty_tlb *cfg); 214 struct kvm_dirty_tlb *cfg);
206 215
216long kvmppc_alloc_lpid(void);
217void kvmppc_claim_lpid(long lpid);
218void kvmppc_free_lpid(long lpid);
219void kvmppc_init_lpid(unsigned long nr_lpids);
220
207#endif /* __POWERPC_KVM_PPC_H__ */ 221#endif /* __POWERPC_KVM_PPC_H__ */
diff --git a/arch/powerpc/include/asm/mmu-book3e.h b/arch/powerpc/include/asm/mmu-book3e.h
index cdb5421877e2..eeabcdbc30f7 100644
--- a/arch/powerpc/include/asm/mmu-book3e.h
+++ b/arch/powerpc/include/asm/mmu-book3e.h
@@ -104,6 +104,8 @@
104#define MAS4_TSIZED_MASK 0x00000f80 /* Default TSIZE */ 104#define MAS4_TSIZED_MASK 0x00000f80 /* Default TSIZE */
105#define MAS4_TSIZED_SHIFT 7 105#define MAS4_TSIZED_SHIFT 7
106 106
107#define MAS5_SGS 0x80000000
108
107#define MAS6_SPID0 0x3FFF0000 109#define MAS6_SPID0 0x3FFF0000
108#define MAS6_SPID1 0x00007FFE 110#define MAS6_SPID1 0x00007FFE
109#define MAS6_ISIZE(x) MAS1_TSIZE(x) 111#define MAS6_ISIZE(x) MAS1_TSIZE(x)
@@ -118,6 +120,10 @@
118 120
119#define MAS7_RPN 0xFFFFFFFF 121#define MAS7_RPN 0xFFFFFFFF
120 122
123#define MAS8_TGS 0x80000000 /* Guest space */
124#define MAS8_VF 0x40000000 /* Virtualization Fault */
125#define MAS8_TLPID 0x000000ff
126
121/* Bit definitions for MMUCFG */ 127/* Bit definitions for MMUCFG */
122#define MMUCFG_MAVN 0x00000003 /* MMU Architecture Version Number */ 128#define MMUCFG_MAVN 0x00000003 /* MMU Architecture Version Number */
123#define MMUCFG_MAVN_V1 0x00000000 /* v1.0 */ 129#define MMUCFG_MAVN_V1 0x00000000 /* v1.0 */
diff --git a/arch/powerpc/include/asm/processor.h b/arch/powerpc/include/asm/processor.h
index 55e85631c42e..413a5eaef56c 100644
--- a/arch/powerpc/include/asm/processor.h
+++ b/arch/powerpc/include/asm/processor.h
@@ -240,6 +240,9 @@ struct thread_struct {
240#ifdef CONFIG_KVM_BOOK3S_32_HANDLER 240#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
241 void* kvm_shadow_vcpu; /* KVM internal data */ 241 void* kvm_shadow_vcpu; /* KVM internal data */
242#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */ 242#endif /* CONFIG_KVM_BOOK3S_32_HANDLER */
243#if defined(CONFIG_KVM) && defined(CONFIG_BOOKE)
244 struct kvm_vcpu *kvm_vcpu;
245#endif
243#ifdef CONFIG_PPC64 246#ifdef CONFIG_PPC64
244 unsigned long dscr; 247 unsigned long dscr;
245 int dscr_inherit; 248 int dscr_inherit;
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index 9d7f0fb69028..f0cb7f461b9d 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -257,7 +257,9 @@
257#define LPCR_LPES_SH 2 257#define LPCR_LPES_SH 2
258#define LPCR_RMI 0x00000002 /* real mode is cache inhibit */ 258#define LPCR_RMI 0x00000002 /* real mode is cache inhibit */
259#define LPCR_HDICE 0x00000001 /* Hyp Decr enable (HV,PR,EE) */ 259#define LPCR_HDICE 0x00000001 /* Hyp Decr enable (HV,PR,EE) */
260#ifndef SPRN_LPID
260#define SPRN_LPID 0x13F /* Logical Partition Identifier */ 261#define SPRN_LPID 0x13F /* Logical Partition Identifier */
262#endif
261#define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */ 263#define LPID_RSVD 0x3ff /* Reserved LPID for partn switching */
262#define SPRN_HMER 0x150 /* Hardware m? error recovery */ 264#define SPRN_HMER 0x150 /* Hardware m? error recovery */
263#define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */ 265#define SPRN_HMEER 0x151 /* Hardware m? enable error recovery */
diff --git a/arch/powerpc/include/asm/reg_booke.h b/arch/powerpc/include/asm/reg_booke.h
index 8a97aa7289d3..2d916c4982c5 100644
--- a/arch/powerpc/include/asm/reg_booke.h
+++ b/arch/powerpc/include/asm/reg_booke.h
@@ -56,18 +56,30 @@
56#define SPRN_SPRG7W 0x117 /* Special Purpose Register General 7 Write */ 56#define SPRN_SPRG7W 0x117 /* Special Purpose Register General 7 Write */
57#define SPRN_EPCR 0x133 /* Embedded Processor Control Register */ 57#define SPRN_EPCR 0x133 /* Embedded Processor Control Register */
58#define SPRN_DBCR2 0x136 /* Debug Control Register 2 */ 58#define SPRN_DBCR2 0x136 /* Debug Control Register 2 */
59#define SPRN_MSRP 0x137 /* MSR Protect Register */
59#define SPRN_IAC3 0x13A /* Instruction Address Compare 3 */ 60#define SPRN_IAC3 0x13A /* Instruction Address Compare 3 */
60#define SPRN_IAC4 0x13B /* Instruction Address Compare 4 */ 61#define SPRN_IAC4 0x13B /* Instruction Address Compare 4 */
61#define SPRN_DVC1 0x13E /* Data Value Compare Register 1 */ 62#define SPRN_DVC1 0x13E /* Data Value Compare Register 1 */
62#define SPRN_DVC2 0x13F /* Data Value Compare Register 2 */ 63#define SPRN_DVC2 0x13F /* Data Value Compare Register 2 */
64#define SPRN_LPID 0x152 /* Logical Partition ID */
63#define SPRN_MAS8 0x155 /* MMU Assist Register 8 */ 65#define SPRN_MAS8 0x155 /* MMU Assist Register 8 */
64#define SPRN_TLB0PS 0x158 /* TLB 0 Page Size Register */ 66#define SPRN_TLB0PS 0x158 /* TLB 0 Page Size Register */
65#define SPRN_TLB1PS 0x159 /* TLB 1 Page Size Register */ 67#define SPRN_TLB1PS 0x159 /* TLB 1 Page Size Register */
66#define SPRN_MAS5_MAS6 0x15c /* MMU Assist Register 5 || 6 */ 68#define SPRN_MAS5_MAS6 0x15c /* MMU Assist Register 5 || 6 */
67#define SPRN_MAS8_MAS1 0x15d /* MMU Assist Register 8 || 1 */ 69#define SPRN_MAS8_MAS1 0x15d /* MMU Assist Register 8 || 1 */
68#define SPRN_EPTCFG 0x15e /* Embedded Page Table Config */ 70#define SPRN_EPTCFG 0x15e /* Embedded Page Table Config */
71#define SPRN_GSPRG0 0x170 /* Guest SPRG0 */
72#define SPRN_GSPRG1 0x171 /* Guest SPRG1 */
73#define SPRN_GSPRG2 0x172 /* Guest SPRG2 */
74#define SPRN_GSPRG3 0x173 /* Guest SPRG3 */
69#define SPRN_MAS7_MAS3 0x174 /* MMU Assist Register 7 || 3 */ 75#define SPRN_MAS7_MAS3 0x174 /* MMU Assist Register 7 || 3 */
70#define SPRN_MAS0_MAS1 0x175 /* MMU Assist Register 0 || 1 */ 76#define SPRN_MAS0_MAS1 0x175 /* MMU Assist Register 0 || 1 */
77#define SPRN_GSRR0 0x17A /* Guest SRR0 */
78#define SPRN_GSRR1 0x17B /* Guest SRR1 */
79#define SPRN_GEPR 0x17C /* Guest EPR */
80#define SPRN_GDEAR 0x17D /* Guest DEAR */
81#define SPRN_GPIR 0x17E /* Guest PIR */
82#define SPRN_GESR 0x17F /* Guest Exception Syndrome Register */
71#define SPRN_IVOR0 0x190 /* Interrupt Vector Offset Register 0 */ 83#define SPRN_IVOR0 0x190 /* Interrupt Vector Offset Register 0 */
72#define SPRN_IVOR1 0x191 /* Interrupt Vector Offset Register 1 */ 84#define SPRN_IVOR1 0x191 /* Interrupt Vector Offset Register 1 */
73#define SPRN_IVOR2 0x192 /* Interrupt Vector Offset Register 2 */ 85#define SPRN_IVOR2 0x192 /* Interrupt Vector Offset Register 2 */
@@ -88,6 +100,13 @@
88#define SPRN_IVOR39 0x1B1 /* Interrupt Vector Offset Register 39 */ 100#define SPRN_IVOR39 0x1B1 /* Interrupt Vector Offset Register 39 */
89#define SPRN_IVOR40 0x1B2 /* Interrupt Vector Offset Register 40 */ 101#define SPRN_IVOR40 0x1B2 /* Interrupt Vector Offset Register 40 */
90#define SPRN_IVOR41 0x1B3 /* Interrupt Vector Offset Register 41 */ 102#define SPRN_IVOR41 0x1B3 /* Interrupt Vector Offset Register 41 */
103#define SPRN_GIVOR2 0x1B8 /* Guest IVOR2 */
104#define SPRN_GIVOR3 0x1B9 /* Guest IVOR3 */
105#define SPRN_GIVOR4 0x1BA /* Guest IVOR4 */
106#define SPRN_GIVOR8 0x1BB /* Guest IVOR8 */
107#define SPRN_GIVOR13 0x1BC /* Guest IVOR13 */
108#define SPRN_GIVOR14 0x1BD /* Guest IVOR14 */
109#define SPRN_GIVPR 0x1BF /* Guest IVPR */
91#define SPRN_SPEFSCR 0x200 /* SPE & Embedded FP Status & Control */ 110#define SPRN_SPEFSCR 0x200 /* SPE & Embedded FP Status & Control */
92#define SPRN_BBEAR 0x201 /* Branch Buffer Entry Address Register */ 111#define SPRN_BBEAR 0x201 /* Branch Buffer Entry Address Register */
93#define SPRN_BBTAR 0x202 /* Branch Buffer Target Address Register */ 112#define SPRN_BBTAR 0x202 /* Branch Buffer Target Address Register */
@@ -240,6 +259,10 @@
240#define MCSR_LDG 0x00002000UL /* Guarded Load */ 259#define MCSR_LDG 0x00002000UL /* Guarded Load */
241#define MCSR_TLBSYNC 0x00000002UL /* Multiple tlbsyncs detected */ 260#define MCSR_TLBSYNC 0x00000002UL /* Multiple tlbsyncs detected */
242#define MCSR_BSL2_ERR 0x00000001UL /* Backside L2 cache error */ 261#define MCSR_BSL2_ERR 0x00000001UL /* Backside L2 cache error */
262
263#define MSRP_UCLEP 0x04000000 /* Protect MSR[UCLE] */
264#define MSRP_DEP 0x00000200 /* Protect MSR[DE] */
265#define MSRP_PMMP 0x00000004 /* Protect MSR[PMM] */
243#endif 266#endif
244 267
245#ifdef CONFIG_E200 268#ifdef CONFIG_E200
@@ -594,6 +617,17 @@
594#define SPRN_EPCR_DMIUH 0x00400000 /* Disable MAS Interrupt updates 617#define SPRN_EPCR_DMIUH 0x00400000 /* Disable MAS Interrupt updates
595 * for hypervisor */ 618 * for hypervisor */
596 619
620/* Bit definitions for EPLC/EPSC */
621#define EPC_EPR 0x80000000 /* 1 = user, 0 = kernel */
622#define EPC_EPR_SHIFT 31
623#define EPC_EAS 0x40000000 /* Address Space */
624#define EPC_EAS_SHIFT 30
625#define EPC_EGS 0x20000000 /* 1 = guest, 0 = hypervisor */
626#define EPC_EGS_SHIFT 29
627#define EPC_ELPID 0x00ff0000
628#define EPC_ELPID_SHIFT 16
629#define EPC_EPID 0x00003fff
630#define EPC_EPID_SHIFT 0
597 631
598/* 632/*
599 * The IBM-403 is an even more odd special case, as it is much 633 * The IBM-403 is an even more odd special case, as it is much
diff --git a/arch/powerpc/include/asm/switch_to.h b/arch/powerpc/include/asm/switch_to.h
index 1a6320290d26..200d763a0a67 100644
--- a/arch/powerpc/include/asm/switch_to.h
+++ b/arch/powerpc/include/asm/switch_to.h
@@ -17,6 +17,7 @@ extern struct task_struct *_switch(struct thread_struct *prev,
17 struct thread_struct *next); 17 struct thread_struct *next);
18 18
19extern void giveup_fpu(struct task_struct *); 19extern void giveup_fpu(struct task_struct *);
20extern void load_up_fpu(void);
20extern void disable_kernel_fp(void); 21extern void disable_kernel_fp(void);
21extern void enable_kernel_fp(void); 22extern void enable_kernel_fp(void);
22extern void flush_fp_to_thread(struct task_struct *); 23extern void flush_fp_to_thread(struct task_struct *);
diff --git a/arch/powerpc/include/asm/time.h b/arch/powerpc/include/asm/time.h
index 2136f58a54e8..3b4b4a8da922 100644
--- a/arch/powerpc/include/asm/time.h
+++ b/arch/powerpc/include/asm/time.h
@@ -23,6 +23,7 @@
23extern unsigned long tb_ticks_per_jiffy; 23extern unsigned long tb_ticks_per_jiffy;
24extern unsigned long tb_ticks_per_usec; 24extern unsigned long tb_ticks_per_usec;
25extern unsigned long tb_ticks_per_sec; 25extern unsigned long tb_ticks_per_sec;
26extern struct clock_event_device decrementer_clockevent;
26 27
27struct rtc_time; 28struct rtc_time;
28extern void to_tm(int tim, struct rtc_time * tm); 29extern void to_tm(int tim, struct rtc_time * tm);
diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
index bd0fb8495154..17bb40cad5bf 100644
--- a/arch/powerpc/include/asm/uaccess.h
+++ b/arch/powerpc/include/asm/uaccess.h
@@ -40,6 +40,8 @@
40 40
41#define segment_eq(a, b) ((a).seg == (b).seg) 41#define segment_eq(a, b) ((a).seg == (b).seg)
42 42
43#define user_addr_max() (get_fs().seg)
44
43#ifdef __powerpc64__ 45#ifdef __powerpc64__
44/* 46/*
45 * This check is sufficient because there is a large enough 47 * This check is sufficient because there is a large enough
@@ -453,42 +455,9 @@ static inline unsigned long clear_user(void __user *addr, unsigned long size)
453 return size; 455 return size;
454} 456}
455 457
456extern int __strncpy_from_user(char *dst, const char __user *src, long count); 458extern long strncpy_from_user(char *dst, const char __user *src, long count);
457 459extern __must_check long strlen_user(const char __user *str);
458static inline long strncpy_from_user(char *dst, const char __user *src, 460extern __must_check long strnlen_user(const char __user *str, long n);
459 long count)
460{
461 might_sleep();
462 if (likely(access_ok(VERIFY_READ, src, 1)))
463 return __strncpy_from_user(dst, src, count);
464 return -EFAULT;
465}
466
467/*
468 * Return the size of a string (including the ending 0)
469 *
470 * Return 0 for error
471 */
472extern int __strnlen_user(const char __user *str, long len, unsigned long top);
473
474/*
475 * Returns the length of the string at str (including the null byte),
476 * or 0 if we hit a page we can't access,
477 * or something > len if we didn't find a null byte.
478 *
479 * The `top' parameter to __strnlen_user is to make sure that
480 * we can never overflow from the user area into kernel space.
481 */
482static inline int strnlen_user(const char __user *str, long len)
483{
484 unsigned long top = current->thread.fs.seg;
485
486 if ((unsigned long)str > top)
487 return 0;
488 return __strnlen_user(str, len, top);
489}
490
491#define strlen_user(str) strnlen_user((str), 0x7ffffffe)
492 461
493#endif /* __ASSEMBLY__ */ 462#endif /* __ASSEMBLY__ */
494#endif /* __KERNEL__ */ 463#endif /* __KERNEL__ */
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..d0b6d4ac6dda
--- /dev/null
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -0,0 +1,41 @@
1#ifndef _ASM_WORD_AT_A_TIME_H
2#define _ASM_WORD_AT_A_TIME_H
3
4/*
5 * Word-at-a-time interfaces for PowerPC.
6 */
7
8#include <linux/kernel.h>
9#include <asm/asm-compat.h>
10
11struct word_at_a_time {
12 const unsigned long high_bits, low_bits;
13};
14
15#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }
16
17/* Bit set in the bytes that have a zero */
18static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
19{
20 unsigned long mask = (val & c->low_bits) + c->low_bits;
21 return ~(mask | rhs);
22}
23
24#define create_zero_mask(mask) (mask)
25
26static inline long find_zero(unsigned long mask)
27{
28 long leading_zero_bits;
29
30 asm (PPC_CNTLZL "%0,%1" : "=r" (leading_zero_bits) : "r" (mask));
31 return leading_zero_bits >> 3;
32}
33
34static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
35{
36 unsigned long rhs = val | c->low_bits;
37 *data = rhs;
38 return (val + c->high_bits) & ~rhs;
39}
40
41#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/arch/powerpc/kernel/asm-offsets.c b/arch/powerpc/kernel/asm-offsets.c
index 4554dc2fe857..52c7ad78242e 100644
--- a/arch/powerpc/kernel/asm-offsets.c
+++ b/arch/powerpc/kernel/asm-offsets.c
@@ -116,6 +116,9 @@ int main(void)
116#ifdef CONFIG_KVM_BOOK3S_32_HANDLER 116#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
117 DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu)); 117 DEFINE(THREAD_KVM_SVCPU, offsetof(struct thread_struct, kvm_shadow_vcpu));
118#endif 118#endif
119#ifdef CONFIG_KVM_BOOKE_HV
120 DEFINE(THREAD_KVM_VCPU, offsetof(struct thread_struct, kvm_vcpu));
121#endif
119 122
120 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags)); 123 DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
121 DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags)); 124 DEFINE(TI_LOCAL_FLAGS, offsetof(struct thread_info, local_flags));
@@ -383,6 +386,7 @@ int main(void)
383#ifdef CONFIG_KVM 386#ifdef CONFIG_KVM
384 DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack)); 387 DEFINE(VCPU_HOST_STACK, offsetof(struct kvm_vcpu, arch.host_stack));
385 DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid)); 388 DEFINE(VCPU_HOST_PID, offsetof(struct kvm_vcpu, arch.host_pid));
389 DEFINE(VCPU_GUEST_PID, offsetof(struct kvm_vcpu, arch.pid));
386 DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr)); 390 DEFINE(VCPU_GPRS, offsetof(struct kvm_vcpu, arch.gpr));
387 DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave)); 391 DEFINE(VCPU_VRSAVE, offsetof(struct kvm_vcpu, arch.vrsave));
388 DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr)); 392 DEFINE(VCPU_FPRS, offsetof(struct kvm_vcpu, arch.fpr));
@@ -425,9 +429,11 @@ int main(void)
425 DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4)); 429 DEFINE(VCPU_SHARED_MAS4, offsetof(struct kvm_vcpu_arch_shared, mas4));
426 DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6)); 430 DEFINE(VCPU_SHARED_MAS6, offsetof(struct kvm_vcpu_arch_shared, mas6));
427 431
432 DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
433 DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
434
428 /* book3s */ 435 /* book3s */
429#ifdef CONFIG_KVM_BOOK3S_64_HV 436#ifdef CONFIG_KVM_BOOK3S_64_HV
430 DEFINE(KVM_LPID, offsetof(struct kvm, arch.lpid));
431 DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1)); 437 DEFINE(KVM_SDR1, offsetof(struct kvm, arch.sdr1));
432 DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid)); 438 DEFINE(KVM_HOST_LPID, offsetof(struct kvm, arch.host_lpid));
433 DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr)); 439 DEFINE(KVM_HOST_LPCR, offsetof(struct kvm, arch.host_lpcr));
@@ -440,9 +446,9 @@ int main(void)
440 DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v)); 446 DEFINE(KVM_VRMA_SLB_V, offsetof(struct kvm, arch.vrma_slb_v));
441 DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr)); 447 DEFINE(VCPU_DSISR, offsetof(struct kvm_vcpu, arch.shregs.dsisr));
442 DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar)); 448 DEFINE(VCPU_DAR, offsetof(struct kvm_vcpu, arch.shregs.dar));
449 DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa.pinned_addr));
443#endif 450#endif
444#ifdef CONFIG_PPC_BOOK3S 451#ifdef CONFIG_PPC_BOOK3S
445 DEFINE(VCPU_KVM, offsetof(struct kvm_vcpu, kvm));
446 DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id)); 452 DEFINE(VCPU_VCPUID, offsetof(struct kvm_vcpu, vcpu_id));
447 DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr)); 453 DEFINE(VCPU_PURR, offsetof(struct kvm_vcpu, arch.purr));
448 DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr)); 454 DEFINE(VCPU_SPURR, offsetof(struct kvm_vcpu, arch.spurr));
@@ -457,7 +463,6 @@ int main(void)
457 DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions)); 463 DEFINE(VCPU_PENDING_EXC, offsetof(struct kvm_vcpu, arch.pending_exceptions));
458 DEFINE(VCPU_CEDED, offsetof(struct kvm_vcpu, arch.ceded)); 464 DEFINE(VCPU_CEDED, offsetof(struct kvm_vcpu, arch.ceded));
459 DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded)); 465 DEFINE(VCPU_PRODDED, offsetof(struct kvm_vcpu, arch.prodded));
460 DEFINE(VCPU_VPA, offsetof(struct kvm_vcpu, arch.vpa));
461 DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr)); 466 DEFINE(VCPU_MMCR, offsetof(struct kvm_vcpu, arch.mmcr));
462 DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc)); 467 DEFINE(VCPU_PMC, offsetof(struct kvm_vcpu, arch.pmc));
463 DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb)); 468 DEFINE(VCPU_SLB, offsetof(struct kvm_vcpu, arch.slb));
@@ -533,6 +538,8 @@ int main(void)
533 HSTATE_FIELD(HSTATE_NAPPING, napping); 538 HSTATE_FIELD(HSTATE_NAPPING, napping);
534 539
535#ifdef CONFIG_KVM_BOOK3S_64_HV 540#ifdef CONFIG_KVM_BOOK3S_64_HV
541 HSTATE_FIELD(HSTATE_HWTHREAD_REQ, hwthread_req);
542 HSTATE_FIELD(HSTATE_HWTHREAD_STATE, hwthread_state);
536 HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu); 543 HSTATE_FIELD(HSTATE_KVM_VCPU, kvm_vcpu);
537 HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore); 544 HSTATE_FIELD(HSTATE_KVM_VCORE, kvm_vcore);
538 HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys); 545 HSTATE_FIELD(HSTATE_XICS_PHYS, xics_phys);
@@ -593,6 +600,12 @@ int main(void)
593 DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr)); 600 DEFINE(VCPU_HOST_SPEFSCR, offsetof(struct kvm_vcpu, arch.host_spefscr));
594#endif 601#endif
595 602
603#ifdef CONFIG_KVM_BOOKE_HV
604 DEFINE(VCPU_HOST_MAS4, offsetof(struct kvm_vcpu, arch.host_mas4));
605 DEFINE(VCPU_HOST_MAS6, offsetof(struct kvm_vcpu, arch.host_mas6));
606 DEFINE(VCPU_EPLC, offsetof(struct kvm_vcpu, arch.eplc));
607#endif
608
596#ifdef CONFIG_KVM_EXIT_TIMING 609#ifdef CONFIG_KVM_EXIT_TIMING
597 DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu, 610 DEFINE(VCPU_TIMING_EXIT_TBU, offsetof(struct kvm_vcpu,
598 arch.timing_exit.tv32.tbu)); 611 arch.timing_exit.tv32.tbu));
diff --git a/arch/powerpc/kernel/cpu_setup_fsl_booke.S b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
index 8053db02b85e..69fdd2322a66 100644
--- a/arch/powerpc/kernel/cpu_setup_fsl_booke.S
+++ b/arch/powerpc/kernel/cpu_setup_fsl_booke.S
@@ -73,6 +73,7 @@ _GLOBAL(__setup_cpu_e500v2)
73 mtlr r4 73 mtlr r4
74 blr 74 blr
75_GLOBAL(__setup_cpu_e500mc) 75_GLOBAL(__setup_cpu_e500mc)
76 mr r5, r4
76 mflr r4 77 mflr r4
77 bl __e500_icache_setup 78 bl __e500_icache_setup
78 bl __e500_dcache_setup 79 bl __e500_dcache_setup
diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
index f7bed44ee165..1c06d2971545 100644
--- a/arch/powerpc/kernel/exceptions-64s.S
+++ b/arch/powerpc/kernel/exceptions-64s.S
@@ -63,11 +63,13 @@ BEGIN_FTR_SECTION
63 GET_PACA(r13) 63 GET_PACA(r13)
64 64
65#ifdef CONFIG_KVM_BOOK3S_64_HV 65#ifdef CONFIG_KVM_BOOK3S_64_HV
66 lbz r0,PACAPROCSTART(r13) 66 li r0,KVM_HWTHREAD_IN_KERNEL
67 cmpwi r0,0x80 67 stb r0,HSTATE_HWTHREAD_STATE(r13)
68 bne 1f 68 /* Order setting hwthread_state vs. testing hwthread_req */
69 li r0,1 69 sync
70 stb r0,PACAPROCSTART(r13) 70 lbz r0,HSTATE_HWTHREAD_REQ(r13)
71 cmpwi r0,0
72 beq 1f
71 b kvm_start_guest 73 b kvm_start_guest
721: 741:
73#endif 75#endif
diff --git a/arch/powerpc/kernel/head_44x.S b/arch/powerpc/kernel/head_44x.S
index 22d608e8bb7d..7a2e5e421abf 100644
--- a/arch/powerpc/kernel/head_44x.S
+++ b/arch/powerpc/kernel/head_44x.S
@@ -248,10 +248,11 @@ _ENTRY(_start);
248 248
249interrupt_base: 249interrupt_base:
250 /* Critical Input Interrupt */ 250 /* Critical Input Interrupt */
251 CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception) 251 CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)
252 252
253 /* Machine Check Interrupt */ 253 /* Machine Check Interrupt */
254 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception) 254 CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
255 machine_check_exception)
255 MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception) 256 MCHECK_EXCEPTION(0x0210, MachineCheckA, machine_check_exception)
256 257
257 /* Data Storage Interrupt */ 258 /* Data Storage Interrupt */
@@ -261,7 +262,8 @@ interrupt_base:
261 INSTRUCTION_STORAGE_EXCEPTION 262 INSTRUCTION_STORAGE_EXCEPTION
262 263
263 /* External Input Interrupt */ 264 /* External Input Interrupt */
264 EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE) 265 EXCEPTION(0x0500, BOOKE_INTERRUPT_EXTERNAL, ExternalInput, \
266 do_IRQ, EXC_XFER_LITE)
265 267
266 /* Alignment Interrupt */ 268 /* Alignment Interrupt */
267 ALIGNMENT_EXCEPTION 269 ALIGNMENT_EXCEPTION
@@ -273,29 +275,32 @@ interrupt_base:
273#ifdef CONFIG_PPC_FPU 275#ifdef CONFIG_PPC_FPU
274 FP_UNAVAILABLE_EXCEPTION 276 FP_UNAVAILABLE_EXCEPTION
275#else 277#else
276 EXCEPTION(0x2010, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE) 278 EXCEPTION(0x2010, BOOKE_INTERRUPT_FP_UNAVAIL, \
279 FloatingPointUnavailable, unknown_exception, EXC_XFER_EE)
277#endif 280#endif
278 /* System Call Interrupt */ 281 /* System Call Interrupt */
279 START_EXCEPTION(SystemCall) 282 START_EXCEPTION(SystemCall)
280 NORMAL_EXCEPTION_PROLOG 283 NORMAL_EXCEPTION_PROLOG(BOOKE_INTERRUPT_SYSCALL)
281 EXC_XFER_EE_LITE(0x0c00, DoSyscall) 284 EXC_XFER_EE_LITE(0x0c00, DoSyscall)
282 285
283 /* Auxiliary Processor Unavailable Interrupt */ 286 /* Auxiliary Processor Unavailable Interrupt */
284 EXCEPTION(0x2020, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) 287 EXCEPTION(0x2020, BOOKE_INTERRUPT_AP_UNAVAIL, \
288 AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE)
285 289
286 /* Decrementer Interrupt */ 290 /* Decrementer Interrupt */
287 DECREMENTER_EXCEPTION 291 DECREMENTER_EXCEPTION
288 292
289 /* Fixed Internal Timer Interrupt */ 293 /* Fixed Internal Timer Interrupt */
290 /* TODO: Add FIT support */ 294 /* TODO: Add FIT support */
291 EXCEPTION(0x1010, FixedIntervalTimer, unknown_exception, EXC_XFER_EE) 295 EXCEPTION(0x1010, BOOKE_INTERRUPT_FIT, FixedIntervalTimer, \
296 unknown_exception, EXC_XFER_EE)
292 297
293 /* Watchdog Timer Interrupt */ 298 /* Watchdog Timer Interrupt */
294 /* TODO: Add watchdog support */ 299 /* TODO: Add watchdog support */
295#ifdef CONFIG_BOOKE_WDT 300#ifdef CONFIG_BOOKE_WDT
296 CRITICAL_EXCEPTION(0x1020, WatchdogTimer, WatchdogException) 301 CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, WatchdogException)
297#else 302#else
298 CRITICAL_EXCEPTION(0x1020, WatchdogTimer, unknown_exception) 303 CRITICAL_EXCEPTION(0x1020, WATCHDOG, WatchdogTimer, unknown_exception)
299#endif 304#endif
300 305
301 /* Data TLB Error Interrupt */ 306 /* Data TLB Error Interrupt */
diff --git a/arch/powerpc/kernel/head_booke.h b/arch/powerpc/kernel/head_booke.h
index 0e4175388f47..5f051eeb93a2 100644
--- a/arch/powerpc/kernel/head_booke.h
+++ b/arch/powerpc/kernel/head_booke.h
@@ -2,6 +2,9 @@
2#define __HEAD_BOOKE_H__ 2#define __HEAD_BOOKE_H__
3 3
4#include <asm/ptrace.h> /* for STACK_FRAME_REGS_MARKER */ 4#include <asm/ptrace.h> /* for STACK_FRAME_REGS_MARKER */
5#include <asm/kvm_asm.h>
6#include <asm/kvm_booke_hv_asm.h>
7
5/* 8/*
6 * Macros used for common Book-e exception handling 9 * Macros used for common Book-e exception handling
7 */ 10 */
@@ -28,14 +31,15 @@
28 */ 31 */
29#define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4)) 32#define THREAD_NORMSAVE(offset) (THREAD_NORMSAVES + (offset * 4))
30 33
31#define NORMAL_EXCEPTION_PROLOG \ 34#define NORMAL_EXCEPTION_PROLOG(intno) \
32 mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \ 35 mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \
33 mfspr r10, SPRN_SPRG_THREAD; \ 36 mfspr r10, SPRN_SPRG_THREAD; \
34 stw r11, THREAD_NORMSAVE(0)(r10); \ 37 stw r11, THREAD_NORMSAVE(0)(r10); \
35 stw r13, THREAD_NORMSAVE(2)(r10); \ 38 stw r13, THREAD_NORMSAVE(2)(r10); \
36 mfcr r13; /* save CR in r13 for now */\ 39 mfcr r13; /* save CR in r13 for now */\
37 mfspr r11,SPRN_SRR1; /* check whether user or kernel */\ 40 mfspr r11, SPRN_SRR1; \
38 andi. r11,r11,MSR_PR; \ 41 DO_KVM BOOKE_INTERRUPT_##intno SPRN_SRR1; \
42 andi. r11, r11, MSR_PR; /* check whether user or kernel */\
39 mr r11, r1; \ 43 mr r11, r1; \
40 beq 1f; \ 44 beq 1f; \
41 /* if from user, start at top of this thread's kernel stack */ \ 45 /* if from user, start at top of this thread's kernel stack */ \
@@ -113,7 +117,7 @@
113 * registers as the normal prolog above. Instead we use a portion of the 117 * registers as the normal prolog above. Instead we use a portion of the
114 * critical/machine check exception stack at low physical addresses. 118 * critical/machine check exception stack at low physical addresses.
115 */ 119 */
116#define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, exc_level_srr0, exc_level_srr1) \ 120#define EXC_LEVEL_EXCEPTION_PROLOG(exc_level, intno, exc_level_srr0, exc_level_srr1) \
117 mtspr SPRN_SPRG_WSCRATCH_##exc_level,r8; \ 121 mtspr SPRN_SPRG_WSCRATCH_##exc_level,r8; \
118 BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);/* r8 points to the exc_level stack*/ \ 122 BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);/* r8 points to the exc_level stack*/ \
119 stw r9,GPR9(r8); /* save various registers */\ 123 stw r9,GPR9(r8); /* save various registers */\
@@ -121,8 +125,9 @@
121 stw r10,GPR10(r8); \ 125 stw r10,GPR10(r8); \
122 stw r11,GPR11(r8); \ 126 stw r11,GPR11(r8); \
123 stw r9,_CCR(r8); /* save CR on stack */\ 127 stw r9,_CCR(r8); /* save CR on stack */\
124 mfspr r10,exc_level_srr1; /* check whether user or kernel */\ 128 mfspr r11,exc_level_srr1; /* check whether user or kernel */\
125 andi. r10,r10,MSR_PR; \ 129 DO_KVM BOOKE_INTERRUPT_##intno exc_level_srr1; \
130 andi. r11,r11,MSR_PR; \
126 mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\ 131 mfspr r11,SPRN_SPRG_THREAD; /* if from user, start at top of */\
127 lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\ 132 lwz r11,THREAD_INFO-THREAD(r11); /* this thread's kernel stack */\
128 addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\ 133 addi r11,r11,EXC_LVL_FRAME_OVERHEAD; /* allocate stack frame */\
@@ -162,12 +167,30 @@
162 SAVE_4GPRS(3, r11); \ 167 SAVE_4GPRS(3, r11); \
163 SAVE_2GPRS(7, r11) 168 SAVE_2GPRS(7, r11)
164 169
165#define CRITICAL_EXCEPTION_PROLOG \ 170#define CRITICAL_EXCEPTION_PROLOG(intno) \
166 EXC_LEVEL_EXCEPTION_PROLOG(CRIT, SPRN_CSRR0, SPRN_CSRR1) 171 EXC_LEVEL_EXCEPTION_PROLOG(CRIT, intno, SPRN_CSRR0, SPRN_CSRR1)
167#define DEBUG_EXCEPTION_PROLOG \ 172#define DEBUG_EXCEPTION_PROLOG \
168 EXC_LEVEL_EXCEPTION_PROLOG(DBG, SPRN_DSRR0, SPRN_DSRR1) 173 EXC_LEVEL_EXCEPTION_PROLOG(DBG, DEBUG, SPRN_DSRR0, SPRN_DSRR1)
169#define MCHECK_EXCEPTION_PROLOG \ 174#define MCHECK_EXCEPTION_PROLOG \
170 EXC_LEVEL_EXCEPTION_PROLOG(MC, SPRN_MCSRR0, SPRN_MCSRR1) 175 EXC_LEVEL_EXCEPTION_PROLOG(MC, MACHINE_CHECK, \
176 SPRN_MCSRR0, SPRN_MCSRR1)
177
178/*
179 * Guest Doorbell -- this is a bit odd in that uses GSRR0/1 despite
180 * being delivered to the host. This exception can only happen
181 * inside a KVM guest -- so we just handle up to the DO_KVM rather
182 * than try to fit this into one of the existing prolog macros.
183 */
184#define GUEST_DOORBELL_EXCEPTION \
185 START_EXCEPTION(GuestDoorbell); \
186 mtspr SPRN_SPRG_WSCRATCH0, r10; /* save one register */ \
187 mfspr r10, SPRN_SPRG_THREAD; \
188 stw r11, THREAD_NORMSAVE(0)(r10); \
189 mfspr r11, SPRN_SRR1; \
190 stw r13, THREAD_NORMSAVE(2)(r10); \
191 mfcr r13; /* save CR in r13 for now */\
192 DO_KVM BOOKE_INTERRUPT_GUEST_DBELL SPRN_GSRR1; \
193 trap
171 194
172/* 195/*
173 * Exception vectors. 196 * Exception vectors.
@@ -181,16 +204,16 @@ label:
181 .long func; \ 204 .long func; \
182 .long ret_from_except_full 205 .long ret_from_except_full
183 206
184#define EXCEPTION(n, label, hdlr, xfer) \ 207#define EXCEPTION(n, intno, label, hdlr, xfer) \
185 START_EXCEPTION(label); \ 208 START_EXCEPTION(label); \
186 NORMAL_EXCEPTION_PROLOG; \ 209 NORMAL_EXCEPTION_PROLOG(intno); \
187 addi r3,r1,STACK_FRAME_OVERHEAD; \ 210 addi r3,r1,STACK_FRAME_OVERHEAD; \
188 xfer(n, hdlr) 211 xfer(n, hdlr)
189 212
190#define CRITICAL_EXCEPTION(n, label, hdlr) \ 213#define CRITICAL_EXCEPTION(n, intno, label, hdlr) \
191 START_EXCEPTION(label); \ 214 START_EXCEPTION(label); \
192 CRITICAL_EXCEPTION_PROLOG; \ 215 CRITICAL_EXCEPTION_PROLOG(intno); \
193 addi r3,r1,STACK_FRAME_OVERHEAD; \ 216 addi r3,r1,STACK_FRAME_OVERHEAD; \
194 EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \ 217 EXC_XFER_TEMPLATE(hdlr, n+2, (MSR_KERNEL & ~(MSR_ME|MSR_DE|MSR_CE)), \
195 NOCOPY, crit_transfer_to_handler, \ 218 NOCOPY, crit_transfer_to_handler, \
196 ret_from_crit_exc) 219 ret_from_crit_exc)
@@ -302,7 +325,7 @@ label:
302 325
303#define DEBUG_CRIT_EXCEPTION \ 326#define DEBUG_CRIT_EXCEPTION \
304 START_EXCEPTION(DebugCrit); \ 327 START_EXCEPTION(DebugCrit); \
305 CRITICAL_EXCEPTION_PROLOG; \ 328 CRITICAL_EXCEPTION_PROLOG(DEBUG); \
306 \ 329 \
307 /* \ 330 /* \
308 * If there is a single step or branch-taken exception in an \ 331 * If there is a single step or branch-taken exception in an \
@@ -355,7 +378,7 @@ label:
355 378
356#define DATA_STORAGE_EXCEPTION \ 379#define DATA_STORAGE_EXCEPTION \
357 START_EXCEPTION(DataStorage) \ 380 START_EXCEPTION(DataStorage) \
358 NORMAL_EXCEPTION_PROLOG; \ 381 NORMAL_EXCEPTION_PROLOG(DATA_STORAGE); \
359 mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \ 382 mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \
360 stw r5,_ESR(r11); \ 383 stw r5,_ESR(r11); \
361 mfspr r4,SPRN_DEAR; /* Grab the DEAR */ \ 384 mfspr r4,SPRN_DEAR; /* Grab the DEAR */ \
@@ -363,7 +386,7 @@ label:
363 386
364#define INSTRUCTION_STORAGE_EXCEPTION \ 387#define INSTRUCTION_STORAGE_EXCEPTION \
365 START_EXCEPTION(InstructionStorage) \ 388 START_EXCEPTION(InstructionStorage) \
366 NORMAL_EXCEPTION_PROLOG; \ 389 NORMAL_EXCEPTION_PROLOG(INST_STORAGE); \
367 mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \ 390 mfspr r5,SPRN_ESR; /* Grab the ESR and save it */ \
368 stw r5,_ESR(r11); \ 391 stw r5,_ESR(r11); \
369 mr r4,r12; /* Pass SRR0 as arg2 */ \ 392 mr r4,r12; /* Pass SRR0 as arg2 */ \
@@ -372,7 +395,7 @@ label:
372 395
373#define ALIGNMENT_EXCEPTION \ 396#define ALIGNMENT_EXCEPTION \
374 START_EXCEPTION(Alignment) \ 397 START_EXCEPTION(Alignment) \
375 NORMAL_EXCEPTION_PROLOG; \ 398 NORMAL_EXCEPTION_PROLOG(ALIGNMENT); \
376 mfspr r4,SPRN_DEAR; /* Grab the DEAR and save it */ \ 399 mfspr r4,SPRN_DEAR; /* Grab the DEAR and save it */ \
377 stw r4,_DEAR(r11); \ 400 stw r4,_DEAR(r11); \
378 addi r3,r1,STACK_FRAME_OVERHEAD; \ 401 addi r3,r1,STACK_FRAME_OVERHEAD; \
@@ -380,7 +403,7 @@ label:
380 403
381#define PROGRAM_EXCEPTION \ 404#define PROGRAM_EXCEPTION \
382 START_EXCEPTION(Program) \ 405 START_EXCEPTION(Program) \
383 NORMAL_EXCEPTION_PROLOG; \ 406 NORMAL_EXCEPTION_PROLOG(PROGRAM); \
384 mfspr r4,SPRN_ESR; /* Grab the ESR and save it */ \ 407 mfspr r4,SPRN_ESR; /* Grab the ESR and save it */ \
385 stw r4,_ESR(r11); \ 408 stw r4,_ESR(r11); \
386 addi r3,r1,STACK_FRAME_OVERHEAD; \ 409 addi r3,r1,STACK_FRAME_OVERHEAD; \
@@ -388,7 +411,7 @@ label:
388 411
389#define DECREMENTER_EXCEPTION \ 412#define DECREMENTER_EXCEPTION \
390 START_EXCEPTION(Decrementer) \ 413 START_EXCEPTION(Decrementer) \
391 NORMAL_EXCEPTION_PROLOG; \ 414 NORMAL_EXCEPTION_PROLOG(DECREMENTER); \
392 lis r0,TSR_DIS@h; /* Setup the DEC interrupt mask */ \ 415 lis r0,TSR_DIS@h; /* Setup the DEC interrupt mask */ \
393 mtspr SPRN_TSR,r0; /* Clear the DEC interrupt */ \ 416 mtspr SPRN_TSR,r0; /* Clear the DEC interrupt */ \
394 addi r3,r1,STACK_FRAME_OVERHEAD; \ 417 addi r3,r1,STACK_FRAME_OVERHEAD; \
@@ -396,7 +419,7 @@ label:
396 419
397#define FP_UNAVAILABLE_EXCEPTION \ 420#define FP_UNAVAILABLE_EXCEPTION \
398 START_EXCEPTION(FloatingPointUnavailable) \ 421 START_EXCEPTION(FloatingPointUnavailable) \
399 NORMAL_EXCEPTION_PROLOG; \ 422 NORMAL_EXCEPTION_PROLOG(FP_UNAVAIL); \
400 beq 1f; \ 423 beq 1f; \
401 bl load_up_fpu; /* if from user, just load it up */ \ 424 bl load_up_fpu; /* if from user, just load it up */ \
402 b fast_exception_return; \ 425 b fast_exception_return; \
diff --git a/arch/powerpc/kernel/head_fsl_booke.S b/arch/powerpc/kernel/head_fsl_booke.S
index de80e0f9a2bd..1f4434a38608 100644
--- a/arch/powerpc/kernel/head_fsl_booke.S
+++ b/arch/powerpc/kernel/head_fsl_booke.S
@@ -301,19 +301,20 @@ _ENTRY(__early_start)
301 301
302interrupt_base: 302interrupt_base:
303 /* Critical Input Interrupt */ 303 /* Critical Input Interrupt */
304 CRITICAL_EXCEPTION(0x0100, CriticalInput, unknown_exception) 304 CRITICAL_EXCEPTION(0x0100, CRITICAL, CriticalInput, unknown_exception)
305 305
306 /* Machine Check Interrupt */ 306 /* Machine Check Interrupt */
307#ifdef CONFIG_E200 307#ifdef CONFIG_E200
308 /* no RFMCI, MCSRRs on E200 */ 308 /* no RFMCI, MCSRRs on E200 */
309 CRITICAL_EXCEPTION(0x0200, MachineCheck, machine_check_exception) 309 CRITICAL_EXCEPTION(0x0200, MACHINE_CHECK, MachineCheck, \
310 machine_check_exception)
310#else 311#else
311 MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception) 312 MCHECK_EXCEPTION(0x0200, MachineCheck, machine_check_exception)
312#endif 313#endif
313 314
314 /* Data Storage Interrupt */ 315 /* Data Storage Interrupt */
315 START_EXCEPTION(DataStorage) 316 START_EXCEPTION(DataStorage)
316 NORMAL_EXCEPTION_PROLOG 317 NORMAL_EXCEPTION_PROLOG(DATA_STORAGE)
317 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */ 318 mfspr r5,SPRN_ESR /* Grab the ESR, save it, pass arg3 */
318 stw r5,_ESR(r11) 319 stw r5,_ESR(r11)
319 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */ 320 mfspr r4,SPRN_DEAR /* Grab the DEAR, save it, pass arg2 */
@@ -328,7 +329,7 @@ interrupt_base:
328 INSTRUCTION_STORAGE_EXCEPTION 329 INSTRUCTION_STORAGE_EXCEPTION
329 330
330 /* External Input Interrupt */ 331 /* External Input Interrupt */
331 EXCEPTION(0x0500, ExternalInput, do_IRQ, EXC_XFER_LITE) 332 EXCEPTION(0x0500, EXTERNAL, ExternalInput, do_IRQ, EXC_XFER_LITE)
332 333
333 /* Alignment Interrupt */ 334 /* Alignment Interrupt */
334 ALIGNMENT_EXCEPTION 335 ALIGNMENT_EXCEPTION
@@ -342,32 +343,36 @@ interrupt_base:
342#else 343#else
343#ifdef CONFIG_E200 344#ifdef CONFIG_E200
344 /* E200 treats 'normal' floating point instructions as FP Unavail exception */ 345 /* E200 treats 'normal' floating point instructions as FP Unavail exception */
345 EXCEPTION(0x0800, FloatingPointUnavailable, program_check_exception, EXC_XFER_EE) 346 EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
347 program_check_exception, EXC_XFER_EE)
346#else 348#else
347 EXCEPTION(0x0800, FloatingPointUnavailable, unknown_exception, EXC_XFER_EE) 349 EXCEPTION(0x0800, FP_UNAVAIL, FloatingPointUnavailable, \
350 unknown_exception, EXC_XFER_EE)
348#endif 351#endif
349#endif 352#endif
350 353
351 /* System Call Interrupt */ 354 /* System Call Interrupt */
352 START_EXCEPTION(SystemCall) 355 START_EXCEPTION(SystemCall)
353 NORMAL_EXCEPTION_PROLOG 356 NORMAL_EXCEPTION_PROLOG(SYSCALL)
354 EXC_XFER_EE_LITE(0x0c00, DoSyscall) 357 EXC_XFER_EE_LITE(0x0c00, DoSyscall)
355 358
356 /* Auxiliary Processor Unavailable Interrupt */ 359 /* Auxiliary Processor Unavailable Interrupt */
357 EXCEPTION(0x2900, AuxillaryProcessorUnavailable, unknown_exception, EXC_XFER_EE) 360 EXCEPTION(0x2900, AP_UNAVAIL, AuxillaryProcessorUnavailable, \
361 unknown_exception, EXC_XFER_EE)
358 362
359 /* Decrementer Interrupt */ 363 /* Decrementer Interrupt */
360 DECREMENTER_EXCEPTION 364 DECREMENTER_EXCEPTION
361 365
362 /* Fixed Internal Timer Interrupt */ 366 /* Fixed Internal Timer Interrupt */
363 /* TODO: Add FIT support */ 367 /* TODO: Add FIT support */
364 EXCEPTION(0x3100, FixedIntervalTimer, unknown_exception, EXC_XFER_EE) 368 EXCEPTION(0x3100, FIT, FixedIntervalTimer, \
369 unknown_exception, EXC_XFER_EE)
365 370
366 /* Watchdog Timer Interrupt */ 371 /* Watchdog Timer Interrupt */
367#ifdef CONFIG_BOOKE_WDT 372#ifdef CONFIG_BOOKE_WDT
368 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, WatchdogException) 373 CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, WatchdogException)
369#else 374#else
370 CRITICAL_EXCEPTION(0x3200, WatchdogTimer, unknown_exception) 375 CRITICAL_EXCEPTION(0x3200, WATCHDOG, WatchdogTimer, unknown_exception)
371#endif 376#endif
372 377
373 /* Data TLB Error Interrupt */ 378 /* Data TLB Error Interrupt */
@@ -375,10 +380,16 @@ interrupt_base:
375 mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ 380 mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
376 mfspr r10, SPRN_SPRG_THREAD 381 mfspr r10, SPRN_SPRG_THREAD
377 stw r11, THREAD_NORMSAVE(0)(r10) 382 stw r11, THREAD_NORMSAVE(0)(r10)
383#ifdef CONFIG_KVM_BOOKE_HV
384BEGIN_FTR_SECTION
385 mfspr r11, SPRN_SRR1
386END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
387#endif
378 stw r12, THREAD_NORMSAVE(1)(r10) 388 stw r12, THREAD_NORMSAVE(1)(r10)
379 stw r13, THREAD_NORMSAVE(2)(r10) 389 stw r13, THREAD_NORMSAVE(2)(r10)
380 mfcr r13 390 mfcr r13
381 stw r13, THREAD_NORMSAVE(3)(r10) 391 stw r13, THREAD_NORMSAVE(3)(r10)
392 DO_KVM BOOKE_INTERRUPT_DTLB_MISS SPRN_SRR1
382 mfspr r10, SPRN_DEAR /* Get faulting address */ 393 mfspr r10, SPRN_DEAR /* Get faulting address */
383 394
384 /* If we are faulting a kernel address, we have to use the 395 /* If we are faulting a kernel address, we have to use the
@@ -463,10 +474,16 @@ interrupt_base:
463 mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */ 474 mtspr SPRN_SPRG_WSCRATCH0, r10 /* Save some working registers */
464 mfspr r10, SPRN_SPRG_THREAD 475 mfspr r10, SPRN_SPRG_THREAD
465 stw r11, THREAD_NORMSAVE(0)(r10) 476 stw r11, THREAD_NORMSAVE(0)(r10)
477#ifdef CONFIG_KVM_BOOKE_HV
478BEGIN_FTR_SECTION
479 mfspr r11, SPRN_SRR1
480END_FTR_SECTION_IFSET(CPU_FTR_EMB_HV)
481#endif
466 stw r12, THREAD_NORMSAVE(1)(r10) 482 stw r12, THREAD_NORMSAVE(1)(r10)
467 stw r13, THREAD_NORMSAVE(2)(r10) 483 stw r13, THREAD_NORMSAVE(2)(r10)
468 mfcr r13 484 mfcr r13
469 stw r13, THREAD_NORMSAVE(3)(r10) 485 stw r13, THREAD_NORMSAVE(3)(r10)
486 DO_KVM BOOKE_INTERRUPT_ITLB_MISS SPRN_SRR1
470 mfspr r10, SPRN_SRR0 /* Get faulting address */ 487 mfspr r10, SPRN_SRR0 /* Get faulting address */
471 488
472 /* If we are faulting a kernel address, we have to use the 489 /* If we are faulting a kernel address, we have to use the
@@ -538,36 +555,54 @@ interrupt_base:
538#ifdef CONFIG_SPE 555#ifdef CONFIG_SPE
539 /* SPE Unavailable */ 556 /* SPE Unavailable */
540 START_EXCEPTION(SPEUnavailable) 557 START_EXCEPTION(SPEUnavailable)
541 NORMAL_EXCEPTION_PROLOG 558 NORMAL_EXCEPTION_PROLOG(SPE_UNAVAIL)
542 bne load_up_spe 559 bne load_up_spe
543 addi r3,r1,STACK_FRAME_OVERHEAD 560 addi r3,r1,STACK_FRAME_OVERHEAD
544 EXC_XFER_EE_LITE(0x2010, KernelSPE) 561 EXC_XFER_EE_LITE(0x2010, KernelSPE)
545#else 562#else
546 EXCEPTION(0x2020, SPEUnavailable, unknown_exception, EXC_XFER_EE) 563 EXCEPTION(0x2020, SPE_UNAVAIL, SPEUnavailable, \
564 unknown_exception, EXC_XFER_EE)
547#endif /* CONFIG_SPE */ 565#endif /* CONFIG_SPE */
548 566
549 /* SPE Floating Point Data */ 567 /* SPE Floating Point Data */
550#ifdef CONFIG_SPE 568#ifdef CONFIG_SPE
551 EXCEPTION(0x2030, SPEFloatingPointData, SPEFloatingPointException, EXC_XFER_EE); 569 EXCEPTION(0x2030, SPE_FP_DATA, SPEFloatingPointData, \
570 SPEFloatingPointException, EXC_XFER_EE);
552 571
553 /* SPE Floating Point Round */ 572 /* SPE Floating Point Round */
554 EXCEPTION(0x2050, SPEFloatingPointRound, SPEFloatingPointRoundException, EXC_XFER_EE) 573 EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
574 SPEFloatingPointRoundException, EXC_XFER_EE)
555#else 575#else
556 EXCEPTION(0x2040, SPEFloatingPointData, unknown_exception, EXC_XFER_EE) 576 EXCEPTION(0x2040, SPE_FP_DATA, SPEFloatingPointData, \
557 EXCEPTION(0x2050, SPEFloatingPointRound, unknown_exception, EXC_XFER_EE) 577 unknown_exception, EXC_XFER_EE)
578 EXCEPTION(0x2050, SPE_FP_ROUND, SPEFloatingPointRound, \
579 unknown_exception, EXC_XFER_EE)
558#endif /* CONFIG_SPE */ 580#endif /* CONFIG_SPE */
559 581
560 /* Performance Monitor */ 582 /* Performance Monitor */
561 EXCEPTION(0x2060, PerformanceMonitor, performance_monitor_exception, EXC_XFER_STD) 583 EXCEPTION(0x2060, PERFORMANCE_MONITOR, PerformanceMonitor, \
584 performance_monitor_exception, EXC_XFER_STD)
562 585
563 EXCEPTION(0x2070, Doorbell, doorbell_exception, EXC_XFER_STD) 586 EXCEPTION(0x2070, DOORBELL, Doorbell, doorbell_exception, EXC_XFER_STD)
564 587
565 CRITICAL_EXCEPTION(0x2080, CriticalDoorbell, unknown_exception) 588 CRITICAL_EXCEPTION(0x2080, DOORBELL_CRITICAL, \
589 CriticalDoorbell, unknown_exception)
566 590
567 /* Debug Interrupt */ 591 /* Debug Interrupt */
568 DEBUG_DEBUG_EXCEPTION 592 DEBUG_DEBUG_EXCEPTION
569 DEBUG_CRIT_EXCEPTION 593 DEBUG_CRIT_EXCEPTION
570 594
595 GUEST_DOORBELL_EXCEPTION
596
597 CRITICAL_EXCEPTION(0, GUEST_DBELL_CRIT, CriticalGuestDoorbell, \
598 unknown_exception)
599
600 /* Hypercall */
601 EXCEPTION(0, HV_SYSCALL, Hypercall, unknown_exception, EXC_XFER_EE)
602
603 /* Embedded Hypervisor Privilege */
604 EXCEPTION(0, HV_PRIV, Ehvpriv, unknown_exception, EXC_XFER_EE)
605
571/* 606/*
572 * Local functions 607 * Local functions
573 */ 608 */
@@ -871,8 +906,31 @@ _GLOBAL(__setup_e500mc_ivors)
871 mtspr SPRN_IVOR36,r3 906 mtspr SPRN_IVOR36,r3
872 li r3,CriticalDoorbell@l 907 li r3,CriticalDoorbell@l
873 mtspr SPRN_IVOR37,r3 908 mtspr SPRN_IVOR37,r3
909
910 /*
911 * We only want to touch IVOR38-41 if we're running on hardware
912 * that supports category E.HV. The architectural way to determine
913 * this is MMUCFG[LPIDSIZE].
914 */
915 mfspr r3, SPRN_MMUCFG
916 andis. r3, r3, MMUCFG_LPIDSIZE@h
917 beq no_hv
918 li r3,GuestDoorbell@l
919 mtspr SPRN_IVOR38,r3
920 li r3,CriticalGuestDoorbell@l
921 mtspr SPRN_IVOR39,r3
922 li r3,Hypercall@l
923 mtspr SPRN_IVOR40,r3
924 li r3,Ehvpriv@l
925 mtspr SPRN_IVOR41,r3
926skip_hv_ivors:
874 sync 927 sync
875 blr 928 blr
929no_hv:
930 lwz r3, CPU_SPEC_FEATURES(r5)
931 rlwinm r3, r3, 0, ~CPU_FTR_EMB_HV
932 stw r3, CPU_SPEC_FEATURES(r5)
933 b skip_hv_ivors
876 934
877#ifdef CONFIG_SPE 935#ifdef CONFIG_SPE
878/* 936/*
diff --git a/arch/powerpc/kernel/idle_power7.S b/arch/powerpc/kernel/idle_power7.S
index 0cdc9a392839..7140d838339e 100644
--- a/arch/powerpc/kernel/idle_power7.S
+++ b/arch/powerpc/kernel/idle_power7.S
@@ -16,6 +16,7 @@
16#include <asm/asm-offsets.h> 16#include <asm/asm-offsets.h>
17#include <asm/ppc-opcode.h> 17#include <asm/ppc-opcode.h>
18#include <asm/hw_irq.h> 18#include <asm/hw_irq.h>
19#include <asm/kvm_book3s_asm.h>
19 20
20#undef DEBUG 21#undef DEBUG
21 22
@@ -81,6 +82,12 @@ _GLOBAL(power7_idle)
81 std r9,_MSR(r1) 82 std r9,_MSR(r1)
82 std r1,PACAR1(r13) 83 std r1,PACAR1(r13)
83 84
85#ifdef CONFIG_KVM_BOOK3S_64_HV
86 /* Tell KVM we're napping */
87 li r4,KVM_HWTHREAD_IN_NAP
88 stb r4,HSTATE_HWTHREAD_STATE(r13)
89#endif
90
84 /* Magic NAP mode enter sequence */ 91 /* Magic NAP mode enter sequence */
85 std r0,0(r1) 92 std r0,0(r1)
86 ptesync 93 ptesync
diff --git a/arch/powerpc/kernel/ppc_ksyms.c b/arch/powerpc/kernel/ppc_ksyms.c
index 786a2700ec2d..3e4031581c65 100644
--- a/arch/powerpc/kernel/ppc_ksyms.c
+++ b/arch/powerpc/kernel/ppc_ksyms.c
@@ -85,8 +85,6 @@ EXPORT_SYMBOL(csum_tcpudp_magic);
85 85
86EXPORT_SYMBOL(__copy_tofrom_user); 86EXPORT_SYMBOL(__copy_tofrom_user);
87EXPORT_SYMBOL(__clear_user); 87EXPORT_SYMBOL(__clear_user);
88EXPORT_SYMBOL(__strncpy_from_user);
89EXPORT_SYMBOL(__strnlen_user);
90EXPORT_SYMBOL(copy_page); 88EXPORT_SYMBOL(copy_page);
91 89
92#if defined(CONFIG_PCI) && defined(CONFIG_PPC32) 90#if defined(CONFIG_PCI) && defined(CONFIG_PPC32)
@@ -190,3 +188,7 @@ EXPORT_SYMBOL(__arch_hweight16);
190EXPORT_SYMBOL(__arch_hweight32); 188EXPORT_SYMBOL(__arch_hweight32);
191EXPORT_SYMBOL(__arch_hweight64); 189EXPORT_SYMBOL(__arch_hweight64);
192#endif 190#endif
191
192#ifdef CONFIG_PPC_BOOK3S_64
193EXPORT_SYMBOL_GPL(mmu_psize_defs);
194#endif
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 2c42cd72d0f5..99a995c2a3f2 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -100,7 +100,7 @@ static int decrementer_set_next_event(unsigned long evt,
100static void decrementer_set_mode(enum clock_event_mode mode, 100static void decrementer_set_mode(enum clock_event_mode mode,
101 struct clock_event_device *dev); 101 struct clock_event_device *dev);
102 102
103static struct clock_event_device decrementer_clockevent = { 103struct clock_event_device decrementer_clockevent = {
104 .name = "decrementer", 104 .name = "decrementer",
105 .rating = 200, 105 .rating = 200,
106 .irq = 0, 106 .irq = 0,
@@ -108,6 +108,7 @@ static struct clock_event_device decrementer_clockevent = {
108 .set_mode = decrementer_set_mode, 108 .set_mode = decrementer_set_mode,
109 .features = CLOCK_EVT_FEAT_ONESHOT, 109 .features = CLOCK_EVT_FEAT_ONESHOT,
110}; 110};
111EXPORT_SYMBOL(decrementer_clockevent);
111 112
112DEFINE_PER_CPU(u64, decrementers_next_tb); 113DEFINE_PER_CPU(u64, decrementers_next_tb);
113static DEFINE_PER_CPU(struct clock_event_device, decrementers); 114static DEFINE_PER_CPU(struct clock_event_device, decrementers);
diff --git a/arch/powerpc/kvm/44x.c b/arch/powerpc/kvm/44x.c
index 7b612a76c701..50e7dbc7356c 100644
--- a/arch/powerpc/kvm/44x.c
+++ b/arch/powerpc/kvm/44x.c
@@ -29,15 +29,18 @@
29#include <asm/kvm_ppc.h> 29#include <asm/kvm_ppc.h>
30 30
31#include "44x_tlb.h" 31#include "44x_tlb.h"
32#include "booke.h"
32 33
33void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 34void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
34{ 35{
36 kvmppc_booke_vcpu_load(vcpu, cpu);
35 kvmppc_44x_tlb_load(vcpu); 37 kvmppc_44x_tlb_load(vcpu);
36} 38}
37 39
38void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) 40void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
39{ 41{
40 kvmppc_44x_tlb_put(vcpu); 42 kvmppc_44x_tlb_put(vcpu);
43 kvmppc_booke_vcpu_put(vcpu);
41} 44}
42 45
43int kvmppc_core_check_processor_compat(void) 46int kvmppc_core_check_processor_compat(void)
@@ -160,6 +163,15 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
160 kmem_cache_free(kvm_vcpu_cache, vcpu_44x); 163 kmem_cache_free(kvm_vcpu_cache, vcpu_44x);
161} 164}
162 165
166int kvmppc_core_init_vm(struct kvm *kvm)
167{
168 return 0;
169}
170
171void kvmppc_core_destroy_vm(struct kvm *kvm)
172{
173}
174
163static int __init kvmppc_44x_init(void) 175static int __init kvmppc_44x_init(void)
164{ 176{
165 int r; 177 int r;
diff --git a/arch/powerpc/kvm/44x_emulate.c b/arch/powerpc/kvm/44x_emulate.c
index 549bb2c9a47a..c8c61578fdfc 100644
--- a/arch/powerpc/kvm/44x_emulate.c
+++ b/arch/powerpc/kvm/44x_emulate.c
@@ -37,22 +37,19 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
37 unsigned int inst, int *advance) 37 unsigned int inst, int *advance)
38{ 38{
39 int emulated = EMULATE_DONE; 39 int emulated = EMULATE_DONE;
40 int dcrn; 40 int dcrn = get_dcrn(inst);
41 int ra; 41 int ra = get_ra(inst);
42 int rb; 42 int rb = get_rb(inst);
43 int rc; 43 int rc = get_rc(inst);
44 int rs; 44 int rs = get_rs(inst);
45 int rt; 45 int rt = get_rt(inst);
46 int ws; 46 int ws = get_ws(inst);
47 47
48 switch (get_op(inst)) { 48 switch (get_op(inst)) {
49 case 31: 49 case 31:
50 switch (get_xop(inst)) { 50 switch (get_xop(inst)) {
51 51
52 case XOP_MFDCR: 52 case XOP_MFDCR:
53 dcrn = get_dcrn(inst);
54 rt = get_rt(inst);
55
56 /* The guest may access CPR0 registers to determine the timebase 53 /* The guest may access CPR0 registers to determine the timebase
57 * frequency, and it must know the real host frequency because it 54 * frequency, and it must know the real host frequency because it
58 * can directly access the timebase registers. 55 * can directly access the timebase registers.
@@ -88,9 +85,6 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
88 break; 85 break;
89 86
90 case XOP_MTDCR: 87 case XOP_MTDCR:
91 dcrn = get_dcrn(inst);
92 rs = get_rs(inst);
93
94 /* emulate some access in kernel */ 88 /* emulate some access in kernel */
95 switch (dcrn) { 89 switch (dcrn) {
96 case DCRN_CPR0_CONFIG_ADDR: 90 case DCRN_CPR0_CONFIG_ADDR:
@@ -108,17 +102,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
108 break; 102 break;
109 103
110 case XOP_TLBWE: 104 case XOP_TLBWE:
111 ra = get_ra(inst);
112 rs = get_rs(inst);
113 ws = get_ws(inst);
114 emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws); 105 emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
115 break; 106 break;
116 107
117 case XOP_TLBSX: 108 case XOP_TLBSX:
118 rt = get_rt(inst);
119 ra = get_ra(inst);
120 rb = get_rb(inst);
121 rc = get_rc(inst);
122 emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc); 109 emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
123 break; 110 break;
124 111
@@ -141,41 +128,41 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
141 return emulated; 128 return emulated;
142} 129}
143 130
144int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) 131int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
145{ 132{
146 int emulated = EMULATE_DONE; 133 int emulated = EMULATE_DONE;
147 134
148 switch (sprn) { 135 switch (sprn) {
149 case SPRN_PID: 136 case SPRN_PID:
150 kvmppc_set_pid(vcpu, kvmppc_get_gpr(vcpu, rs)); break; 137 kvmppc_set_pid(vcpu, spr_val); break;
151 case SPRN_MMUCR: 138 case SPRN_MMUCR:
152 vcpu->arch.mmucr = kvmppc_get_gpr(vcpu, rs); break; 139 vcpu->arch.mmucr = spr_val; break;
153 case SPRN_CCR0: 140 case SPRN_CCR0:
154 vcpu->arch.ccr0 = kvmppc_get_gpr(vcpu, rs); break; 141 vcpu->arch.ccr0 = spr_val; break;
155 case SPRN_CCR1: 142 case SPRN_CCR1:
156 vcpu->arch.ccr1 = kvmppc_get_gpr(vcpu, rs); break; 143 vcpu->arch.ccr1 = spr_val; break;
157 default: 144 default:
158 emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs); 145 emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
159 } 146 }
160 147
161 return emulated; 148 return emulated;
162} 149}
163 150
164int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) 151int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
165{ 152{
166 int emulated = EMULATE_DONE; 153 int emulated = EMULATE_DONE;
167 154
168 switch (sprn) { 155 switch (sprn) {
169 case SPRN_PID: 156 case SPRN_PID:
170 kvmppc_set_gpr(vcpu, rt, vcpu->arch.pid); break; 157 *spr_val = vcpu->arch.pid; break;
171 case SPRN_MMUCR: 158 case SPRN_MMUCR:
172 kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucr); break; 159 *spr_val = vcpu->arch.mmucr; break;
173 case SPRN_CCR0: 160 case SPRN_CCR0:
174 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr0); break; 161 *spr_val = vcpu->arch.ccr0; break;
175 case SPRN_CCR1: 162 case SPRN_CCR1:
176 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr1); break; 163 *spr_val = vcpu->arch.ccr1; break;
177 default: 164 default:
178 emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); 165 emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
179 } 166 }
180 167
181 return emulated; 168 return emulated;
diff --git a/arch/powerpc/kvm/Kconfig b/arch/powerpc/kvm/Kconfig
index 8f64709ae331..f4dacb9c57fa 100644
--- a/arch/powerpc/kvm/Kconfig
+++ b/arch/powerpc/kvm/Kconfig
@@ -90,6 +90,9 @@ config KVM_BOOK3S_64_PR
90 depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV 90 depends on KVM_BOOK3S_64 && !KVM_BOOK3S_64_HV
91 select KVM_BOOK3S_PR 91 select KVM_BOOK3S_PR
92 92
93config KVM_BOOKE_HV
94 bool
95
93config KVM_440 96config KVM_440
94 bool "KVM support for PowerPC 440 processors" 97 bool "KVM support for PowerPC 440 processors"
95 depends on EXPERIMENTAL && 44x 98 depends on EXPERIMENTAL && 44x
@@ -106,7 +109,7 @@ config KVM_440
106 109
107config KVM_EXIT_TIMING 110config KVM_EXIT_TIMING
108 bool "Detailed exit timing" 111 bool "Detailed exit timing"
109 depends on KVM_440 || KVM_E500 112 depends on KVM_440 || KVM_E500V2 || KVM_E500MC
110 ---help--- 113 ---help---
111 Calculate elapsed time for every exit/enter cycle. A per-vcpu 114 Calculate elapsed time for every exit/enter cycle. A per-vcpu
112 report is available in debugfs kvm/vm#_vcpu#_timing. 115 report is available in debugfs kvm/vm#_vcpu#_timing.
@@ -115,14 +118,29 @@ config KVM_EXIT_TIMING
115 118
116 If unsure, say N. 119 If unsure, say N.
117 120
118config KVM_E500 121config KVM_E500V2
119 bool "KVM support for PowerPC E500 processors" 122 bool "KVM support for PowerPC E500v2 processors"
120 depends on EXPERIMENTAL && E500 123 depends on EXPERIMENTAL && E500 && !PPC_E500MC
121 select KVM 124 select KVM
122 select KVM_MMIO 125 select KVM_MMIO
123 ---help--- 126 ---help---
124 Support running unmodified E500 guest kernels in virtual machines on 127 Support running unmodified E500 guest kernels in virtual machines on
125 E500 host processors. 128 E500v2 host processors.
129
130 This module provides access to the hardware capabilities through
131 a character device node named /dev/kvm.
132
133 If unsure, say N.
134
135config KVM_E500MC
136 bool "KVM support for PowerPC E500MC/E5500 processors"
137 depends on EXPERIMENTAL && PPC_E500MC
138 select KVM
139 select KVM_MMIO
140 select KVM_BOOKE_HV
141 ---help---
142 Support running unmodified E500MC/E5500 (32-bit) guest kernels in
143 virtual machines on E500MC/E5500 host processors.
126 144
127 This module provides access to the hardware capabilities through 145 This module provides access to the hardware capabilities through
128 a character device node named /dev/kvm. 146 a character device node named /dev/kvm.
diff --git a/arch/powerpc/kvm/Makefile b/arch/powerpc/kvm/Makefile
index 3688aeecc4b2..c2a08636e6d4 100644
--- a/arch/powerpc/kvm/Makefile
+++ b/arch/powerpc/kvm/Makefile
@@ -36,7 +36,17 @@ kvm-e500-objs := \
36 e500.o \ 36 e500.o \
37 e500_tlb.o \ 37 e500_tlb.o \
38 e500_emulate.o 38 e500_emulate.o
39kvm-objs-$(CONFIG_KVM_E500) := $(kvm-e500-objs) 39kvm-objs-$(CONFIG_KVM_E500V2) := $(kvm-e500-objs)
40
41kvm-e500mc-objs := \
42 $(common-objs-y) \
43 booke.o \
44 booke_emulate.o \
45 bookehv_interrupts.o \
46 e500mc.o \
47 e500_tlb.o \
48 e500_emulate.o
49kvm-objs-$(CONFIG_KVM_E500MC) := $(kvm-e500mc-objs)
40 50
41kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \ 51kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
42 ../../../virt/kvm/coalesced_mmio.o \ 52 ../../../virt/kvm/coalesced_mmio.o \
@@ -44,6 +54,7 @@ kvm-book3s_64-objs-$(CONFIG_KVM_BOOK3S_64_PR) := \
44 book3s_paired_singles.o \ 54 book3s_paired_singles.o \
45 book3s_pr.o \ 55 book3s_pr.o \
46 book3s_pr_papr.o \ 56 book3s_pr_papr.o \
57 book3s_64_vio_hv.o \
47 book3s_emulate.o \ 58 book3s_emulate.o \
48 book3s_interrupts.o \ 59 book3s_interrupts.o \
49 book3s_mmu_hpte.o \ 60 book3s_mmu_hpte.o \
@@ -68,6 +79,7 @@ kvm-book3s_64-module-objs := \
68 powerpc.o \ 79 powerpc.o \
69 emulate.o \ 80 emulate.o \
70 book3s.o \ 81 book3s.o \
82 book3s_64_vio.o \
71 $(kvm-book3s_64-objs-y) 83 $(kvm-book3s_64-objs-y)
72 84
73kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs) 85kvm-objs-$(CONFIG_KVM_BOOK3S_64) := $(kvm-book3s_64-module-objs)
@@ -88,7 +100,8 @@ kvm-objs-$(CONFIG_KVM_BOOK3S_32) := $(kvm-book3s_32-objs)
88kvm-objs := $(kvm-objs-m) $(kvm-objs-y) 100kvm-objs := $(kvm-objs-m) $(kvm-objs-y)
89 101
90obj-$(CONFIG_KVM_440) += kvm.o 102obj-$(CONFIG_KVM_440) += kvm.o
91obj-$(CONFIG_KVM_E500) += kvm.o 103obj-$(CONFIG_KVM_E500V2) += kvm.o
104obj-$(CONFIG_KVM_E500MC) += kvm.o
92obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o 105obj-$(CONFIG_KVM_BOOK3S_64) += kvm.o
93obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o 106obj-$(CONFIG_KVM_BOOK3S_32) += kvm.o
94 107
diff --git a/arch/powerpc/kvm/book3s.c b/arch/powerpc/kvm/book3s.c
index 7d54f4ed6d96..3f2a8360c857 100644
--- a/arch/powerpc/kvm/book3s.c
+++ b/arch/powerpc/kvm/book3s.c
@@ -258,7 +258,7 @@ static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
258 return true; 258 return true;
259} 259}
260 260
261void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) 261int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
262{ 262{
263 unsigned long *pending = &vcpu->arch.pending_exceptions; 263 unsigned long *pending = &vcpu->arch.pending_exceptions;
264 unsigned long old_pending = vcpu->arch.pending_exceptions; 264 unsigned long old_pending = vcpu->arch.pending_exceptions;
@@ -283,12 +283,17 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
283 283
284 /* Tell the guest about our interrupt status */ 284 /* Tell the guest about our interrupt status */
285 kvmppc_update_int_pending(vcpu, *pending, old_pending); 285 kvmppc_update_int_pending(vcpu, *pending, old_pending);
286
287 return 0;
286} 288}
287 289
288pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn) 290pfn_t kvmppc_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn)
289{ 291{
290 ulong mp_pa = vcpu->arch.magic_page_pa; 292 ulong mp_pa = vcpu->arch.magic_page_pa;
291 293
294 if (!(vcpu->arch.shared->msr & MSR_SF))
295 mp_pa = (uint32_t)mp_pa;
296
292 /* Magic page override */ 297 /* Magic page override */
293 if (unlikely(mp_pa) && 298 if (unlikely(mp_pa) &&
294 unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) == 299 unlikely(((gfn << PAGE_SHIFT) & KVM_PAM) ==
diff --git a/arch/powerpc/kvm/book3s_64_mmu_hv.c b/arch/powerpc/kvm/book3s_64_mmu_hv.c
index c3beaeef3f60..80a577517584 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_hv.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_hv.c
@@ -36,13 +36,11 @@
36 36
37/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */ 37/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
38#define MAX_LPID_970 63 38#define MAX_LPID_970 63
39#define NR_LPIDS (LPID_RSVD + 1)
40unsigned long lpid_inuse[BITS_TO_LONGS(NR_LPIDS)];
41 39
42long kvmppc_alloc_hpt(struct kvm *kvm) 40long kvmppc_alloc_hpt(struct kvm *kvm)
43{ 41{
44 unsigned long hpt; 42 unsigned long hpt;
45 unsigned long lpid; 43 long lpid;
46 struct revmap_entry *rev; 44 struct revmap_entry *rev;
47 struct kvmppc_linear_info *li; 45 struct kvmppc_linear_info *li;
48 46
@@ -72,14 +70,9 @@ long kvmppc_alloc_hpt(struct kvm *kvm)
72 } 70 }
73 kvm->arch.revmap = rev; 71 kvm->arch.revmap = rev;
74 72
75 /* Allocate the guest's logical partition ID */ 73 lpid = kvmppc_alloc_lpid();
76 do { 74 if (lpid < 0)
77 lpid = find_first_zero_bit(lpid_inuse, NR_LPIDS); 75 goto out_freeboth;
78 if (lpid >= NR_LPIDS) {
79 pr_err("kvm_alloc_hpt: No LPIDs free\n");
80 goto out_freeboth;
81 }
82 } while (test_and_set_bit(lpid, lpid_inuse));
83 76
84 kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18); 77 kvm->arch.sdr1 = __pa(hpt) | (HPT_ORDER - 18);
85 kvm->arch.lpid = lpid; 78 kvm->arch.lpid = lpid;
@@ -96,7 +89,7 @@ long kvmppc_alloc_hpt(struct kvm *kvm)
96 89
97void kvmppc_free_hpt(struct kvm *kvm) 90void kvmppc_free_hpt(struct kvm *kvm)
98{ 91{
99 clear_bit(kvm->arch.lpid, lpid_inuse); 92 kvmppc_free_lpid(kvm->arch.lpid);
100 vfree(kvm->arch.revmap); 93 vfree(kvm->arch.revmap);
101 if (kvm->arch.hpt_li) 94 if (kvm->arch.hpt_li)
102 kvm_release_hpt(kvm->arch.hpt_li); 95 kvm_release_hpt(kvm->arch.hpt_li);
@@ -171,8 +164,7 @@ int kvmppc_mmu_hv_init(void)
171 if (!cpu_has_feature(CPU_FTR_HVMODE)) 164 if (!cpu_has_feature(CPU_FTR_HVMODE))
172 return -EINVAL; 165 return -EINVAL;
173 166
174 memset(lpid_inuse, 0, sizeof(lpid_inuse)); 167 /* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
175
176 if (cpu_has_feature(CPU_FTR_ARCH_206)) { 168 if (cpu_has_feature(CPU_FTR_ARCH_206)) {
177 host_lpid = mfspr(SPRN_LPID); /* POWER7 */ 169 host_lpid = mfspr(SPRN_LPID); /* POWER7 */
178 rsvd_lpid = LPID_RSVD; 170 rsvd_lpid = LPID_RSVD;
@@ -181,9 +173,11 @@ int kvmppc_mmu_hv_init(void)
181 rsvd_lpid = MAX_LPID_970; 173 rsvd_lpid = MAX_LPID_970;
182 } 174 }
183 175
184 set_bit(host_lpid, lpid_inuse); 176 kvmppc_init_lpid(rsvd_lpid + 1);
177
178 kvmppc_claim_lpid(host_lpid);
185 /* rsvd_lpid is reserved for use in partition switching */ 179 /* rsvd_lpid is reserved for use in partition switching */
186 set_bit(rsvd_lpid, lpid_inuse); 180 kvmppc_claim_lpid(rsvd_lpid);
187 181
188 return 0; 182 return 0;
189} 183}
@@ -452,7 +446,7 @@ static int instruction_is_store(unsigned int instr)
452} 446}
453 447
454static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu, 448static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
455 unsigned long gpa, int is_store) 449 unsigned long gpa, gva_t ea, int is_store)
456{ 450{
457 int ret; 451 int ret;
458 u32 last_inst; 452 u32 last_inst;
@@ -499,6 +493,7 @@ static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
499 */ 493 */
500 494
501 vcpu->arch.paddr_accessed = gpa; 495 vcpu->arch.paddr_accessed = gpa;
496 vcpu->arch.vaddr_accessed = ea;
502 return kvmppc_emulate_mmio(run, vcpu); 497 return kvmppc_emulate_mmio(run, vcpu);
503} 498}
504 499
@@ -552,7 +547,7 @@ int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
552 /* No memslot means it's an emulated MMIO region */ 547 /* No memslot means it's an emulated MMIO region */
553 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) { 548 if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) {
554 unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1)); 549 unsigned long gpa = (gfn << PAGE_SHIFT) | (ea & (psize - 1));
555 return kvmppc_hv_emulate_mmio(run, vcpu, gpa, 550 return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
556 dsisr & DSISR_ISSTORE); 551 dsisr & DSISR_ISSTORE);
557 } 552 }
558 553
diff --git a/arch/powerpc/kvm/book3s_64_slb.S b/arch/powerpc/kvm/book3s_64_slb.S
index f2e6e48ea463..56b983e7b738 100644
--- a/arch/powerpc/kvm/book3s_64_slb.S
+++ b/arch/powerpc/kvm/book3s_64_slb.S
@@ -90,8 +90,6 @@ slb_exit_skip_ ## num:
90 or r10, r10, r12 90 or r10, r10, r12
91 slbie r10 91 slbie r10
92 92
93 isync
94
95 /* Fill SLB with our shadow */ 93 /* Fill SLB with our shadow */
96 94
97 lbz r12, SVCPU_SLB_MAX(r3) 95 lbz r12, SVCPU_SLB_MAX(r3)
diff --git a/arch/powerpc/kvm/book3s_64_vio.c b/arch/powerpc/kvm/book3s_64_vio.c
new file mode 100644
index 000000000000..72ffc899c082
--- /dev/null
+++ b/arch/powerpc/kvm/book3s_64_vio.c
@@ -0,0 +1,150 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
16 * Copyright 2011 David Gibson, IBM Corporation <dwg@au1.ibm.com>
17 */
18
19#include <linux/types.h>
20#include <linux/string.h>
21#include <linux/kvm.h>
22#include <linux/kvm_host.h>
23#include <linux/highmem.h>
24#include <linux/gfp.h>
25#include <linux/slab.h>
26#include <linux/hugetlb.h>
27#include <linux/list.h>
28#include <linux/anon_inodes.h>
29
30#include <asm/tlbflush.h>
31#include <asm/kvm_ppc.h>
32#include <asm/kvm_book3s.h>
33#include <asm/mmu-hash64.h>
34#include <asm/hvcall.h>
35#include <asm/synch.h>
36#include <asm/ppc-opcode.h>
37#include <asm/kvm_host.h>
38#include <asm/udbg.h>
39
40#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
41
42static long kvmppc_stt_npages(unsigned long window_size)
43{
44 return ALIGN((window_size >> SPAPR_TCE_SHIFT)
45 * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
46}
47
48static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
49{
50 struct kvm *kvm = stt->kvm;
51 int i;
52
53 mutex_lock(&kvm->lock);
54 list_del(&stt->list);
55 for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
56 __free_page(stt->pages[i]);
57 kfree(stt);
58 mutex_unlock(&kvm->lock);
59
60 kvm_put_kvm(kvm);
61}
62
63static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
64{
65 struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
66 struct page *page;
67
68 if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
69 return VM_FAULT_SIGBUS;
70
71 page = stt->pages[vmf->pgoff];
72 get_page(page);
73 vmf->page = page;
74 return 0;
75}
76
77static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
78 .fault = kvm_spapr_tce_fault,
79};
80
81static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
82{
83 vma->vm_ops = &kvm_spapr_tce_vm_ops;
84 return 0;
85}
86
87static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
88{
89 struct kvmppc_spapr_tce_table *stt = filp->private_data;
90
91 release_spapr_tce_table(stt);
92 return 0;
93}
94
95static struct file_operations kvm_spapr_tce_fops = {
96 .mmap = kvm_spapr_tce_mmap,
97 .release = kvm_spapr_tce_release,
98};
99
100long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
101 struct kvm_create_spapr_tce *args)
102{
103 struct kvmppc_spapr_tce_table *stt = NULL;
104 long npages;
105 int ret = -ENOMEM;
106 int i;
107
108 /* Check this LIOBN hasn't been previously allocated */
109 list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
110 if (stt->liobn == args->liobn)
111 return -EBUSY;
112 }
113
114 npages = kvmppc_stt_npages(args->window_size);
115
116 stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
117 GFP_KERNEL);
118 if (!stt)
119 goto fail;
120
121 stt->liobn = args->liobn;
122 stt->window_size = args->window_size;
123 stt->kvm = kvm;
124
125 for (i = 0; i < npages; i++) {
126 stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
127 if (!stt->pages[i])
128 goto fail;
129 }
130
131 kvm_get_kvm(kvm);
132
133 mutex_lock(&kvm->lock);
134 list_add(&stt->list, &kvm->arch.spapr_tce_tables);
135
136 mutex_unlock(&kvm->lock);
137
138 return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
139 stt, O_RDWR);
140
141fail:
142 if (stt) {
143 for (i = 0; i < npages; i++)
144 if (stt->pages[i])
145 __free_page(stt->pages[i]);
146
147 kfree(stt);
148 }
149 return ret;
150}
diff --git a/arch/powerpc/kvm/book3s_64_vio_hv.c b/arch/powerpc/kvm/book3s_64_vio_hv.c
index ea0f8c537c28..30c2f3b134c6 100644
--- a/arch/powerpc/kvm/book3s_64_vio_hv.c
+++ b/arch/powerpc/kvm/book3s_64_vio_hv.c
@@ -38,6 +38,9 @@
38 38
39#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64)) 39#define TCES_PER_PAGE (PAGE_SIZE / sizeof(u64))
40 40
41/* WARNING: This will be called in real-mode on HV KVM and virtual
42 * mode on PR KVM
43 */
41long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn, 44long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
42 unsigned long ioba, unsigned long tce) 45 unsigned long ioba, unsigned long tce)
43{ 46{
diff --git a/arch/powerpc/kvm/book3s_emulate.c b/arch/powerpc/kvm/book3s_emulate.c
index 135663a3e4fc..b9a989dc76cc 100644
--- a/arch/powerpc/kvm/book3s_emulate.c
+++ b/arch/powerpc/kvm/book3s_emulate.c
@@ -87,6 +87,10 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
87 unsigned int inst, int *advance) 87 unsigned int inst, int *advance)
88{ 88{
89 int emulated = EMULATE_DONE; 89 int emulated = EMULATE_DONE;
90 int rt = get_rt(inst);
91 int rs = get_rs(inst);
92 int ra = get_ra(inst);
93 int rb = get_rb(inst);
90 94
91 switch (get_op(inst)) { 95 switch (get_op(inst)) {
92 case 19: 96 case 19:
@@ -106,21 +110,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
106 case 31: 110 case 31:
107 switch (get_xop(inst)) { 111 switch (get_xop(inst)) {
108 case OP_31_XOP_MFMSR: 112 case OP_31_XOP_MFMSR:
109 kvmppc_set_gpr(vcpu, get_rt(inst), 113 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
110 vcpu->arch.shared->msr);
111 break; 114 break;
112 case OP_31_XOP_MTMSRD: 115 case OP_31_XOP_MTMSRD:
113 { 116 {
114 ulong rs = kvmppc_get_gpr(vcpu, get_rs(inst)); 117 ulong rs_val = kvmppc_get_gpr(vcpu, rs);
115 if (inst & 0x10000) { 118 if (inst & 0x10000) {
116 vcpu->arch.shared->msr &= ~(MSR_RI | MSR_EE); 119 ulong new_msr = vcpu->arch.shared->msr;
117 vcpu->arch.shared->msr |= rs & (MSR_RI | MSR_EE); 120 new_msr &= ~(MSR_RI | MSR_EE);
121 new_msr |= rs_val & (MSR_RI | MSR_EE);
122 vcpu->arch.shared->msr = new_msr;
118 } else 123 } else
119 kvmppc_set_msr(vcpu, rs); 124 kvmppc_set_msr(vcpu, rs_val);
120 break; 125 break;
121 } 126 }
122 case OP_31_XOP_MTMSR: 127 case OP_31_XOP_MTMSR:
123 kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, get_rs(inst))); 128 kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
124 break; 129 break;
125 case OP_31_XOP_MFSR: 130 case OP_31_XOP_MFSR:
126 { 131 {
@@ -130,7 +135,7 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
130 if (vcpu->arch.mmu.mfsrin) { 135 if (vcpu->arch.mmu.mfsrin) {
131 u32 sr; 136 u32 sr;
132 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); 137 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
133 kvmppc_set_gpr(vcpu, get_rt(inst), sr); 138 kvmppc_set_gpr(vcpu, rt, sr);
134 } 139 }
135 break; 140 break;
136 } 141 }
@@ -138,29 +143,29 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
138 { 143 {
139 int srnum; 144 int srnum;
140 145
141 srnum = (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf; 146 srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
142 if (vcpu->arch.mmu.mfsrin) { 147 if (vcpu->arch.mmu.mfsrin) {
143 u32 sr; 148 u32 sr;
144 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum); 149 sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
145 kvmppc_set_gpr(vcpu, get_rt(inst), sr); 150 kvmppc_set_gpr(vcpu, rt, sr);
146 } 151 }
147 break; 152 break;
148 } 153 }
149 case OP_31_XOP_MTSR: 154 case OP_31_XOP_MTSR:
150 vcpu->arch.mmu.mtsrin(vcpu, 155 vcpu->arch.mmu.mtsrin(vcpu,
151 (inst >> 16) & 0xf, 156 (inst >> 16) & 0xf,
152 kvmppc_get_gpr(vcpu, get_rs(inst))); 157 kvmppc_get_gpr(vcpu, rs));
153 break; 158 break;
154 case OP_31_XOP_MTSRIN: 159 case OP_31_XOP_MTSRIN:
155 vcpu->arch.mmu.mtsrin(vcpu, 160 vcpu->arch.mmu.mtsrin(vcpu,
156 (kvmppc_get_gpr(vcpu, get_rb(inst)) >> 28) & 0xf, 161 (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
157 kvmppc_get_gpr(vcpu, get_rs(inst))); 162 kvmppc_get_gpr(vcpu, rs));
158 break; 163 break;
159 case OP_31_XOP_TLBIE: 164 case OP_31_XOP_TLBIE:
160 case OP_31_XOP_TLBIEL: 165 case OP_31_XOP_TLBIEL:
161 { 166 {
162 bool large = (inst & 0x00200000) ? true : false; 167 bool large = (inst & 0x00200000) ? true : false;
163 ulong addr = kvmppc_get_gpr(vcpu, get_rb(inst)); 168 ulong addr = kvmppc_get_gpr(vcpu, rb);
164 vcpu->arch.mmu.tlbie(vcpu, addr, large); 169 vcpu->arch.mmu.tlbie(vcpu, addr, large);
165 break; 170 break;
166 } 171 }
@@ -171,15 +176,15 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
171 return EMULATE_FAIL; 176 return EMULATE_FAIL;
172 177
173 vcpu->arch.mmu.slbmte(vcpu, 178 vcpu->arch.mmu.slbmte(vcpu,
174 kvmppc_get_gpr(vcpu, get_rs(inst)), 179 kvmppc_get_gpr(vcpu, rs),
175 kvmppc_get_gpr(vcpu, get_rb(inst))); 180 kvmppc_get_gpr(vcpu, rb));
176 break; 181 break;
177 case OP_31_XOP_SLBIE: 182 case OP_31_XOP_SLBIE:
178 if (!vcpu->arch.mmu.slbie) 183 if (!vcpu->arch.mmu.slbie)
179 return EMULATE_FAIL; 184 return EMULATE_FAIL;
180 185
181 vcpu->arch.mmu.slbie(vcpu, 186 vcpu->arch.mmu.slbie(vcpu,
182 kvmppc_get_gpr(vcpu, get_rb(inst))); 187 kvmppc_get_gpr(vcpu, rb));
183 break; 188 break;
184 case OP_31_XOP_SLBIA: 189 case OP_31_XOP_SLBIA:
185 if (!vcpu->arch.mmu.slbia) 190 if (!vcpu->arch.mmu.slbia)
@@ -191,22 +196,22 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
191 if (!vcpu->arch.mmu.slbmfee) { 196 if (!vcpu->arch.mmu.slbmfee) {
192 emulated = EMULATE_FAIL; 197 emulated = EMULATE_FAIL;
193 } else { 198 } else {
194 ulong t, rb; 199 ulong t, rb_val;
195 200
196 rb = kvmppc_get_gpr(vcpu, get_rb(inst)); 201 rb_val = kvmppc_get_gpr(vcpu, rb);
197 t = vcpu->arch.mmu.slbmfee(vcpu, rb); 202 t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
198 kvmppc_set_gpr(vcpu, get_rt(inst), t); 203 kvmppc_set_gpr(vcpu, rt, t);
199 } 204 }
200 break; 205 break;
201 case OP_31_XOP_SLBMFEV: 206 case OP_31_XOP_SLBMFEV:
202 if (!vcpu->arch.mmu.slbmfev) { 207 if (!vcpu->arch.mmu.slbmfev) {
203 emulated = EMULATE_FAIL; 208 emulated = EMULATE_FAIL;
204 } else { 209 } else {
205 ulong t, rb; 210 ulong t, rb_val;
206 211
207 rb = kvmppc_get_gpr(vcpu, get_rb(inst)); 212 rb_val = kvmppc_get_gpr(vcpu, rb);
208 t = vcpu->arch.mmu.slbmfev(vcpu, rb); 213 t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
209 kvmppc_set_gpr(vcpu, get_rt(inst), t); 214 kvmppc_set_gpr(vcpu, rt, t);
210 } 215 }
211 break; 216 break;
212 case OP_31_XOP_DCBA: 217 case OP_31_XOP_DCBA:
@@ -214,17 +219,17 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
214 break; 219 break;
215 case OP_31_XOP_DCBZ: 220 case OP_31_XOP_DCBZ:
216 { 221 {
217 ulong rb = kvmppc_get_gpr(vcpu, get_rb(inst)); 222 ulong rb_val = kvmppc_get_gpr(vcpu, rb);
218 ulong ra = 0; 223 ulong ra_val = 0;
219 ulong addr, vaddr; 224 ulong addr, vaddr;
220 u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 }; 225 u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
221 u32 dsisr; 226 u32 dsisr;
222 int r; 227 int r;
223 228
224 if (get_ra(inst)) 229 if (ra)
225 ra = kvmppc_get_gpr(vcpu, get_ra(inst)); 230 ra_val = kvmppc_get_gpr(vcpu, ra);
226 231
227 addr = (ra + rb) & ~31ULL; 232 addr = (ra_val + rb_val) & ~31ULL;
228 if (!(vcpu->arch.shared->msr & MSR_SF)) 233 if (!(vcpu->arch.shared->msr & MSR_SF))
229 addr &= 0xffffffff; 234 addr &= 0xffffffff;
230 vaddr = addr; 235 vaddr = addr;
@@ -313,10 +318,9 @@ static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
313 return bat; 318 return bat;
314} 319}
315 320
316int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) 321int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
317{ 322{
318 int emulated = EMULATE_DONE; 323 int emulated = EMULATE_DONE;
319 ulong spr_val = kvmppc_get_gpr(vcpu, rs);
320 324
321 switch (sprn) { 325 switch (sprn) {
322 case SPRN_SDR1: 326 case SPRN_SDR1:
@@ -428,7 +432,7 @@ unprivileged:
428 return emulated; 432 return emulated;
429} 433}
430 434
431int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) 435int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
432{ 436{
433 int emulated = EMULATE_DONE; 437 int emulated = EMULATE_DONE;
434 438
@@ -441,46 +445,46 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
441 struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn); 445 struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);
442 446
443 if (sprn % 2) 447 if (sprn % 2)
444 kvmppc_set_gpr(vcpu, rt, bat->raw >> 32); 448 *spr_val = bat->raw >> 32;
445 else 449 else
446 kvmppc_set_gpr(vcpu, rt, bat->raw); 450 *spr_val = bat->raw;
447 451
448 break; 452 break;
449 } 453 }
450 case SPRN_SDR1: 454 case SPRN_SDR1:
451 if (!spr_allowed(vcpu, PRIV_HYPER)) 455 if (!spr_allowed(vcpu, PRIV_HYPER))
452 goto unprivileged; 456 goto unprivileged;
453 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->sdr1); 457 *spr_val = to_book3s(vcpu)->sdr1;
454 break; 458 break;
455 case SPRN_DSISR: 459 case SPRN_DSISR:
456 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dsisr); 460 *spr_val = vcpu->arch.shared->dsisr;
457 break; 461 break;
458 case SPRN_DAR: 462 case SPRN_DAR:
459 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); 463 *spr_val = vcpu->arch.shared->dar;
460 break; 464 break;
461 case SPRN_HIOR: 465 case SPRN_HIOR:
462 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hior); 466 *spr_val = to_book3s(vcpu)->hior;
463 break; 467 break;
464 case SPRN_HID0: 468 case SPRN_HID0:
465 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[0]); 469 *spr_val = to_book3s(vcpu)->hid[0];
466 break; 470 break;
467 case SPRN_HID1: 471 case SPRN_HID1:
468 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[1]); 472 *spr_val = to_book3s(vcpu)->hid[1];
469 break; 473 break;
470 case SPRN_HID2: 474 case SPRN_HID2:
471 case SPRN_HID2_GEKKO: 475 case SPRN_HID2_GEKKO:
472 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[2]); 476 *spr_val = to_book3s(vcpu)->hid[2];
473 break; 477 break;
474 case SPRN_HID4: 478 case SPRN_HID4:
475 case SPRN_HID4_GEKKO: 479 case SPRN_HID4_GEKKO:
476 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[4]); 480 *spr_val = to_book3s(vcpu)->hid[4];
477 break; 481 break;
478 case SPRN_HID5: 482 case SPRN_HID5:
479 kvmppc_set_gpr(vcpu, rt, to_book3s(vcpu)->hid[5]); 483 *spr_val = to_book3s(vcpu)->hid[5];
480 break; 484 break;
481 case SPRN_CFAR: 485 case SPRN_CFAR:
482 case SPRN_PURR: 486 case SPRN_PURR:
483 kvmppc_set_gpr(vcpu, rt, 0); 487 *spr_val = 0;
484 break; 488 break;
485 case SPRN_GQR0: 489 case SPRN_GQR0:
486 case SPRN_GQR1: 490 case SPRN_GQR1:
@@ -490,8 +494,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
490 case SPRN_GQR5: 494 case SPRN_GQR5:
491 case SPRN_GQR6: 495 case SPRN_GQR6:
492 case SPRN_GQR7: 496 case SPRN_GQR7:
493 kvmppc_set_gpr(vcpu, rt, 497 *spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
494 to_book3s(vcpu)->gqr[sprn - SPRN_GQR0]);
495 break; 498 break;
496 case SPRN_THRM1: 499 case SPRN_THRM1:
497 case SPRN_THRM2: 500 case SPRN_THRM2:
@@ -506,7 +509,7 @@ int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
506 case SPRN_PMC3_GEKKO: 509 case SPRN_PMC3_GEKKO:
507 case SPRN_PMC4_GEKKO: 510 case SPRN_PMC4_GEKKO:
508 case SPRN_WPAR_GEKKO: 511 case SPRN_WPAR_GEKKO:
509 kvmppc_set_gpr(vcpu, rt, 0); 512 *spr_val = 0;
510 break; 513 break;
511 default: 514 default:
512unprivileged: 515unprivileged:
@@ -565,23 +568,22 @@ u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
565ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst) 568ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
566{ 569{
567 ulong dar = 0; 570 ulong dar = 0;
568 ulong ra; 571 ulong ra = get_ra(inst);
572 ulong rb = get_rb(inst);
569 573
570 switch (get_op(inst)) { 574 switch (get_op(inst)) {
571 case OP_LFS: 575 case OP_LFS:
572 case OP_LFD: 576 case OP_LFD:
573 case OP_STFD: 577 case OP_STFD:
574 case OP_STFS: 578 case OP_STFS:
575 ra = get_ra(inst);
576 if (ra) 579 if (ra)
577 dar = kvmppc_get_gpr(vcpu, ra); 580 dar = kvmppc_get_gpr(vcpu, ra);
578 dar += (s32)((s16)inst); 581 dar += (s32)((s16)inst);
579 break; 582 break;
580 case 31: 583 case 31:
581 ra = get_ra(inst);
582 if (ra) 584 if (ra)
583 dar = kvmppc_get_gpr(vcpu, ra); 585 dar = kvmppc_get_gpr(vcpu, ra);
584 dar += kvmppc_get_gpr(vcpu, get_rb(inst)); 586 dar += kvmppc_get_gpr(vcpu, rb);
585 break; 587 break;
586 default: 588 default:
587 printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst); 589 printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
diff --git a/arch/powerpc/kvm/book3s_hv.c b/arch/powerpc/kvm/book3s_hv.c
index 108d1f580177..c6af1d623839 100644
--- a/arch/powerpc/kvm/book3s_hv.c
+++ b/arch/powerpc/kvm/book3s_hv.c
@@ -60,12 +60,20 @@ static int kvmppc_hv_setup_rma(struct kvm_vcpu *vcpu);
60 60
61void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 61void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
62{ 62{
63 struct kvmppc_vcore *vc = vcpu->arch.vcore;
64
63 local_paca->kvm_hstate.kvm_vcpu = vcpu; 65 local_paca->kvm_hstate.kvm_vcpu = vcpu;
64 local_paca->kvm_hstate.kvm_vcore = vcpu->arch.vcore; 66 local_paca->kvm_hstate.kvm_vcore = vc;
67 if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
68 vc->stolen_tb += mftb() - vc->preempt_tb;
65} 69}
66 70
67void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) 71void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
68{ 72{
73 struct kvmppc_vcore *vc = vcpu->arch.vcore;
74
75 if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
76 vc->preempt_tb = mftb();
69} 77}
70 78
71void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr) 79void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
@@ -134,6 +142,22 @@ static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
134 vpa->yield_count = 1; 142 vpa->yield_count = 1;
135} 143}
136 144
145/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
146struct reg_vpa {
147 u32 dummy;
148 union {
149 u16 hword;
150 u32 word;
151 } length;
152};
153
154static int vpa_is_registered(struct kvmppc_vpa *vpap)
155{
156 if (vpap->update_pending)
157 return vpap->next_gpa != 0;
158 return vpap->pinned_addr != NULL;
159}
160
137static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu, 161static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
138 unsigned long flags, 162 unsigned long flags,
139 unsigned long vcpuid, unsigned long vpa) 163 unsigned long vcpuid, unsigned long vpa)
@@ -142,88 +166,182 @@ static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
142 unsigned long len, nb; 166 unsigned long len, nb;
143 void *va; 167 void *va;
144 struct kvm_vcpu *tvcpu; 168 struct kvm_vcpu *tvcpu;
145 int err = H_PARAMETER; 169 int err;
170 int subfunc;
171 struct kvmppc_vpa *vpap;
146 172
147 tvcpu = kvmppc_find_vcpu(kvm, vcpuid); 173 tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
148 if (!tvcpu) 174 if (!tvcpu)
149 return H_PARAMETER; 175 return H_PARAMETER;
150 176
151 flags >>= 63 - 18; 177 subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
152 flags &= 7; 178 if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
153 if (flags == 0 || flags == 4) 179 subfunc == H_VPA_REG_SLB) {
154 return H_PARAMETER; 180 /* Registering new area - address must be cache-line aligned */
155 if (flags < 4) { 181 if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
156 if (vpa & 0x7f)
157 return H_PARAMETER; 182 return H_PARAMETER;
158 if (flags >= 2 && !tvcpu->arch.vpa) 183
159 return H_RESOURCE; 184 /* convert logical addr to kernel addr and read length */
160 /* registering new area; convert logical addr to real */
161 va = kvmppc_pin_guest_page(kvm, vpa, &nb); 185 va = kvmppc_pin_guest_page(kvm, vpa, &nb);
162 if (va == NULL) 186 if (va == NULL)
163 return H_PARAMETER; 187 return H_PARAMETER;
164 if (flags <= 1) 188 if (subfunc == H_VPA_REG_VPA)
165 len = *(unsigned short *)(va + 4); 189 len = ((struct reg_vpa *)va)->length.hword;
166 else 190 else
167 len = *(unsigned int *)(va + 4); 191 len = ((struct reg_vpa *)va)->length.word;
168 if (len > nb) 192 kvmppc_unpin_guest_page(kvm, va);
169 goto out_unpin; 193
170 switch (flags) { 194 /* Check length */
171 case 1: /* register VPA */ 195 if (len > nb || len < sizeof(struct reg_vpa))
172 if (len < 640) 196 return H_PARAMETER;
173 goto out_unpin; 197 } else {
174 if (tvcpu->arch.vpa) 198 vpa = 0;
175 kvmppc_unpin_guest_page(kvm, vcpu->arch.vpa); 199 len = 0;
176 tvcpu->arch.vpa = va; 200 }
177 init_vpa(vcpu, va); 201
178 break; 202 err = H_PARAMETER;
179 case 2: /* register DTL */ 203 vpap = NULL;
180 if (len < 48) 204 spin_lock(&tvcpu->arch.vpa_update_lock);
181 goto out_unpin; 205
182 len -= len % 48; 206 switch (subfunc) {
183 if (tvcpu->arch.dtl) 207 case H_VPA_REG_VPA: /* register VPA */
184 kvmppc_unpin_guest_page(kvm, vcpu->arch.dtl); 208 if (len < sizeof(struct lppaca))
185 tvcpu->arch.dtl = va;
186 tvcpu->arch.dtl_end = va + len;
187 break; 209 break;
188 case 3: /* register SLB shadow buffer */ 210 vpap = &tvcpu->arch.vpa;
189 if (len < 16) 211 err = 0;
190 goto out_unpin; 212 break;
191 if (tvcpu->arch.slb_shadow) 213
192 kvmppc_unpin_guest_page(kvm, vcpu->arch.slb_shadow); 214 case H_VPA_REG_DTL: /* register DTL */
193 tvcpu->arch.slb_shadow = va; 215 if (len < sizeof(struct dtl_entry))
194 break; 216 break;
195 } 217 len -= len % sizeof(struct dtl_entry);
196 } else { 218
197 switch (flags) { 219 /* Check that they have previously registered a VPA */
198 case 5: /* unregister VPA */ 220 err = H_RESOURCE;
199 if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl) 221 if (!vpa_is_registered(&tvcpu->arch.vpa))
200 return H_RESOURCE;
201 if (!tvcpu->arch.vpa)
202 break;
203 kvmppc_unpin_guest_page(kvm, tvcpu->arch.vpa);
204 tvcpu->arch.vpa = NULL;
205 break; 222 break;
206 case 6: /* unregister DTL */ 223
207 if (!tvcpu->arch.dtl) 224 vpap = &tvcpu->arch.dtl;
208 break; 225 err = 0;
209 kvmppc_unpin_guest_page(kvm, tvcpu->arch.dtl); 226 break;
210 tvcpu->arch.dtl = NULL; 227
228 case H_VPA_REG_SLB: /* register SLB shadow buffer */
229 /* Check that they have previously registered a VPA */
230 err = H_RESOURCE;
231 if (!vpa_is_registered(&tvcpu->arch.vpa))
211 break; 232 break;
212 case 7: /* unregister SLB shadow buffer */ 233
213 if (!tvcpu->arch.slb_shadow) 234 vpap = &tvcpu->arch.slb_shadow;
214 break; 235 err = 0;
215 kvmppc_unpin_guest_page(kvm, tvcpu->arch.slb_shadow); 236 break;
216 tvcpu->arch.slb_shadow = NULL; 237
238 case H_VPA_DEREG_VPA: /* deregister VPA */
239 /* Check they don't still have a DTL or SLB buf registered */
240 err = H_RESOURCE;
241 if (vpa_is_registered(&tvcpu->arch.dtl) ||
242 vpa_is_registered(&tvcpu->arch.slb_shadow))
217 break; 243 break;
218 } 244
245 vpap = &tvcpu->arch.vpa;
246 err = 0;
247 break;
248
249 case H_VPA_DEREG_DTL: /* deregister DTL */
250 vpap = &tvcpu->arch.dtl;
251 err = 0;
252 break;
253
254 case H_VPA_DEREG_SLB: /* deregister SLB shadow buffer */
255 vpap = &tvcpu->arch.slb_shadow;
256 err = 0;
257 break;
258 }
259
260 if (vpap) {
261 vpap->next_gpa = vpa;
262 vpap->len = len;
263 vpap->update_pending = 1;
219 } 264 }
220 return H_SUCCESS;
221 265
222 out_unpin: 266 spin_unlock(&tvcpu->arch.vpa_update_lock);
223 kvmppc_unpin_guest_page(kvm, va); 267
224 return err; 268 return err;
225} 269}
226 270
271static void kvmppc_update_vpa(struct kvm *kvm, struct kvmppc_vpa *vpap)
272{
273 void *va;
274 unsigned long nb;
275
276 vpap->update_pending = 0;
277 va = NULL;
278 if (vpap->next_gpa) {
279 va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
280 if (nb < vpap->len) {
281 /*
282 * If it's now too short, it must be that userspace
283 * has changed the mappings underlying guest memory,
284 * so unregister the region.
285 */
286 kvmppc_unpin_guest_page(kvm, va);
287 va = NULL;
288 }
289 }
290 if (vpap->pinned_addr)
291 kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
292 vpap->pinned_addr = va;
293 if (va)
294 vpap->pinned_end = va + vpap->len;
295}
296
297static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
298{
299 struct kvm *kvm = vcpu->kvm;
300
301 spin_lock(&vcpu->arch.vpa_update_lock);
302 if (vcpu->arch.vpa.update_pending) {
303 kvmppc_update_vpa(kvm, &vcpu->arch.vpa);
304 init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
305 }
306 if (vcpu->arch.dtl.update_pending) {
307 kvmppc_update_vpa(kvm, &vcpu->arch.dtl);
308 vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
309 vcpu->arch.dtl_index = 0;
310 }
311 if (vcpu->arch.slb_shadow.update_pending)
312 kvmppc_update_vpa(kvm, &vcpu->arch.slb_shadow);
313 spin_unlock(&vcpu->arch.vpa_update_lock);
314}
315
316static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
317 struct kvmppc_vcore *vc)
318{
319 struct dtl_entry *dt;
320 struct lppaca *vpa;
321 unsigned long old_stolen;
322
323 dt = vcpu->arch.dtl_ptr;
324 vpa = vcpu->arch.vpa.pinned_addr;
325 old_stolen = vcpu->arch.stolen_logged;
326 vcpu->arch.stolen_logged = vc->stolen_tb;
327 if (!dt || !vpa)
328 return;
329 memset(dt, 0, sizeof(struct dtl_entry));
330 dt->dispatch_reason = 7;
331 dt->processor_id = vc->pcpu + vcpu->arch.ptid;
332 dt->timebase = mftb();
333 dt->enqueue_to_dispatch_time = vc->stolen_tb - old_stolen;
334 dt->srr0 = kvmppc_get_pc(vcpu);
335 dt->srr1 = vcpu->arch.shregs.msr;
336 ++dt;
337 if (dt == vcpu->arch.dtl.pinned_end)
338 dt = vcpu->arch.dtl.pinned_addr;
339 vcpu->arch.dtl_ptr = dt;
340 /* order writing *dt vs. writing vpa->dtl_idx */
341 smp_wmb();
342 vpa->dtl_idx = ++vcpu->arch.dtl_index;
343}
344
227int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu) 345int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
228{ 346{
229 unsigned long req = kvmppc_get_gpr(vcpu, 3); 347 unsigned long req = kvmppc_get_gpr(vcpu, 3);
@@ -468,6 +586,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
468 /* default to host PVR, since we can't spoof it */ 586 /* default to host PVR, since we can't spoof it */
469 vcpu->arch.pvr = mfspr(SPRN_PVR); 587 vcpu->arch.pvr = mfspr(SPRN_PVR);
470 kvmppc_set_pvr(vcpu, vcpu->arch.pvr); 588 kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
589 spin_lock_init(&vcpu->arch.vpa_update_lock);
471 590
472 kvmppc_mmu_book3s_hv_init(vcpu); 591 kvmppc_mmu_book3s_hv_init(vcpu);
473 592
@@ -486,6 +605,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
486 INIT_LIST_HEAD(&vcore->runnable_threads); 605 INIT_LIST_HEAD(&vcore->runnable_threads);
487 spin_lock_init(&vcore->lock); 606 spin_lock_init(&vcore->lock);
488 init_waitqueue_head(&vcore->wq); 607 init_waitqueue_head(&vcore->wq);
608 vcore->preempt_tb = mftb();
489 } 609 }
490 kvm->arch.vcores[core] = vcore; 610 kvm->arch.vcores[core] = vcore;
491 } 611 }
@@ -498,6 +618,7 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
498 ++vcore->num_threads; 618 ++vcore->num_threads;
499 spin_unlock(&vcore->lock); 619 spin_unlock(&vcore->lock);
500 vcpu->arch.vcore = vcore; 620 vcpu->arch.vcore = vcore;
621 vcpu->arch.stolen_logged = vcore->stolen_tb;
501 622
502 vcpu->arch.cpu_type = KVM_CPU_3S_64; 623 vcpu->arch.cpu_type = KVM_CPU_3S_64;
503 kvmppc_sanity_check(vcpu); 624 kvmppc_sanity_check(vcpu);
@@ -512,12 +633,14 @@ out:
512 633
513void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) 634void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
514{ 635{
515 if (vcpu->arch.dtl) 636 spin_lock(&vcpu->arch.vpa_update_lock);
516 kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl); 637 if (vcpu->arch.dtl.pinned_addr)
517 if (vcpu->arch.slb_shadow) 638 kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl.pinned_addr);
518 kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow); 639 if (vcpu->arch.slb_shadow.pinned_addr)
519 if (vcpu->arch.vpa) 640 kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow.pinned_addr);
520 kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa); 641 if (vcpu->arch.vpa.pinned_addr)
642 kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa.pinned_addr);
643 spin_unlock(&vcpu->arch.vpa_update_lock);
521 kvm_vcpu_uninit(vcpu); 644 kvm_vcpu_uninit(vcpu);
522 kmem_cache_free(kvm_vcpu_cache, vcpu); 645 kmem_cache_free(kvm_vcpu_cache, vcpu);
523} 646}
@@ -569,6 +692,45 @@ static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
569 list_del(&vcpu->arch.run_list); 692 list_del(&vcpu->arch.run_list);
570} 693}
571 694
695static int kvmppc_grab_hwthread(int cpu)
696{
697 struct paca_struct *tpaca;
698 long timeout = 1000;
699
700 tpaca = &paca[cpu];
701
702 /* Ensure the thread won't go into the kernel if it wakes */
703 tpaca->kvm_hstate.hwthread_req = 1;
704
705 /*
706 * If the thread is already executing in the kernel (e.g. handling
707 * a stray interrupt), wait for it to get back to nap mode.
708 * The smp_mb() is to ensure that our setting of hwthread_req
709 * is visible before we look at hwthread_state, so if this
710 * races with the code at system_reset_pSeries and the thread
711 * misses our setting of hwthread_req, we are sure to see its
712 * setting of hwthread_state, and vice versa.
713 */
714 smp_mb();
715 while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
716 if (--timeout <= 0) {
717 pr_err("KVM: couldn't grab cpu %d\n", cpu);
718 return -EBUSY;
719 }
720 udelay(1);
721 }
722 return 0;
723}
724
725static void kvmppc_release_hwthread(int cpu)
726{
727 struct paca_struct *tpaca;
728
729 tpaca = &paca[cpu];
730 tpaca->kvm_hstate.hwthread_req = 0;
731 tpaca->kvm_hstate.kvm_vcpu = NULL;
732}
733
572static void kvmppc_start_thread(struct kvm_vcpu *vcpu) 734static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
573{ 735{
574 int cpu; 736 int cpu;
@@ -588,8 +750,7 @@ static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
588 smp_wmb(); 750 smp_wmb();
589#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP) 751#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
590 if (vcpu->arch.ptid) { 752 if (vcpu->arch.ptid) {
591 tpaca->cpu_start = 0x80; 753 kvmppc_grab_hwthread(cpu);
592 wmb();
593 xics_wake_cpu(cpu); 754 xics_wake_cpu(cpu);
594 ++vc->n_woken; 755 ++vc->n_woken;
595 } 756 }
@@ -639,7 +800,7 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
639 struct kvm_vcpu *vcpu, *vcpu0, *vnext; 800 struct kvm_vcpu *vcpu, *vcpu0, *vnext;
640 long ret; 801 long ret;
641 u64 now; 802 u64 now;
642 int ptid; 803 int ptid, i;
643 804
644 /* don't start if any threads have a signal pending */ 805 /* don't start if any threads have a signal pending */
645 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) 806 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
@@ -681,17 +842,29 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
681 vc->nap_count = 0; 842 vc->nap_count = 0;
682 vc->entry_exit_count = 0; 843 vc->entry_exit_count = 0;
683 vc->vcore_state = VCORE_RUNNING; 844 vc->vcore_state = VCORE_RUNNING;
845 vc->stolen_tb += mftb() - vc->preempt_tb;
684 vc->in_guest = 0; 846 vc->in_guest = 0;
685 vc->pcpu = smp_processor_id(); 847 vc->pcpu = smp_processor_id();
686 vc->napping_threads = 0; 848 vc->napping_threads = 0;
687 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) 849 list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
688 kvmppc_start_thread(vcpu); 850 kvmppc_start_thread(vcpu);
851 if (vcpu->arch.vpa.update_pending ||
852 vcpu->arch.slb_shadow.update_pending ||
853 vcpu->arch.dtl.update_pending)
854 kvmppc_update_vpas(vcpu);
855 kvmppc_create_dtl_entry(vcpu, vc);
856 }
857 /* Grab any remaining hw threads so they can't go into the kernel */
858 for (i = ptid; i < threads_per_core; ++i)
859 kvmppc_grab_hwthread(vc->pcpu + i);
689 860
690 preempt_disable(); 861 preempt_disable();
691 spin_unlock(&vc->lock); 862 spin_unlock(&vc->lock);
692 863
693 kvm_guest_enter(); 864 kvm_guest_enter();
694 __kvmppc_vcore_entry(NULL, vcpu0); 865 __kvmppc_vcore_entry(NULL, vcpu0);
866 for (i = 0; i < threads_per_core; ++i)
867 kvmppc_release_hwthread(vc->pcpu + i);
695 868
696 spin_lock(&vc->lock); 869 spin_lock(&vc->lock);
697 /* disable sending of IPIs on virtual external irqs */ 870 /* disable sending of IPIs on virtual external irqs */
@@ -737,6 +910,7 @@ static int kvmppc_run_core(struct kvmppc_vcore *vc)
737 spin_lock(&vc->lock); 910 spin_lock(&vc->lock);
738 out: 911 out:
739 vc->vcore_state = VCORE_INACTIVE; 912 vc->vcore_state = VCORE_INACTIVE;
913 vc->preempt_tb = mftb();
740 list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads, 914 list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
741 arch.run_list) { 915 arch.run_list) {
742 if (vcpu->arch.ret != RESUME_GUEST) { 916 if (vcpu->arch.ret != RESUME_GUEST) {
@@ -835,6 +1009,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
835 spin_lock(&vc->lock); 1009 spin_lock(&vc->lock);
836 continue; 1010 continue;
837 } 1011 }
1012 vc->runner = vcpu;
838 n_ceded = 0; 1013 n_ceded = 0;
839 list_for_each_entry(v, &vc->runnable_threads, arch.run_list) 1014 list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
840 n_ceded += v->arch.ceded; 1015 n_ceded += v->arch.ceded;
@@ -854,6 +1029,7 @@ static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
854 wake_up(&v->arch.cpu_run); 1029 wake_up(&v->arch.cpu_run);
855 } 1030 }
856 } 1031 }
1032 vc->runner = NULL;
857 } 1033 }
858 1034
859 if (signal_pending(current)) { 1035 if (signal_pending(current)) {
@@ -917,115 +1093,6 @@ int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
917 return r; 1093 return r;
918} 1094}
919 1095
920static long kvmppc_stt_npages(unsigned long window_size)
921{
922 return ALIGN((window_size >> SPAPR_TCE_SHIFT)
923 * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
924}
925
926static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
927{
928 struct kvm *kvm = stt->kvm;
929 int i;
930
931 mutex_lock(&kvm->lock);
932 list_del(&stt->list);
933 for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
934 __free_page(stt->pages[i]);
935 kfree(stt);
936 mutex_unlock(&kvm->lock);
937
938 kvm_put_kvm(kvm);
939}
940
941static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
942{
943 struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
944 struct page *page;
945
946 if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
947 return VM_FAULT_SIGBUS;
948
949 page = stt->pages[vmf->pgoff];
950 get_page(page);
951 vmf->page = page;
952 return 0;
953}
954
955static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
956 .fault = kvm_spapr_tce_fault,
957};
958
959static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
960{
961 vma->vm_ops = &kvm_spapr_tce_vm_ops;
962 return 0;
963}
964
965static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
966{
967 struct kvmppc_spapr_tce_table *stt = filp->private_data;
968
969 release_spapr_tce_table(stt);
970 return 0;
971}
972
973static struct file_operations kvm_spapr_tce_fops = {
974 .mmap = kvm_spapr_tce_mmap,
975 .release = kvm_spapr_tce_release,
976};
977
978long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
979 struct kvm_create_spapr_tce *args)
980{
981 struct kvmppc_spapr_tce_table *stt = NULL;
982 long npages;
983 int ret = -ENOMEM;
984 int i;
985
986 /* Check this LIOBN hasn't been previously allocated */
987 list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
988 if (stt->liobn == args->liobn)
989 return -EBUSY;
990 }
991
992 npages = kvmppc_stt_npages(args->window_size);
993
994 stt = kzalloc(sizeof(*stt) + npages* sizeof(struct page *),
995 GFP_KERNEL);
996 if (!stt)
997 goto fail;
998
999 stt->liobn = args->liobn;
1000 stt->window_size = args->window_size;
1001 stt->kvm = kvm;
1002
1003 for (i = 0; i < npages; i++) {
1004 stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
1005 if (!stt->pages[i])
1006 goto fail;
1007 }
1008
1009 kvm_get_kvm(kvm);
1010
1011 mutex_lock(&kvm->lock);
1012 list_add(&stt->list, &kvm->arch.spapr_tce_tables);
1013
1014 mutex_unlock(&kvm->lock);
1015
1016 return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
1017 stt, O_RDWR);
1018
1019fail:
1020 if (stt) {
1021 for (i = 0; i < npages; i++)
1022 if (stt->pages[i])
1023 __free_page(stt->pages[i]);
1024
1025 kfree(stt);
1026 }
1027 return ret;
1028}
1029 1096
1030/* Work out RMLS (real mode limit selector) field value for a given RMA size. 1097/* Work out RMLS (real mode limit selector) field value for a given RMA size.
1031 Assumes POWER7 or PPC970. */ 1098 Assumes POWER7 or PPC970. */
@@ -1108,6 +1175,38 @@ long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
1108 return fd; 1175 return fd;
1109} 1176}
1110 1177
1178static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
1179 int linux_psize)
1180{
1181 struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];
1182
1183 if (!def->shift)
1184 return;
1185 (*sps)->page_shift = def->shift;
1186 (*sps)->slb_enc = def->sllp;
1187 (*sps)->enc[0].page_shift = def->shift;
1188 (*sps)->enc[0].pte_enc = def->penc;
1189 (*sps)++;
1190}
1191
1192int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
1193{
1194 struct kvm_ppc_one_seg_page_size *sps;
1195
1196 info->flags = KVM_PPC_PAGE_SIZES_REAL;
1197 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1198 info->flags |= KVM_PPC_1T_SEGMENTS;
1199 info->slb_size = mmu_slb_size;
1200
1201 /* We only support these sizes for now, and no muti-size segments */
1202 sps = &info->sps[0];
1203 kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
1204 kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
1205 kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);
1206
1207 return 0;
1208}
1209
1111/* 1210/*
1112 * Get (and clear) the dirty memory log for a memory slot. 1211 * Get (and clear) the dirty memory log for a memory slot.
1113 */ 1212 */
@@ -1404,12 +1503,12 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
1404 return EMULATE_FAIL; 1503 return EMULATE_FAIL;
1405} 1504}
1406 1505
1407int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) 1506int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
1408{ 1507{
1409 return EMULATE_FAIL; 1508 return EMULATE_FAIL;
1410} 1509}
1411 1510
1412int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) 1511int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
1413{ 1512{
1414 return EMULATE_FAIL; 1513 return EMULATE_FAIL;
1415} 1514}
diff --git a/arch/powerpc/kvm/book3s_hv_interrupts.S b/arch/powerpc/kvm/book3s_hv_interrupts.S
index d3fb4df02c41..84035a528c80 100644
--- a/arch/powerpc/kvm/book3s_hv_interrupts.S
+++ b/arch/powerpc/kvm/book3s_hv_interrupts.S
@@ -68,19 +68,24 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
68 rotldi r10,r10,16 68 rotldi r10,r10,16
69 mtmsrd r10,1 69 mtmsrd r10,1
70 70
71 /* Save host PMU registers and load guest PMU registers */ 71 /* Save host PMU registers */
72 /* R4 is live here (vcpu pointer) but not r3 or r5 */ 72 /* R4 is live here (vcpu pointer) but not r3 or r5 */
73 li r3, 1 73 li r3, 1
74 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 74 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
75 mfspr r7, SPRN_MMCR0 /* save MMCR0 */ 75 mfspr r7, SPRN_MMCR0 /* save MMCR0 */
76 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */ 76 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable interrupts */
77 mfspr r6, SPRN_MMCRA
78BEGIN_FTR_SECTION
79 /* On P7, clear MMCRA in order to disable SDAR updates */
80 li r5, 0
81 mtspr SPRN_MMCRA, r5
82END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
77 isync 83 isync
78 ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */ 84 ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
79 lbz r5, LPPACA_PMCINUSE(r3) 85 lbz r5, LPPACA_PMCINUSE(r3)
80 cmpwi r5, 0 86 cmpwi r5, 0
81 beq 31f /* skip if not */ 87 beq 31f /* skip if not */
82 mfspr r5, SPRN_MMCR1 88 mfspr r5, SPRN_MMCR1
83 mfspr r6, SPRN_MMCRA
84 std r7, HSTATE_MMCR(r13) 89 std r7, HSTATE_MMCR(r13)
85 std r5, HSTATE_MMCR + 8(r13) 90 std r5, HSTATE_MMCR + 8(r13)
86 std r6, HSTATE_MMCR + 16(r13) 91 std r6, HSTATE_MMCR + 16(r13)
diff --git a/arch/powerpc/kvm/book3s_hv_rmhandlers.S b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
index b70bf22a3ff3..a84aafce2a12 100644
--- a/arch/powerpc/kvm/book3s_hv_rmhandlers.S
+++ b/arch/powerpc/kvm/book3s_hv_rmhandlers.S
@@ -26,6 +26,7 @@
26#include <asm/hvcall.h> 26#include <asm/hvcall.h>
27#include <asm/asm-offsets.h> 27#include <asm/asm-offsets.h>
28#include <asm/exception-64s.h> 28#include <asm/exception-64s.h>
29#include <asm/kvm_book3s_asm.h>
29 30
30/***************************************************************************** 31/*****************************************************************************
31 * * 32 * *
@@ -82,6 +83,7 @@ _GLOBAL(kvmppc_hv_entry_trampoline)
82 83
83#define XICS_XIRR 4 84#define XICS_XIRR 4
84#define XICS_QIRR 0xc 85#define XICS_QIRR 0xc
86#define XICS_IPI 2 /* interrupt source # for IPIs */
85 87
86/* 88/*
87 * We come in here when wakened from nap mode on a secondary hw thread. 89 * We come in here when wakened from nap mode on a secondary hw thread.
@@ -94,26 +96,54 @@ kvm_start_guest:
94 subi r1,r1,STACK_FRAME_OVERHEAD 96 subi r1,r1,STACK_FRAME_OVERHEAD
95 ld r2,PACATOC(r13) 97 ld r2,PACATOC(r13)
96 98
97 /* were we napping due to cede? */ 99 li r0,KVM_HWTHREAD_IN_KVM
98 lbz r0,HSTATE_NAPPING(r13) 100 stb r0,HSTATE_HWTHREAD_STATE(r13)
99 cmpwi r0,0
100 bne kvm_end_cede
101 101
102 /* get vcpu pointer */ 102 /* NV GPR values from power7_idle() will no longer be valid */
103 ld r4, HSTATE_KVM_VCPU(r13) 103 li r0,1
104 stb r0,PACA_NAPSTATELOST(r13)
104 105
105 /* We got here with an IPI; clear it */ 106 /* get vcpu pointer, NULL if we have no vcpu to run */
106 ld r5, HSTATE_XICS_PHYS(r13) 107 ld r4,HSTATE_KVM_VCPU(r13)
107 li r0, 0xff 108 cmpdi cr1,r4,0
108 li r6, XICS_QIRR 109
109 li r7, XICS_XIRR 110 /* Check the wake reason in SRR1 to see why we got here */
110 lwzcix r8, r5, r7 /* ack the interrupt */ 111 mfspr r3,SPRN_SRR1
112 rlwinm r3,r3,44-31,0x7 /* extract wake reason field */
113 cmpwi r3,4 /* was it an external interrupt? */
114 bne 27f
115
116 /*
117 * External interrupt - for now assume it is an IPI, since we
118 * should never get any other interrupts sent to offline threads.
119 * Only do this for secondary threads.
120 */
121 beq cr1,25f
122 lwz r3,VCPU_PTID(r4)
123 cmpwi r3,0
124 beq 27f
12525: ld r5,HSTATE_XICS_PHYS(r13)
126 li r0,0xff
127 li r6,XICS_QIRR
128 li r7,XICS_XIRR
129 lwzcix r8,r5,r7 /* get and ack the interrupt */
111 sync 130 sync
112 stbcix r0, r5, r6 /* clear it */ 131 clrldi. r9,r8,40 /* get interrupt source ID. */
113 stwcix r8, r5, r7 /* EOI it */ 132 beq 27f /* none there? */
133 cmpwi r9,XICS_IPI
134 bne 26f
135 stbcix r0,r5,r6 /* clear IPI */
13626: stwcix r8,r5,r7 /* EOI the interrupt */
114 137
115 /* NV GPR values from power7_idle() will no longer be valid */ 13827: /* XXX should handle hypervisor maintenance interrupts etc. here */
116 stb r0, PACA_NAPSTATELOST(r13) 139
140 /* if we have no vcpu to run, go back to sleep */
141 beq cr1,kvm_no_guest
142
143 /* were we napping due to cede? */
144 lbz r0,HSTATE_NAPPING(r13)
145 cmpwi r0,0
146 bne kvm_end_cede
117 147
118.global kvmppc_hv_entry 148.global kvmppc_hv_entry
119kvmppc_hv_entry: 149kvmppc_hv_entry:
@@ -129,24 +159,15 @@ kvmppc_hv_entry:
129 mflr r0 159 mflr r0
130 std r0, HSTATE_VMHANDLER(r13) 160 std r0, HSTATE_VMHANDLER(r13)
131 161
132 ld r14, VCPU_GPR(r14)(r4) 162 /* Set partition DABR */
133 ld r15, VCPU_GPR(r15)(r4) 163 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
134 ld r16, VCPU_GPR(r16)(r4) 164 li r5,3
135 ld r17, VCPU_GPR(r17)(r4) 165 ld r6,VCPU_DABR(r4)
136 ld r18, VCPU_GPR(r18)(r4) 166 mtspr SPRN_DABRX,r5
137 ld r19, VCPU_GPR(r19)(r4) 167 mtspr SPRN_DABR,r6
138 ld r20, VCPU_GPR(r20)(r4) 168BEGIN_FTR_SECTION
139 ld r21, VCPU_GPR(r21)(r4) 169 isync
140 ld r22, VCPU_GPR(r22)(r4) 170END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
141 ld r23, VCPU_GPR(r23)(r4)
142 ld r24, VCPU_GPR(r24)(r4)
143 ld r25, VCPU_GPR(r25)(r4)
144 ld r26, VCPU_GPR(r26)(r4)
145 ld r27, VCPU_GPR(r27)(r4)
146 ld r28, VCPU_GPR(r28)(r4)
147 ld r29, VCPU_GPR(r29)(r4)
148 ld r30, VCPU_GPR(r30)(r4)
149 ld r31, VCPU_GPR(r31)(r4)
150 171
151 /* Load guest PMU registers */ 172 /* Load guest PMU registers */
152 /* R4 is live here (vcpu pointer) */ 173 /* R4 is live here (vcpu pointer) */
@@ -185,6 +206,25 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
185 /* Load up FP, VMX and VSX registers */ 206 /* Load up FP, VMX and VSX registers */
186 bl kvmppc_load_fp 207 bl kvmppc_load_fp
187 208
209 ld r14, VCPU_GPR(r14)(r4)
210 ld r15, VCPU_GPR(r15)(r4)
211 ld r16, VCPU_GPR(r16)(r4)
212 ld r17, VCPU_GPR(r17)(r4)
213 ld r18, VCPU_GPR(r18)(r4)
214 ld r19, VCPU_GPR(r19)(r4)
215 ld r20, VCPU_GPR(r20)(r4)
216 ld r21, VCPU_GPR(r21)(r4)
217 ld r22, VCPU_GPR(r22)(r4)
218 ld r23, VCPU_GPR(r23)(r4)
219 ld r24, VCPU_GPR(r24)(r4)
220 ld r25, VCPU_GPR(r25)(r4)
221 ld r26, VCPU_GPR(r26)(r4)
222 ld r27, VCPU_GPR(r27)(r4)
223 ld r28, VCPU_GPR(r28)(r4)
224 ld r29, VCPU_GPR(r29)(r4)
225 ld r30, VCPU_GPR(r30)(r4)
226 ld r31, VCPU_GPR(r31)(r4)
227
188BEGIN_FTR_SECTION 228BEGIN_FTR_SECTION
189 /* Switch DSCR to guest value */ 229 /* Switch DSCR to guest value */
190 ld r5, VCPU_DSCR(r4) 230 ld r5, VCPU_DSCR(r4)
@@ -226,12 +266,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
226 mtspr SPRN_DAR, r5 266 mtspr SPRN_DAR, r5
227 mtspr SPRN_DSISR, r6 267 mtspr SPRN_DSISR, r6
228 268
229 /* Set partition DABR */
230 li r5,3
231 ld r6,VCPU_DABR(r4)
232 mtspr SPRN_DABRX,r5
233 mtspr SPRN_DABR,r6
234
235BEGIN_FTR_SECTION 269BEGIN_FTR_SECTION
236 /* Restore AMR and UAMOR, set AMOR to all 1s */ 270 /* Restore AMR and UAMOR, set AMOR to all 1s */
237 ld r5,VCPU_AMR(r4) 271 ld r5,VCPU_AMR(r4)
@@ -925,12 +959,6 @@ BEGIN_FTR_SECTION
925 mtspr SPRN_AMR,r6 959 mtspr SPRN_AMR,r6
926END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 960END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
927 961
928 /* Restore host DABR and DABRX */
929 ld r5,HSTATE_DABR(r13)
930 li r6,7
931 mtspr SPRN_DABR,r5
932 mtspr SPRN_DABRX,r6
933
934 /* Switch DSCR back to host value */ 962 /* Switch DSCR back to host value */
935BEGIN_FTR_SECTION 963BEGIN_FTR_SECTION
936 mfspr r8, SPRN_DSCR 964 mfspr r8, SPRN_DSCR
@@ -969,6 +997,10 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
969 std r5, VCPU_SPRG2(r9) 997 std r5, VCPU_SPRG2(r9)
970 std r6, VCPU_SPRG3(r9) 998 std r6, VCPU_SPRG3(r9)
971 999
1000 /* save FP state */
1001 mr r3, r9
1002 bl .kvmppc_save_fp
1003
972 /* Increment yield count if they have a VPA */ 1004 /* Increment yield count if they have a VPA */
973 ld r8, VCPU_VPA(r9) /* do they have a VPA? */ 1005 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
974 cmpdi r8, 0 1006 cmpdi r8, 0
@@ -983,6 +1015,12 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
983 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */ 1015 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
984 mfspr r4, SPRN_MMCR0 /* save MMCR0 */ 1016 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
985 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */ 1017 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
1018 mfspr r6, SPRN_MMCRA
1019BEGIN_FTR_SECTION
1020 /* On P7, clear MMCRA in order to disable SDAR updates */
1021 li r7, 0
1022 mtspr SPRN_MMCRA, r7
1023END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
986 isync 1024 isync
987 beq 21f /* if no VPA, save PMU stuff anyway */ 1025 beq 21f /* if no VPA, save PMU stuff anyway */
988 lbz r7, LPPACA_PMCINUSE(r8) 1026 lbz r7, LPPACA_PMCINUSE(r8)
@@ -991,7 +1029,6 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
991 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */ 1029 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
992 b 22f 1030 b 22f
99321: mfspr r5, SPRN_MMCR1 103121: mfspr r5, SPRN_MMCR1
994 mfspr r6, SPRN_MMCRA
995 std r4, VCPU_MMCR(r9) 1032 std r4, VCPU_MMCR(r9)
996 std r5, VCPU_MMCR + 8(r9) 1033 std r5, VCPU_MMCR + 8(r9)
997 std r6, VCPU_MMCR + 16(r9) 1034 std r6, VCPU_MMCR + 16(r9)
@@ -1016,17 +1053,20 @@ BEGIN_FTR_SECTION
1016 stw r11, VCPU_PMC + 28(r9) 1053 stw r11, VCPU_PMC + 28(r9)
1017END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201) 1054END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
101822: 105522:
1019 /* save FP state */
1020 mr r3, r9
1021 bl .kvmppc_save_fp
1022 1056
1023 /* Secondary threads go off to take a nap on POWER7 */ 1057 /* Secondary threads go off to take a nap on POWER7 */
1024BEGIN_FTR_SECTION 1058BEGIN_FTR_SECTION
1025 lwz r0,VCPU_PTID(r3) 1059 lwz r0,VCPU_PTID(r9)
1026 cmpwi r0,0 1060 cmpwi r0,0
1027 bne secondary_nap 1061 bne secondary_nap
1028END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206) 1062END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1029 1063
1064 /* Restore host DABR and DABRX */
1065 ld r5,HSTATE_DABR(r13)
1066 li r6,7
1067 mtspr SPRN_DABR,r5
1068 mtspr SPRN_DABRX,r6
1069
1030 /* 1070 /*
1031 * Reload DEC. HDEC interrupts were disabled when 1071 * Reload DEC. HDEC interrupts were disabled when
1032 * we reloaded the host's LPCR value. 1072 * we reloaded the host's LPCR value.
@@ -1363,7 +1403,12 @@ bounce_ext_interrupt:
1363 1403
1364_GLOBAL(kvmppc_h_set_dabr) 1404_GLOBAL(kvmppc_h_set_dabr)
1365 std r4,VCPU_DABR(r3) 1405 std r4,VCPU_DABR(r3)
1366 mtspr SPRN_DABR,r4 1406 /* Work around P7 bug where DABR can get corrupted on mtspr */
14071: mtspr SPRN_DABR,r4
1408 mfspr r5, SPRN_DABR
1409 cmpd r4, r5
1410 bne 1b
1411 isync
1367 li r3,0 1412 li r3,0
1368 blr 1413 blr
1369 1414
@@ -1445,8 +1490,8 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1445 * Take a nap until a decrementer or external interrupt occurs, 1490 * Take a nap until a decrementer or external interrupt occurs,
1446 * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR 1491 * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
1447 */ 1492 */
1448 li r0,0x80 1493 li r0,1
1449 stb r0,PACAPROCSTART(r13) 1494 stb r0,HSTATE_HWTHREAD_REQ(r13)
1450 mfspr r5,SPRN_LPCR 1495 mfspr r5,SPRN_LPCR
1451 ori r5,r5,LPCR_PECE0 | LPCR_PECE1 1496 ori r5,r5,LPCR_PECE0 | LPCR_PECE1
1452 mtspr SPRN_LPCR,r5 1497 mtspr SPRN_LPCR,r5
@@ -1463,26 +1508,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1463kvm_end_cede: 1508kvm_end_cede:
1464 /* Woken by external or decrementer interrupt */ 1509 /* Woken by external or decrementer interrupt */
1465 ld r1, HSTATE_HOST_R1(r13) 1510 ld r1, HSTATE_HOST_R1(r13)
1466 ld r2, PACATOC(r13)
1467 1511
1468 /* If we're a secondary thread and we got here by an IPI, ack it */
1469 ld r4,HSTATE_KVM_VCPU(r13)
1470 lwz r3,VCPU_PTID(r4)
1471 cmpwi r3,0
1472 beq 27f
1473 mfspr r3,SPRN_SRR1
1474 rlwinm r3,r3,44-31,0x7 /* extract wake reason field */
1475 cmpwi r3,4 /* was it an external interrupt? */
1476 bne 27f
1477 ld r5, HSTATE_XICS_PHYS(r13)
1478 li r0,0xff
1479 li r6,XICS_QIRR
1480 li r7,XICS_XIRR
1481 lwzcix r8,r5,r7 /* ack the interrupt */
1482 sync
1483 stbcix r0,r5,r6 /* clear it */
1484 stwcix r8,r5,r7 /* EOI it */
148527:
1486 /* load up FP state */ 1512 /* load up FP state */
1487 bl kvmppc_load_fp 1513 bl kvmppc_load_fp
1488 1514
@@ -1580,12 +1606,17 @@ secondary_nap:
1580 stwcx. r3, 0, r4 1606 stwcx. r3, 0, r4
1581 bne 51b 1607 bne 51b
1582 1608
1609kvm_no_guest:
1610 li r0, KVM_HWTHREAD_IN_NAP
1611 stb r0, HSTATE_HWTHREAD_STATE(r13)
1612 li r0, 0
1613 std r0, HSTATE_KVM_VCPU(r13)
1614
1583 li r3, LPCR_PECE0 1615 li r3, LPCR_PECE0
1584 mfspr r4, SPRN_LPCR 1616 mfspr r4, SPRN_LPCR
1585 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1 1617 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
1586 mtspr SPRN_LPCR, r4 1618 mtspr SPRN_LPCR, r4
1587 isync 1619 isync
1588 li r0, 0
1589 std r0, HSTATE_SCRATCH0(r13) 1620 std r0, HSTATE_SCRATCH0(r13)
1590 ptesync 1621 ptesync
1591 ld r0, HSTATE_SCRATCH0(r13) 1622 ld r0, HSTATE_SCRATCH0(r13)
@@ -1599,8 +1630,8 @@ secondary_nap:
1599 * r3 = vcpu pointer 1630 * r3 = vcpu pointer
1600 */ 1631 */
1601_GLOBAL(kvmppc_save_fp) 1632_GLOBAL(kvmppc_save_fp)
1602 mfmsr r9 1633 mfmsr r5
1603 ori r8,r9,MSR_FP 1634 ori r8,r5,MSR_FP
1604#ifdef CONFIG_ALTIVEC 1635#ifdef CONFIG_ALTIVEC
1605BEGIN_FTR_SECTION 1636BEGIN_FTR_SECTION
1606 oris r8,r8,MSR_VEC@h 1637 oris r8,r8,MSR_VEC@h
@@ -1649,7 +1680,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
1649#endif 1680#endif
1650 mfspr r6,SPRN_VRSAVE 1681 mfspr r6,SPRN_VRSAVE
1651 stw r6,VCPU_VRSAVE(r3) 1682 stw r6,VCPU_VRSAVE(r3)
1652 mtmsrd r9 1683 mtmsrd r5
1653 isync 1684 isync
1654 blr 1685 blr
1655 1686
diff --git a/arch/powerpc/kvm/book3s_pr.c b/arch/powerpc/kvm/book3s_pr.c
index 7759053d391b..a1baec340f7e 100644
--- a/arch/powerpc/kvm/book3s_pr.c
+++ b/arch/powerpc/kvm/book3s_pr.c
@@ -120,6 +120,7 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
120 if (msr & MSR_POW) { 120 if (msr & MSR_POW) {
121 if (!vcpu->arch.pending_exceptions) { 121 if (!vcpu->arch.pending_exceptions) {
122 kvm_vcpu_block(vcpu); 122 kvm_vcpu_block(vcpu);
123 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
123 vcpu->stat.halt_wakeup++; 124 vcpu->stat.halt_wakeup++;
124 125
125 /* Unset POW bit after we woke up */ 126 /* Unset POW bit after we woke up */
@@ -144,6 +145,21 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
144 } 145 }
145 } 146 }
146 147
148 /*
149 * When switching from 32 to 64-bit, we may have a stale 32-bit
150 * magic page around, we need to flush it. Typically 32-bit magic
151 * page will be instanciated when calling into RTAS. Note: We
152 * assume that such transition only happens while in kernel mode,
153 * ie, we never transition from user 32-bit to kernel 64-bit with
154 * a 32-bit magic page around.
155 */
156 if (vcpu->arch.magic_page_pa &&
157 !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
158 /* going from RTAS to normal kernel code */
159 kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
160 ~0xFFFUL);
161 }
162
147 /* Preload FPU if it's enabled */ 163 /* Preload FPU if it's enabled */
148 if (vcpu->arch.shared->msr & MSR_FP) 164 if (vcpu->arch.shared->msr & MSR_FP)
149 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); 165 kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
@@ -251,6 +267,9 @@ static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
251{ 267{
252 ulong mp_pa = vcpu->arch.magic_page_pa; 268 ulong mp_pa = vcpu->arch.magic_page_pa;
253 269
270 if (!(vcpu->arch.shared->msr & MSR_SF))
271 mp_pa = (uint32_t)mp_pa;
272
254 if (unlikely(mp_pa) && 273 if (unlikely(mp_pa) &&
255 unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) { 274 unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
256 return 1; 275 return 1;
@@ -351,6 +370,7 @@ int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
351 /* MMIO */ 370 /* MMIO */
352 vcpu->stat.mmio_exits++; 371 vcpu->stat.mmio_exits++;
353 vcpu->arch.paddr_accessed = pte.raddr; 372 vcpu->arch.paddr_accessed = pte.raddr;
373 vcpu->arch.vaddr_accessed = pte.eaddr;
354 r = kvmppc_emulate_mmio(run, vcpu); 374 r = kvmppc_emulate_mmio(run, vcpu);
355 if ( r == RESUME_HOST_NV ) 375 if ( r == RESUME_HOST_NV )
356 r = RESUME_HOST; 376 r = RESUME_HOST;
@@ -528,6 +548,9 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
528 run->exit_reason = KVM_EXIT_UNKNOWN; 548 run->exit_reason = KVM_EXIT_UNKNOWN;
529 run->ready_for_interrupt_injection = 1; 549 run->ready_for_interrupt_injection = 1;
530 550
551 /* We get here with MSR.EE=0, so enable it to be a nice citizen */
552 __hard_irq_enable();
553
531 trace_kvm_book3s_exit(exit_nr, vcpu); 554 trace_kvm_book3s_exit(exit_nr, vcpu);
532 preempt_enable(); 555 preempt_enable();
533 kvm_resched(vcpu); 556 kvm_resched(vcpu);
@@ -617,10 +640,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
617 break; 640 break;
618 /* We're good on these - the host merely wanted to get our attention */ 641 /* We're good on these - the host merely wanted to get our attention */
619 case BOOK3S_INTERRUPT_DECREMENTER: 642 case BOOK3S_INTERRUPT_DECREMENTER:
643 case BOOK3S_INTERRUPT_HV_DECREMENTER:
620 vcpu->stat.dec_exits++; 644 vcpu->stat.dec_exits++;
621 r = RESUME_GUEST; 645 r = RESUME_GUEST;
622 break; 646 break;
623 case BOOK3S_INTERRUPT_EXTERNAL: 647 case BOOK3S_INTERRUPT_EXTERNAL:
648 case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
649 case BOOK3S_INTERRUPT_EXTERNAL_HV:
624 vcpu->stat.ext_intr_exits++; 650 vcpu->stat.ext_intr_exits++;
625 r = RESUME_GUEST; 651 r = RESUME_GUEST;
626 break; 652 break;
@@ -628,6 +654,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
628 r = RESUME_GUEST; 654 r = RESUME_GUEST;
629 break; 655 break;
630 case BOOK3S_INTERRUPT_PROGRAM: 656 case BOOK3S_INTERRUPT_PROGRAM:
657 case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
631 { 658 {
632 enum emulation_result er; 659 enum emulation_result er;
633 struct kvmppc_book3s_shadow_vcpu *svcpu; 660 struct kvmppc_book3s_shadow_vcpu *svcpu;
@@ -1131,6 +1158,31 @@ out:
1131 return r; 1158 return r;
1132} 1159}
1133 1160
1161#ifdef CONFIG_PPC64
1162int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
1163{
1164 /* No flags */
1165 info->flags = 0;
1166
1167 /* SLB is always 64 entries */
1168 info->slb_size = 64;
1169
1170 /* Standard 4k base page size segment */
1171 info->sps[0].page_shift = 12;
1172 info->sps[0].slb_enc = 0;
1173 info->sps[0].enc[0].page_shift = 12;
1174 info->sps[0].enc[0].pte_enc = 0;
1175
1176 /* Standard 16M large page size segment */
1177 info->sps[1].page_shift = 24;
1178 info->sps[1].slb_enc = SLB_VSID_L;
1179 info->sps[1].enc[0].page_shift = 24;
1180 info->sps[1].enc[0].pte_enc = 0;
1181
1182 return 0;
1183}
1184#endif /* CONFIG_PPC64 */
1185
1134int kvmppc_core_prepare_memory_region(struct kvm *kvm, 1186int kvmppc_core_prepare_memory_region(struct kvm *kvm,
1135 struct kvm_userspace_memory_region *mem) 1187 struct kvm_userspace_memory_region *mem)
1136{ 1188{
@@ -1144,11 +1196,18 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
1144 1196
1145int kvmppc_core_init_vm(struct kvm *kvm) 1197int kvmppc_core_init_vm(struct kvm *kvm)
1146{ 1198{
1199#ifdef CONFIG_PPC64
1200 INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
1201#endif
1202
1147 return 0; 1203 return 0;
1148} 1204}
1149 1205
1150void kvmppc_core_destroy_vm(struct kvm *kvm) 1206void kvmppc_core_destroy_vm(struct kvm *kvm)
1151{ 1207{
1208#ifdef CONFIG_PPC64
1209 WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
1210#endif
1152} 1211}
1153 1212
1154static int kvmppc_book3s_init(void) 1213static int kvmppc_book3s_init(void)
diff --git a/arch/powerpc/kvm/book3s_pr_papr.c b/arch/powerpc/kvm/book3s_pr_papr.c
index b9589324797b..3ff9013d6e79 100644
--- a/arch/powerpc/kvm/book3s_pr_papr.c
+++ b/arch/powerpc/kvm/book3s_pr_papr.c
@@ -15,6 +15,8 @@
15 * published by the Free Software Foundation. 15 * published by the Free Software Foundation.
16 */ 16 */
17 17
18#include <linux/anon_inodes.h>
19
18#include <asm/uaccess.h> 20#include <asm/uaccess.h>
19#include <asm/kvm_ppc.h> 21#include <asm/kvm_ppc.h>
20#include <asm/kvm_book3s.h> 22#include <asm/kvm_book3s.h>
@@ -98,6 +100,83 @@ static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
98 return EMULATE_DONE; 100 return EMULATE_DONE;
99} 101}
100 102
103/* Request defs for kvmppc_h_pr_bulk_remove() */
104#define H_BULK_REMOVE_TYPE 0xc000000000000000ULL
105#define H_BULK_REMOVE_REQUEST 0x4000000000000000ULL
106#define H_BULK_REMOVE_RESPONSE 0x8000000000000000ULL
107#define H_BULK_REMOVE_END 0xc000000000000000ULL
108#define H_BULK_REMOVE_CODE 0x3000000000000000ULL
109#define H_BULK_REMOVE_SUCCESS 0x0000000000000000ULL
110#define H_BULK_REMOVE_NOT_FOUND 0x1000000000000000ULL
111#define H_BULK_REMOVE_PARM 0x2000000000000000ULL
112#define H_BULK_REMOVE_HW 0x3000000000000000ULL
113#define H_BULK_REMOVE_RC 0x0c00000000000000ULL
114#define H_BULK_REMOVE_FLAGS 0x0300000000000000ULL
115#define H_BULK_REMOVE_ABSOLUTE 0x0000000000000000ULL
116#define H_BULK_REMOVE_ANDCOND 0x0100000000000000ULL
117#define H_BULK_REMOVE_AVPN 0x0200000000000000ULL
118#define H_BULK_REMOVE_PTEX 0x00ffffffffffffffULL
119#define H_BULK_REMOVE_MAX_BATCH 4
120
121static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
122{
123 int i;
124 int paramnr = 4;
125 int ret = H_SUCCESS;
126
127 for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
128 unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
129 unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
130 unsigned long pteg, rb, flags;
131 unsigned long pte[2];
132 unsigned long v = 0;
133
134 if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
135 break; /* Exit success */
136 } else if ((tsh & H_BULK_REMOVE_TYPE) !=
137 H_BULK_REMOVE_REQUEST) {
138 ret = H_PARAMETER;
139 break; /* Exit fail */
140 }
141
142 tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
143 tsh |= H_BULK_REMOVE_RESPONSE;
144
145 if ((tsh & H_BULK_REMOVE_ANDCOND) &&
146 (tsh & H_BULK_REMOVE_AVPN)) {
147 tsh |= H_BULK_REMOVE_PARM;
148 kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
149 ret = H_PARAMETER;
150 break; /* Exit fail */
151 }
152
153 pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
154 copy_from_user(pte, (void __user *)pteg, sizeof(pte));
155
156 /* tsl = AVPN */
157 flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;
158
159 if ((pte[0] & HPTE_V_VALID) == 0 ||
160 ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) ||
161 ((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) {
162 tsh |= H_BULK_REMOVE_NOT_FOUND;
163 } else {
164 /* Splat the pteg in (userland) hpt */
165 copy_to_user((void __user *)pteg, &v, sizeof(v));
166
167 rb = compute_tlbie_rb(pte[0], pte[1],
168 tsh & H_BULK_REMOVE_PTEX);
169 vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
170 tsh |= H_BULK_REMOVE_SUCCESS;
171 tsh |= (pte[1] & (HPTE_R_C | HPTE_R_R)) << 43;
172 }
173 kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
174 }
175 kvmppc_set_gpr(vcpu, 3, ret);
176
177 return EMULATE_DONE;
178}
179
101static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) 180static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
102{ 181{
103 unsigned long flags = kvmppc_get_gpr(vcpu, 4); 182 unsigned long flags = kvmppc_get_gpr(vcpu, 4);
@@ -134,6 +213,20 @@ static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
134 return EMULATE_DONE; 213 return EMULATE_DONE;
135} 214}
136 215
216static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
217{
218 unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
219 unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
220 unsigned long tce = kvmppc_get_gpr(vcpu, 6);
221 long rc;
222
223 rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
224 if (rc == H_TOO_HARD)
225 return EMULATE_FAIL;
226 kvmppc_set_gpr(vcpu, 3, rc);
227 return EMULATE_DONE;
228}
229
137int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) 230int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
138{ 231{
139 switch (cmd) { 232 switch (cmd) {
@@ -144,12 +237,12 @@ int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd)
144 case H_PROTECT: 237 case H_PROTECT:
145 return kvmppc_h_pr_protect(vcpu); 238 return kvmppc_h_pr_protect(vcpu);
146 case H_BULK_REMOVE: 239 case H_BULK_REMOVE:
147 /* We just flush all PTEs, so user space can 240 return kvmppc_h_pr_bulk_remove(vcpu);
148 handle the HPT modifications */ 241 case H_PUT_TCE:
149 kvmppc_mmu_pte_flush(vcpu, 0, 0); 242 return kvmppc_h_pr_put_tce(vcpu);
150 break;
151 case H_CEDE: 243 case H_CEDE:
152 kvm_vcpu_block(vcpu); 244 kvm_vcpu_block(vcpu);
245 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
153 vcpu->stat.halt_wakeup++; 246 vcpu->stat.halt_wakeup++;
154 return EMULATE_DONE; 247 return EMULATE_DONE;
155 } 248 }
diff --git a/arch/powerpc/kvm/book3s_segment.S b/arch/powerpc/kvm/book3s_segment.S
index 6e6e9cef34a8..798491a268b3 100644
--- a/arch/powerpc/kvm/book3s_segment.S
+++ b/arch/powerpc/kvm/book3s_segment.S
@@ -128,24 +128,25 @@ no_dcbz32_on:
128 /* First clear RI in our current MSR value */ 128 /* First clear RI in our current MSR value */
129 li r0, MSR_RI 129 li r0, MSR_RI
130 andc r6, r6, r0 130 andc r6, r6, r0
131 MTMSR_EERI(r6)
132 mtsrr0 r9
133 mtsrr1 r4
134 131
135 PPC_LL r0, SVCPU_R0(r3) 132 PPC_LL r0, SVCPU_R0(r3)
136 PPC_LL r1, SVCPU_R1(r3) 133 PPC_LL r1, SVCPU_R1(r3)
137 PPC_LL r2, SVCPU_R2(r3) 134 PPC_LL r2, SVCPU_R2(r3)
138 PPC_LL r4, SVCPU_R4(r3)
139 PPC_LL r5, SVCPU_R5(r3) 135 PPC_LL r5, SVCPU_R5(r3)
140 PPC_LL r6, SVCPU_R6(r3)
141 PPC_LL r7, SVCPU_R7(r3) 136 PPC_LL r7, SVCPU_R7(r3)
142 PPC_LL r8, SVCPU_R8(r3) 137 PPC_LL r8, SVCPU_R8(r3)
143 PPC_LL r9, SVCPU_R9(r3)
144 PPC_LL r10, SVCPU_R10(r3) 138 PPC_LL r10, SVCPU_R10(r3)
145 PPC_LL r11, SVCPU_R11(r3) 139 PPC_LL r11, SVCPU_R11(r3)
146 PPC_LL r12, SVCPU_R12(r3) 140 PPC_LL r12, SVCPU_R12(r3)
147 PPC_LL r13, SVCPU_R13(r3) 141 PPC_LL r13, SVCPU_R13(r3)
148 142
143 MTMSR_EERI(r6)
144 mtsrr0 r9
145 mtsrr1 r4
146
147 PPC_LL r4, SVCPU_R4(r3)
148 PPC_LL r6, SVCPU_R6(r3)
149 PPC_LL r9, SVCPU_R9(r3)
149 PPC_LL r3, (SVCPU_R3)(r3) 150 PPC_LL r3, (SVCPU_R3)(r3)
150 151
151 RFI 152 RFI
diff --git a/arch/powerpc/kvm/booke.c b/arch/powerpc/kvm/booke.c
index ee9e1ee9c858..72f13f4a06e0 100644
--- a/arch/powerpc/kvm/booke.c
+++ b/arch/powerpc/kvm/booke.c
@@ -17,6 +17,8 @@
17 * 17 *
18 * Authors: Hollis Blanchard <hollisb@us.ibm.com> 18 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
19 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com> 19 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
20 * Scott Wood <scottwood@freescale.com>
21 * Varun Sethi <varun.sethi@freescale.com>
20 */ 22 */
21 23
22#include <linux/errno.h> 24#include <linux/errno.h>
@@ -30,9 +32,12 @@
30#include <asm/cputable.h> 32#include <asm/cputable.h>
31#include <asm/uaccess.h> 33#include <asm/uaccess.h>
32#include <asm/kvm_ppc.h> 34#include <asm/kvm_ppc.h>
33#include "timing.h"
34#include <asm/cacheflush.h> 35#include <asm/cacheflush.h>
36#include <asm/dbell.h>
37#include <asm/hw_irq.h>
38#include <asm/irq.h>
35 39
40#include "timing.h"
36#include "booke.h" 41#include "booke.h"
37 42
38unsigned long kvmppc_booke_handlers; 43unsigned long kvmppc_booke_handlers;
@@ -55,6 +60,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
55 { "dec", VCPU_STAT(dec_exits) }, 60 { "dec", VCPU_STAT(dec_exits) },
56 { "ext_intr", VCPU_STAT(ext_intr_exits) }, 61 { "ext_intr", VCPU_STAT(ext_intr_exits) },
57 { "halt_wakeup", VCPU_STAT(halt_wakeup) }, 62 { "halt_wakeup", VCPU_STAT(halt_wakeup) },
63 { "doorbell", VCPU_STAT(dbell_exits) },
64 { "guest doorbell", VCPU_STAT(gdbell_exits) },
58 { NULL } 65 { NULL }
59}; 66};
60 67
@@ -121,6 +128,10 @@ void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
121{ 128{
122 u32 old_msr = vcpu->arch.shared->msr; 129 u32 old_msr = vcpu->arch.shared->msr;
123 130
131#ifdef CONFIG_KVM_BOOKE_HV
132 new_msr |= MSR_GS;
133#endif
134
124 vcpu->arch.shared->msr = new_msr; 135 vcpu->arch.shared->msr = new_msr;
125 136
126 kvmppc_mmu_msr_notify(vcpu, old_msr); 137 kvmppc_mmu_msr_notify(vcpu, old_msr);
@@ -195,17 +206,87 @@ void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
195 clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions); 206 clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
196} 207}
197 208
209static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
210{
211#ifdef CONFIG_KVM_BOOKE_HV
212 mtspr(SPRN_GSRR0, srr0);
213 mtspr(SPRN_GSRR1, srr1);
214#else
215 vcpu->arch.shared->srr0 = srr0;
216 vcpu->arch.shared->srr1 = srr1;
217#endif
218}
219
220static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
221{
222 vcpu->arch.csrr0 = srr0;
223 vcpu->arch.csrr1 = srr1;
224}
225
226static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
227{
228 if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
229 vcpu->arch.dsrr0 = srr0;
230 vcpu->arch.dsrr1 = srr1;
231 } else {
232 set_guest_csrr(vcpu, srr0, srr1);
233 }
234}
235
236static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
237{
238 vcpu->arch.mcsrr0 = srr0;
239 vcpu->arch.mcsrr1 = srr1;
240}
241
242static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
243{
244#ifdef CONFIG_KVM_BOOKE_HV
245 return mfspr(SPRN_GDEAR);
246#else
247 return vcpu->arch.shared->dar;
248#endif
249}
250
251static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
252{
253#ifdef CONFIG_KVM_BOOKE_HV
254 mtspr(SPRN_GDEAR, dear);
255#else
256 vcpu->arch.shared->dar = dear;
257#endif
258}
259
260static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
261{
262#ifdef CONFIG_KVM_BOOKE_HV
263 return mfspr(SPRN_GESR);
264#else
265 return vcpu->arch.shared->esr;
266#endif
267}
268
269static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
270{
271#ifdef CONFIG_KVM_BOOKE_HV
272 mtspr(SPRN_GESR, esr);
273#else
274 vcpu->arch.shared->esr = esr;
275#endif
276}
277
198/* Deliver the interrupt of the corresponding priority, if possible. */ 278/* Deliver the interrupt of the corresponding priority, if possible. */
199static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu, 279static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
200 unsigned int priority) 280 unsigned int priority)
201{ 281{
202 int allowed = 0; 282 int allowed = 0;
203 ulong uninitialized_var(msr_mask); 283 ulong msr_mask = 0;
204 bool update_esr = false, update_dear = false; 284 bool update_esr = false, update_dear = false;
205 ulong crit_raw = vcpu->arch.shared->critical; 285 ulong crit_raw = vcpu->arch.shared->critical;
206 ulong crit_r1 = kvmppc_get_gpr(vcpu, 1); 286 ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
207 bool crit; 287 bool crit;
208 bool keep_irq = false; 288 bool keep_irq = false;
289 enum int_class int_class;
209 290
210 /* Truncate crit indicators in 32 bit mode */ 291 /* Truncate crit indicators in 32 bit mode */
211 if (!(vcpu->arch.shared->msr & MSR_SF)) { 292 if (!(vcpu->arch.shared->msr & MSR_SF)) {
@@ -241,46 +322,85 @@ static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
241 case BOOKE_IRQPRIO_AP_UNAVAIL: 322 case BOOKE_IRQPRIO_AP_UNAVAIL:
242 case BOOKE_IRQPRIO_ALIGNMENT: 323 case BOOKE_IRQPRIO_ALIGNMENT:
243 allowed = 1; 324 allowed = 1;
244 msr_mask = MSR_CE|MSR_ME|MSR_DE; 325 msr_mask = MSR_CE | MSR_ME | MSR_DE;
326 int_class = INT_CLASS_NONCRIT;
245 break; 327 break;
246 case BOOKE_IRQPRIO_CRITICAL: 328 case BOOKE_IRQPRIO_CRITICAL:
247 case BOOKE_IRQPRIO_WATCHDOG: 329 case BOOKE_IRQPRIO_DBELL_CRIT:
248 allowed = vcpu->arch.shared->msr & MSR_CE; 330 allowed = vcpu->arch.shared->msr & MSR_CE;
331 allowed = allowed && !crit;
249 msr_mask = MSR_ME; 332 msr_mask = MSR_ME;
333 int_class = INT_CLASS_CRIT;
250 break; 334 break;
251 case BOOKE_IRQPRIO_MACHINE_CHECK: 335 case BOOKE_IRQPRIO_MACHINE_CHECK:
252 allowed = vcpu->arch.shared->msr & MSR_ME; 336 allowed = vcpu->arch.shared->msr & MSR_ME;
253 msr_mask = 0; 337 allowed = allowed && !crit;
338 int_class = INT_CLASS_MC;
254 break; 339 break;
255 case BOOKE_IRQPRIO_DECREMENTER: 340 case BOOKE_IRQPRIO_DECREMENTER:
256 case BOOKE_IRQPRIO_FIT: 341 case BOOKE_IRQPRIO_FIT:
257 keep_irq = true; 342 keep_irq = true;
258 /* fall through */ 343 /* fall through */
259 case BOOKE_IRQPRIO_EXTERNAL: 344 case BOOKE_IRQPRIO_EXTERNAL:
345 case BOOKE_IRQPRIO_DBELL:
260 allowed = vcpu->arch.shared->msr & MSR_EE; 346 allowed = vcpu->arch.shared->msr & MSR_EE;
261 allowed = allowed && !crit; 347 allowed = allowed && !crit;
262 msr_mask = MSR_CE|MSR_ME|MSR_DE; 348 msr_mask = MSR_CE | MSR_ME | MSR_DE;
349 int_class = INT_CLASS_NONCRIT;
263 break; 350 break;
264 case BOOKE_IRQPRIO_DEBUG: 351 case BOOKE_IRQPRIO_DEBUG:
265 allowed = vcpu->arch.shared->msr & MSR_DE; 352 allowed = vcpu->arch.shared->msr & MSR_DE;
353 allowed = allowed && !crit;
266 msr_mask = MSR_ME; 354 msr_mask = MSR_ME;
355 int_class = INT_CLASS_CRIT;
267 break; 356 break;
268 } 357 }
269 358
270 if (allowed) { 359 if (allowed) {
271 vcpu->arch.shared->srr0 = vcpu->arch.pc; 360 switch (int_class) {
272 vcpu->arch.shared->srr1 = vcpu->arch.shared->msr; 361 case INT_CLASS_NONCRIT:
362 set_guest_srr(vcpu, vcpu->arch.pc,
363 vcpu->arch.shared->msr);
364 break;
365 case INT_CLASS_CRIT:
366 set_guest_csrr(vcpu, vcpu->arch.pc,
367 vcpu->arch.shared->msr);
368 break;
369 case INT_CLASS_DBG:
370 set_guest_dsrr(vcpu, vcpu->arch.pc,
371 vcpu->arch.shared->msr);
372 break;
373 case INT_CLASS_MC:
374 set_guest_mcsrr(vcpu, vcpu->arch.pc,
375 vcpu->arch.shared->msr);
376 break;
377 }
378
273 vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority]; 379 vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
274 if (update_esr == true) 380 if (update_esr == true)
275 vcpu->arch.shared->esr = vcpu->arch.queued_esr; 381 set_guest_esr(vcpu, vcpu->arch.queued_esr);
276 if (update_dear == true) 382 if (update_dear == true)
277 vcpu->arch.shared->dar = vcpu->arch.queued_dear; 383 set_guest_dear(vcpu, vcpu->arch.queued_dear);
278 kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask); 384 kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);
279 385
280 if (!keep_irq) 386 if (!keep_irq)
281 clear_bit(priority, &vcpu->arch.pending_exceptions); 387 clear_bit(priority, &vcpu->arch.pending_exceptions);
282 } 388 }
283 389
390#ifdef CONFIG_KVM_BOOKE_HV
391 /*
392 * If an interrupt is pending but masked, raise a guest doorbell
393 * so that we are notified when the guest enables the relevant
394 * MSR bit.
395 */
396 if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
397 kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
398 if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
399 kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
400 if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
401 kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
402#endif
403
284 return allowed; 404 return allowed;
285} 405}
286 406
@@ -305,7 +425,7 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
305 } 425 }
306 426
307 priority = __ffs(*pending); 427 priority = __ffs(*pending);
308 while (priority <= BOOKE_IRQPRIO_MAX) { 428 while (priority < BOOKE_IRQPRIO_MAX) {
309 if (kvmppc_booke_irqprio_deliver(vcpu, priority)) 429 if (kvmppc_booke_irqprio_deliver(vcpu, priority))
310 break; 430 break;
311 431
@@ -319,8 +439,9 @@ static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
319} 439}
320 440
321/* Check pending exceptions and deliver one, if possible. */ 441/* Check pending exceptions and deliver one, if possible. */
322void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu) 442int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
323{ 443{
444 int r = 0;
324 WARN_ON_ONCE(!irqs_disabled()); 445 WARN_ON_ONCE(!irqs_disabled());
325 446
326 kvmppc_core_check_exceptions(vcpu); 447 kvmppc_core_check_exceptions(vcpu);
@@ -328,16 +449,60 @@ void kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
328 if (vcpu->arch.shared->msr & MSR_WE) { 449 if (vcpu->arch.shared->msr & MSR_WE) {
329 local_irq_enable(); 450 local_irq_enable();
330 kvm_vcpu_block(vcpu); 451 kvm_vcpu_block(vcpu);
452 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
331 local_irq_disable(); 453 local_irq_disable();
332 454
333 kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS); 455 kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
334 kvmppc_core_check_exceptions(vcpu); 456 r = 1;
335 }; 457 };
458
459 return r;
460}
461
462/*
463 * Common checks before entering the guest world. Call with interrupts
464 * disabled.
465 *
466 * returns !0 if a signal is pending and check_signal is true
467 */
468static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
469{
470 int r = 0;
471
472 WARN_ON_ONCE(!irqs_disabled());
473 while (true) {
474 if (need_resched()) {
475 local_irq_enable();
476 cond_resched();
477 local_irq_disable();
478 continue;
479 }
480
481 if (signal_pending(current)) {
482 r = 1;
483 break;
484 }
485
486 if (kvmppc_core_prepare_to_enter(vcpu)) {
487 /* interrupts got enabled in between, so we
488 are back at square 1 */
489 continue;
490 }
491
492 break;
493 }
494
495 return r;
336} 496}
337 497
338int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) 498int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
339{ 499{
340 int ret; 500 int ret;
501#ifdef CONFIG_PPC_FPU
502 unsigned int fpscr;
503 int fpexc_mode;
504 u64 fpr[32];
505#endif
341 506
342 if (!vcpu->arch.sane) { 507 if (!vcpu->arch.sane) {
343 kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; 508 kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -345,17 +510,53 @@ int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
345 } 510 }
346 511
347 local_irq_disable(); 512 local_irq_disable();
348 513 if (kvmppc_prepare_to_enter(vcpu)) {
349 kvmppc_core_prepare_to_enter(vcpu);
350
351 if (signal_pending(current)) {
352 kvm_run->exit_reason = KVM_EXIT_INTR; 514 kvm_run->exit_reason = KVM_EXIT_INTR;
353 ret = -EINTR; 515 ret = -EINTR;
354 goto out; 516 goto out;
355 } 517 }
356 518
357 kvm_guest_enter(); 519 kvm_guest_enter();
520
521#ifdef CONFIG_PPC_FPU
522 /* Save userspace FPU state in stack */
523 enable_kernel_fp();
524 memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
525 fpscr = current->thread.fpscr.val;
526 fpexc_mode = current->thread.fpexc_mode;
527
528 /* Restore guest FPU state to thread */
529 memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
530 current->thread.fpscr.val = vcpu->arch.fpscr;
531
532 /*
533 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
534 * as always using the FPU. Kernel usage of FP (via
535 * enable_kernel_fp()) in this thread must not occur while
536 * vcpu->fpu_active is set.
537 */
538 vcpu->fpu_active = 1;
539
540 kvmppc_load_guest_fp(vcpu);
541#endif
542
358 ret = __kvmppc_vcpu_run(kvm_run, vcpu); 543 ret = __kvmppc_vcpu_run(kvm_run, vcpu);
544
545#ifdef CONFIG_PPC_FPU
546 kvmppc_save_guest_fp(vcpu);
547
548 vcpu->fpu_active = 0;
549
550 /* Save guest FPU state from thread */
551 memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
552 vcpu->arch.fpscr = current->thread.fpscr.val;
553
554 /* Restore userspace FPU state from stack */
555 memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
556 current->thread.fpscr.val = fpscr;
557 current->thread.fpexc_mode = fpexc_mode;
558#endif
559
359 kvm_guest_exit(); 560 kvm_guest_exit();
360 561
361out: 562out:
@@ -363,6 +564,84 @@ out:
363 return ret; 564 return ret;
364} 565}
365 566
567static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
568{
569 enum emulation_result er;
570
571 er = kvmppc_emulate_instruction(run, vcpu);
572 switch (er) {
573 case EMULATE_DONE:
574 /* don't overwrite subtypes, just account kvm_stats */
575 kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
576 /* Future optimization: only reload non-volatiles if
577 * they were actually modified by emulation. */
578 return RESUME_GUEST_NV;
579
580 case EMULATE_DO_DCR:
581 run->exit_reason = KVM_EXIT_DCR;
582 return RESUME_HOST;
583
584 case EMULATE_FAIL:
585 printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
586 __func__, vcpu->arch.pc, vcpu->arch.last_inst);
587 /* For debugging, encode the failing instruction and
588 * report it to userspace. */
589 run->hw.hardware_exit_reason = ~0ULL << 32;
590 run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
591 kvmppc_core_queue_program(vcpu, ESR_PIL);
592 return RESUME_HOST;
593
594 default:
595 BUG();
596 }
597}
598
599static void kvmppc_fill_pt_regs(struct pt_regs *regs)
600{
601 ulong r1, ip, msr, lr;
602
603 asm("mr %0, 1" : "=r"(r1));
604 asm("mflr %0" : "=r"(lr));
605 asm("mfmsr %0" : "=r"(msr));
606 asm("bl 1f; 1: mflr %0" : "=r"(ip));
607
608 memset(regs, 0, sizeof(*regs));
609 regs->gpr[1] = r1;
610 regs->nip = ip;
611 regs->msr = msr;
612 regs->link = lr;
613}
614
615static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
616 unsigned int exit_nr)
617{
618 struct pt_regs regs;
619
620 switch (exit_nr) {
621 case BOOKE_INTERRUPT_EXTERNAL:
622 kvmppc_fill_pt_regs(&regs);
623 do_IRQ(&regs);
624 break;
625 case BOOKE_INTERRUPT_DECREMENTER:
626 kvmppc_fill_pt_regs(&regs);
627 timer_interrupt(&regs);
628 break;
629#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
630 case BOOKE_INTERRUPT_DOORBELL:
631 kvmppc_fill_pt_regs(&regs);
632 doorbell_exception(&regs);
633 break;
634#endif
635 case BOOKE_INTERRUPT_MACHINE_CHECK:
636 /* FIXME */
637 break;
638 case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
639 kvmppc_fill_pt_regs(&regs);
640 performance_monitor_exception(&regs);
641 break;
642 }
643}
644
366/** 645/**
367 * kvmppc_handle_exit 646 * kvmppc_handle_exit
368 * 647 *
@@ -371,12 +650,14 @@ out:
371int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu, 650int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
372 unsigned int exit_nr) 651 unsigned int exit_nr)
373{ 652{
374 enum emulation_result er;
375 int r = RESUME_HOST; 653 int r = RESUME_HOST;
376 654
377 /* update before a new last_exit_type is rewritten */ 655 /* update before a new last_exit_type is rewritten */
378 kvmppc_update_timing_stats(vcpu); 656 kvmppc_update_timing_stats(vcpu);
379 657
658 /* restart interrupts if they were meant for the host */
659 kvmppc_restart_interrupt(vcpu, exit_nr);
660
380 local_irq_enable(); 661 local_irq_enable();
381 662
382 run->exit_reason = KVM_EXIT_UNKNOWN; 663 run->exit_reason = KVM_EXIT_UNKNOWN;
@@ -386,62 +667,74 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
386 case BOOKE_INTERRUPT_MACHINE_CHECK: 667 case BOOKE_INTERRUPT_MACHINE_CHECK:
387 printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR)); 668 printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
388 kvmppc_dump_vcpu(vcpu); 669 kvmppc_dump_vcpu(vcpu);
670 /* For debugging, send invalid exit reason to user space */
671 run->hw.hardware_exit_reason = ~1ULL << 32;
672 run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
389 r = RESUME_HOST; 673 r = RESUME_HOST;
390 break; 674 break;
391 675
392 case BOOKE_INTERRUPT_EXTERNAL: 676 case BOOKE_INTERRUPT_EXTERNAL:
393 kvmppc_account_exit(vcpu, EXT_INTR_EXITS); 677 kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
394 if (need_resched())
395 cond_resched();
396 r = RESUME_GUEST; 678 r = RESUME_GUEST;
397 break; 679 break;
398 680
399 case BOOKE_INTERRUPT_DECREMENTER: 681 case BOOKE_INTERRUPT_DECREMENTER:
400 /* Since we switched IVPR back to the host's value, the host
401 * handled this interrupt the moment we enabled interrupts.
402 * Now we just offer it a chance to reschedule the guest. */
403 kvmppc_account_exit(vcpu, DEC_EXITS); 682 kvmppc_account_exit(vcpu, DEC_EXITS);
404 if (need_resched())
405 cond_resched();
406 r = RESUME_GUEST; 683 r = RESUME_GUEST;
407 break; 684 break;
408 685
686 case BOOKE_INTERRUPT_DOORBELL:
687 kvmppc_account_exit(vcpu, DBELL_EXITS);
688 r = RESUME_GUEST;
689 break;
690
691 case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
692 kvmppc_account_exit(vcpu, GDBELL_EXITS);
693
694 /*
695 * We are here because there is a pending guest interrupt
696 * which could not be delivered as MSR_CE or MSR_ME was not
697 * set. Once we break from here we will retry delivery.
698 */
699 r = RESUME_GUEST;
700 break;
701
702 case BOOKE_INTERRUPT_GUEST_DBELL:
703 kvmppc_account_exit(vcpu, GDBELL_EXITS);
704
705 /*
706 * We are here because there is a pending guest interrupt
707 * which could not be delivered as MSR_EE was not set. Once
708 * we break from here we will retry delivery.
709 */
710 r = RESUME_GUEST;
711 break;
712
713 case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
714 r = RESUME_GUEST;
715 break;
716
717 case BOOKE_INTERRUPT_HV_PRIV:
718 r = emulation_exit(run, vcpu);
719 break;
720
409 case BOOKE_INTERRUPT_PROGRAM: 721 case BOOKE_INTERRUPT_PROGRAM:
410 if (vcpu->arch.shared->msr & MSR_PR) { 722 if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
411 /* Program traps generated by user-level software must be handled 723 /*
412 * by the guest kernel. */ 724 * Program traps generated by user-level software must
725 * be handled by the guest kernel.
726 *
727 * In GS mode, hypervisor privileged instructions trap
728 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
729 * actual program interrupts, handled by the guest.
730 */
413 kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr); 731 kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
414 r = RESUME_GUEST; 732 r = RESUME_GUEST;
415 kvmppc_account_exit(vcpu, USR_PR_INST); 733 kvmppc_account_exit(vcpu, USR_PR_INST);
416 break; 734 break;
417 } 735 }
418 736
419 er = kvmppc_emulate_instruction(run, vcpu); 737 r = emulation_exit(run, vcpu);
420 switch (er) {
421 case EMULATE_DONE:
422 /* don't overwrite subtypes, just account kvm_stats */
423 kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
424 /* Future optimization: only reload non-volatiles if
425 * they were actually modified by emulation. */
426 r = RESUME_GUEST_NV;
427 break;
428 case EMULATE_DO_DCR:
429 run->exit_reason = KVM_EXIT_DCR;
430 r = RESUME_HOST;
431 break;
432 case EMULATE_FAIL:
433 /* XXX Deliver Program interrupt to guest. */
434 printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
435 __func__, vcpu->arch.pc, vcpu->arch.last_inst);
436 /* For debugging, encode the failing instruction and
437 * report it to userspace. */
438 run->hw.hardware_exit_reason = ~0ULL << 32;
439 run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
440 r = RESUME_HOST;
441 break;
442 default:
443 BUG();
444 }
445 break; 738 break;
446 739
447 case BOOKE_INTERRUPT_FP_UNAVAIL: 740 case BOOKE_INTERRUPT_FP_UNAVAIL:
@@ -506,6 +799,21 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
506 r = RESUME_GUEST; 799 r = RESUME_GUEST;
507 break; 800 break;
508 801
802#ifdef CONFIG_KVM_BOOKE_HV
803 case BOOKE_INTERRUPT_HV_SYSCALL:
804 if (!(vcpu->arch.shared->msr & MSR_PR)) {
805 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
806 } else {
807 /*
808 * hcall from guest userspace -- send privileged
809 * instruction program check.
810 */
811 kvmppc_core_queue_program(vcpu, ESR_PPR);
812 }
813
814 r = RESUME_GUEST;
815 break;
816#else
509 case BOOKE_INTERRUPT_SYSCALL: 817 case BOOKE_INTERRUPT_SYSCALL:
510 if (!(vcpu->arch.shared->msr & MSR_PR) && 818 if (!(vcpu->arch.shared->msr & MSR_PR) &&
511 (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) { 819 (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
@@ -519,6 +827,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
519 kvmppc_account_exit(vcpu, SYSCALL_EXITS); 827 kvmppc_account_exit(vcpu, SYSCALL_EXITS);
520 r = RESUME_GUEST; 828 r = RESUME_GUEST;
521 break; 829 break;
830#endif
522 831
523 case BOOKE_INTERRUPT_DTLB_MISS: { 832 case BOOKE_INTERRUPT_DTLB_MISS: {
524 unsigned long eaddr = vcpu->arch.fault_dear; 833 unsigned long eaddr = vcpu->arch.fault_dear;
@@ -526,7 +835,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
526 gpa_t gpaddr; 835 gpa_t gpaddr;
527 gfn_t gfn; 836 gfn_t gfn;
528 837
529#ifdef CONFIG_KVM_E500 838#ifdef CONFIG_KVM_E500V2
530 if (!(vcpu->arch.shared->msr & MSR_PR) && 839 if (!(vcpu->arch.shared->msr & MSR_PR) &&
531 (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) { 840 (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
532 kvmppc_map_magic(vcpu); 841 kvmppc_map_magic(vcpu);
@@ -567,6 +876,7 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
567 /* Guest has mapped and accessed a page which is not 876 /* Guest has mapped and accessed a page which is not
568 * actually RAM. */ 877 * actually RAM. */
569 vcpu->arch.paddr_accessed = gpaddr; 878 vcpu->arch.paddr_accessed = gpaddr;
879 vcpu->arch.vaddr_accessed = eaddr;
570 r = kvmppc_emulate_mmio(run, vcpu); 880 r = kvmppc_emulate_mmio(run, vcpu);
571 kvmppc_account_exit(vcpu, MMIO_EXITS); 881 kvmppc_account_exit(vcpu, MMIO_EXITS);
572 } 882 }
@@ -634,15 +944,13 @@ int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
634 BUG(); 944 BUG();
635 } 945 }
636 946
637 local_irq_disable(); 947 /*
638 948 * To avoid clobbering exit_reason, only check for signals if we
639 kvmppc_core_prepare_to_enter(vcpu); 949 * aren't already exiting to userspace for some other reason.
640 950 */
641 if (!(r & RESUME_HOST)) { 951 if (!(r & RESUME_HOST)) {
642 /* To avoid clobbering exit_reason, only check for signals if 952 local_irq_disable();
643 * we aren't already exiting to userspace for some other 953 if (kvmppc_prepare_to_enter(vcpu)) {
644 * reason. */
645 if (signal_pending(current)) {
646 run->exit_reason = KVM_EXIT_INTR; 954 run->exit_reason = KVM_EXIT_INTR;
647 r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV); 955 r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
648 kvmppc_account_exit(vcpu, SIGNAL_EXITS); 956 kvmppc_account_exit(vcpu, SIGNAL_EXITS);
@@ -659,12 +967,15 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
659 int r; 967 int r;
660 968
661 vcpu->arch.pc = 0; 969 vcpu->arch.pc = 0;
662 vcpu->arch.shared->msr = 0;
663 vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
664 vcpu->arch.shared->pir = vcpu->vcpu_id; 970 vcpu->arch.shared->pir = vcpu->vcpu_id;
665 kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ 971 kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
972 kvmppc_set_msr(vcpu, 0);
666 973
974#ifndef CONFIG_KVM_BOOKE_HV
975 vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
667 vcpu->arch.shadow_pid = 1; 976 vcpu->arch.shadow_pid = 1;
977 vcpu->arch.shared->msr = 0;
978#endif
668 979
669 /* Eye-catching numbers so we know if the guest takes an interrupt 980 /* Eye-catching numbers so we know if the guest takes an interrupt
670 * before it's programmed its own IVPR/IVORs. */ 981 * before it's programmed its own IVPR/IVORs. */
@@ -745,8 +1056,8 @@ static void get_sregs_base(struct kvm_vcpu *vcpu,
745 sregs->u.e.csrr0 = vcpu->arch.csrr0; 1056 sregs->u.e.csrr0 = vcpu->arch.csrr0;
746 sregs->u.e.csrr1 = vcpu->arch.csrr1; 1057 sregs->u.e.csrr1 = vcpu->arch.csrr1;
747 sregs->u.e.mcsr = vcpu->arch.mcsr; 1058 sregs->u.e.mcsr = vcpu->arch.mcsr;
748 sregs->u.e.esr = vcpu->arch.shared->esr; 1059 sregs->u.e.esr = get_guest_esr(vcpu);
749 sregs->u.e.dear = vcpu->arch.shared->dar; 1060 sregs->u.e.dear = get_guest_dear(vcpu);
750 sregs->u.e.tsr = vcpu->arch.tsr; 1061 sregs->u.e.tsr = vcpu->arch.tsr;
751 sregs->u.e.tcr = vcpu->arch.tcr; 1062 sregs->u.e.tcr = vcpu->arch.tcr;
752 sregs->u.e.dec = kvmppc_get_dec(vcpu, tb); 1063 sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
@@ -763,8 +1074,8 @@ static int set_sregs_base(struct kvm_vcpu *vcpu,
763 vcpu->arch.csrr0 = sregs->u.e.csrr0; 1074 vcpu->arch.csrr0 = sregs->u.e.csrr0;
764 vcpu->arch.csrr1 = sregs->u.e.csrr1; 1075 vcpu->arch.csrr1 = sregs->u.e.csrr1;
765 vcpu->arch.mcsr = sregs->u.e.mcsr; 1076 vcpu->arch.mcsr = sregs->u.e.mcsr;
766 vcpu->arch.shared->esr = sregs->u.e.esr; 1077 set_guest_esr(vcpu, sregs->u.e.esr);
767 vcpu->arch.shared->dar = sregs->u.e.dear; 1078 set_guest_dear(vcpu, sregs->u.e.dear);
768 vcpu->arch.vrsave = sregs->u.e.vrsave; 1079 vcpu->arch.vrsave = sregs->u.e.vrsave;
769 kvmppc_set_tcr(vcpu, sregs->u.e.tcr); 1080 kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
770 1081
@@ -932,15 +1243,6 @@ void kvmppc_core_commit_memory_region(struct kvm *kvm,
932{ 1243{
933} 1244}
934 1245
935int kvmppc_core_init_vm(struct kvm *kvm)
936{
937 return 0;
938}
939
940void kvmppc_core_destroy_vm(struct kvm *kvm)
941{
942}
943
944void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr) 1246void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
945{ 1247{
946 vcpu->arch.tcr = new_tcr; 1248 vcpu->arch.tcr = new_tcr;
@@ -968,8 +1270,19 @@ void kvmppc_decrementer_func(unsigned long data)
968 kvmppc_set_tsr_bits(vcpu, TSR_DIS); 1270 kvmppc_set_tsr_bits(vcpu, TSR_DIS);
969} 1271}
970 1272
1273void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1274{
1275 current->thread.kvm_vcpu = vcpu;
1276}
1277
1278void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
1279{
1280 current->thread.kvm_vcpu = NULL;
1281}
1282
971int __init kvmppc_booke_init(void) 1283int __init kvmppc_booke_init(void)
972{ 1284{
1285#ifndef CONFIG_KVM_BOOKE_HV
973 unsigned long ivor[16]; 1286 unsigned long ivor[16];
974 unsigned long max_ivor = 0; 1287 unsigned long max_ivor = 0;
975 int i; 1288 int i;
@@ -1012,7 +1325,7 @@ int __init kvmppc_booke_init(void)
1012 } 1325 }
1013 flush_icache_range(kvmppc_booke_handlers, 1326 flush_icache_range(kvmppc_booke_handlers,
1014 kvmppc_booke_handlers + max_ivor + kvmppc_handler_len); 1327 kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
1015 1328#endif /* !BOOKE_HV */
1016 return 0; 1329 return 0;
1017} 1330}
1018 1331
diff --git a/arch/powerpc/kvm/booke.h b/arch/powerpc/kvm/booke.h
index 2fe202705a3f..ba61974c1e20 100644
--- a/arch/powerpc/kvm/booke.h
+++ b/arch/powerpc/kvm/booke.h
@@ -23,6 +23,7 @@
23#include <linux/types.h> 23#include <linux/types.h>
24#include <linux/kvm_host.h> 24#include <linux/kvm_host.h>
25#include <asm/kvm_ppc.h> 25#include <asm/kvm_ppc.h>
26#include <asm/switch_to.h>
26#include "timing.h" 27#include "timing.h"
27 28
28/* interrupt priortity ordering */ 29/* interrupt priortity ordering */
@@ -48,7 +49,20 @@
48#define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19 49#define BOOKE_IRQPRIO_PERFORMANCE_MONITOR 19
49/* Internal pseudo-irqprio for level triggered externals */ 50/* Internal pseudo-irqprio for level triggered externals */
50#define BOOKE_IRQPRIO_EXTERNAL_LEVEL 20 51#define BOOKE_IRQPRIO_EXTERNAL_LEVEL 20
51#define BOOKE_IRQPRIO_MAX 20 52#define BOOKE_IRQPRIO_DBELL 21
53#define BOOKE_IRQPRIO_DBELL_CRIT 22
54#define BOOKE_IRQPRIO_MAX 23
55
56#define BOOKE_IRQMASK_EE ((1 << BOOKE_IRQPRIO_EXTERNAL_LEVEL) | \
57 (1 << BOOKE_IRQPRIO_PERFORMANCE_MONITOR) | \
58 (1 << BOOKE_IRQPRIO_DBELL) | \
59 (1 << BOOKE_IRQPRIO_DECREMENTER) | \
60 (1 << BOOKE_IRQPRIO_FIT) | \
61 (1 << BOOKE_IRQPRIO_EXTERNAL))
62
63#define BOOKE_IRQMASK_CE ((1 << BOOKE_IRQPRIO_DBELL_CRIT) | \
64 (1 << BOOKE_IRQPRIO_WATCHDOG) | \
65 (1 << BOOKE_IRQPRIO_CRITICAL))
52 66
53extern unsigned long kvmppc_booke_handlers; 67extern unsigned long kvmppc_booke_handlers;
54 68
@@ -61,8 +75,8 @@ void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits);
61 75
62int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, 76int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
63 unsigned int inst, int *advance); 77 unsigned int inst, int *advance);
64int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt); 78int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
65int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs); 79int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
66 80
67/* low-level asm code to transfer guest state */ 81/* low-level asm code to transfer guest state */
68void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu); 82void kvmppc_load_guest_spe(struct kvm_vcpu *vcpu);
@@ -71,4 +85,46 @@ void kvmppc_save_guest_spe(struct kvm_vcpu *vcpu);
71/* high-level function, manages flags, host state */ 85/* high-level function, manages flags, host state */
72void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu); 86void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu);
73 87
88void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
89void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu);
90
91enum int_class {
92 INT_CLASS_NONCRIT,
93 INT_CLASS_CRIT,
94 INT_CLASS_MC,
95 INT_CLASS_DBG,
96};
97
98void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type);
99
100/*
101 * Load up guest vcpu FP state if it's needed.
102 * It also set the MSR_FP in thread so that host know
103 * we're holding FPU, and then host can help to save
104 * guest vcpu FP state if other threads require to use FPU.
105 * This simulates an FP unavailable fault.
106 *
107 * It requires to be called with preemption disabled.
108 */
109static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
110{
111#ifdef CONFIG_PPC_FPU
112 if (vcpu->fpu_active && !(current->thread.regs->msr & MSR_FP)) {
113 load_up_fpu();
114 current->thread.regs->msr |= MSR_FP;
115 }
116#endif
117}
118
119/*
120 * Save guest vcpu FP state into thread.
121 * It requires to be called with preemption disabled.
122 */
123static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
124{
125#ifdef CONFIG_PPC_FPU
126 if (vcpu->fpu_active && (current->thread.regs->msr & MSR_FP))
127 giveup_fpu(current);
128#endif
129}
74#endif /* __KVM_BOOKE_H__ */ 130#endif /* __KVM_BOOKE_H__ */
diff --git a/arch/powerpc/kvm/booke_emulate.c b/arch/powerpc/kvm/booke_emulate.c
index 3e652da36534..6c76397f2af4 100644
--- a/arch/powerpc/kvm/booke_emulate.c
+++ b/arch/powerpc/kvm/booke_emulate.c
@@ -40,8 +40,8 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
40 unsigned int inst, int *advance) 40 unsigned int inst, int *advance)
41{ 41{
42 int emulated = EMULATE_DONE; 42 int emulated = EMULATE_DONE;
43 int rs; 43 int rs = get_rs(inst);
44 int rt; 44 int rt = get_rt(inst);
45 45
46 switch (get_op(inst)) { 46 switch (get_op(inst)) {
47 case 19: 47 case 19:
@@ -62,19 +62,16 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
62 switch (get_xop(inst)) { 62 switch (get_xop(inst)) {
63 63
64 case OP_31_XOP_MFMSR: 64 case OP_31_XOP_MFMSR:
65 rt = get_rt(inst);
66 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr); 65 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
67 kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); 66 kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
68 break; 67 break;
69 68
70 case OP_31_XOP_MTMSR: 69 case OP_31_XOP_MTMSR:
71 rs = get_rs(inst);
72 kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS); 70 kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
73 kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs)); 71 kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
74 break; 72 break;
75 73
76 case OP_31_XOP_WRTEE: 74 case OP_31_XOP_WRTEE:
77 rs = get_rs(inst);
78 vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE) 75 vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
79 | (kvmppc_get_gpr(vcpu, rs) & MSR_EE); 76 | (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
80 kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); 77 kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
@@ -99,22 +96,32 @@ int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
99 return emulated; 96 return emulated;
100} 97}
101 98
102int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) 99/*
100 * NOTE: some of these registers are not emulated on BOOKE_HV (GS-mode).
101 * Their backing store is in real registers, and these functions
102 * will return the wrong result if called for them in another context
103 * (such as debugging).
104 */
105int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
103{ 106{
104 int emulated = EMULATE_DONE; 107 int emulated = EMULATE_DONE;
105 ulong spr_val = kvmppc_get_gpr(vcpu, rs);
106 108
107 switch (sprn) { 109 switch (sprn) {
108 case SPRN_DEAR: 110 case SPRN_DEAR:
109 vcpu->arch.shared->dar = spr_val; break; 111 vcpu->arch.shared->dar = spr_val;
112 break;
110 case SPRN_ESR: 113 case SPRN_ESR:
111 vcpu->arch.shared->esr = spr_val; break; 114 vcpu->arch.shared->esr = spr_val;
115 break;
112 case SPRN_DBCR0: 116 case SPRN_DBCR0:
113 vcpu->arch.dbcr0 = spr_val; break; 117 vcpu->arch.dbcr0 = spr_val;
118 break;
114 case SPRN_DBCR1: 119 case SPRN_DBCR1:
115 vcpu->arch.dbcr1 = spr_val; break; 120 vcpu->arch.dbcr1 = spr_val;
121 break;
116 case SPRN_DBSR: 122 case SPRN_DBSR:
117 vcpu->arch.dbsr &= ~spr_val; break; 123 vcpu->arch.dbsr &= ~spr_val;
124 break;
118 case SPRN_TSR: 125 case SPRN_TSR:
119 kvmppc_clr_tsr_bits(vcpu, spr_val); 126 kvmppc_clr_tsr_bits(vcpu, spr_val);
120 break; 127 break;
@@ -122,20 +129,29 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
122 kvmppc_set_tcr(vcpu, spr_val); 129 kvmppc_set_tcr(vcpu, spr_val);
123 break; 130 break;
124 131
125 /* Note: SPRG4-7 are user-readable. These values are 132 /*
126 * loaded into the real SPRGs when resuming the 133 * Note: SPRG4-7 are user-readable.
127 * guest. */ 134 * These values are loaded into the real SPRGs when resuming the
135 * guest (PR-mode only).
136 */
128 case SPRN_SPRG4: 137 case SPRN_SPRG4:
129 vcpu->arch.shared->sprg4 = spr_val; break; 138 vcpu->arch.shared->sprg4 = spr_val;
139 break;
130 case SPRN_SPRG5: 140 case SPRN_SPRG5:
131 vcpu->arch.shared->sprg5 = spr_val; break; 141 vcpu->arch.shared->sprg5 = spr_val;
142 break;
132 case SPRN_SPRG6: 143 case SPRN_SPRG6:
133 vcpu->arch.shared->sprg6 = spr_val; break; 144 vcpu->arch.shared->sprg6 = spr_val;
145 break;
134 case SPRN_SPRG7: 146 case SPRN_SPRG7:
135 vcpu->arch.shared->sprg7 = spr_val; break; 147 vcpu->arch.shared->sprg7 = spr_val;
148 break;
136 149
137 case SPRN_IVPR: 150 case SPRN_IVPR:
138 vcpu->arch.ivpr = spr_val; 151 vcpu->arch.ivpr = spr_val;
152#ifdef CONFIG_KVM_BOOKE_HV
153 mtspr(SPRN_GIVPR, spr_val);
154#endif
139 break; 155 break;
140 case SPRN_IVOR0: 156 case SPRN_IVOR0:
141 vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val; 157 vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val;
@@ -145,6 +161,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
145 break; 161 break;
146 case SPRN_IVOR2: 162 case SPRN_IVOR2:
147 vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val; 163 vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val;
164#ifdef CONFIG_KVM_BOOKE_HV
165 mtspr(SPRN_GIVOR2, spr_val);
166#endif
148 break; 167 break;
149 case SPRN_IVOR3: 168 case SPRN_IVOR3:
150 vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val; 169 vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val;
@@ -163,6 +182,9 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
163 break; 182 break;
164 case SPRN_IVOR8: 183 case SPRN_IVOR8:
165 vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val; 184 vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val;
185#ifdef CONFIG_KVM_BOOKE_HV
186 mtspr(SPRN_GIVOR8, spr_val);
187#endif
166 break; 188 break;
167 case SPRN_IVOR9: 189 case SPRN_IVOR9:
168 vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val; 190 vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val;
@@ -193,75 +215,83 @@ int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
193 return emulated; 215 return emulated;
194} 216}
195 217
196int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) 218int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
197{ 219{
198 int emulated = EMULATE_DONE; 220 int emulated = EMULATE_DONE;
199 221
200 switch (sprn) { 222 switch (sprn) {
201 case SPRN_IVPR: 223 case SPRN_IVPR:
202 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break; 224 *spr_val = vcpu->arch.ivpr;
225 break;
203 case SPRN_DEAR: 226 case SPRN_DEAR:
204 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break; 227 *spr_val = vcpu->arch.shared->dar;
228 break;
205 case SPRN_ESR: 229 case SPRN_ESR:
206 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->esr); break; 230 *spr_val = vcpu->arch.shared->esr;
231 break;
207 case SPRN_DBCR0: 232 case SPRN_DBCR0:
208 kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break; 233 *spr_val = vcpu->arch.dbcr0;
234 break;
209 case SPRN_DBCR1: 235 case SPRN_DBCR1:
210 kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break; 236 *spr_val = vcpu->arch.dbcr1;
237 break;
211 case SPRN_DBSR: 238 case SPRN_DBSR:
212 kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break; 239 *spr_val = vcpu->arch.dbsr;
240 break;
213 case SPRN_TSR: 241 case SPRN_TSR:
214 kvmppc_set_gpr(vcpu, rt, vcpu->arch.tsr); break; 242 *spr_val = vcpu->arch.tsr;
243 break;
215 case SPRN_TCR: 244 case SPRN_TCR:
216 kvmppc_set_gpr(vcpu, rt, vcpu->arch.tcr); break; 245 *spr_val = vcpu->arch.tcr;
246 break;
217 247
218 case SPRN_IVOR0: 248 case SPRN_IVOR0:
219 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]); 249 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
220 break; 250 break;
221 case SPRN_IVOR1: 251 case SPRN_IVOR1:
222 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]); 252 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
223 break; 253 break;
224 case SPRN_IVOR2: 254 case SPRN_IVOR2:
225 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]); 255 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
226 break; 256 break;
227 case SPRN_IVOR3: 257 case SPRN_IVOR3:
228 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]); 258 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
229 break; 259 break;
230 case SPRN_IVOR4: 260 case SPRN_IVOR4:
231 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]); 261 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
232 break; 262 break;
233 case SPRN_IVOR5: 263 case SPRN_IVOR5:
234 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]); 264 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
235 break; 265 break;
236 case SPRN_IVOR6: 266 case SPRN_IVOR6:
237 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]); 267 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
238 break; 268 break;
239 case SPRN_IVOR7: 269 case SPRN_IVOR7:
240 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]); 270 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
241 break; 271 break;
242 case SPRN_IVOR8: 272 case SPRN_IVOR8:
243 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]); 273 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
244 break; 274 break;
245 case SPRN_IVOR9: 275 case SPRN_IVOR9:
246 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]); 276 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
247 break; 277 break;
248 case SPRN_IVOR10: 278 case SPRN_IVOR10:
249 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]); 279 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
250 break; 280 break;
251 case SPRN_IVOR11: 281 case SPRN_IVOR11:
252 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]); 282 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
253 break; 283 break;
254 case SPRN_IVOR12: 284 case SPRN_IVOR12:
255 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]); 285 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
256 break; 286 break;
257 case SPRN_IVOR13: 287 case SPRN_IVOR13:
258 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]); 288 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
259 break; 289 break;
260 case SPRN_IVOR14: 290 case SPRN_IVOR14:
261 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]); 291 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
262 break; 292 break;
263 case SPRN_IVOR15: 293 case SPRN_IVOR15:
264 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]); 294 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
265 break; 295 break;
266 296
267 default: 297 default:
diff --git a/arch/powerpc/kvm/booke_interrupts.S b/arch/powerpc/kvm/booke_interrupts.S
index c8c4b878795a..8feec2ff3928 100644
--- a/arch/powerpc/kvm/booke_interrupts.S
+++ b/arch/powerpc/kvm/booke_interrupts.S
@@ -419,13 +419,13 @@ lightweight_exit:
419 * written directly to the shared area, so we 419 * written directly to the shared area, so we
420 * need to reload them here with the guest's values. 420 * need to reload them here with the guest's values.
421 */ 421 */
422 lwz r3, VCPU_SHARED_SPRG4(r5) 422 PPC_LD(r3, VCPU_SHARED_SPRG4, r5)
423 mtspr SPRN_SPRG4W, r3 423 mtspr SPRN_SPRG4W, r3
424 lwz r3, VCPU_SHARED_SPRG5(r5) 424 PPC_LD(r3, VCPU_SHARED_SPRG5, r5)
425 mtspr SPRN_SPRG5W, r3 425 mtspr SPRN_SPRG5W, r3
426 lwz r3, VCPU_SHARED_SPRG6(r5) 426 PPC_LD(r3, VCPU_SHARED_SPRG6, r5)
427 mtspr SPRN_SPRG6W, r3 427 mtspr SPRN_SPRG6W, r3
428 lwz r3, VCPU_SHARED_SPRG7(r5) 428 PPC_LD(r3, VCPU_SHARED_SPRG7, r5)
429 mtspr SPRN_SPRG7W, r3 429 mtspr SPRN_SPRG7W, r3
430 430
431#ifdef CONFIG_KVM_EXIT_TIMING 431#ifdef CONFIG_KVM_EXIT_TIMING
diff --git a/arch/powerpc/kvm/bookehv_interrupts.S b/arch/powerpc/kvm/bookehv_interrupts.S
new file mode 100644
index 000000000000..6048a00515d7
--- /dev/null
+++ b/arch/powerpc/kvm/bookehv_interrupts.S
@@ -0,0 +1,597 @@
1/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * You should have received a copy of the GNU General Public License
12 * along with this program; if not, write to the Free Software
13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
14 *
15 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
16 *
17 * Author: Varun Sethi <varun.sethi@freescale.com>
18 * Author: Scott Wood <scotwood@freescale.com>
19 *
20 * This file is derived from arch/powerpc/kvm/booke_interrupts.S
21 */
22
23#include <asm/ppc_asm.h>
24#include <asm/kvm_asm.h>
25#include <asm/reg.h>
26#include <asm/mmu-44x.h>
27#include <asm/page.h>
28#include <asm/asm-compat.h>
29#include <asm/asm-offsets.h>
30#include <asm/bitsperlong.h>
31#include <asm/thread_info.h>
32
33#include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */
34
35#define GET_VCPU(vcpu, thread) \
36 PPC_LL vcpu, THREAD_KVM_VCPU(thread)
37
38#define LONGBYTES (BITS_PER_LONG / 8)
39
40#define VCPU_GPR(n) (VCPU_GPRS + (n * LONGBYTES))
41#define VCPU_GUEST_SPRG(n) (VCPU_GUEST_SPRGS + (n * LONGBYTES))
42
43/* The host stack layout: */
44#define HOST_R1 (0 * LONGBYTES) /* Implied by stwu. */
45#define HOST_CALLEE_LR (1 * LONGBYTES)
46#define HOST_RUN (2 * LONGBYTES) /* struct kvm_run */
47/*
48 * r2 is special: it holds 'current', and it made nonvolatile in the
49 * kernel with the -ffixed-r2 gcc option.
50 */
51#define HOST_R2 (3 * LONGBYTES)
52#define HOST_CR (4 * LONGBYTES)
53#define HOST_NV_GPRS (5 * LONGBYTES)
54#define HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * LONGBYTES))
55#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(31) + LONGBYTES)
56#define HOST_STACK_SIZE ((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */
57#define HOST_STACK_LR (HOST_STACK_SIZE + LONGBYTES) /* In caller stack frame. */
58
59#define NEED_EMU 0x00000001 /* emulation -- save nv regs */
60#define NEED_DEAR 0x00000002 /* save faulting DEAR */
61#define NEED_ESR 0x00000004 /* save faulting ESR */
62
63/*
64 * On entry:
65 * r4 = vcpu, r5 = srr0, r6 = srr1
66 * saved in vcpu: cr, ctr, r3-r13
67 */
68.macro kvm_handler_common intno, srr0, flags
69 /* Restore host stack pointer */
70 PPC_STL r1, VCPU_GPR(r1)(r4)
71 PPC_STL r2, VCPU_GPR(r2)(r4)
72 PPC_LL r1, VCPU_HOST_STACK(r4)
73 PPC_LL r2, HOST_R2(r1)
74
75 mfspr r10, SPRN_PID
76 lwz r8, VCPU_HOST_PID(r4)
77 PPC_LL r11, VCPU_SHARED(r4)
78 PPC_STL r14, VCPU_GPR(r14)(r4) /* We need a non-volatile GPR. */
79 li r14, \intno
80
81 stw r10, VCPU_GUEST_PID(r4)
82 mtspr SPRN_PID, r8
83
84#ifdef CONFIG_KVM_EXIT_TIMING
85 /* save exit time */
861: mfspr r7, SPRN_TBRU
87 mfspr r8, SPRN_TBRL
88 mfspr r9, SPRN_TBRU
89 cmpw r9, r7
90 stw r8, VCPU_TIMING_EXIT_TBL(r4)
91 bne- 1b
92 stw r9, VCPU_TIMING_EXIT_TBU(r4)
93#endif
94
95 oris r8, r6, MSR_CE@h
96 PPC_STD(r6, VCPU_SHARED_MSR, r11)
97 ori r8, r8, MSR_ME | MSR_RI
98 PPC_STL r5, VCPU_PC(r4)
99
100 /*
101 * Make sure CE/ME/RI are set (if appropriate for exception type)
102 * whether or not the guest had it set. Since mfmsr/mtmsr are
103 * somewhat expensive, skip in the common case where the guest
104 * had all these bits set (and thus they're still set if
105 * appropriate for the exception type).
106 */
107 cmpw r6, r8
108 beq 1f
109 mfmsr r7
110 .if \srr0 != SPRN_MCSRR0 && \srr0 != SPRN_CSRR0
111 oris r7, r7, MSR_CE@h
112 .endif
113 .if \srr0 != SPRN_MCSRR0
114 ori r7, r7, MSR_ME | MSR_RI
115 .endif
116 mtmsr r7
1171:
118
119 .if \flags & NEED_EMU
120 /*
121 * This assumes you have external PID support.
122 * To support a bookehv CPU without external PID, you'll
123 * need to look up the TLB entry and create a temporary mapping.
124 *
125 * FIXME: we don't currently handle if the lwepx faults. PR-mode
126 * booke doesn't handle it either. Since Linux doesn't use
127 * broadcast tlbivax anymore, the only way this should happen is
128 * if the guest maps its memory execute-but-not-read, or if we
129 * somehow take a TLB miss in the middle of this entry code and
130 * evict the relevant entry. On e500mc, all kernel lowmem is
131 * bolted into TLB1 large page mappings, and we don't use
132 * broadcast invalidates, so we should not take a TLB miss here.
133 *
134 * Later we'll need to deal with faults here. Disallowing guest
135 * mappings that are execute-but-not-read could be an option on
136 * e500mc, but not on chips with an LRAT if it is used.
137 */
138
139 mfspr r3, SPRN_EPLC /* will already have correct ELPID and EGS */
140 PPC_STL r15, VCPU_GPR(r15)(r4)
141 PPC_STL r16, VCPU_GPR(r16)(r4)
142 PPC_STL r17, VCPU_GPR(r17)(r4)
143 PPC_STL r18, VCPU_GPR(r18)(r4)
144 PPC_STL r19, VCPU_GPR(r19)(r4)
145 mr r8, r3
146 PPC_STL r20, VCPU_GPR(r20)(r4)
147 rlwimi r8, r6, EPC_EAS_SHIFT - MSR_IR_LG, EPC_EAS
148 PPC_STL r21, VCPU_GPR(r21)(r4)
149 rlwimi r8, r6, EPC_EPR_SHIFT - MSR_PR_LG, EPC_EPR
150 PPC_STL r22, VCPU_GPR(r22)(r4)
151 rlwimi r8, r10, EPC_EPID_SHIFT, EPC_EPID
152 PPC_STL r23, VCPU_GPR(r23)(r4)
153 PPC_STL r24, VCPU_GPR(r24)(r4)
154 PPC_STL r25, VCPU_GPR(r25)(r4)
155 PPC_STL r26, VCPU_GPR(r26)(r4)
156 PPC_STL r27, VCPU_GPR(r27)(r4)
157 PPC_STL r28, VCPU_GPR(r28)(r4)
158 PPC_STL r29, VCPU_GPR(r29)(r4)
159 PPC_STL r30, VCPU_GPR(r30)(r4)
160 PPC_STL r31, VCPU_GPR(r31)(r4)
161 mtspr SPRN_EPLC, r8
162
163 /* disable preemption, so we are sure we hit the fixup handler */
164#ifdef CONFIG_PPC64
165 clrrdi r8,r1,THREAD_SHIFT
166#else
167 rlwinm r8,r1,0,0,31-THREAD_SHIFT /* current thread_info */
168#endif
169 li r7, 1
170 stw r7, TI_PREEMPT(r8)
171
172 isync
173
174 /*
175 * In case the read goes wrong, we catch it and write an invalid value
176 * in LAST_INST instead.
177 */
1781: lwepx r9, 0, r5
1792:
180.section .fixup, "ax"
1813: li r9, KVM_INST_FETCH_FAILED
182 b 2b
183.previous
184.section __ex_table,"a"
185 PPC_LONG_ALIGN
186 PPC_LONG 1b,3b
187.previous
188
189 mtspr SPRN_EPLC, r3
190 li r7, 0
191 stw r7, TI_PREEMPT(r8)
192 stw r9, VCPU_LAST_INST(r4)
193 .endif
194
195 .if \flags & NEED_ESR
196 mfspr r8, SPRN_ESR
197 PPC_STL r8, VCPU_FAULT_ESR(r4)
198 .endif
199
200 .if \flags & NEED_DEAR
201 mfspr r9, SPRN_DEAR
202 PPC_STL r9, VCPU_FAULT_DEAR(r4)
203 .endif
204
205 b kvmppc_resume_host
206.endm
207
208/*
209 * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
210 */
211.macro kvm_handler intno srr0, srr1, flags
212_GLOBAL(kvmppc_handler_\intno\()_\srr1)
213 GET_VCPU(r11, r10)
214 PPC_STL r3, VCPU_GPR(r3)(r11)
215 mfspr r3, SPRN_SPRG_RSCRATCH0
216 PPC_STL r4, VCPU_GPR(r4)(r11)
217 PPC_LL r4, THREAD_NORMSAVE(0)(r10)
218 PPC_STL r5, VCPU_GPR(r5)(r11)
219 stw r13, VCPU_CR(r11)
220 mfspr r5, \srr0
221 PPC_STL r3, VCPU_GPR(r10)(r11)
222 PPC_LL r3, THREAD_NORMSAVE(2)(r10)
223 PPC_STL r6, VCPU_GPR(r6)(r11)
224 PPC_STL r4, VCPU_GPR(r11)(r11)
225 mfspr r6, \srr1
226 PPC_STL r7, VCPU_GPR(r7)(r11)
227 PPC_STL r8, VCPU_GPR(r8)(r11)
228 PPC_STL r9, VCPU_GPR(r9)(r11)
229 PPC_STL r3, VCPU_GPR(r13)(r11)
230 mfctr r7
231 PPC_STL r12, VCPU_GPR(r12)(r11)
232 PPC_STL r7, VCPU_CTR(r11)
233 mr r4, r11
234 kvm_handler_common \intno, \srr0, \flags
235.endm
236
237.macro kvm_lvl_handler intno scratch srr0, srr1, flags
238_GLOBAL(kvmppc_handler_\intno\()_\srr1)
239 mfspr r10, SPRN_SPRG_THREAD
240 GET_VCPU(r11, r10)
241 PPC_STL r3, VCPU_GPR(r3)(r11)
242 mfspr r3, \scratch
243 PPC_STL r4, VCPU_GPR(r4)(r11)
244 PPC_LL r4, GPR9(r8)
245 PPC_STL r5, VCPU_GPR(r5)(r11)
246 stw r9, VCPU_CR(r11)
247 mfspr r5, \srr0
248 PPC_STL r3, VCPU_GPR(r8)(r11)
249 PPC_LL r3, GPR10(r8)
250 PPC_STL r6, VCPU_GPR(r6)(r11)
251 PPC_STL r4, VCPU_GPR(r9)(r11)
252 mfspr r6, \srr1
253 PPC_LL r4, GPR11(r8)
254 PPC_STL r7, VCPU_GPR(r7)(r11)
255 PPC_STL r3, VCPU_GPR(r10)(r11)
256 mfctr r7
257 PPC_STL r12, VCPU_GPR(r12)(r11)
258 PPC_STL r13, VCPU_GPR(r13)(r11)
259 PPC_STL r4, VCPU_GPR(r11)(r11)
260 PPC_STL r7, VCPU_CTR(r11)
261 mr r4, r11
262 kvm_handler_common \intno, \srr0, \flags
263.endm
264
265kvm_lvl_handler BOOKE_INTERRUPT_CRITICAL, \
266 SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
267kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \
268 SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0
269kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \
270 SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR)
271kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR
272kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0
273kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \
274 SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR)
275kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, SPRN_SRR1, NEED_ESR
276kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
277kvm_handler BOOKE_INTERRUPT_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
278kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
279kvm_handler BOOKE_INTERRUPT_DECREMENTER, SPRN_SRR0, SPRN_SRR1, 0
280kvm_handler BOOKE_INTERRUPT_FIT, SPRN_SRR0, SPRN_SRR1, 0
281kvm_lvl_handler BOOKE_INTERRUPT_WATCHDOG, \
282 SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
283kvm_handler BOOKE_INTERRUPT_DTLB_MISS, \
284 SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
285kvm_handler BOOKE_INTERRUPT_ITLB_MISS, SPRN_SRR0, SPRN_SRR1, 0
286kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
287kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, SPRN_SRR0, SPRN_SRR1, 0
288kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, SPRN_SRR0, SPRN_SRR1, 0
289kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, SPRN_SRR0, SPRN_SRR1, 0
290kvm_handler BOOKE_INTERRUPT_DOORBELL, SPRN_SRR0, SPRN_SRR1, 0
291kvm_lvl_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, \
292 SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
293kvm_handler BOOKE_INTERRUPT_HV_PRIV, SPRN_SRR0, SPRN_SRR1, NEED_EMU
294kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
295kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, SPRN_GSRR0, SPRN_GSRR1, 0
296kvm_lvl_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, \
297 SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
298kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
299 SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
300kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
301 SPRN_SPRG_RSCRATCH_DBG, SPRN_DSRR0, SPRN_DSRR1, 0
302
303
304/* Registers:
305 * SPRG_SCRATCH0: guest r10
306 * r4: vcpu pointer
307 * r11: vcpu->arch.shared
308 * r14: KVM exit number
309 */
310_GLOBAL(kvmppc_resume_host)
311 /* Save remaining volatile guest register state to vcpu. */
312 mfspr r3, SPRN_VRSAVE
313 PPC_STL r0, VCPU_GPR(r0)(r4)
314 mflr r5
315 mfspr r6, SPRN_SPRG4
316 PPC_STL r5, VCPU_LR(r4)
317 mfspr r7, SPRN_SPRG5
318 stw r3, VCPU_VRSAVE(r4)
319 PPC_STD(r6, VCPU_SHARED_SPRG4, r11)
320 mfspr r8, SPRN_SPRG6
321 PPC_STD(r7, VCPU_SHARED_SPRG5, r11)
322 mfspr r9, SPRN_SPRG7
323 PPC_STD(r8, VCPU_SHARED_SPRG6, r11)
324 mfxer r3
325 PPC_STD(r9, VCPU_SHARED_SPRG7, r11)
326
327 /* save guest MAS registers and restore host mas4 & mas6 */
328 mfspr r5, SPRN_MAS0
329 PPC_STL r3, VCPU_XER(r4)
330 mfspr r6, SPRN_MAS1
331 stw r5, VCPU_SHARED_MAS0(r11)
332 mfspr r7, SPRN_MAS2
333 stw r6, VCPU_SHARED_MAS1(r11)
334 PPC_STD(r7, VCPU_SHARED_MAS2, r11)
335 mfspr r5, SPRN_MAS3
336 mfspr r6, SPRN_MAS4
337 stw r5, VCPU_SHARED_MAS7_3+4(r11)
338 mfspr r7, SPRN_MAS6
339 stw r6, VCPU_SHARED_MAS4(r11)
340 mfspr r5, SPRN_MAS7
341 lwz r6, VCPU_HOST_MAS4(r4)
342 stw r7, VCPU_SHARED_MAS6(r11)
343 lwz r8, VCPU_HOST_MAS6(r4)
344 mtspr SPRN_MAS4, r6
345 stw r5, VCPU_SHARED_MAS7_3+0(r11)
346 mtspr SPRN_MAS6, r8
347 /* Enable MAS register updates via exception */
348 mfspr r3, SPRN_EPCR
349 rlwinm r3, r3, 0, ~SPRN_EPCR_DMIUH
350 mtspr SPRN_EPCR, r3
351 isync
352
353 /* Switch to kernel stack and jump to handler. */
354 PPC_LL r3, HOST_RUN(r1)
355 mr r5, r14 /* intno */
356 mr r14, r4 /* Save vcpu pointer. */
357 bl kvmppc_handle_exit
358
359 /* Restore vcpu pointer and the nonvolatiles we used. */
360 mr r4, r14
361 PPC_LL r14, VCPU_GPR(r14)(r4)
362
363 andi. r5, r3, RESUME_FLAG_NV
364 beq skip_nv_load
365 PPC_LL r15, VCPU_GPR(r15)(r4)
366 PPC_LL r16, VCPU_GPR(r16)(r4)
367 PPC_LL r17, VCPU_GPR(r17)(r4)
368 PPC_LL r18, VCPU_GPR(r18)(r4)
369 PPC_LL r19, VCPU_GPR(r19)(r4)
370 PPC_LL r20, VCPU_GPR(r20)(r4)
371 PPC_LL r21, VCPU_GPR(r21)(r4)
372 PPC_LL r22, VCPU_GPR(r22)(r4)
373 PPC_LL r23, VCPU_GPR(r23)(r4)
374 PPC_LL r24, VCPU_GPR(r24)(r4)
375 PPC_LL r25, VCPU_GPR(r25)(r4)
376 PPC_LL r26, VCPU_GPR(r26)(r4)
377 PPC_LL r27, VCPU_GPR(r27)(r4)
378 PPC_LL r28, VCPU_GPR(r28)(r4)
379 PPC_LL r29, VCPU_GPR(r29)(r4)
380 PPC_LL r30, VCPU_GPR(r30)(r4)
381 PPC_LL r31, VCPU_GPR(r31)(r4)
382skip_nv_load:
383 /* Should we return to the guest? */
384 andi. r5, r3, RESUME_FLAG_HOST
385 beq lightweight_exit
386
387 srawi r3, r3, 2 /* Shift -ERR back down. */
388
389heavyweight_exit:
390 /* Not returning to guest. */
391 PPC_LL r5, HOST_STACK_LR(r1)
392 lwz r6, HOST_CR(r1)
393
394 /*
395 * We already saved guest volatile register state; now save the
396 * non-volatiles.
397 */
398
399 PPC_STL r15, VCPU_GPR(r15)(r4)
400 PPC_STL r16, VCPU_GPR(r16)(r4)
401 PPC_STL r17, VCPU_GPR(r17)(r4)
402 PPC_STL r18, VCPU_GPR(r18)(r4)
403 PPC_STL r19, VCPU_GPR(r19)(r4)
404 PPC_STL r20, VCPU_GPR(r20)(r4)
405 PPC_STL r21, VCPU_GPR(r21)(r4)
406 PPC_STL r22, VCPU_GPR(r22)(r4)
407 PPC_STL r23, VCPU_GPR(r23)(r4)
408 PPC_STL r24, VCPU_GPR(r24)(r4)
409 PPC_STL r25, VCPU_GPR(r25)(r4)
410 PPC_STL r26, VCPU_GPR(r26)(r4)
411 PPC_STL r27, VCPU_GPR(r27)(r4)
412 PPC_STL r28, VCPU_GPR(r28)(r4)
413 PPC_STL r29, VCPU_GPR(r29)(r4)
414 PPC_STL r30, VCPU_GPR(r30)(r4)
415 PPC_STL r31, VCPU_GPR(r31)(r4)
416
417 /* Load host non-volatile register state from host stack. */
418 PPC_LL r14, HOST_NV_GPR(r14)(r1)
419 PPC_LL r15, HOST_NV_GPR(r15)(r1)
420 PPC_LL r16, HOST_NV_GPR(r16)(r1)
421 PPC_LL r17, HOST_NV_GPR(r17)(r1)
422 PPC_LL r18, HOST_NV_GPR(r18)(r1)
423 PPC_LL r19, HOST_NV_GPR(r19)(r1)
424 PPC_LL r20, HOST_NV_GPR(r20)(r1)
425 PPC_LL r21, HOST_NV_GPR(r21)(r1)
426 PPC_LL r22, HOST_NV_GPR(r22)(r1)
427 PPC_LL r23, HOST_NV_GPR(r23)(r1)
428 PPC_LL r24, HOST_NV_GPR(r24)(r1)
429 PPC_LL r25, HOST_NV_GPR(r25)(r1)
430 PPC_LL r26, HOST_NV_GPR(r26)(r1)
431 PPC_LL r27, HOST_NV_GPR(r27)(r1)
432 PPC_LL r28, HOST_NV_GPR(r28)(r1)
433 PPC_LL r29, HOST_NV_GPR(r29)(r1)
434 PPC_LL r30, HOST_NV_GPR(r30)(r1)
435 PPC_LL r31, HOST_NV_GPR(r31)(r1)
436
437 /* Return to kvm_vcpu_run(). */
438 mtlr r5
439 mtcr r6
440 addi r1, r1, HOST_STACK_SIZE
441 /* r3 still contains the return code from kvmppc_handle_exit(). */
442 blr
443
444/* Registers:
445 * r3: kvm_run pointer
446 * r4: vcpu pointer
447 */
448_GLOBAL(__kvmppc_vcpu_run)
449 stwu r1, -HOST_STACK_SIZE(r1)
450 PPC_STL r1, VCPU_HOST_STACK(r4) /* Save stack pointer to vcpu. */
451
452 /* Save host state to stack. */
453 PPC_STL r3, HOST_RUN(r1)
454 mflr r3
455 mfcr r5
456 PPC_STL r3, HOST_STACK_LR(r1)
457
458 stw r5, HOST_CR(r1)
459
460 /* Save host non-volatile register state to stack. */
461 PPC_STL r14, HOST_NV_GPR(r14)(r1)
462 PPC_STL r15, HOST_NV_GPR(r15)(r1)
463 PPC_STL r16, HOST_NV_GPR(r16)(r1)
464 PPC_STL r17, HOST_NV_GPR(r17)(r1)
465 PPC_STL r18, HOST_NV_GPR(r18)(r1)
466 PPC_STL r19, HOST_NV_GPR(r19)(r1)
467 PPC_STL r20, HOST_NV_GPR(r20)(r1)
468 PPC_STL r21, HOST_NV_GPR(r21)(r1)
469 PPC_STL r22, HOST_NV_GPR(r22)(r1)
470 PPC_STL r23, HOST_NV_GPR(r23)(r1)
471 PPC_STL r24, HOST_NV_GPR(r24)(r1)
472 PPC_STL r25, HOST_NV_GPR(r25)(r1)
473 PPC_STL r26, HOST_NV_GPR(r26)(r1)
474 PPC_STL r27, HOST_NV_GPR(r27)(r1)
475 PPC_STL r28, HOST_NV_GPR(r28)(r1)
476 PPC_STL r29, HOST_NV_GPR(r29)(r1)
477 PPC_STL r30, HOST_NV_GPR(r30)(r1)
478 PPC_STL r31, HOST_NV_GPR(r31)(r1)
479
480 /* Load guest non-volatiles. */
481 PPC_LL r14, VCPU_GPR(r14)(r4)
482 PPC_LL r15, VCPU_GPR(r15)(r4)
483 PPC_LL r16, VCPU_GPR(r16)(r4)
484 PPC_LL r17, VCPU_GPR(r17)(r4)
485 PPC_LL r18, VCPU_GPR(r18)(r4)
486 PPC_LL r19, VCPU_GPR(r19)(r4)
487 PPC_LL r20, VCPU_GPR(r20)(r4)
488 PPC_LL r21, VCPU_GPR(r21)(r4)
489 PPC_LL r22, VCPU_GPR(r22)(r4)
490 PPC_LL r23, VCPU_GPR(r23)(r4)
491 PPC_LL r24, VCPU_GPR(r24)(r4)
492 PPC_LL r25, VCPU_GPR(r25)(r4)
493 PPC_LL r26, VCPU_GPR(r26)(r4)
494 PPC_LL r27, VCPU_GPR(r27)(r4)
495 PPC_LL r28, VCPU_GPR(r28)(r4)
496 PPC_LL r29, VCPU_GPR(r29)(r4)
497 PPC_LL r30, VCPU_GPR(r30)(r4)
498 PPC_LL r31, VCPU_GPR(r31)(r4)
499
500
501lightweight_exit:
502 PPC_STL r2, HOST_R2(r1)
503
504 mfspr r3, SPRN_PID
505 stw r3, VCPU_HOST_PID(r4)
506 lwz r3, VCPU_GUEST_PID(r4)
507 mtspr SPRN_PID, r3
508
509 PPC_LL r11, VCPU_SHARED(r4)
510 /* Disable MAS register updates via exception */
511 mfspr r3, SPRN_EPCR
512 oris r3, r3, SPRN_EPCR_DMIUH@h
513 mtspr SPRN_EPCR, r3
514 isync
515 /* Save host mas4 and mas6 and load guest MAS registers */
516 mfspr r3, SPRN_MAS4
517 stw r3, VCPU_HOST_MAS4(r4)
518 mfspr r3, SPRN_MAS6
519 stw r3, VCPU_HOST_MAS6(r4)
520 lwz r3, VCPU_SHARED_MAS0(r11)
521 lwz r5, VCPU_SHARED_MAS1(r11)
522 PPC_LD(r6, VCPU_SHARED_MAS2, r11)
523 lwz r7, VCPU_SHARED_MAS7_3+4(r11)
524 lwz r8, VCPU_SHARED_MAS4(r11)
525 mtspr SPRN_MAS0, r3
526 mtspr SPRN_MAS1, r5
527 mtspr SPRN_MAS2, r6
528 mtspr SPRN_MAS3, r7
529 mtspr SPRN_MAS4, r8
530 lwz r3, VCPU_SHARED_MAS6(r11)
531 lwz r5, VCPU_SHARED_MAS7_3+0(r11)
532 mtspr SPRN_MAS6, r3
533 mtspr SPRN_MAS7, r5
534
535 /*
536 * Host interrupt handlers may have clobbered these guest-readable
537 * SPRGs, so we need to reload them here with the guest's values.
538 */
539 lwz r3, VCPU_VRSAVE(r4)
540 PPC_LD(r5, VCPU_SHARED_SPRG4, r11)
541 mtspr SPRN_VRSAVE, r3
542 PPC_LD(r6, VCPU_SHARED_SPRG5, r11)
543 mtspr SPRN_SPRG4W, r5
544 PPC_LD(r7, VCPU_SHARED_SPRG6, r11)
545 mtspr SPRN_SPRG5W, r6
546 PPC_LD(r8, VCPU_SHARED_SPRG7, r11)
547 mtspr SPRN_SPRG6W, r7
548 mtspr SPRN_SPRG7W, r8
549
550 /* Load some guest volatiles. */
551 PPC_LL r3, VCPU_LR(r4)
552 PPC_LL r5, VCPU_XER(r4)
553 PPC_LL r6, VCPU_CTR(r4)
554 lwz r7, VCPU_CR(r4)
555 PPC_LL r8, VCPU_PC(r4)
556 PPC_LD(r9, VCPU_SHARED_MSR, r11)
557 PPC_LL r0, VCPU_GPR(r0)(r4)
558 PPC_LL r1, VCPU_GPR(r1)(r4)
559 PPC_LL r2, VCPU_GPR(r2)(r4)
560 PPC_LL r10, VCPU_GPR(r10)(r4)
561 PPC_LL r11, VCPU_GPR(r11)(r4)
562 PPC_LL r12, VCPU_GPR(r12)(r4)
563 PPC_LL r13, VCPU_GPR(r13)(r4)
564 mtlr r3
565 mtxer r5
566 mtctr r6
567 mtsrr0 r8
568 mtsrr1 r9
569
570#ifdef CONFIG_KVM_EXIT_TIMING
571 /* save enter time */
5721:
573 mfspr r6, SPRN_TBRU
574 mfspr r9, SPRN_TBRL
575 mfspr r8, SPRN_TBRU
576 cmpw r8, r6
577 stw r9, VCPU_TIMING_LAST_ENTER_TBL(r4)
578 bne 1b
579 stw r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
580#endif
581
582 /*
583 * Don't execute any instruction which can change CR after
584 * below instruction.
585 */
586 mtcr r7
587
588 /* Finish loading guest volatiles and jump to guest. */
589 PPC_LL r5, VCPU_GPR(r5)(r4)
590 PPC_LL r6, VCPU_GPR(r6)(r4)
591 PPC_LL r7, VCPU_GPR(r7)(r4)
592 PPC_LL r8, VCPU_GPR(r8)(r4)
593 PPC_LL r9, VCPU_GPR(r9)(r4)
594
595 PPC_LL r3, VCPU_GPR(r3)(r4)
596 PPC_LL r4, VCPU_GPR(r4)(r4)
597 rfi
diff --git a/arch/powerpc/kvm/e500.c b/arch/powerpc/kvm/e500.c
index ddcd896fa2ff..b479ed77c515 100644
--- a/arch/powerpc/kvm/e500.c
+++ b/arch/powerpc/kvm/e500.c
@@ -20,11 +20,282 @@
20#include <asm/reg.h> 20#include <asm/reg.h>
21#include <asm/cputable.h> 21#include <asm/cputable.h>
22#include <asm/tlbflush.h> 22#include <asm/tlbflush.h>
23#include <asm/kvm_e500.h>
24#include <asm/kvm_ppc.h> 23#include <asm/kvm_ppc.h>
25 24
25#include "../mm/mmu_decl.h"
26#include "booke.h" 26#include "booke.h"
27#include "e500_tlb.h" 27#include "e500.h"
28
29struct id {
30 unsigned long val;
31 struct id **pentry;
32};
33
34#define NUM_TIDS 256
35
36/*
37 * This table provide mappings from:
38 * (guestAS,guestTID,guestPR) --> ID of physical cpu
39 * guestAS [0..1]
40 * guestTID [0..255]
41 * guestPR [0..1]
42 * ID [1..255]
43 * Each vcpu keeps one vcpu_id_table.
44 */
45struct vcpu_id_table {
46 struct id id[2][NUM_TIDS][2];
47};
48
49/*
50 * This table provide reversed mappings of vcpu_id_table:
51 * ID --> address of vcpu_id_table item.
52 * Each physical core has one pcpu_id_table.
53 */
54struct pcpu_id_table {
55 struct id *entry[NUM_TIDS];
56};
57
58static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);
59
60/* This variable keeps last used shadow ID on local core.
61 * The valid range of shadow ID is [1..255] */
62static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
63
64/*
65 * Allocate a free shadow id and setup a valid sid mapping in given entry.
66 * A mapping is only valid when vcpu_id_table and pcpu_id_table are match.
67 *
68 * The caller must have preemption disabled, and keep it that way until
69 * it has finished with the returned shadow id (either written into the
70 * TLB or arch.shadow_pid, or discarded).
71 */
72static inline int local_sid_setup_one(struct id *entry)
73{
74 unsigned long sid;
75 int ret = -1;
76
77 sid = ++(__get_cpu_var(pcpu_last_used_sid));
78 if (sid < NUM_TIDS) {
79 __get_cpu_var(pcpu_sids).entry[sid] = entry;
80 entry->val = sid;
81 entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
82 ret = sid;
83 }
84
85 /*
86 * If sid == NUM_TIDS, we've run out of sids. We return -1, and
87 * the caller will invalidate everything and start over.
88 *
89 * sid > NUM_TIDS indicates a race, which we disable preemption to
90 * avoid.
91 */
92 WARN_ON(sid > NUM_TIDS);
93
94 return ret;
95}
96
97/*
98 * Check if given entry contain a valid shadow id mapping.
99 * An ID mapping is considered valid only if
100 * both vcpu and pcpu know this mapping.
101 *
102 * The caller must have preemption disabled, and keep it that way until
103 * it has finished with the returned shadow id (either written into the
104 * TLB or arch.shadow_pid, or discarded).
105 */
106static inline int local_sid_lookup(struct id *entry)
107{
108 if (entry && entry->val != 0 &&
109 __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
110 entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
111 return entry->val;
112 return -1;
113}
114
115/* Invalidate all id mappings on local core -- call with preempt disabled */
116static inline void local_sid_destroy_all(void)
117{
118 __get_cpu_var(pcpu_last_used_sid) = 0;
119 memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
120}
121
122static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
123{
124 vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
125 return vcpu_e500->idt;
126}
127
128static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
129{
130 kfree(vcpu_e500->idt);
131 vcpu_e500->idt = NULL;
132}
133
134/* Map guest pid to shadow.
135 * We use PID to keep shadow of current guest non-zero PID,
136 * and use PID1 to keep shadow of guest zero PID.
137 * So that guest tlbe with TID=0 can be accessed at any time */
138static void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
139{
140 preempt_disable();
141 vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
142 get_cur_as(&vcpu_e500->vcpu),
143 get_cur_pid(&vcpu_e500->vcpu),
144 get_cur_pr(&vcpu_e500->vcpu), 1);
145 vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
146 get_cur_as(&vcpu_e500->vcpu), 0,
147 get_cur_pr(&vcpu_e500->vcpu), 1);
148 preempt_enable();
149}
150
151/* Invalidate all mappings on vcpu */
152static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
153{
154 memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));
155
156 /* Update shadow pid when mappings are changed */
157 kvmppc_e500_recalc_shadow_pid(vcpu_e500);
158}
159
160/* Invalidate one ID mapping on vcpu */
161static inline void kvmppc_e500_id_table_reset_one(
162 struct kvmppc_vcpu_e500 *vcpu_e500,
163 int as, int pid, int pr)
164{
165 struct vcpu_id_table *idt = vcpu_e500->idt;
166
167 BUG_ON(as >= 2);
168 BUG_ON(pid >= NUM_TIDS);
169 BUG_ON(pr >= 2);
170
171 idt->id[as][pid][pr].val = 0;
172 idt->id[as][pid][pr].pentry = NULL;
173
174 /* Update shadow pid when mappings are changed */
175 kvmppc_e500_recalc_shadow_pid(vcpu_e500);
176}
177
178/*
179 * Map guest (vcpu,AS,ID,PR) to physical core shadow id.
180 * This function first lookup if a valid mapping exists,
181 * if not, then creates a new one.
182 *
183 * The caller must have preemption disabled, and keep it that way until
184 * it has finished with the returned shadow id (either written into the
185 * TLB or arch.shadow_pid, or discarded).
186 */
187unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
188 unsigned int as, unsigned int gid,
189 unsigned int pr, int avoid_recursion)
190{
191 struct vcpu_id_table *idt = vcpu_e500->idt;
192 int sid;
193
194 BUG_ON(as >= 2);
195 BUG_ON(gid >= NUM_TIDS);
196 BUG_ON(pr >= 2);
197
198 sid = local_sid_lookup(&idt->id[as][gid][pr]);
199
200 while (sid <= 0) {
201 /* No mapping yet */
202 sid = local_sid_setup_one(&idt->id[as][gid][pr]);
203 if (sid <= 0) {
204 _tlbil_all();
205 local_sid_destroy_all();
206 }
207
208 /* Update shadow pid when mappings are changed */
209 if (!avoid_recursion)
210 kvmppc_e500_recalc_shadow_pid(vcpu_e500);
211 }
212
213 return sid;
214}
215
216unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
217 struct kvm_book3e_206_tlb_entry *gtlbe)
218{
219 return kvmppc_e500_get_sid(to_e500(vcpu), get_tlb_ts(gtlbe),
220 get_tlb_tid(gtlbe), get_cur_pr(vcpu), 0);
221}
222
223void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
224{
225 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
226
227 if (vcpu->arch.pid != pid) {
228 vcpu_e500->pid[0] = vcpu->arch.pid = pid;
229 kvmppc_e500_recalc_shadow_pid(vcpu_e500);
230 }
231}
232
233/* gtlbe must not be mapped by more than one host tlbe */
234void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
235 struct kvm_book3e_206_tlb_entry *gtlbe)
236{
237 struct vcpu_id_table *idt = vcpu_e500->idt;
238 unsigned int pr, tid, ts, pid;
239 u32 val, eaddr;
240 unsigned long flags;
241
242 ts = get_tlb_ts(gtlbe);
243 tid = get_tlb_tid(gtlbe);
244
245 preempt_disable();
246
247 /* One guest ID may be mapped to two shadow IDs */
248 for (pr = 0; pr < 2; pr++) {
249 /*
250 * The shadow PID can have a valid mapping on at most one
251 * host CPU. In the common case, it will be valid on this
252 * CPU, in which case we do a local invalidation of the
253 * specific address.
254 *
255 * If the shadow PID is not valid on the current host CPU,
256 * we invalidate the entire shadow PID.
257 */
258 pid = local_sid_lookup(&idt->id[ts][tid][pr]);
259 if (pid <= 0) {
260 kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
261 continue;
262 }
263
264 /*
265 * The guest is invalidating a 4K entry which is in a PID
266 * that has a valid shadow mapping on this host CPU. We
267 * search host TLB to invalidate it's shadow TLB entry,
268 * similar to __tlbil_va except that we need to look in AS1.
269 */
270 val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
271 eaddr = get_tlb_eaddr(gtlbe);
272
273 local_irq_save(flags);
274
275 mtspr(SPRN_MAS6, val);
276 asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
277 val = mfspr(SPRN_MAS1);
278 if (val & MAS1_VALID) {
279 mtspr(SPRN_MAS1, val & ~MAS1_VALID);
280 asm volatile("tlbwe");
281 }
282
283 local_irq_restore(flags);
284 }
285
286 preempt_enable();
287}
288
289void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
290{
291 kvmppc_e500_id_table_reset_all(vcpu_e500);
292}
293
294void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
295{
296 /* Recalc shadow pid since MSR changes */
297 kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
298}
28 299
29void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu) 300void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
30{ 301{
@@ -36,17 +307,20 @@ void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
36 307
37void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) 308void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
38{ 309{
39 kvmppc_e500_tlb_load(vcpu, cpu); 310 kvmppc_booke_vcpu_load(vcpu, cpu);
311
312 /* Shadow PID may be expired on local core */
313 kvmppc_e500_recalc_shadow_pid(to_e500(vcpu));
40} 314}
41 315
42void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) 316void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
43{ 317{
44 kvmppc_e500_tlb_put(vcpu);
45
46#ifdef CONFIG_SPE 318#ifdef CONFIG_SPE
47 if (vcpu->arch.shadow_msr & MSR_SPE) 319 if (vcpu->arch.shadow_msr & MSR_SPE)
48 kvmppc_vcpu_disable_spe(vcpu); 320 kvmppc_vcpu_disable_spe(vcpu);
49#endif 321#endif
322
323 kvmppc_booke_vcpu_put(vcpu);
50} 324}
51 325
52int kvmppc_core_check_processor_compat(void) 326int kvmppc_core_check_processor_compat(void)
@@ -61,6 +335,23 @@ int kvmppc_core_check_processor_compat(void)
61 return r; 335 return r;
62} 336}
63 337
338static void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
339{
340 struct kvm_book3e_206_tlb_entry *tlbe;
341
342 /* Insert large initial mapping for guest. */
343 tlbe = get_entry(vcpu_e500, 1, 0);
344 tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
345 tlbe->mas2 = 0;
346 tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;
347
348 /* 4K map for serial output. Used by kernel wrapper. */
349 tlbe = get_entry(vcpu_e500, 1, 1);
350 tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
351 tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
352 tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
353}
354
64int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu) 355int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
65{ 356{
66 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 357 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -76,32 +367,6 @@ int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
76 return 0; 367 return 0;
77} 368}
78 369
79/* 'linear_address' is actually an encoding of AS|PID|EADDR . */
80int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
81 struct kvm_translation *tr)
82{
83 int index;
84 gva_t eaddr;
85 u8 pid;
86 u8 as;
87
88 eaddr = tr->linear_address;
89 pid = (tr->linear_address >> 32) & 0xff;
90 as = (tr->linear_address >> 40) & 0x1;
91
92 index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
93 if (index < 0) {
94 tr->valid = 0;
95 return 0;
96 }
97
98 tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
99 /* XXX what does "writeable" and "usermode" even mean? */
100 tr->valid = 1;
101
102 return 0;
103}
104
105void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 370void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
106{ 371{
107 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 372 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -115,19 +380,6 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
115 sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0; 380 sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
116 sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar; 381 sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;
117 382
118 sregs->u.e.mas0 = vcpu->arch.shared->mas0;
119 sregs->u.e.mas1 = vcpu->arch.shared->mas1;
120 sregs->u.e.mas2 = vcpu->arch.shared->mas2;
121 sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
122 sregs->u.e.mas4 = vcpu->arch.shared->mas4;
123 sregs->u.e.mas6 = vcpu->arch.shared->mas6;
124
125 sregs->u.e.mmucfg = mfspr(SPRN_MMUCFG);
126 sregs->u.e.tlbcfg[0] = vcpu_e500->tlb0cfg;
127 sregs->u.e.tlbcfg[1] = vcpu_e500->tlb1cfg;
128 sregs->u.e.tlbcfg[2] = 0;
129 sregs->u.e.tlbcfg[3] = 0;
130
131 sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]; 383 sregs->u.e.ivor_high[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
132 sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]; 384 sregs->u.e.ivor_high[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
133 sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]; 385 sregs->u.e.ivor_high[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
@@ -135,11 +387,13 @@ void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
135 vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]; 387 vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
136 388
137 kvmppc_get_sregs_ivor(vcpu, sregs); 389 kvmppc_get_sregs_ivor(vcpu, sregs);
390 kvmppc_get_sregs_e500_tlb(vcpu, sregs);
138} 391}
139 392
140int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) 393int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
141{ 394{
142 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 395 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
396 int ret;
143 397
144 if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) { 398 if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
145 vcpu_e500->svr = sregs->u.e.impl.fsl.svr; 399 vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
@@ -147,14 +401,9 @@ int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
147 vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar; 401 vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
148 } 402 }
149 403
150 if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) { 404 ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
151 vcpu->arch.shared->mas0 = sregs->u.e.mas0; 405 if (ret < 0)
152 vcpu->arch.shared->mas1 = sregs->u.e.mas1; 406 return ret;
153 vcpu->arch.shared->mas2 = sregs->u.e.mas2;
154 vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
155 vcpu->arch.shared->mas4 = sregs->u.e.mas4;
156 vcpu->arch.shared->mas6 = sregs->u.e.mas6;
157 }
158 407
159 if (!(sregs->u.e.features & KVM_SREGS_E_IVOR)) 408 if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
160 return 0; 409 return 0;
@@ -193,9 +442,12 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
193 if (err) 442 if (err)
194 goto free_vcpu; 443 goto free_vcpu;
195 444
445 if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
446 goto uninit_vcpu;
447
196 err = kvmppc_e500_tlb_init(vcpu_e500); 448 err = kvmppc_e500_tlb_init(vcpu_e500);
197 if (err) 449 if (err)
198 goto uninit_vcpu; 450 goto uninit_id;
199 451
200 vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO); 452 vcpu->arch.shared = (void*)__get_free_page(GFP_KERNEL|__GFP_ZERO);
201 if (!vcpu->arch.shared) 453 if (!vcpu->arch.shared)
@@ -205,6 +457,8 @@ struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
205 457
206uninit_tlb: 458uninit_tlb:
207 kvmppc_e500_tlb_uninit(vcpu_e500); 459 kvmppc_e500_tlb_uninit(vcpu_e500);
460uninit_id:
461 kvmppc_e500_id_table_free(vcpu_e500);
208uninit_vcpu: 462uninit_vcpu:
209 kvm_vcpu_uninit(vcpu); 463 kvm_vcpu_uninit(vcpu);
210free_vcpu: 464free_vcpu:
@@ -218,11 +472,21 @@ void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
218 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 472 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
219 473
220 free_page((unsigned long)vcpu->arch.shared); 474 free_page((unsigned long)vcpu->arch.shared);
221 kvm_vcpu_uninit(vcpu);
222 kvmppc_e500_tlb_uninit(vcpu_e500); 475 kvmppc_e500_tlb_uninit(vcpu_e500);
476 kvmppc_e500_id_table_free(vcpu_e500);
477 kvm_vcpu_uninit(vcpu);
223 kmem_cache_free(kvm_vcpu_cache, vcpu_e500); 478 kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
224} 479}
225 480
481int kvmppc_core_init_vm(struct kvm *kvm)
482{
483 return 0;
484}
485
486void kvmppc_core_destroy_vm(struct kvm *kvm)
487{
488}
489
226static int __init kvmppc_e500_init(void) 490static int __init kvmppc_e500_init(void)
227{ 491{
228 int r, i; 492 int r, i;
diff --git a/arch/powerpc/kvm/e500.h b/arch/powerpc/kvm/e500.h
new file mode 100644
index 000000000000..aa8b81428bf4
--- /dev/null
+++ b/arch/powerpc/kvm/e500.h
@@ -0,0 +1,306 @@
1/*
2 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
3 *
4 * Author: Yu Liu <yu.liu@freescale.com>
5 * Scott Wood <scottwood@freescale.com>
6 * Ashish Kalra <ashish.kalra@freescale.com>
7 * Varun Sethi <varun.sethi@freescale.com>
8 *
9 * Description:
10 * This file is based on arch/powerpc/kvm/44x_tlb.h and
11 * arch/powerpc/include/asm/kvm_44x.h by Hollis Blanchard <hollisb@us.ibm.com>,
12 * Copyright IBM Corp. 2007-2008
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License, version 2, as
16 * published by the Free Software Foundation.
17 */
18
19#ifndef KVM_E500_H
20#define KVM_E500_H
21
22#include <linux/kvm_host.h>
23#include <asm/mmu-book3e.h>
24#include <asm/tlb.h>
25
26#define E500_PID_NUM 3
27#define E500_TLB_NUM 2
28
29#define E500_TLB_VALID 1
30#define E500_TLB_DIRTY 2
31#define E500_TLB_BITMAP 4
32
33struct tlbe_ref {
34 pfn_t pfn;
35 unsigned int flags; /* E500_TLB_* */
36};
37
38struct tlbe_priv {
39 struct tlbe_ref ref; /* TLB0 only -- TLB1 uses tlb_refs */
40};
41
42#ifdef CONFIG_KVM_E500V2
43struct vcpu_id_table;
44#endif
45
46struct kvmppc_e500_tlb_params {
47 int entries, ways, sets;
48};
49
50struct kvmppc_vcpu_e500 {
51 struct kvm_vcpu vcpu;
52
53 /* Unmodified copy of the guest's TLB -- shared with host userspace. */
54 struct kvm_book3e_206_tlb_entry *gtlb_arch;
55
56 /* Starting entry number in gtlb_arch[] */
57 int gtlb_offset[E500_TLB_NUM];
58
59 /* KVM internal information associated with each guest TLB entry */
60 struct tlbe_priv *gtlb_priv[E500_TLB_NUM];
61
62 struct kvmppc_e500_tlb_params gtlb_params[E500_TLB_NUM];
63
64 unsigned int gtlb_nv[E500_TLB_NUM];
65
66 /*
67 * information associated with each host TLB entry --
68 * TLB1 only for now. If/when guest TLB1 entries can be
69 * mapped with host TLB0, this will be used for that too.
70 *
71 * We don't want to use this for guest TLB0 because then we'd
72 * have the overhead of doing the translation again even if
73 * the entry is still in the guest TLB (e.g. we swapped out
74 * and back, and our host TLB entries got evicted).
75 */
76 struct tlbe_ref *tlb_refs[E500_TLB_NUM];
77 unsigned int host_tlb1_nv;
78
79 u32 svr;
80 u32 l1csr0;
81 u32 l1csr1;
82 u32 hid0;
83 u32 hid1;
84 u64 mcar;
85
86 struct page **shared_tlb_pages;
87 int num_shared_tlb_pages;
88
89 u64 *g2h_tlb1_map;
90 unsigned int *h2g_tlb1_rmap;
91
92 /* Minimum and maximum address mapped my TLB1 */
93 unsigned long tlb1_min_eaddr;
94 unsigned long tlb1_max_eaddr;
95
96#ifdef CONFIG_KVM_E500V2
97 u32 pid[E500_PID_NUM];
98
99 /* vcpu id table */
100 struct vcpu_id_table *idt;
101#endif
102};
103
104static inline struct kvmppc_vcpu_e500 *to_e500(struct kvm_vcpu *vcpu)
105{
106 return container_of(vcpu, struct kvmppc_vcpu_e500, vcpu);
107}
108
109
110/* This geometry is the legacy default -- can be overridden by userspace */
111#define KVM_E500_TLB0_WAY_SIZE 128
112#define KVM_E500_TLB0_WAY_NUM 2
113
114#define KVM_E500_TLB0_SIZE (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
115#define KVM_E500_TLB1_SIZE 16
116
117#define index_of(tlbsel, esel) (((tlbsel) << 16) | ((esel) & 0xFFFF))
118#define tlbsel_of(index) ((index) >> 16)
119#define esel_of(index) ((index) & 0xFFFF)
120
121#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
122#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
123#define MAS2_ATTRIB_MASK \
124 (MAS2_X0 | MAS2_X1)
125#define MAS3_ATTRIB_MASK \
126 (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
127 | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)
128
129int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500,
130 ulong value);
131int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu);
132int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu);
133int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb);
134int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb);
135int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb);
136int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500);
137void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500);
138
139void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
140int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
141
142
143#ifdef CONFIG_KVM_E500V2
144unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
145 unsigned int as, unsigned int gid,
146 unsigned int pr, int avoid_recursion);
147#endif
148
149/* TLB helper functions */
150static inline unsigned int
151get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
152{
153 return (tlbe->mas1 >> 7) & 0x1f;
154}
155
156static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
157{
158 return tlbe->mas2 & 0xfffff000;
159}
160
161static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)
162{
163 unsigned int pgsize = get_tlb_size(tlbe);
164 return 1ULL << 10 << pgsize;
165}
166
167static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe)
168{
169 u64 bytes = get_tlb_bytes(tlbe);
170 return get_tlb_eaddr(tlbe) + bytes - 1;
171}
172
173static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe)
174{
175 return tlbe->mas7_3 & ~0xfffULL;
176}
177
178static inline unsigned int
179get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe)
180{
181 return (tlbe->mas1 >> 16) & 0xff;
182}
183
184static inline unsigned int
185get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe)
186{
187 return (tlbe->mas1 >> 12) & 0x1;
188}
189
190static inline unsigned int
191get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe)
192{
193 return (tlbe->mas1 >> 31) & 0x1;
194}
195
196static inline unsigned int
197get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe)
198{
199 return (tlbe->mas1 >> 30) & 0x1;
200}
201
202static inline unsigned int
203get_tlb_tsize(const struct kvm_book3e_206_tlb_entry *tlbe)
204{
205 return (tlbe->mas1 & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
206}
207
208static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
209{
210 return vcpu->arch.pid & 0xff;
211}
212
213static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
214{
215 return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
216}
217
218static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
219{
220 return !!(vcpu->arch.shared->msr & MSR_PR);
221}
222
223static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
224{
225 return (vcpu->arch.shared->mas6 >> 16) & 0xff;
226}
227
228static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
229{
230 return vcpu->arch.shared->mas6 & 0x1;
231}
232
233static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
234{
235 /*
236 * Manual says that tlbsel has 2 bits wide.
237 * Since we only have two TLBs, only lower bit is used.
238 */
239 return (vcpu->arch.shared->mas0 >> 28) & 0x1;
240}
241
242static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
243{
244 return vcpu->arch.shared->mas0 & 0xfff;
245}
246
247static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
248{
249 return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
250}
251
252static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
253 const struct kvm_book3e_206_tlb_entry *tlbe)
254{
255 gpa_t gpa;
256
257 if (!get_tlb_v(tlbe))
258 return 0;
259
260#ifndef CONFIG_KVM_BOOKE_HV
261 /* Does it match current guest AS? */
262 /* XXX what about IS != DS? */
263 if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
264 return 0;
265#endif
266
267 gpa = get_tlb_raddr(tlbe);
268 if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
269 /* Mapping is not for RAM. */
270 return 0;
271
272 return 1;
273}
274
275static inline struct kvm_book3e_206_tlb_entry *get_entry(
276 struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
277{
278 int offset = vcpu_e500->gtlb_offset[tlbsel];
279 return &vcpu_e500->gtlb_arch[offset + entry];
280}
281
282void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
283 struct kvm_book3e_206_tlb_entry *gtlbe);
284void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500);
285
286#ifdef CONFIG_KVM_BOOKE_HV
287#define kvmppc_e500_get_tlb_stid(vcpu, gtlbe) get_tlb_tid(gtlbe)
288#define get_tlbmiss_tid(vcpu) get_cur_pid(vcpu)
289#define get_tlb_sts(gtlbe) (gtlbe->mas1 & MAS1_TS)
290#else
291unsigned int kvmppc_e500_get_tlb_stid(struct kvm_vcpu *vcpu,
292 struct kvm_book3e_206_tlb_entry *gtlbe);
293
294static inline unsigned int get_tlbmiss_tid(struct kvm_vcpu *vcpu)
295{
296 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
297 unsigned int tidseld = (vcpu->arch.shared->mas4 >> 16) & 0xf;
298
299 return vcpu_e500->pid[tidseld];
300}
301
302/* Force TS=1 for all guest mappings. */
303#define get_tlb_sts(gtlbe) (MAS1_TS)
304#endif /* !BOOKE_HV */
305
306#endif /* KVM_E500_H */
diff --git a/arch/powerpc/kvm/e500_emulate.c b/arch/powerpc/kvm/e500_emulate.c
index 6d0b2bd54fb0..8b99e076dc81 100644
--- a/arch/powerpc/kvm/e500_emulate.c
+++ b/arch/powerpc/kvm/e500_emulate.c
@@ -14,27 +14,96 @@
14 14
15#include <asm/kvm_ppc.h> 15#include <asm/kvm_ppc.h>
16#include <asm/disassemble.h> 16#include <asm/disassemble.h>
17#include <asm/kvm_e500.h> 17#include <asm/dbell.h>
18 18
19#include "booke.h" 19#include "booke.h"
20#include "e500_tlb.h" 20#include "e500.h"
21 21
22#define XOP_MSGSND 206
23#define XOP_MSGCLR 238
22#define XOP_TLBIVAX 786 24#define XOP_TLBIVAX 786
23#define XOP_TLBSX 914 25#define XOP_TLBSX 914
24#define XOP_TLBRE 946 26#define XOP_TLBRE 946
25#define XOP_TLBWE 978 27#define XOP_TLBWE 978
28#define XOP_TLBILX 18
29
30#ifdef CONFIG_KVM_E500MC
31static int dbell2prio(ulong param)
32{
33 int msg = param & PPC_DBELL_TYPE_MASK;
34 int prio = -1;
35
36 switch (msg) {
37 case PPC_DBELL_TYPE(PPC_DBELL):
38 prio = BOOKE_IRQPRIO_DBELL;
39 break;
40 case PPC_DBELL_TYPE(PPC_DBELL_CRIT):
41 prio = BOOKE_IRQPRIO_DBELL_CRIT;
42 break;
43 default:
44 break;
45 }
46
47 return prio;
48}
49
50static int kvmppc_e500_emul_msgclr(struct kvm_vcpu *vcpu, int rb)
51{
52 ulong param = vcpu->arch.gpr[rb];
53 int prio = dbell2prio(param);
54
55 if (prio < 0)
56 return EMULATE_FAIL;
57
58 clear_bit(prio, &vcpu->arch.pending_exceptions);
59 return EMULATE_DONE;
60}
61
62static int kvmppc_e500_emul_msgsnd(struct kvm_vcpu *vcpu, int rb)
63{
64 ulong param = vcpu->arch.gpr[rb];
65 int prio = dbell2prio(rb);
66 int pir = param & PPC_DBELL_PIR_MASK;
67 int i;
68 struct kvm_vcpu *cvcpu;
69
70 if (prio < 0)
71 return EMULATE_FAIL;
72
73 kvm_for_each_vcpu(i, cvcpu, vcpu->kvm) {
74 int cpir = cvcpu->arch.shared->pir;
75 if ((param & PPC_DBELL_MSG_BRDCAST) || (cpir == pir)) {
76 set_bit(prio, &cvcpu->arch.pending_exceptions);
77 kvm_vcpu_kick(cvcpu);
78 }
79 }
80
81 return EMULATE_DONE;
82}
83#endif
26 84
27int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, 85int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
28 unsigned int inst, int *advance) 86 unsigned int inst, int *advance)
29{ 87{
30 int emulated = EMULATE_DONE; 88 int emulated = EMULATE_DONE;
31 int ra; 89 int ra = get_ra(inst);
32 int rb; 90 int rb = get_rb(inst);
91 int rt = get_rt(inst);
33 92
34 switch (get_op(inst)) { 93 switch (get_op(inst)) {
35 case 31: 94 case 31:
36 switch (get_xop(inst)) { 95 switch (get_xop(inst)) {
37 96
97#ifdef CONFIG_KVM_E500MC
98 case XOP_MSGSND:
99 emulated = kvmppc_e500_emul_msgsnd(vcpu, rb);
100 break;
101
102 case XOP_MSGCLR:
103 emulated = kvmppc_e500_emul_msgclr(vcpu, rb);
104 break;
105#endif
106
38 case XOP_TLBRE: 107 case XOP_TLBRE:
39 emulated = kvmppc_e500_emul_tlbre(vcpu); 108 emulated = kvmppc_e500_emul_tlbre(vcpu);
40 break; 109 break;
@@ -44,13 +113,14 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
44 break; 113 break;
45 114
46 case XOP_TLBSX: 115 case XOP_TLBSX:
47 rb = get_rb(inst);
48 emulated = kvmppc_e500_emul_tlbsx(vcpu,rb); 116 emulated = kvmppc_e500_emul_tlbsx(vcpu,rb);
49 break; 117 break;
50 118
119 case XOP_TLBILX:
120 emulated = kvmppc_e500_emul_tlbilx(vcpu, rt, ra, rb);
121 break;
122
51 case XOP_TLBIVAX: 123 case XOP_TLBIVAX:
52 ra = get_ra(inst);
53 rb = get_rb(inst);
54 emulated = kvmppc_e500_emul_tlbivax(vcpu, ra, rb); 124 emulated = kvmppc_e500_emul_tlbivax(vcpu, ra, rb);
55 break; 125 break;
56 126
@@ -70,52 +140,63 @@ int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
70 return emulated; 140 return emulated;
71} 141}
72 142
73int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs) 143int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
74{ 144{
75 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 145 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
76 int emulated = EMULATE_DONE; 146 int emulated = EMULATE_DONE;
77 ulong spr_val = kvmppc_get_gpr(vcpu, rs);
78 147
79 switch (sprn) { 148 switch (sprn) {
149#ifndef CONFIG_KVM_BOOKE_HV
80 case SPRN_PID: 150 case SPRN_PID:
81 kvmppc_set_pid(vcpu, spr_val); 151 kvmppc_set_pid(vcpu, spr_val);
82 break; 152 break;
83 case SPRN_PID1: 153 case SPRN_PID1:
84 if (spr_val != 0) 154 if (spr_val != 0)
85 return EMULATE_FAIL; 155 return EMULATE_FAIL;
86 vcpu_e500->pid[1] = spr_val; break; 156 vcpu_e500->pid[1] = spr_val;
157 break;
87 case SPRN_PID2: 158 case SPRN_PID2:
88 if (spr_val != 0) 159 if (spr_val != 0)
89 return EMULATE_FAIL; 160 return EMULATE_FAIL;
90 vcpu_e500->pid[2] = spr_val; break; 161 vcpu_e500->pid[2] = spr_val;
162 break;
91 case SPRN_MAS0: 163 case SPRN_MAS0:
92 vcpu->arch.shared->mas0 = spr_val; break; 164 vcpu->arch.shared->mas0 = spr_val;
165 break;
93 case SPRN_MAS1: 166 case SPRN_MAS1:
94 vcpu->arch.shared->mas1 = spr_val; break; 167 vcpu->arch.shared->mas1 = spr_val;
168 break;
95 case SPRN_MAS2: 169 case SPRN_MAS2:
96 vcpu->arch.shared->mas2 = spr_val; break; 170 vcpu->arch.shared->mas2 = spr_val;
171 break;
97 case SPRN_MAS3: 172 case SPRN_MAS3:
98 vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff; 173 vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
99 vcpu->arch.shared->mas7_3 |= spr_val; 174 vcpu->arch.shared->mas7_3 |= spr_val;
100 break; 175 break;
101 case SPRN_MAS4: 176 case SPRN_MAS4:
102 vcpu->arch.shared->mas4 = spr_val; break; 177 vcpu->arch.shared->mas4 = spr_val;
178 break;
103 case SPRN_MAS6: 179 case SPRN_MAS6:
104 vcpu->arch.shared->mas6 = spr_val; break; 180 vcpu->arch.shared->mas6 = spr_val;
181 break;
105 case SPRN_MAS7: 182 case SPRN_MAS7:
106 vcpu->arch.shared->mas7_3 &= (u64)0xffffffff; 183 vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
107 vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32; 184 vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
108 break; 185 break;
186#endif
109 case SPRN_L1CSR0: 187 case SPRN_L1CSR0:
110 vcpu_e500->l1csr0 = spr_val; 188 vcpu_e500->l1csr0 = spr_val;
111 vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC); 189 vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
112 break; 190 break;
113 case SPRN_L1CSR1: 191 case SPRN_L1CSR1:
114 vcpu_e500->l1csr1 = spr_val; break; 192 vcpu_e500->l1csr1 = spr_val;
193 break;
115 case SPRN_HID0: 194 case SPRN_HID0:
116 vcpu_e500->hid0 = spr_val; break; 195 vcpu_e500->hid0 = spr_val;
196 break;
117 case SPRN_HID1: 197 case SPRN_HID1:
118 vcpu_e500->hid1 = spr_val; break; 198 vcpu_e500->hid1 = spr_val;
199 break;
119 200
120 case SPRN_MMUCSR0: 201 case SPRN_MMUCSR0:
121 emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500, 202 emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
@@ -135,81 +216,112 @@ int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
135 case SPRN_IVOR35: 216 case SPRN_IVOR35:
136 vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val; 217 vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
137 break; 218 break;
138 219#ifdef CONFIG_KVM_BOOKE_HV
220 case SPRN_IVOR36:
221 vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] = spr_val;
222 break;
223 case SPRN_IVOR37:
224 vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] = spr_val;
225 break;
226#endif
139 default: 227 default:
140 emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs); 228 emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, spr_val);
141 } 229 }
142 230
143 return emulated; 231 return emulated;
144} 232}
145 233
146int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) 234int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
147{ 235{
148 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 236 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
149 int emulated = EMULATE_DONE; 237 int emulated = EMULATE_DONE;
150 unsigned long val;
151 238
152 switch (sprn) { 239 switch (sprn) {
240#ifndef CONFIG_KVM_BOOKE_HV
153 case SPRN_PID: 241 case SPRN_PID:
154 kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break; 242 *spr_val = vcpu_e500->pid[0];
243 break;
155 case SPRN_PID1: 244 case SPRN_PID1:
156 kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break; 245 *spr_val = vcpu_e500->pid[1];
246 break;
157 case SPRN_PID2: 247 case SPRN_PID2:
158 kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break; 248 *spr_val = vcpu_e500->pid[2];
249 break;
159 case SPRN_MAS0: 250 case SPRN_MAS0:
160 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas0); break; 251 *spr_val = vcpu->arch.shared->mas0;
252 break;
161 case SPRN_MAS1: 253 case SPRN_MAS1:
162 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas1); break; 254 *spr_val = vcpu->arch.shared->mas1;
255 break;
163 case SPRN_MAS2: 256 case SPRN_MAS2:
164 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas2); break; 257 *spr_val = vcpu->arch.shared->mas2;
258 break;
165 case SPRN_MAS3: 259 case SPRN_MAS3:
166 val = (u32)vcpu->arch.shared->mas7_3; 260 *spr_val = (u32)vcpu->arch.shared->mas7_3;
167 kvmppc_set_gpr(vcpu, rt, val);
168 break; 261 break;
169 case SPRN_MAS4: 262 case SPRN_MAS4:
170 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas4); break; 263 *spr_val = vcpu->arch.shared->mas4;
264 break;
171 case SPRN_MAS6: 265 case SPRN_MAS6:
172 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas6); break; 266 *spr_val = vcpu->arch.shared->mas6;
267 break;
173 case SPRN_MAS7: 268 case SPRN_MAS7:
174 val = vcpu->arch.shared->mas7_3 >> 32; 269 *spr_val = vcpu->arch.shared->mas7_3 >> 32;
175 kvmppc_set_gpr(vcpu, rt, val);
176 break; 270 break;
271#endif
177 case SPRN_TLB0CFG: 272 case SPRN_TLB0CFG:
178 kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break; 273 *spr_val = vcpu->arch.tlbcfg[0];
274 break;
179 case SPRN_TLB1CFG: 275 case SPRN_TLB1CFG:
180 kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb1cfg); break; 276 *spr_val = vcpu->arch.tlbcfg[1];
277 break;
181 case SPRN_L1CSR0: 278 case SPRN_L1CSR0:
182 kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break; 279 *spr_val = vcpu_e500->l1csr0;
280 break;
183 case SPRN_L1CSR1: 281 case SPRN_L1CSR1:
184 kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break; 282 *spr_val = vcpu_e500->l1csr1;
283 break;
185 case SPRN_HID0: 284 case SPRN_HID0:
186 kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break; 285 *spr_val = vcpu_e500->hid0;
286 break;
187 case SPRN_HID1: 287 case SPRN_HID1:
188 kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break; 288 *spr_val = vcpu_e500->hid1;
289 break;
189 case SPRN_SVR: 290 case SPRN_SVR:
190 kvmppc_set_gpr(vcpu, rt, vcpu_e500->svr); break; 291 *spr_val = vcpu_e500->svr;
292 break;
191 293
192 case SPRN_MMUCSR0: 294 case SPRN_MMUCSR0:
193 kvmppc_set_gpr(vcpu, rt, 0); break; 295 *spr_val = 0;
296 break;
194 297
195 case SPRN_MMUCFG: 298 case SPRN_MMUCFG:
196 kvmppc_set_gpr(vcpu, rt, mfspr(SPRN_MMUCFG)); break; 299 *spr_val = vcpu->arch.mmucfg;
300 break;
197 301
198 /* extra exceptions */ 302 /* extra exceptions */
199 case SPRN_IVOR32: 303 case SPRN_IVOR32:
200 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]); 304 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL];
201 break; 305 break;
202 case SPRN_IVOR33: 306 case SPRN_IVOR33:
203 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]); 307 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA];
204 break; 308 break;
205 case SPRN_IVOR34: 309 case SPRN_IVOR34:
206 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]); 310 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND];
207 break; 311 break;
208 case SPRN_IVOR35: 312 case SPRN_IVOR35:
209 kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]); 313 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
314 break;
315#ifdef CONFIG_KVM_BOOKE_HV
316 case SPRN_IVOR36:
317 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
318 break;
319 case SPRN_IVOR37:
320 *spr_val = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
210 break; 321 break;
322#endif
211 default: 323 default:
212 emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); 324 emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, spr_val);
213 } 325 }
214 326
215 return emulated; 327 return emulated;
diff --git a/arch/powerpc/kvm/e500_tlb.c b/arch/powerpc/kvm/e500_tlb.c
index 6e53e4164de1..c510fc961302 100644
--- a/arch/powerpc/kvm/e500_tlb.c
+++ b/arch/powerpc/kvm/e500_tlb.c
@@ -2,6 +2,9 @@
2 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved. 2 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
3 * 3 *
4 * Author: Yu Liu, yu.liu@freescale.com 4 * Author: Yu Liu, yu.liu@freescale.com
5 * Scott Wood, scottwood@freescale.com
6 * Ashish Kalra, ashish.kalra@freescale.com
7 * Varun Sethi, varun.sethi@freescale.com
5 * 8 *
6 * Description: 9 * Description:
7 * This file is based on arch/powerpc/kvm/44x_tlb.c, 10 * This file is based on arch/powerpc/kvm/44x_tlb.c,
@@ -26,210 +29,15 @@
26#include <linux/vmalloc.h> 29#include <linux/vmalloc.h>
27#include <linux/hugetlb.h> 30#include <linux/hugetlb.h>
28#include <asm/kvm_ppc.h> 31#include <asm/kvm_ppc.h>
29#include <asm/kvm_e500.h>
30 32
31#include "../mm/mmu_decl.h" 33#include "e500.h"
32#include "e500_tlb.h"
33#include "trace.h" 34#include "trace.h"
34#include "timing.h" 35#include "timing.h"
35 36
36#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1) 37#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)
37 38
38struct id {
39 unsigned long val;
40 struct id **pentry;
41};
42
43#define NUM_TIDS 256
44
45/*
46 * This table provide mappings from:
47 * (guestAS,guestTID,guestPR) --> ID of physical cpu
48 * guestAS [0..1]
49 * guestTID [0..255]
50 * guestPR [0..1]
51 * ID [1..255]
52 * Each vcpu keeps one vcpu_id_table.
53 */
54struct vcpu_id_table {
55 struct id id[2][NUM_TIDS][2];
56};
57
58/*
59 * This table provide reversed mappings of vcpu_id_table:
60 * ID --> address of vcpu_id_table item.
61 * Each physical core has one pcpu_id_table.
62 */
63struct pcpu_id_table {
64 struct id *entry[NUM_TIDS];
65};
66
67static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);
68
69/* This variable keeps last used shadow ID on local core.
70 * The valid range of shadow ID is [1..255] */
71static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
72
73static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM]; 39static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];
74 40
75static struct kvm_book3e_206_tlb_entry *get_entry(
76 struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel, int entry)
77{
78 int offset = vcpu_e500->gtlb_offset[tlbsel];
79 return &vcpu_e500->gtlb_arch[offset + entry];
80}
81
82/*
83 * Allocate a free shadow id and setup a valid sid mapping in given entry.
84 * A mapping is only valid when vcpu_id_table and pcpu_id_table are match.
85 *
86 * The caller must have preemption disabled, and keep it that way until
87 * it has finished with the returned shadow id (either written into the
88 * TLB or arch.shadow_pid, or discarded).
89 */
90static inline int local_sid_setup_one(struct id *entry)
91{
92 unsigned long sid;
93 int ret = -1;
94
95 sid = ++(__get_cpu_var(pcpu_last_used_sid));
96 if (sid < NUM_TIDS) {
97 __get_cpu_var(pcpu_sids).entry[sid] = entry;
98 entry->val = sid;
99 entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
100 ret = sid;
101 }
102
103 /*
104 * If sid == NUM_TIDS, we've run out of sids. We return -1, and
105 * the caller will invalidate everything and start over.
106 *
107 * sid > NUM_TIDS indicates a race, which we disable preemption to
108 * avoid.
109 */
110 WARN_ON(sid > NUM_TIDS);
111
112 return ret;
113}
114
115/*
116 * Check if given entry contain a valid shadow id mapping.
117 * An ID mapping is considered valid only if
118 * both vcpu and pcpu know this mapping.
119 *
120 * The caller must have preemption disabled, and keep it that way until
121 * it has finished with the returned shadow id (either written into the
122 * TLB or arch.shadow_pid, or discarded).
123 */
124static inline int local_sid_lookup(struct id *entry)
125{
126 if (entry && entry->val != 0 &&
127 __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
128 entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
129 return entry->val;
130 return -1;
131}
132
133/* Invalidate all id mappings on local core -- call with preempt disabled */
134static inline void local_sid_destroy_all(void)
135{
136 __get_cpu_var(pcpu_last_used_sid) = 0;
137 memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
138}
139
140static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
141{
142 vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
143 return vcpu_e500->idt;
144}
145
146static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
147{
148 kfree(vcpu_e500->idt);
149}
150
151/* Invalidate all mappings on vcpu */
152static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
153{
154 memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));
155
156 /* Update shadow pid when mappings are changed */
157 kvmppc_e500_recalc_shadow_pid(vcpu_e500);
158}
159
160/* Invalidate one ID mapping on vcpu */
161static inline void kvmppc_e500_id_table_reset_one(
162 struct kvmppc_vcpu_e500 *vcpu_e500,
163 int as, int pid, int pr)
164{
165 struct vcpu_id_table *idt = vcpu_e500->idt;
166
167 BUG_ON(as >= 2);
168 BUG_ON(pid >= NUM_TIDS);
169 BUG_ON(pr >= 2);
170
171 idt->id[as][pid][pr].val = 0;
172 idt->id[as][pid][pr].pentry = NULL;
173
174 /* Update shadow pid when mappings are changed */
175 kvmppc_e500_recalc_shadow_pid(vcpu_e500);
176}
177
178/*
179 * Map guest (vcpu,AS,ID,PR) to physical core shadow id.
180 * This function first lookup if a valid mapping exists,
181 * if not, then creates a new one.
182 *
183 * The caller must have preemption disabled, and keep it that way until
184 * it has finished with the returned shadow id (either written into the
185 * TLB or arch.shadow_pid, or discarded).
186 */
187static unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
188 unsigned int as, unsigned int gid,
189 unsigned int pr, int avoid_recursion)
190{
191 struct vcpu_id_table *idt = vcpu_e500->idt;
192 int sid;
193
194 BUG_ON(as >= 2);
195 BUG_ON(gid >= NUM_TIDS);
196 BUG_ON(pr >= 2);
197
198 sid = local_sid_lookup(&idt->id[as][gid][pr]);
199
200 while (sid <= 0) {
201 /* No mapping yet */
202 sid = local_sid_setup_one(&idt->id[as][gid][pr]);
203 if (sid <= 0) {
204 _tlbil_all();
205 local_sid_destroy_all();
206 }
207
208 /* Update shadow pid when mappings are changed */
209 if (!avoid_recursion)
210 kvmppc_e500_recalc_shadow_pid(vcpu_e500);
211 }
212
213 return sid;
214}
215
216/* Map guest pid to shadow.
217 * We use PID to keep shadow of current guest non-zero PID,
218 * and use PID1 to keep shadow of guest zero PID.
219 * So that guest tlbe with TID=0 can be accessed at any time */
220void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
221{
222 preempt_disable();
223 vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
224 get_cur_as(&vcpu_e500->vcpu),
225 get_cur_pid(&vcpu_e500->vcpu),
226 get_cur_pr(&vcpu_e500->vcpu), 1);
227 vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
228 get_cur_as(&vcpu_e500->vcpu), 0,
229 get_cur_pr(&vcpu_e500->vcpu), 1);
230 preempt_enable();
231}
232
233static inline unsigned int gtlb0_get_next_victim( 41static inline unsigned int gtlb0_get_next_victim(
234 struct kvmppc_vcpu_e500 *vcpu_e500) 42 struct kvmppc_vcpu_e500 *vcpu_e500)
235{ 43{
@@ -258,6 +66,7 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
258 /* Mask off reserved bits. */ 66 /* Mask off reserved bits. */
259 mas3 &= MAS3_ATTRIB_MASK; 67 mas3 &= MAS3_ATTRIB_MASK;
260 68
69#ifndef CONFIG_KVM_BOOKE_HV
261 if (!usermode) { 70 if (!usermode) {
262 /* Guest is in supervisor mode, 71 /* Guest is in supervisor mode,
263 * so we need to translate guest 72 * so we need to translate guest
@@ -265,8 +74,9 @@ static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
265 mas3 &= ~E500_TLB_USER_PERM_MASK; 74 mas3 &= ~E500_TLB_USER_PERM_MASK;
266 mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1; 75 mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
267 } 76 }
268 77 mas3 |= E500_TLB_SUPER_PERM_MASK;
269 return mas3 | E500_TLB_SUPER_PERM_MASK; 78#endif
79 return mas3;
270} 80}
271 81
272static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode) 82static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
@@ -292,7 +102,16 @@ static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
292 mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2); 102 mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
293 mtspr(SPRN_MAS3, (u32)stlbe->mas7_3); 103 mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
294 mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32)); 104 mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
105#ifdef CONFIG_KVM_BOOKE_HV
106 mtspr(SPRN_MAS8, stlbe->mas8);
107#endif
295 asm volatile("isync; tlbwe" : : : "memory"); 108 asm volatile("isync; tlbwe" : : : "memory");
109
110#ifdef CONFIG_KVM_BOOKE_HV
111 /* Must clear mas8 for other host tlbwe's */
112 mtspr(SPRN_MAS8, 0);
113 isync();
114#endif
296 local_irq_restore(flags); 115 local_irq_restore(flags);
297 116
298 trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1, 117 trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
@@ -337,6 +156,7 @@ static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
337 } 156 }
338} 157}
339 158
159#ifdef CONFIG_KVM_E500V2
340void kvmppc_map_magic(struct kvm_vcpu *vcpu) 160void kvmppc_map_magic(struct kvm_vcpu *vcpu)
341{ 161{
342 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 162 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
@@ -361,75 +181,41 @@ void kvmppc_map_magic(struct kvm_vcpu *vcpu)
361 __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index)); 181 __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
362 preempt_enable(); 182 preempt_enable();
363} 183}
364 184#endif
365void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
366{
367 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
368
369 /* Shadow PID may be expired on local core */
370 kvmppc_e500_recalc_shadow_pid(vcpu_e500);
371}
372
373void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
374{
375}
376 185
377static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, 186static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500,
378 int tlbsel, int esel) 187 int tlbsel, int esel)
379{ 188{
380 struct kvm_book3e_206_tlb_entry *gtlbe = 189 struct kvm_book3e_206_tlb_entry *gtlbe =
381 get_entry(vcpu_e500, tlbsel, esel); 190 get_entry(vcpu_e500, tlbsel, esel);
382 struct vcpu_id_table *idt = vcpu_e500->idt;
383 unsigned int pr, tid, ts, pid;
384 u32 val, eaddr;
385 unsigned long flags;
386
387 ts = get_tlb_ts(gtlbe);
388 tid = get_tlb_tid(gtlbe);
389
390 preempt_disable();
391
392 /* One guest ID may be mapped to two shadow IDs */
393 for (pr = 0; pr < 2; pr++) {
394 /*
395 * The shadow PID can have a valid mapping on at most one
396 * host CPU. In the common case, it will be valid on this
397 * CPU, in which case (for TLB0) we do a local invalidation
398 * of the specific address.
399 *
400 * If the shadow PID is not valid on the current host CPU, or
401 * if we're invalidating a TLB1 entry, we invalidate the
402 * entire shadow PID.
403 */
404 if (tlbsel == 1 ||
405 (pid = local_sid_lookup(&idt->id[ts][tid][pr])) <= 0) {
406 kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
407 continue;
408 }
409 191
410 /* 192 if (tlbsel == 1 &&
411 * The guest is invalidating a TLB0 entry which is in a PID 193 vcpu_e500->gtlb_priv[1][esel].ref.flags & E500_TLB_BITMAP) {
412 * that has a valid shadow mapping on this host CPU. We 194 u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
413 * search host TLB0 to invalidate it's shadow TLB entry, 195 int hw_tlb_indx;
414 * similar to __tlbil_va except that we need to look in AS1. 196 unsigned long flags;
415 */
416 val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
417 eaddr = get_tlb_eaddr(gtlbe);
418 197
419 local_irq_save(flags); 198 local_irq_save(flags);
420 199 while (tmp) {
421 mtspr(SPRN_MAS6, val); 200 hw_tlb_indx = __ilog2_u64(tmp & -tmp);
422 asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr)); 201 mtspr(SPRN_MAS0,
423 val = mfspr(SPRN_MAS1); 202 MAS0_TLBSEL(1) |
424 if (val & MAS1_VALID) { 203 MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
425 mtspr(SPRN_MAS1, val & ~MAS1_VALID); 204 mtspr(SPRN_MAS1, 0);
426 asm volatile("tlbwe"); 205 asm volatile("tlbwe");
206 vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
207 tmp &= tmp - 1;
427 } 208 }
428 209 mb();
210 vcpu_e500->g2h_tlb1_map[esel] = 0;
211 vcpu_e500->gtlb_priv[1][esel].ref.flags &= ~E500_TLB_BITMAP;
429 local_irq_restore(flags); 212 local_irq_restore(flags);
213
214 return;
430 } 215 }
431 216
432 preempt_enable(); 217 /* Guest tlbe is backed by at most one host tlbe per shadow pid. */
218 kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);
433} 219}
434 220
435static int tlb0_set_base(gva_t addr, int sets, int ways) 221static int tlb0_set_base(gva_t addr, int sets, int ways)
@@ -475,6 +261,9 @@ static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
475 set_base = gtlb0_set_base(vcpu_e500, eaddr); 261 set_base = gtlb0_set_base(vcpu_e500, eaddr);
476 size = vcpu_e500->gtlb_params[0].ways; 262 size = vcpu_e500->gtlb_params[0].ways;
477 } else { 263 } else {
264 if (eaddr < vcpu_e500->tlb1_min_eaddr ||
265 eaddr > vcpu_e500->tlb1_max_eaddr)
266 return -1;
478 set_base = 0; 267 set_base = 0;
479 } 268 }
480 269
@@ -530,6 +319,16 @@ static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
530 } 319 }
531} 320}
532 321
322static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
323{
324 if (vcpu_e500->g2h_tlb1_map)
325 memset(vcpu_e500->g2h_tlb1_map,
326 sizeof(u64) * vcpu_e500->gtlb_params[1].entries, 0);
327 if (vcpu_e500->h2g_tlb1_rmap)
328 memset(vcpu_e500->h2g_tlb1_rmap,
329 sizeof(unsigned int) * host_tlb_params[1].entries, 0);
330}
331
533static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500) 332static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
534{ 333{
535 int tlbsel = 0; 334 int tlbsel = 0;
@@ -547,7 +346,7 @@ static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
547 int stlbsel = 1; 346 int stlbsel = 1;
548 int i; 347 int i;
549 348
550 kvmppc_e500_id_table_reset_all(vcpu_e500); 349 kvmppc_e500_tlbil_all(vcpu_e500);
551 350
552 for (i = 0; i < host_tlb_params[stlbsel].entries; i++) { 351 for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
553 struct tlbe_ref *ref = 352 struct tlbe_ref *ref =
@@ -562,19 +361,18 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
562 unsigned int eaddr, int as) 361 unsigned int eaddr, int as)
563{ 362{
564 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 363 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
565 unsigned int victim, pidsel, tsized; 364 unsigned int victim, tsized;
566 int tlbsel; 365 int tlbsel;
567 366
568 /* since we only have two TLBs, only lower bit is used. */ 367 /* since we only have two TLBs, only lower bit is used. */
569 tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1; 368 tlbsel = (vcpu->arch.shared->mas4 >> 28) & 0x1;
570 victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0; 369 victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
571 pidsel = (vcpu->arch.shared->mas4 >> 16) & 0xf;
572 tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f; 370 tsized = (vcpu->arch.shared->mas4 >> 7) & 0x1f;
573 371
574 vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim) 372 vcpu->arch.shared->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
575 | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]); 373 | MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
576 vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0) 374 vcpu->arch.shared->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
577 | MAS1_TID(vcpu_e500->pid[pidsel]) 375 | MAS1_TID(get_tlbmiss_tid(vcpu))
578 | MAS1_TSIZE(tsized); 376 | MAS1_TSIZE(tsized);
579 vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN) 377 vcpu->arch.shared->mas2 = (eaddr & MAS2_EPN)
580 | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK); 378 | (vcpu->arch.shared->mas4 & MAS2_ATTRIB_MASK);
@@ -586,23 +384,26 @@ static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
586 384
587/* TID must be supplied by the caller */ 385/* TID must be supplied by the caller */
588static inline void kvmppc_e500_setup_stlbe( 386static inline void kvmppc_e500_setup_stlbe(
589 struct kvmppc_vcpu_e500 *vcpu_e500, 387 struct kvm_vcpu *vcpu,
590 struct kvm_book3e_206_tlb_entry *gtlbe, 388 struct kvm_book3e_206_tlb_entry *gtlbe,
591 int tsize, struct tlbe_ref *ref, u64 gvaddr, 389 int tsize, struct tlbe_ref *ref, u64 gvaddr,
592 struct kvm_book3e_206_tlb_entry *stlbe) 390 struct kvm_book3e_206_tlb_entry *stlbe)
593{ 391{
594 pfn_t pfn = ref->pfn; 392 pfn_t pfn = ref->pfn;
393 u32 pr = vcpu->arch.shared->msr & MSR_PR;
595 394
596 BUG_ON(!(ref->flags & E500_TLB_VALID)); 395 BUG_ON(!(ref->flags & E500_TLB_VALID));
597 396
598 /* Force TS=1 IPROT=0 for all guest mappings. */ 397 /* Force IPROT=0 for all guest mappings. */
599 stlbe->mas1 = MAS1_TSIZE(tsize) | MAS1_TS | MAS1_VALID; 398 stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
600 stlbe->mas2 = (gvaddr & MAS2_EPN) 399 stlbe->mas2 = (gvaddr & MAS2_EPN) |
601 | e500_shadow_mas2_attrib(gtlbe->mas2, 400 e500_shadow_mas2_attrib(gtlbe->mas2, pr);
602 vcpu_e500->vcpu.arch.shared->msr & MSR_PR); 401 stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
603 stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) 402 e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
604 | e500_shadow_mas3_attrib(gtlbe->mas7_3, 403
605 vcpu_e500->vcpu.arch.shared->msr & MSR_PR); 404#ifdef CONFIG_KVM_BOOKE_HV
405 stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
406#endif
606} 407}
607 408
608static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500, 409static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
@@ -736,7 +537,8 @@ static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
736 kvmppc_e500_ref_release(ref); 537 kvmppc_e500_ref_release(ref);
737 kvmppc_e500_ref_setup(ref, gtlbe, pfn); 538 kvmppc_e500_ref_setup(ref, gtlbe, pfn);
738 539
739 kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, ref, gvaddr, stlbe); 540 kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
541 ref, gvaddr, stlbe);
740} 542}
741 543
742/* XXX only map the one-one case, for now use TLB0 */ 544/* XXX only map the one-one case, for now use TLB0 */
@@ -760,7 +562,7 @@ static void kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
760/* XXX for both one-one and one-to-many , for now use TLB1 */ 562/* XXX for both one-one and one-to-many , for now use TLB1 */
761static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500, 563static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
762 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe, 564 u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
763 struct kvm_book3e_206_tlb_entry *stlbe) 565 struct kvm_book3e_206_tlb_entry *stlbe, int esel)
764{ 566{
765 struct tlbe_ref *ref; 567 struct tlbe_ref *ref;
766 unsigned int victim; 568 unsigned int victim;
@@ -773,15 +575,74 @@ static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
773 ref = &vcpu_e500->tlb_refs[1][victim]; 575 ref = &vcpu_e500->tlb_refs[1][victim];
774 kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref); 576 kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe, ref);
775 577
578 vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << victim;
579 vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
580 if (vcpu_e500->h2g_tlb1_rmap[victim]) {
581 unsigned int idx = vcpu_e500->h2g_tlb1_rmap[victim];
582 vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << victim);
583 }
584 vcpu_e500->h2g_tlb1_rmap[victim] = esel;
585
776 return victim; 586 return victim;
777} 587}
778 588
779void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr) 589static void kvmppc_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500)
590{
591 int size = vcpu_e500->gtlb_params[1].entries;
592 unsigned int offset;
593 gva_t eaddr;
594 int i;
595
596 vcpu_e500->tlb1_min_eaddr = ~0UL;
597 vcpu_e500->tlb1_max_eaddr = 0;
598 offset = vcpu_e500->gtlb_offset[1];
599
600 for (i = 0; i < size; i++) {
601 struct kvm_book3e_206_tlb_entry *tlbe =
602 &vcpu_e500->gtlb_arch[offset + i];
603
604 if (!get_tlb_v(tlbe))
605 continue;
606
607 eaddr = get_tlb_eaddr(tlbe);
608 vcpu_e500->tlb1_min_eaddr =
609 min(vcpu_e500->tlb1_min_eaddr, eaddr);
610
611 eaddr = get_tlb_end(tlbe);
612 vcpu_e500->tlb1_max_eaddr =
613 max(vcpu_e500->tlb1_max_eaddr, eaddr);
614 }
615}
616
617static int kvmppc_need_recalc_tlb1map_range(struct kvmppc_vcpu_e500 *vcpu_e500,
618 struct kvm_book3e_206_tlb_entry *gtlbe)
780{ 619{
620 unsigned long start, end, size;
621
622 size = get_tlb_bytes(gtlbe);
623 start = get_tlb_eaddr(gtlbe) & ~(size - 1);
624 end = start + size - 1;
625
626 return vcpu_e500->tlb1_min_eaddr == start ||
627 vcpu_e500->tlb1_max_eaddr == end;
628}
629
630/* This function is supposed to be called for a adding a new valid tlb entry */
631static void kvmppc_set_tlb1map_range(struct kvm_vcpu *vcpu,
632 struct kvm_book3e_206_tlb_entry *gtlbe)
633{
634 unsigned long start, end, size;
781 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 635 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
782 636
783 /* Recalc shadow pid since MSR changes */ 637 if (!get_tlb_v(gtlbe))
784 kvmppc_e500_recalc_shadow_pid(vcpu_e500); 638 return;
639
640 size = get_tlb_bytes(gtlbe);
641 start = get_tlb_eaddr(gtlbe) & ~(size - 1);
642 end = start + size - 1;
643
644 vcpu_e500->tlb1_min_eaddr = min(vcpu_e500->tlb1_min_eaddr, start);
645 vcpu_e500->tlb1_max_eaddr = max(vcpu_e500->tlb1_max_eaddr, end);
785} 646}
786 647
787static inline int kvmppc_e500_gtlbe_invalidate( 648static inline int kvmppc_e500_gtlbe_invalidate(
@@ -794,6 +655,9 @@ static inline int kvmppc_e500_gtlbe_invalidate(
794 if (unlikely(get_tlb_iprot(gtlbe))) 655 if (unlikely(get_tlb_iprot(gtlbe)))
795 return -1; 656 return -1;
796 657
658 if (tlbsel == 1 && kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
659 kvmppc_recalc_tlb1map_range(vcpu_e500);
660
797 gtlbe->mas1 = 0; 661 gtlbe->mas1 = 0;
798 662
799 return 0; 663 return 0;
@@ -811,7 +675,7 @@ int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
811 kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel); 675 kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);
812 676
813 /* Invalidate all vcpu id mappings */ 677 /* Invalidate all vcpu id mappings */
814 kvmppc_e500_id_table_reset_all(vcpu_e500); 678 kvmppc_e500_tlbil_all(vcpu_e500);
815 679
816 return EMULATE_DONE; 680 return EMULATE_DONE;
817} 681}
@@ -844,7 +708,59 @@ int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
844 } 708 }
845 709
846 /* Invalidate all vcpu id mappings */ 710 /* Invalidate all vcpu id mappings */
847 kvmppc_e500_id_table_reset_all(vcpu_e500); 711 kvmppc_e500_tlbil_all(vcpu_e500);
712
713 return EMULATE_DONE;
714}
715
716static void tlbilx_all(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
717 int pid, int rt)
718{
719 struct kvm_book3e_206_tlb_entry *tlbe;
720 int tid, esel;
721
722 /* invalidate all entries */
723 for (esel = 0; esel < vcpu_e500->gtlb_params[tlbsel].entries; esel++) {
724 tlbe = get_entry(vcpu_e500, tlbsel, esel);
725 tid = get_tlb_tid(tlbe);
726 if (rt == 0 || tid == pid) {
727 inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
728 kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
729 }
730 }
731}
732
733static void tlbilx_one(struct kvmppc_vcpu_e500 *vcpu_e500, int pid,
734 int ra, int rb)
735{
736 int tlbsel, esel;
737 gva_t ea;
738
739 ea = kvmppc_get_gpr(&vcpu_e500->vcpu, rb);
740 if (ra)
741 ea += kvmppc_get_gpr(&vcpu_e500->vcpu, ra);
742
743 for (tlbsel = 0; tlbsel < 2; tlbsel++) {
744 esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, -1);
745 if (esel >= 0) {
746 inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
747 kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
748 break;
749 }
750 }
751}
752
753int kvmppc_e500_emul_tlbilx(struct kvm_vcpu *vcpu, int rt, int ra, int rb)
754{
755 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
756 int pid = get_cur_spid(vcpu);
757
758 if (rt == 0 || rt == 1) {
759 tlbilx_all(vcpu_e500, 0, pid, rt);
760 tlbilx_all(vcpu_e500, 1, pid, rt);
761 } else if (rt == 3) {
762 tlbilx_one(vcpu_e500, pid, ra, rb);
763 }
848 764
849 return EMULATE_DONE; 765 return EMULATE_DONE;
850} 766}
@@ -929,9 +845,7 @@ static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
929 int stid; 845 int stid;
930 846
931 preempt_disable(); 847 preempt_disable();
932 stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe), 848 stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);
933 get_tlb_tid(gtlbe),
934 get_cur_pr(&vcpu_e500->vcpu), 0);
935 849
936 stlbe->mas1 |= MAS1_TID(stid); 850 stlbe->mas1 |= MAS1_TID(stid);
937 write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe); 851 write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
@@ -941,16 +855,21 @@ static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
941int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu) 855int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
942{ 856{
943 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 857 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
944 struct kvm_book3e_206_tlb_entry *gtlbe; 858 struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
945 int tlbsel, esel; 859 int tlbsel, esel, stlbsel, sesel;
860 int recal = 0;
946 861
947 tlbsel = get_tlb_tlbsel(vcpu); 862 tlbsel = get_tlb_tlbsel(vcpu);
948 esel = get_tlb_esel(vcpu, tlbsel); 863 esel = get_tlb_esel(vcpu, tlbsel);
949 864
950 gtlbe = get_entry(vcpu_e500, tlbsel, esel); 865 gtlbe = get_entry(vcpu_e500, tlbsel, esel);
951 866
952 if (get_tlb_v(gtlbe)) 867 if (get_tlb_v(gtlbe)) {
953 inval_gtlbe_on_host(vcpu_e500, tlbsel, esel); 868 inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);
869 if ((tlbsel == 1) &&
870 kvmppc_need_recalc_tlb1map_range(vcpu_e500, gtlbe))
871 recal = 1;
872 }
954 873
955 gtlbe->mas1 = vcpu->arch.shared->mas1; 874 gtlbe->mas1 = vcpu->arch.shared->mas1;
956 gtlbe->mas2 = vcpu->arch.shared->mas2; 875 gtlbe->mas2 = vcpu->arch.shared->mas2;
@@ -959,10 +878,20 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
959 trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1, 878 trace_kvm_booke206_gtlb_write(vcpu->arch.shared->mas0, gtlbe->mas1,
960 gtlbe->mas2, gtlbe->mas7_3); 879 gtlbe->mas2, gtlbe->mas7_3);
961 880
881 if (tlbsel == 1) {
882 /*
883 * If a valid tlb1 entry is overwritten then recalculate the
884 * min/max TLB1 map address range otherwise no need to look
885 * in tlb1 array.
886 */
887 if (recal)
888 kvmppc_recalc_tlb1map_range(vcpu_e500);
889 else
890 kvmppc_set_tlb1map_range(vcpu, gtlbe);
891 }
892
962 /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */ 893 /* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
963 if (tlbe_is_host_safe(vcpu, gtlbe)) { 894 if (tlbe_is_host_safe(vcpu, gtlbe)) {
964 struct kvm_book3e_206_tlb_entry stlbe;
965 int stlbsel, sesel;
966 u64 eaddr; 895 u64 eaddr;
967 u64 raddr; 896 u64 raddr;
968 897
@@ -989,7 +918,7 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
989 * are mapped on the fly. */ 918 * are mapped on the fly. */
990 stlbsel = 1; 919 stlbsel = 1;
991 sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, 920 sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
992 raddr >> PAGE_SHIFT, gtlbe, &stlbe); 921 raddr >> PAGE_SHIFT, gtlbe, &stlbe, esel);
993 break; 922 break;
994 923
995 default: 924 default:
@@ -1003,6 +932,48 @@ int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
1003 return EMULATE_DONE; 932 return EMULATE_DONE;
1004} 933}
1005 934
935static int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
936 gva_t eaddr, unsigned int pid, int as)
937{
938 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
939 int esel, tlbsel;
940
941 for (tlbsel = 0; tlbsel < 2; tlbsel++) {
942 esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
943 if (esel >= 0)
944 return index_of(tlbsel, esel);
945 }
946
947 return -1;
948}
949
950/* 'linear_address' is actually an encoding of AS|PID|EADDR . */
951int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
952 struct kvm_translation *tr)
953{
954 int index;
955 gva_t eaddr;
956 u8 pid;
957 u8 as;
958
959 eaddr = tr->linear_address;
960 pid = (tr->linear_address >> 32) & 0xff;
961 as = (tr->linear_address >> 40) & 0x1;
962
963 index = kvmppc_e500_tlb_search(vcpu, eaddr, pid, as);
964 if (index < 0) {
965 tr->valid = 0;
966 return 0;
967 }
968
969 tr->physical_address = kvmppc_mmu_xlate(vcpu, index, eaddr);
970 /* XXX what does "writeable" and "usermode" even mean? */
971 tr->valid = 1;
972
973 return 0;
974}
975
976
1006int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr) 977int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
1007{ 978{
1008 unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS); 979 unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);
@@ -1066,7 +1037,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
1066 sesel = 0; /* unused */ 1037 sesel = 0; /* unused */
1067 priv = &vcpu_e500->gtlb_priv[tlbsel][esel]; 1038 priv = &vcpu_e500->gtlb_priv[tlbsel][esel];
1068 1039
1069 kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K, 1040 kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
1070 &priv->ref, eaddr, &stlbe); 1041 &priv->ref, eaddr, &stlbe);
1071 break; 1042 break;
1072 1043
@@ -1075,7 +1046,7 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
1075 1046
1076 stlbsel = 1; 1047 stlbsel = 1;
1077 sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, 1048 sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
1078 gtlbe, &stlbe); 1049 gtlbe, &stlbe, esel);
1079 break; 1050 break;
1080 } 1051 }
1081 1052
@@ -1087,52 +1058,13 @@ void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
1087 write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel); 1058 write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
1088} 1059}
1089 1060
1090int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
1091 gva_t eaddr, unsigned int pid, int as)
1092{
1093 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
1094 int esel, tlbsel;
1095
1096 for (tlbsel = 0; tlbsel < 2; tlbsel++) {
1097 esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
1098 if (esel >= 0)
1099 return index_of(tlbsel, esel);
1100 }
1101
1102 return -1;
1103}
1104
1105void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
1106{
1107 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
1108
1109 if (vcpu->arch.pid != pid) {
1110 vcpu_e500->pid[0] = vcpu->arch.pid = pid;
1111 kvmppc_e500_recalc_shadow_pid(vcpu_e500);
1112 }
1113}
1114
1115void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
1116{
1117 struct kvm_book3e_206_tlb_entry *tlbe;
1118
1119 /* Insert large initial mapping for guest. */
1120 tlbe = get_entry(vcpu_e500, 1, 0);
1121 tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
1122 tlbe->mas2 = 0;
1123 tlbe->mas7_3 = E500_TLB_SUPER_PERM_MASK;
1124
1125 /* 4K map for serial output. Used by kernel wrapper. */
1126 tlbe = get_entry(vcpu_e500, 1, 1);
1127 tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
1128 tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
1129 tlbe->mas7_3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
1130}
1131
1132static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500) 1061static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
1133{ 1062{
1134 int i; 1063 int i;
1135 1064
1065 clear_tlb1_bitmap(vcpu_e500);
1066 kfree(vcpu_e500->g2h_tlb1_map);
1067
1136 clear_tlb_refs(vcpu_e500); 1068 clear_tlb_refs(vcpu_e500);
1137 kfree(vcpu_e500->gtlb_priv[0]); 1069 kfree(vcpu_e500->gtlb_priv[0]);
1138 kfree(vcpu_e500->gtlb_priv[1]); 1070 kfree(vcpu_e500->gtlb_priv[1]);
@@ -1155,6 +1087,36 @@ static void free_gtlb(struct kvmppc_vcpu_e500 *vcpu_e500)
1155 vcpu_e500->gtlb_arch = NULL; 1087 vcpu_e500->gtlb_arch = NULL;
1156} 1088}
1157 1089
1090void kvmppc_get_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1091{
1092 sregs->u.e.mas0 = vcpu->arch.shared->mas0;
1093 sregs->u.e.mas1 = vcpu->arch.shared->mas1;
1094 sregs->u.e.mas2 = vcpu->arch.shared->mas2;
1095 sregs->u.e.mas7_3 = vcpu->arch.shared->mas7_3;
1096 sregs->u.e.mas4 = vcpu->arch.shared->mas4;
1097 sregs->u.e.mas6 = vcpu->arch.shared->mas6;
1098
1099 sregs->u.e.mmucfg = vcpu->arch.mmucfg;
1100 sregs->u.e.tlbcfg[0] = vcpu->arch.tlbcfg[0];
1101 sregs->u.e.tlbcfg[1] = vcpu->arch.tlbcfg[1];
1102 sregs->u.e.tlbcfg[2] = 0;
1103 sregs->u.e.tlbcfg[3] = 0;
1104}
1105
1106int kvmppc_set_sregs_e500_tlb(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1107{
1108 if (sregs->u.e.features & KVM_SREGS_E_ARCH206_MMU) {
1109 vcpu->arch.shared->mas0 = sregs->u.e.mas0;
1110 vcpu->arch.shared->mas1 = sregs->u.e.mas1;
1111 vcpu->arch.shared->mas2 = sregs->u.e.mas2;
1112 vcpu->arch.shared->mas7_3 = sregs->u.e.mas7_3;
1113 vcpu->arch.shared->mas4 = sregs->u.e.mas4;
1114 vcpu->arch.shared->mas6 = sregs->u.e.mas6;
1115 }
1116
1117 return 0;
1118}
1119
1158int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu, 1120int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
1159 struct kvm_config_tlb *cfg) 1121 struct kvm_config_tlb *cfg)
1160{ 1122{
@@ -1163,6 +1125,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
1163 char *virt; 1125 char *virt;
1164 struct page **pages; 1126 struct page **pages;
1165 struct tlbe_priv *privs[2] = {}; 1127 struct tlbe_priv *privs[2] = {};
1128 u64 *g2h_bitmap = NULL;
1166 size_t array_len; 1129 size_t array_len;
1167 u32 sets; 1130 u32 sets;
1168 int num_pages, ret, i; 1131 int num_pages, ret, i;
@@ -1224,10 +1187,16 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
1224 if (!privs[0] || !privs[1]) 1187 if (!privs[0] || !privs[1])
1225 goto err_put_page; 1188 goto err_put_page;
1226 1189
1190 g2h_bitmap = kzalloc(sizeof(u64) * params.tlb_sizes[1],
1191 GFP_KERNEL);
1192 if (!g2h_bitmap)
1193 goto err_put_page;
1194
1227 free_gtlb(vcpu_e500); 1195 free_gtlb(vcpu_e500);
1228 1196
1229 vcpu_e500->gtlb_priv[0] = privs[0]; 1197 vcpu_e500->gtlb_priv[0] = privs[0];
1230 vcpu_e500->gtlb_priv[1] = privs[1]; 1198 vcpu_e500->gtlb_priv[1] = privs[1];
1199 vcpu_e500->g2h_tlb1_map = g2h_bitmap;
1231 1200
1232 vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *) 1201 vcpu_e500->gtlb_arch = (struct kvm_book3e_206_tlb_entry *)
1233 (virt + (cfg->array & (PAGE_SIZE - 1))); 1202 (virt + (cfg->array & (PAGE_SIZE - 1)));
@@ -1238,14 +1207,16 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
1238 vcpu_e500->gtlb_offset[0] = 0; 1207 vcpu_e500->gtlb_offset[0] = 0;
1239 vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0]; 1208 vcpu_e500->gtlb_offset[1] = params.tlb_sizes[0];
1240 1209
1241 vcpu_e500->tlb0cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); 1210 vcpu->arch.mmucfg = mfspr(SPRN_MMUCFG) & ~MMUCFG_LPIDSIZE;
1211
1212 vcpu->arch.tlbcfg[0] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
1242 if (params.tlb_sizes[0] <= 2048) 1213 if (params.tlb_sizes[0] <= 2048)
1243 vcpu_e500->tlb0cfg |= params.tlb_sizes[0]; 1214 vcpu->arch.tlbcfg[0] |= params.tlb_sizes[0];
1244 vcpu_e500->tlb0cfg |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT; 1215 vcpu->arch.tlbcfg[0] |= params.tlb_ways[0] << TLBnCFG_ASSOC_SHIFT;
1245 1216
1246 vcpu_e500->tlb1cfg &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); 1217 vcpu->arch.tlbcfg[1] &= ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
1247 vcpu_e500->tlb1cfg |= params.tlb_sizes[1]; 1218 vcpu->arch.tlbcfg[1] |= params.tlb_sizes[1];
1248 vcpu_e500->tlb1cfg |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT; 1219 vcpu->arch.tlbcfg[1] |= params.tlb_ways[1] << TLBnCFG_ASSOC_SHIFT;
1249 1220
1250 vcpu_e500->shared_tlb_pages = pages; 1221 vcpu_e500->shared_tlb_pages = pages;
1251 vcpu_e500->num_shared_tlb_pages = num_pages; 1222 vcpu_e500->num_shared_tlb_pages = num_pages;
@@ -1256,6 +1227,7 @@ int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
1256 vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1]; 1227 vcpu_e500->gtlb_params[1].ways = params.tlb_sizes[1];
1257 vcpu_e500->gtlb_params[1].sets = 1; 1228 vcpu_e500->gtlb_params[1].sets = 1;
1258 1229
1230 kvmppc_recalc_tlb1map_range(vcpu_e500);
1259 return 0; 1231 return 0;
1260 1232
1261err_put_page: 1233err_put_page:
@@ -1274,13 +1246,14 @@ int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
1274 struct kvm_dirty_tlb *dirty) 1246 struct kvm_dirty_tlb *dirty)
1275{ 1247{
1276 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); 1248 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
1277 1249 kvmppc_recalc_tlb1map_range(vcpu_e500);
1278 clear_tlb_refs(vcpu_e500); 1250 clear_tlb_refs(vcpu_e500);
1279 return 0; 1251 return 0;
1280} 1252}
1281 1253
1282int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500) 1254int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
1283{ 1255{
1256 struct kvm_vcpu *vcpu = &vcpu_e500->vcpu;
1284 int entry_size = sizeof(struct kvm_book3e_206_tlb_entry); 1257 int entry_size = sizeof(struct kvm_book3e_206_tlb_entry);
1285 int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE; 1258 int entries = KVM_E500_TLB0_SIZE + KVM_E500_TLB1_SIZE;
1286 1259
@@ -1357,22 +1330,32 @@ int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
1357 if (!vcpu_e500->gtlb_priv[1]) 1330 if (!vcpu_e500->gtlb_priv[1])
1358 goto err; 1331 goto err;
1359 1332
1360 if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL) 1333 vcpu_e500->g2h_tlb1_map = kzalloc(sizeof(unsigned int) *
1334 vcpu_e500->gtlb_params[1].entries,
1335 GFP_KERNEL);
1336 if (!vcpu_e500->g2h_tlb1_map)
1337 goto err;
1338
1339 vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
1340 host_tlb_params[1].entries,
1341 GFP_KERNEL);
1342 if (!vcpu_e500->h2g_tlb1_rmap)
1361 goto err; 1343 goto err;
1362 1344
1363 /* Init TLB configuration register */ 1345 /* Init TLB configuration register */
1364 vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & 1346 vcpu->arch.tlbcfg[0] = mfspr(SPRN_TLB0CFG) &
1365 ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); 1347 ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
1366 vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[0].entries; 1348 vcpu->arch.tlbcfg[0] |= vcpu_e500->gtlb_params[0].entries;
1367 vcpu_e500->tlb0cfg |= 1349 vcpu->arch.tlbcfg[0] |=
1368 vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT; 1350 vcpu_e500->gtlb_params[0].ways << TLBnCFG_ASSOC_SHIFT;
1369 1351
1370 vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & 1352 vcpu->arch.tlbcfg[1] = mfspr(SPRN_TLB1CFG) &
1371 ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC); 1353 ~(TLBnCFG_N_ENTRY | TLBnCFG_ASSOC);
1372 vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_params[1].entries; 1354 vcpu->arch.tlbcfg[1] |= vcpu_e500->gtlb_params[1].entries;
1373 vcpu_e500->tlb0cfg |= 1355 vcpu->arch.tlbcfg[1] |=
1374 vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT; 1356 vcpu_e500->gtlb_params[1].ways << TLBnCFG_ASSOC_SHIFT;
1375 1357
1358 kvmppc_recalc_tlb1map_range(vcpu_e500);
1376 return 0; 1359 return 0;
1377 1360
1378err: 1361err:
@@ -1385,8 +1368,7 @@ err:
1385void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500) 1368void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
1386{ 1369{
1387 free_gtlb(vcpu_e500); 1370 free_gtlb(vcpu_e500);
1388 kvmppc_e500_id_table_free(vcpu_e500); 1371 kfree(vcpu_e500->h2g_tlb1_rmap);
1389
1390 kfree(vcpu_e500->tlb_refs[0]); 1372 kfree(vcpu_e500->tlb_refs[0]);
1391 kfree(vcpu_e500->tlb_refs[1]); 1373 kfree(vcpu_e500->tlb_refs[1]);
1392} 1374}
diff --git a/arch/powerpc/kvm/e500_tlb.h b/arch/powerpc/kvm/e500_tlb.h
deleted file mode 100644
index 5c6d2d7bf058..000000000000
--- a/arch/powerpc/kvm/e500_tlb.h
+++ /dev/null
@@ -1,174 +0,0 @@
1/*
2 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
3 *
4 * Author: Yu Liu, yu.liu@freescale.com
5 *
6 * Description:
7 * This file is based on arch/powerpc/kvm/44x_tlb.h,
8 * by Hollis Blanchard <hollisb@us.ibm.com>.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License, version 2, as
12 * published by the Free Software Foundation.
13 */
14
15#ifndef __KVM_E500_TLB_H__
16#define __KVM_E500_TLB_H__
17
18#include <linux/kvm_host.h>
19#include <asm/mmu-book3e.h>
20#include <asm/tlb.h>
21#include <asm/kvm_e500.h>
22
23/* This geometry is the legacy default -- can be overridden by userspace */
24#define KVM_E500_TLB0_WAY_SIZE 128
25#define KVM_E500_TLB0_WAY_NUM 2
26
27#define KVM_E500_TLB0_SIZE (KVM_E500_TLB0_WAY_SIZE * KVM_E500_TLB0_WAY_NUM)
28#define KVM_E500_TLB1_SIZE 16
29
30#define index_of(tlbsel, esel) (((tlbsel) << 16) | ((esel) & 0xFFFF))
31#define tlbsel_of(index) ((index) >> 16)
32#define esel_of(index) ((index) & 0xFFFF)
33
34#define E500_TLB_USER_PERM_MASK (MAS3_UX|MAS3_UR|MAS3_UW)
35#define E500_TLB_SUPER_PERM_MASK (MAS3_SX|MAS3_SR|MAS3_SW)
36#define MAS2_ATTRIB_MASK \
37 (MAS2_X0 | MAS2_X1)
38#define MAS3_ATTRIB_MASK \
39 (MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3 \
40 | E500_TLB_USER_PERM_MASK | E500_TLB_SUPER_PERM_MASK)
41
42extern void kvmppc_dump_tlbs(struct kvm_vcpu *);
43extern int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *, ulong);
44extern int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *);
45extern int kvmppc_e500_emul_tlbre(struct kvm_vcpu *);
46extern int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *, int, int);
47extern int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *, int);
48extern int kvmppc_e500_tlb_search(struct kvm_vcpu *, gva_t, unsigned int, int);
49extern void kvmppc_e500_tlb_put(struct kvm_vcpu *);
50extern void kvmppc_e500_tlb_load(struct kvm_vcpu *, int);
51extern int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *);
52extern void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *);
53extern void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *);
54extern void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *);
55
56/* TLB helper functions */
57static inline unsigned int
58get_tlb_size(const struct kvm_book3e_206_tlb_entry *tlbe)
59{
60 return (tlbe->mas1 >> 7) & 0x1f;
61}
62
63static inline gva_t get_tlb_eaddr(const struct kvm_book3e_206_tlb_entry *tlbe)
64{
65 return tlbe->mas2 & 0xfffff000;
66}
67
68static inline u64 get_tlb_bytes(const struct kvm_book3e_206_tlb_entry *tlbe)
69{
70 unsigned int pgsize = get_tlb_size(tlbe);
71 return 1ULL << 10 << pgsize;
72}
73
74static inline gva_t get_tlb_end(const struct kvm_book3e_206_tlb_entry *tlbe)
75{
76 u64 bytes = get_tlb_bytes(tlbe);
77 return get_tlb_eaddr(tlbe) + bytes - 1;
78}
79
80static inline u64 get_tlb_raddr(const struct kvm_book3e_206_tlb_entry *tlbe)
81{
82 return tlbe->mas7_3 & ~0xfffULL;
83}
84
85static inline unsigned int
86get_tlb_tid(const struct kvm_book3e_206_tlb_entry *tlbe)
87{
88 return (tlbe->mas1 >> 16) & 0xff;
89}
90
91static inline unsigned int
92get_tlb_ts(const struct kvm_book3e_206_tlb_entry *tlbe)
93{
94 return (tlbe->mas1 >> 12) & 0x1;
95}
96
97static inline unsigned int
98get_tlb_v(const struct kvm_book3e_206_tlb_entry *tlbe)
99{
100 return (tlbe->mas1 >> 31) & 0x1;
101}
102
103static inline unsigned int
104get_tlb_iprot(const struct kvm_book3e_206_tlb_entry *tlbe)
105{
106 return (tlbe->mas1 >> 30) & 0x1;
107}
108
109static inline unsigned int get_cur_pid(struct kvm_vcpu *vcpu)
110{
111 return vcpu->arch.pid & 0xff;
112}
113
114static inline unsigned int get_cur_as(struct kvm_vcpu *vcpu)
115{
116 return !!(vcpu->arch.shared->msr & (MSR_IS | MSR_DS));
117}
118
119static inline unsigned int get_cur_pr(struct kvm_vcpu *vcpu)
120{
121 return !!(vcpu->arch.shared->msr & MSR_PR);
122}
123
124static inline unsigned int get_cur_spid(const struct kvm_vcpu *vcpu)
125{
126 return (vcpu->arch.shared->mas6 >> 16) & 0xff;
127}
128
129static inline unsigned int get_cur_sas(const struct kvm_vcpu *vcpu)
130{
131 return vcpu->arch.shared->mas6 & 0x1;
132}
133
134static inline unsigned int get_tlb_tlbsel(const struct kvm_vcpu *vcpu)
135{
136 /*
137 * Manual says that tlbsel has 2 bits wide.
138 * Since we only have two TLBs, only lower bit is used.
139 */
140 return (vcpu->arch.shared->mas0 >> 28) & 0x1;
141}
142
143static inline unsigned int get_tlb_nv_bit(const struct kvm_vcpu *vcpu)
144{
145 return vcpu->arch.shared->mas0 & 0xfff;
146}
147
148static inline unsigned int get_tlb_esel_bit(const struct kvm_vcpu *vcpu)
149{
150 return (vcpu->arch.shared->mas0 >> 16) & 0xfff;
151}
152
153static inline int tlbe_is_host_safe(const struct kvm_vcpu *vcpu,
154 const struct kvm_book3e_206_tlb_entry *tlbe)
155{
156 gpa_t gpa;
157
158 if (!get_tlb_v(tlbe))
159 return 0;
160
161 /* Does it match current guest AS? */
162 /* XXX what about IS != DS? */
163 if (get_tlb_ts(tlbe) != !!(vcpu->arch.shared->msr & MSR_IS))
164 return 0;
165
166 gpa = get_tlb_raddr(tlbe);
167 if (!gfn_to_memslot(vcpu->kvm, gpa >> PAGE_SHIFT))
168 /* Mapping is not for RAM. */
169 return 0;
170
171 return 1;
172}
173
174#endif /* __KVM_E500_TLB_H__ */
diff --git a/arch/powerpc/kvm/e500mc.c b/arch/powerpc/kvm/e500mc.c
new file mode 100644
index 000000000000..fe6c1de6b701
--- /dev/null
+++ b/arch/powerpc/kvm/e500mc.c
@@ -0,0 +1,342 @@
1/*
2 * Copyright (C) 2010 Freescale Semiconductor, Inc. All rights reserved.
3 *
4 * Author: Varun Sethi, <varun.sethi@freescale.com>
5 *
6 * Description:
7 * This file is derived from arch/powerpc/kvm/e500.c,
8 * by Yu Liu <yu.liu@freescale.com>.
9 *
10 * This program is free software; you can redistribute it and/or modify
11 * it under the terms of the GNU General Public License, version 2, as
12 * published by the Free Software Foundation.
13 */
14
15#include <linux/kvm_host.h>
16#include <linux/slab.h>
17#include <linux/err.h>
18#include <linux/export.h>
19
20#include <asm/reg.h>
21#include <asm/cputable.h>
22#include <asm/tlbflush.h>
23#include <asm/kvm_ppc.h>
24#include <asm/dbell.h>
25
26#include "booke.h"
27#include "e500.h"
28
29void kvmppc_set_pending_interrupt(struct kvm_vcpu *vcpu, enum int_class type)
30{
31 enum ppc_dbell dbell_type;
32 unsigned long tag;
33
34 switch (type) {
35 case INT_CLASS_NONCRIT:
36 dbell_type = PPC_G_DBELL;
37 break;
38 case INT_CLASS_CRIT:
39 dbell_type = PPC_G_DBELL_CRIT;
40 break;
41 case INT_CLASS_MC:
42 dbell_type = PPC_G_DBELL_MC;
43 break;
44 default:
45 WARN_ONCE(1, "%s: unknown int type %d\n", __func__, type);
46 return;
47 }
48
49
50 tag = PPC_DBELL_LPID(vcpu->kvm->arch.lpid) | vcpu->vcpu_id;
51 mb();
52 ppc_msgsnd(dbell_type, 0, tag);
53}
54
55/* gtlbe must not be mapped by more than one host tlb entry */
56void kvmppc_e500_tlbil_one(struct kvmppc_vcpu_e500 *vcpu_e500,
57 struct kvm_book3e_206_tlb_entry *gtlbe)
58{
59 unsigned int tid, ts;
60 u32 val, eaddr, lpid;
61 unsigned long flags;
62
63 ts = get_tlb_ts(gtlbe);
64 tid = get_tlb_tid(gtlbe);
65 lpid = vcpu_e500->vcpu.kvm->arch.lpid;
66
67 /* We search the host TLB to invalidate its shadow TLB entry */
68 val = (tid << 16) | ts;
69 eaddr = get_tlb_eaddr(gtlbe);
70
71 local_irq_save(flags);
72
73 mtspr(SPRN_MAS6, val);
74 mtspr(SPRN_MAS5, MAS5_SGS | lpid);
75
76 asm volatile("tlbsx 0, %[eaddr]\n" : : [eaddr] "r" (eaddr));
77 val = mfspr(SPRN_MAS1);
78 if (val & MAS1_VALID) {
79 mtspr(SPRN_MAS1, val & ~MAS1_VALID);
80 asm volatile("tlbwe");
81 }
82 mtspr(SPRN_MAS5, 0);
83 /* NOTE: tlbsx also updates mas8, so clear it for host tlbwe */
84 mtspr(SPRN_MAS8, 0);
85 isync();
86
87 local_irq_restore(flags);
88}
89
90void kvmppc_e500_tlbil_all(struct kvmppc_vcpu_e500 *vcpu_e500)
91{
92 unsigned long flags;
93
94 local_irq_save(flags);
95 mtspr(SPRN_MAS5, MAS5_SGS | vcpu_e500->vcpu.kvm->arch.lpid);
96 asm volatile("tlbilxlpid");
97 mtspr(SPRN_MAS5, 0);
98 local_irq_restore(flags);
99}
100
101void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
102{
103 vcpu->arch.pid = pid;
104}
105
106void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
107{
108}
109
110void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
111{
112 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
113
114 kvmppc_booke_vcpu_load(vcpu, cpu);
115
116 mtspr(SPRN_LPID, vcpu->kvm->arch.lpid);
117 mtspr(SPRN_EPCR, vcpu->arch.shadow_epcr);
118 mtspr(SPRN_GPIR, vcpu->vcpu_id);
119 mtspr(SPRN_MSRP, vcpu->arch.shadow_msrp);
120 mtspr(SPRN_EPLC, vcpu->arch.eplc);
121 mtspr(SPRN_EPSC, vcpu->arch.epsc);
122
123 mtspr(SPRN_GIVPR, vcpu->arch.ivpr);
124 mtspr(SPRN_GIVOR2, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]);
125 mtspr(SPRN_GIVOR8, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]);
126 mtspr(SPRN_GSPRG0, (unsigned long)vcpu->arch.shared->sprg0);
127 mtspr(SPRN_GSPRG1, (unsigned long)vcpu->arch.shared->sprg1);
128 mtspr(SPRN_GSPRG2, (unsigned long)vcpu->arch.shared->sprg2);
129 mtspr(SPRN_GSPRG3, (unsigned long)vcpu->arch.shared->sprg3);
130
131 mtspr(SPRN_GSRR0, vcpu->arch.shared->srr0);
132 mtspr(SPRN_GSRR1, vcpu->arch.shared->srr1);
133
134 mtspr(SPRN_GEPR, vcpu->arch.epr);
135 mtspr(SPRN_GDEAR, vcpu->arch.shared->dar);
136 mtspr(SPRN_GESR, vcpu->arch.shared->esr);
137
138 if (vcpu->arch.oldpir != mfspr(SPRN_PIR))
139 kvmppc_e500_tlbil_all(vcpu_e500);
140
141 kvmppc_load_guest_fp(vcpu);
142}
143
144void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
145{
146 vcpu->arch.eplc = mfspr(SPRN_EPLC);
147 vcpu->arch.epsc = mfspr(SPRN_EPSC);
148
149 vcpu->arch.shared->sprg0 = mfspr(SPRN_GSPRG0);
150 vcpu->arch.shared->sprg1 = mfspr(SPRN_GSPRG1);
151 vcpu->arch.shared->sprg2 = mfspr(SPRN_GSPRG2);
152 vcpu->arch.shared->sprg3 = mfspr(SPRN_GSPRG3);
153
154 vcpu->arch.shared->srr0 = mfspr(SPRN_GSRR0);
155 vcpu->arch.shared->srr1 = mfspr(SPRN_GSRR1);
156
157 vcpu->arch.epr = mfspr(SPRN_GEPR);
158 vcpu->arch.shared->dar = mfspr(SPRN_GDEAR);
159 vcpu->arch.shared->esr = mfspr(SPRN_GESR);
160
161 vcpu->arch.oldpir = mfspr(SPRN_PIR);
162
163 kvmppc_booke_vcpu_put(vcpu);
164}
165
166int kvmppc_core_check_processor_compat(void)
167{
168 int r;
169
170 if (strcmp(cur_cpu_spec->cpu_name, "e500mc") == 0)
171 r = 0;
172 else if (strcmp(cur_cpu_spec->cpu_name, "e5500") == 0)
173 r = 0;
174 else
175 r = -ENOTSUPP;
176
177 return r;
178}
179
180int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu)
181{
182 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
183
184 vcpu->arch.shadow_epcr = SPRN_EPCR_DSIGS | SPRN_EPCR_DGTMI | \
185 SPRN_EPCR_DUVD;
186 vcpu->arch.shadow_msrp = MSRP_UCLEP | MSRP_DEP | MSRP_PMMP;
187 vcpu->arch.eplc = EPC_EGS | (vcpu->kvm->arch.lpid << EPC_ELPID_SHIFT);
188 vcpu->arch.epsc = vcpu->arch.eplc;
189
190 vcpu->arch.pvr = mfspr(SPRN_PVR);
191 vcpu_e500->svr = mfspr(SPRN_SVR);
192
193 vcpu->arch.cpu_type = KVM_CPU_E500MC;
194
195 return 0;
196}
197
198void kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
199{
200 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
201
202 sregs->u.e.features |= KVM_SREGS_E_ARCH206_MMU | KVM_SREGS_E_PM |
203 KVM_SREGS_E_PC;
204 sregs->u.e.impl_id = KVM_SREGS_E_IMPL_FSL;
205
206 sregs->u.e.impl.fsl.features = 0;
207 sregs->u.e.impl.fsl.svr = vcpu_e500->svr;
208 sregs->u.e.impl.fsl.hid0 = vcpu_e500->hid0;
209 sregs->u.e.impl.fsl.mcar = vcpu_e500->mcar;
210
211 kvmppc_get_sregs_e500_tlb(vcpu, sregs);
212
213 sregs->u.e.ivor_high[3] =
214 vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR];
215 sregs->u.e.ivor_high[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL];
216 sregs->u.e.ivor_high[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT];
217
218 kvmppc_get_sregs_ivor(vcpu, sregs);
219}
220
221int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
222{
223 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
224 int ret;
225
226 if (sregs->u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
227 vcpu_e500->svr = sregs->u.e.impl.fsl.svr;
228 vcpu_e500->hid0 = sregs->u.e.impl.fsl.hid0;
229 vcpu_e500->mcar = sregs->u.e.impl.fsl.mcar;
230 }
231
232 ret = kvmppc_set_sregs_e500_tlb(vcpu, sregs);
233 if (ret < 0)
234 return ret;
235
236 if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
237 return 0;
238
239 if (sregs->u.e.features & KVM_SREGS_E_PM) {
240 vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] =
241 sregs->u.e.ivor_high[3];
242 }
243
244 if (sregs->u.e.features & KVM_SREGS_E_PC) {
245 vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL] =
246 sregs->u.e.ivor_high[4];
247 vcpu->arch.ivor[BOOKE_IRQPRIO_DBELL_CRIT] =
248 sregs->u.e.ivor_high[5];
249 }
250
251 return kvmppc_set_sregs_ivor(vcpu, sregs);
252}
253
254struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
255{
256 struct kvmppc_vcpu_e500 *vcpu_e500;
257 struct kvm_vcpu *vcpu;
258 int err;
259
260 vcpu_e500 = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
261 if (!vcpu_e500) {
262 err = -ENOMEM;
263 goto out;
264 }
265 vcpu = &vcpu_e500->vcpu;
266
267 /* Invalid PIR value -- this LPID dosn't have valid state on any cpu */
268 vcpu->arch.oldpir = 0xffffffff;
269
270 err = kvm_vcpu_init(vcpu, kvm, id);
271 if (err)
272 goto free_vcpu;
273
274 err = kvmppc_e500_tlb_init(vcpu_e500);
275 if (err)
276 goto uninit_vcpu;
277
278 vcpu->arch.shared = (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);
279 if (!vcpu->arch.shared)
280 goto uninit_tlb;
281
282 return vcpu;
283
284uninit_tlb:
285 kvmppc_e500_tlb_uninit(vcpu_e500);
286uninit_vcpu:
287 kvm_vcpu_uninit(vcpu);
288
289free_vcpu:
290 kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
291out:
292 return ERR_PTR(err);
293}
294
295void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
296{
297 struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
298
299 free_page((unsigned long)vcpu->arch.shared);
300 kvmppc_e500_tlb_uninit(vcpu_e500);
301 kvm_vcpu_uninit(vcpu);
302 kmem_cache_free(kvm_vcpu_cache, vcpu_e500);
303}
304
305int kvmppc_core_init_vm(struct kvm *kvm)
306{
307 int lpid;
308
309 lpid = kvmppc_alloc_lpid();
310 if (lpid < 0)
311 return lpid;
312
313 kvm->arch.lpid = lpid;
314 return 0;
315}
316
317void kvmppc_core_destroy_vm(struct kvm *kvm)
318{
319 kvmppc_free_lpid(kvm->arch.lpid);
320}
321
322static int __init kvmppc_e500mc_init(void)
323{
324 int r;
325
326 r = kvmppc_booke_init();
327 if (r)
328 return r;
329
330 kvmppc_init_lpid(64);
331 kvmppc_claim_lpid(0); /* host */
332
333 return kvm_init(NULL, sizeof(struct kvmppc_vcpu_e500), 0, THIS_MODULE);
334}
335
336static void __exit kvmppc_e500mc_exit(void)
337{
338 kvmppc_booke_exit();
339}
340
341module_init(kvmppc_e500mc_init);
342module_exit(kvmppc_e500mc_exit);
diff --git a/arch/powerpc/kvm/emulate.c b/arch/powerpc/kvm/emulate.c
index 968f40101883..f90e86dea7a2 100644
--- a/arch/powerpc/kvm/emulate.c
+++ b/arch/powerpc/kvm/emulate.c
@@ -23,6 +23,7 @@
23#include <linux/types.h> 23#include <linux/types.h>
24#include <linux/string.h> 24#include <linux/string.h>
25#include <linux/kvm_host.h> 25#include <linux/kvm_host.h>
26#include <linux/clockchips.h>
26 27
27#include <asm/reg.h> 28#include <asm/reg.h>
28#include <asm/time.h> 29#include <asm/time.h>
@@ -35,7 +36,9 @@
35#define OP_TRAP 3 36#define OP_TRAP 3
36#define OP_TRAP_64 2 37#define OP_TRAP_64 2
37 38
39#define OP_31_XOP_TRAP 4
38#define OP_31_XOP_LWZX 23 40#define OP_31_XOP_LWZX 23
41#define OP_31_XOP_TRAP_64 68
39#define OP_31_XOP_LBZX 87 42#define OP_31_XOP_LBZX 87
40#define OP_31_XOP_STWX 151 43#define OP_31_XOP_STWX 151
41#define OP_31_XOP_STBX 215 44#define OP_31_XOP_STBX 215
@@ -102,8 +105,12 @@ void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
102 */ 105 */
103 106
104 dec_time = vcpu->arch.dec; 107 dec_time = vcpu->arch.dec;
105 dec_time *= 1000; 108 /*
106 do_div(dec_time, tb_ticks_per_usec); 109 * Guest timebase ticks at the same frequency as host decrementer.
110 * So use the host decrementer calculations for decrementer emulation.
111 */
112 dec_time = dec_time << decrementer_clockevent.shift;
113 do_div(dec_time, decrementer_clockevent.mult);
107 dec_nsec = do_div(dec_time, NSEC_PER_SEC); 114 dec_nsec = do_div(dec_time, NSEC_PER_SEC);
108 hrtimer_start(&vcpu->arch.dec_timer, 115 hrtimer_start(&vcpu->arch.dec_timer,
109 ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL); 116 ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
@@ -141,14 +148,13 @@ u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb)
141int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) 148int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
142{ 149{
143 u32 inst = kvmppc_get_last_inst(vcpu); 150 u32 inst = kvmppc_get_last_inst(vcpu);
144 u32 ea; 151 int ra = get_ra(inst);
145 int ra; 152 int rs = get_rs(inst);
146 int rb; 153 int rt = get_rt(inst);
147 int rs; 154 int sprn = get_sprn(inst);
148 int rt;
149 int sprn;
150 enum emulation_result emulated = EMULATE_DONE; 155 enum emulation_result emulated = EMULATE_DONE;
151 int advance = 1; 156 int advance = 1;
157 ulong spr_val = 0;
152 158
153 /* this default type might be overwritten by subcategories */ 159 /* this default type might be overwritten by subcategories */
154 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); 160 kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);
@@ -170,173 +176,143 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
170 case 31: 176 case 31:
171 switch (get_xop(inst)) { 177 switch (get_xop(inst)) {
172 178
179 case OP_31_XOP_TRAP:
180#ifdef CONFIG_64BIT
181 case OP_31_XOP_TRAP_64:
182#endif
183#ifdef CONFIG_PPC_BOOK3S
184 kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
185#else
186 kvmppc_core_queue_program(vcpu,
187 vcpu->arch.shared->esr | ESR_PTR);
188#endif
189 advance = 0;
190 break;
173 case OP_31_XOP_LWZX: 191 case OP_31_XOP_LWZX:
174 rt = get_rt(inst);
175 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); 192 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
176 break; 193 break;
177 194
178 case OP_31_XOP_LBZX: 195 case OP_31_XOP_LBZX:
179 rt = get_rt(inst);
180 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); 196 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
181 break; 197 break;
182 198
183 case OP_31_XOP_LBZUX: 199 case OP_31_XOP_LBZUX:
184 rt = get_rt(inst);
185 ra = get_ra(inst);
186 rb = get_rb(inst);
187
188 ea = kvmppc_get_gpr(vcpu, rb);
189 if (ra)
190 ea += kvmppc_get_gpr(vcpu, ra);
191
192 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); 200 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
193 kvmppc_set_gpr(vcpu, ra, ea); 201 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
194 break; 202 break;
195 203
196 case OP_31_XOP_STWX: 204 case OP_31_XOP_STWX:
197 rs = get_rs(inst);
198 emulated = kvmppc_handle_store(run, vcpu, 205 emulated = kvmppc_handle_store(run, vcpu,
199 kvmppc_get_gpr(vcpu, rs), 206 kvmppc_get_gpr(vcpu, rs),
200 4, 1); 207 4, 1);
201 break; 208 break;
202 209
203 case OP_31_XOP_STBX: 210 case OP_31_XOP_STBX:
204 rs = get_rs(inst);
205 emulated = kvmppc_handle_store(run, vcpu, 211 emulated = kvmppc_handle_store(run, vcpu,
206 kvmppc_get_gpr(vcpu, rs), 212 kvmppc_get_gpr(vcpu, rs),
207 1, 1); 213 1, 1);
208 break; 214 break;
209 215
210 case OP_31_XOP_STBUX: 216 case OP_31_XOP_STBUX:
211 rs = get_rs(inst);
212 ra = get_ra(inst);
213 rb = get_rb(inst);
214
215 ea = kvmppc_get_gpr(vcpu, rb);
216 if (ra)
217 ea += kvmppc_get_gpr(vcpu, ra);
218
219 emulated = kvmppc_handle_store(run, vcpu, 217 emulated = kvmppc_handle_store(run, vcpu,
220 kvmppc_get_gpr(vcpu, rs), 218 kvmppc_get_gpr(vcpu, rs),
221 1, 1); 219 1, 1);
222 kvmppc_set_gpr(vcpu, rs, ea); 220 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
223 break; 221 break;
224 222
225 case OP_31_XOP_LHAX: 223 case OP_31_XOP_LHAX:
226 rt = get_rt(inst);
227 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); 224 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
228 break; 225 break;
229 226
230 case OP_31_XOP_LHZX: 227 case OP_31_XOP_LHZX:
231 rt = get_rt(inst);
232 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); 228 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
233 break; 229 break;
234 230
235 case OP_31_XOP_LHZUX: 231 case OP_31_XOP_LHZUX:
236 rt = get_rt(inst);
237 ra = get_ra(inst);
238 rb = get_rb(inst);
239
240 ea = kvmppc_get_gpr(vcpu, rb);
241 if (ra)
242 ea += kvmppc_get_gpr(vcpu, ra);
243
244 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); 232 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
245 kvmppc_set_gpr(vcpu, ra, ea); 233 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
246 break; 234 break;
247 235
248 case OP_31_XOP_MFSPR: 236 case OP_31_XOP_MFSPR:
249 sprn = get_sprn(inst);
250 rt = get_rt(inst);
251
252 switch (sprn) { 237 switch (sprn) {
253 case SPRN_SRR0: 238 case SPRN_SRR0:
254 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr0); 239 spr_val = vcpu->arch.shared->srr0;
255 break; 240 break;
256 case SPRN_SRR1: 241 case SPRN_SRR1:
257 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->srr1); 242 spr_val = vcpu->arch.shared->srr1;
258 break; 243 break;
259 case SPRN_PVR: 244 case SPRN_PVR:
260 kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break; 245 spr_val = vcpu->arch.pvr;
246 break;
261 case SPRN_PIR: 247 case SPRN_PIR:
262 kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break; 248 spr_val = vcpu->vcpu_id;
249 break;
263 case SPRN_MSSSR0: 250 case SPRN_MSSSR0:
264 kvmppc_set_gpr(vcpu, rt, 0); break; 251 spr_val = 0;
252 break;
265 253
266 /* Note: mftb and TBRL/TBWL are user-accessible, so 254 /* Note: mftb and TBRL/TBWL are user-accessible, so
267 * the guest can always access the real TB anyways. 255 * the guest can always access the real TB anyways.
268 * In fact, we probably will never see these traps. */ 256 * In fact, we probably will never see these traps. */
269 case SPRN_TBWL: 257 case SPRN_TBWL:
270 kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break; 258 spr_val = get_tb() >> 32;
259 break;
271 case SPRN_TBWU: 260 case SPRN_TBWU:
272 kvmppc_set_gpr(vcpu, rt, get_tb()); break; 261 spr_val = get_tb();
262 break;
273 263
274 case SPRN_SPRG0: 264 case SPRN_SPRG0:
275 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg0); 265 spr_val = vcpu->arch.shared->sprg0;
276 break; 266 break;
277 case SPRN_SPRG1: 267 case SPRN_SPRG1:
278 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg1); 268 spr_val = vcpu->arch.shared->sprg1;
279 break; 269 break;
280 case SPRN_SPRG2: 270 case SPRN_SPRG2:
281 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg2); 271 spr_val = vcpu->arch.shared->sprg2;
282 break; 272 break;
283 case SPRN_SPRG3: 273 case SPRN_SPRG3:
284 kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->sprg3); 274 spr_val = vcpu->arch.shared->sprg3;
285 break; 275 break;
286 /* Note: SPRG4-7 are user-readable, so we don't get 276 /* Note: SPRG4-7 are user-readable, so we don't get
287 * a trap. */ 277 * a trap. */
288 278
289 case SPRN_DEC: 279 case SPRN_DEC:
290 { 280 spr_val = kvmppc_get_dec(vcpu, get_tb());
291 kvmppc_set_gpr(vcpu, rt,
292 kvmppc_get_dec(vcpu, get_tb()));
293 break; 281 break;
294 }
295 default: 282 default:
296 emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); 283 emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
297 if (emulated == EMULATE_FAIL) { 284 &spr_val);
298 printk("mfspr: unknown spr %x\n", sprn); 285 if (unlikely(emulated == EMULATE_FAIL)) {
299 kvmppc_set_gpr(vcpu, rt, 0); 286 printk(KERN_INFO "mfspr: unknown spr "
287 "0x%x\n", sprn);
300 } 288 }
301 break; 289 break;
302 } 290 }
291 kvmppc_set_gpr(vcpu, rt, spr_val);
303 kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); 292 kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
304 break; 293 break;
305 294
306 case OP_31_XOP_STHX: 295 case OP_31_XOP_STHX:
307 rs = get_rs(inst);
308 ra = get_ra(inst);
309 rb = get_rb(inst);
310
311 emulated = kvmppc_handle_store(run, vcpu, 296 emulated = kvmppc_handle_store(run, vcpu,
312 kvmppc_get_gpr(vcpu, rs), 297 kvmppc_get_gpr(vcpu, rs),
313 2, 1); 298 2, 1);
314 break; 299 break;
315 300
316 case OP_31_XOP_STHUX: 301 case OP_31_XOP_STHUX:
317 rs = get_rs(inst);
318 ra = get_ra(inst);
319 rb = get_rb(inst);
320
321 ea = kvmppc_get_gpr(vcpu, rb);
322 if (ra)
323 ea += kvmppc_get_gpr(vcpu, ra);
324
325 emulated = kvmppc_handle_store(run, vcpu, 302 emulated = kvmppc_handle_store(run, vcpu,
326 kvmppc_get_gpr(vcpu, rs), 303 kvmppc_get_gpr(vcpu, rs),
327 2, 1); 304 2, 1);
328 kvmppc_set_gpr(vcpu, ra, ea); 305 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
329 break; 306 break;
330 307
331 case OP_31_XOP_MTSPR: 308 case OP_31_XOP_MTSPR:
332 sprn = get_sprn(inst); 309 spr_val = kvmppc_get_gpr(vcpu, rs);
333 rs = get_rs(inst);
334 switch (sprn) { 310 switch (sprn) {
335 case SPRN_SRR0: 311 case SPRN_SRR0:
336 vcpu->arch.shared->srr0 = kvmppc_get_gpr(vcpu, rs); 312 vcpu->arch.shared->srr0 = spr_val;
337 break; 313 break;
338 case SPRN_SRR1: 314 case SPRN_SRR1:
339 vcpu->arch.shared->srr1 = kvmppc_get_gpr(vcpu, rs); 315 vcpu->arch.shared->srr1 = spr_val;
340 break; 316 break;
341 317
342 /* XXX We need to context-switch the timebase for 318 /* XXX We need to context-switch the timebase for
@@ -347,27 +323,29 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
347 case SPRN_MSSSR0: break; 323 case SPRN_MSSSR0: break;
348 324
349 case SPRN_DEC: 325 case SPRN_DEC:
350 vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs); 326 vcpu->arch.dec = spr_val;
351 kvmppc_emulate_dec(vcpu); 327 kvmppc_emulate_dec(vcpu);
352 break; 328 break;
353 329
354 case SPRN_SPRG0: 330 case SPRN_SPRG0:
355 vcpu->arch.shared->sprg0 = kvmppc_get_gpr(vcpu, rs); 331 vcpu->arch.shared->sprg0 = spr_val;
356 break; 332 break;
357 case SPRN_SPRG1: 333 case SPRN_SPRG1:
358 vcpu->arch.shared->sprg1 = kvmppc_get_gpr(vcpu, rs); 334 vcpu->arch.shared->sprg1 = spr_val;
359 break; 335 break;
360 case SPRN_SPRG2: 336 case SPRN_SPRG2:
361 vcpu->arch.shared->sprg2 = kvmppc_get_gpr(vcpu, rs); 337 vcpu->arch.shared->sprg2 = spr_val;
362 break; 338 break;
363 case SPRN_SPRG3: 339 case SPRN_SPRG3:
364 vcpu->arch.shared->sprg3 = kvmppc_get_gpr(vcpu, rs); 340 vcpu->arch.shared->sprg3 = spr_val;
365 break; 341 break;
366 342
367 default: 343 default:
368 emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); 344 emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
345 spr_val);
369 if (emulated == EMULATE_FAIL) 346 if (emulated == EMULATE_FAIL)
370 printk("mtspr: unknown spr %x\n", sprn); 347 printk(KERN_INFO "mtspr: unknown spr "
348 "0x%x\n", sprn);
371 break; 349 break;
372 } 350 }
373 kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); 351 kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
@@ -382,7 +360,6 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
382 break; 360 break;
383 361
384 case OP_31_XOP_LWBRX: 362 case OP_31_XOP_LWBRX:
385 rt = get_rt(inst);
386 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); 363 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
387 break; 364 break;
388 365
@@ -390,25 +367,16 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
390 break; 367 break;
391 368
392 case OP_31_XOP_STWBRX: 369 case OP_31_XOP_STWBRX:
393 rs = get_rs(inst);
394 ra = get_ra(inst);
395 rb = get_rb(inst);
396
397 emulated = kvmppc_handle_store(run, vcpu, 370 emulated = kvmppc_handle_store(run, vcpu,
398 kvmppc_get_gpr(vcpu, rs), 371 kvmppc_get_gpr(vcpu, rs),
399 4, 0); 372 4, 0);
400 break; 373 break;
401 374
402 case OP_31_XOP_LHBRX: 375 case OP_31_XOP_LHBRX:
403 rt = get_rt(inst);
404 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); 376 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
405 break; 377 break;
406 378
407 case OP_31_XOP_STHBRX: 379 case OP_31_XOP_STHBRX:
408 rs = get_rs(inst);
409 ra = get_ra(inst);
410 rb = get_rb(inst);
411
412 emulated = kvmppc_handle_store(run, vcpu, 380 emulated = kvmppc_handle_store(run, vcpu,
413 kvmppc_get_gpr(vcpu, rs), 381 kvmppc_get_gpr(vcpu, rs),
414 2, 0); 382 2, 0);
@@ -421,99 +389,78 @@ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
421 break; 389 break;
422 390
423 case OP_LWZ: 391 case OP_LWZ:
424 rt = get_rt(inst);
425 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); 392 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
426 break; 393 break;
427 394
428 case OP_LWZU: 395 case OP_LWZU:
429 ra = get_ra(inst);
430 rt = get_rt(inst);
431 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); 396 emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
432 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 397 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
433 break; 398 break;
434 399
435 case OP_LBZ: 400 case OP_LBZ:
436 rt = get_rt(inst);
437 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); 401 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
438 break; 402 break;
439 403
440 case OP_LBZU: 404 case OP_LBZU:
441 ra = get_ra(inst);
442 rt = get_rt(inst);
443 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); 405 emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
444 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 406 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
445 break; 407 break;
446 408
447 case OP_STW: 409 case OP_STW:
448 rs = get_rs(inst);
449 emulated = kvmppc_handle_store(run, vcpu, 410 emulated = kvmppc_handle_store(run, vcpu,
450 kvmppc_get_gpr(vcpu, rs), 411 kvmppc_get_gpr(vcpu, rs),
451 4, 1); 412 4, 1);
452 break; 413 break;
453 414
454 case OP_STWU: 415 case OP_STWU:
455 ra = get_ra(inst);
456 rs = get_rs(inst);
457 emulated = kvmppc_handle_store(run, vcpu, 416 emulated = kvmppc_handle_store(run, vcpu,
458 kvmppc_get_gpr(vcpu, rs), 417 kvmppc_get_gpr(vcpu, rs),
459 4, 1); 418 4, 1);
460 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 419 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
461 break; 420 break;
462 421
463 case OP_STB: 422 case OP_STB:
464 rs = get_rs(inst);
465 emulated = kvmppc_handle_store(run, vcpu, 423 emulated = kvmppc_handle_store(run, vcpu,
466 kvmppc_get_gpr(vcpu, rs), 424 kvmppc_get_gpr(vcpu, rs),
467 1, 1); 425 1, 1);
468 break; 426 break;
469 427
470 case OP_STBU: 428 case OP_STBU:
471 ra = get_ra(inst);
472 rs = get_rs(inst);
473 emulated = kvmppc_handle_store(run, vcpu, 429 emulated = kvmppc_handle_store(run, vcpu,
474 kvmppc_get_gpr(vcpu, rs), 430 kvmppc_get_gpr(vcpu, rs),
475 1, 1); 431 1, 1);
476 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 432 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
477 break; 433 break;
478 434
479 case OP_LHZ: 435 case OP_LHZ:
480 rt = get_rt(inst);
481 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); 436 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
482 break; 437 break;
483 438
484 case OP_LHZU: 439 case OP_LHZU:
485 ra = get_ra(inst);
486 rt = get_rt(inst);
487 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); 440 emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
488 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 441 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
489 break; 442 break;
490 443
491 case OP_LHA: 444 case OP_LHA:
492 rt = get_rt(inst);
493 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); 445 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
494 break; 446 break;
495 447
496 case OP_LHAU: 448 case OP_LHAU:
497 ra = get_ra(inst);
498 rt = get_rt(inst);
499 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); 449 emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
500 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 450 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
501 break; 451 break;
502 452
503 case OP_STH: 453 case OP_STH:
504 rs = get_rs(inst);
505 emulated = kvmppc_handle_store(run, vcpu, 454 emulated = kvmppc_handle_store(run, vcpu,
506 kvmppc_get_gpr(vcpu, rs), 455 kvmppc_get_gpr(vcpu, rs),
507 2, 1); 456 2, 1);
508 break; 457 break;
509 458
510 case OP_STHU: 459 case OP_STHU:
511 ra = get_ra(inst);
512 rs = get_rs(inst);
513 emulated = kvmppc_handle_store(run, vcpu, 460 emulated = kvmppc_handle_store(run, vcpu,
514 kvmppc_get_gpr(vcpu, rs), 461 kvmppc_get_gpr(vcpu, rs),
515 2, 1); 462 2, 1);
516 kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); 463 kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
517 break; 464 break;
518 465
519 default: 466 default:
diff --git a/arch/powerpc/kvm/powerpc.c b/arch/powerpc/kvm/powerpc.c
index 00d7e345b3fe..1493c8de947b 100644
--- a/arch/powerpc/kvm/powerpc.c
+++ b/arch/powerpc/kvm/powerpc.c
@@ -43,6 +43,11 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
43 v->requests; 43 v->requests;
44} 44}
45 45
46int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
47{
48 return 1;
49}
50
46int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) 51int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
47{ 52{
48 int nr = kvmppc_get_gpr(vcpu, 11); 53 int nr = kvmppc_get_gpr(vcpu, 11);
@@ -74,7 +79,7 @@ int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
74 } 79 }
75 case HC_VENDOR_KVM | KVM_HC_FEATURES: 80 case HC_VENDOR_KVM | KVM_HC_FEATURES:
76 r = HC_EV_SUCCESS; 81 r = HC_EV_SUCCESS;
77#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500) 82#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
78 /* XXX Missing magic page on 44x */ 83 /* XXX Missing magic page on 44x */
79 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); 84 r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
80#endif 85#endif
@@ -109,6 +114,11 @@ int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
109 goto out; 114 goto out;
110#endif 115#endif
111 116
117#ifdef CONFIG_KVM_BOOKE_HV
118 if (!cpu_has_feature(CPU_FTR_EMB_HV))
119 goto out;
120#endif
121
112 r = true; 122 r = true;
113 123
114out: 124out:
@@ -225,7 +235,7 @@ int kvm_dev_ioctl_check_extension(long ext)
225 case KVM_CAP_PPC_PAIRED_SINGLES: 235 case KVM_CAP_PPC_PAIRED_SINGLES:
226 case KVM_CAP_PPC_OSI: 236 case KVM_CAP_PPC_OSI:
227 case KVM_CAP_PPC_GET_PVINFO: 237 case KVM_CAP_PPC_GET_PVINFO:
228#ifdef CONFIG_KVM_E500 238#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
229 case KVM_CAP_SW_TLB: 239 case KVM_CAP_SW_TLB:
230#endif 240#endif
231 r = 1; 241 r = 1;
@@ -234,10 +244,12 @@ int kvm_dev_ioctl_check_extension(long ext)
234 r = KVM_COALESCED_MMIO_PAGE_OFFSET; 244 r = KVM_COALESCED_MMIO_PAGE_OFFSET;
235 break; 245 break;
236#endif 246#endif
237#ifdef CONFIG_KVM_BOOK3S_64_HV 247#ifdef CONFIG_PPC_BOOK3S_64
238 case KVM_CAP_SPAPR_TCE: 248 case KVM_CAP_SPAPR_TCE:
239 r = 1; 249 r = 1;
240 break; 250 break;
251#endif /* CONFIG_PPC_BOOK3S_64 */
252#ifdef CONFIG_KVM_BOOK3S_64_HV
241 case KVM_CAP_PPC_SMT: 253 case KVM_CAP_PPC_SMT:
242 r = threads_per_core; 254 r = threads_per_core;
243 break; 255 break;
@@ -267,6 +279,11 @@ int kvm_dev_ioctl_check_extension(long ext)
267 case KVM_CAP_MAX_VCPUS: 279 case KVM_CAP_MAX_VCPUS:
268 r = KVM_MAX_VCPUS; 280 r = KVM_MAX_VCPUS;
269 break; 281 break;
282#ifdef CONFIG_PPC_BOOK3S_64
283 case KVM_CAP_PPC_GET_SMMU_INFO:
284 r = 1;
285 break;
286#endif
270 default: 287 default:
271 r = 0; 288 r = 0;
272 break; 289 break;
@@ -588,21 +605,6 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
588 return r; 605 return r;
589} 606}
590 607
591void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
592{
593 int me;
594 int cpu = vcpu->cpu;
595
596 me = get_cpu();
597 if (waitqueue_active(vcpu->arch.wqp)) {
598 wake_up_interruptible(vcpu->arch.wqp);
599 vcpu->stat.halt_wakeup++;
600 } else if (cpu != me && cpu != -1) {
601 smp_send_reschedule(vcpu->cpu);
602 }
603 put_cpu();
604}
605
606int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq) 608int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
607{ 609{
608 if (irq->irq == KVM_INTERRUPT_UNSET) { 610 if (irq->irq == KVM_INTERRUPT_UNSET) {
@@ -611,6 +613,7 @@ int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
611 } 613 }
612 614
613 kvmppc_core_queue_external(vcpu, irq); 615 kvmppc_core_queue_external(vcpu, irq);
616
614 kvm_vcpu_kick(vcpu); 617 kvm_vcpu_kick(vcpu);
615 618
616 return 0; 619 return 0;
@@ -633,7 +636,7 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
633 r = 0; 636 r = 0;
634 vcpu->arch.papr_enabled = true; 637 vcpu->arch.papr_enabled = true;
635 break; 638 break;
636#ifdef CONFIG_KVM_E500 639#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
637 case KVM_CAP_SW_TLB: { 640 case KVM_CAP_SW_TLB: {
638 struct kvm_config_tlb cfg; 641 struct kvm_config_tlb cfg;
639 void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0]; 642 void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];
@@ -710,7 +713,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
710 break; 713 break;
711 } 714 }
712 715
713#ifdef CONFIG_KVM_E500 716#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
714 case KVM_DIRTY_TLB: { 717 case KVM_DIRTY_TLB: {
715 struct kvm_dirty_tlb dirty; 718 struct kvm_dirty_tlb dirty;
716 r = -EFAULT; 719 r = -EFAULT;
@@ -720,7 +723,6 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
720 break; 723 break;
721 } 724 }
722#endif 725#endif
723
724 default: 726 default:
725 r = -EINVAL; 727 r = -EINVAL;
726 } 728 }
@@ -777,7 +779,7 @@ long kvm_arch_vm_ioctl(struct file *filp,
777 779
778 break; 780 break;
779 } 781 }
780#ifdef CONFIG_KVM_BOOK3S_64_HV 782#ifdef CONFIG_PPC_BOOK3S_64
781 case KVM_CREATE_SPAPR_TCE: { 783 case KVM_CREATE_SPAPR_TCE: {
782 struct kvm_create_spapr_tce create_tce; 784 struct kvm_create_spapr_tce create_tce;
783 struct kvm *kvm = filp->private_data; 785 struct kvm *kvm = filp->private_data;
@@ -788,7 +790,9 @@ long kvm_arch_vm_ioctl(struct file *filp,
788 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce); 790 r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
789 goto out; 791 goto out;
790 } 792 }
793#endif /* CONFIG_PPC_BOOK3S_64 */
791 794
795#ifdef CONFIG_KVM_BOOK3S_64_HV
792 case KVM_ALLOCATE_RMA: { 796 case KVM_ALLOCATE_RMA: {
793 struct kvm *kvm = filp->private_data; 797 struct kvm *kvm = filp->private_data;
794 struct kvm_allocate_rma rma; 798 struct kvm_allocate_rma rma;
@@ -800,6 +804,18 @@ long kvm_arch_vm_ioctl(struct file *filp,
800 } 804 }
801#endif /* CONFIG_KVM_BOOK3S_64_HV */ 805#endif /* CONFIG_KVM_BOOK3S_64_HV */
802 806
807#ifdef CONFIG_PPC_BOOK3S_64
808 case KVM_PPC_GET_SMMU_INFO: {
809 struct kvm *kvm = filp->private_data;
810 struct kvm_ppc_smmu_info info;
811
812 memset(&info, 0, sizeof(info));
813 r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
814 if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
815 r = -EFAULT;
816 break;
817 }
818#endif /* CONFIG_PPC_BOOK3S_64 */
803 default: 819 default:
804 r = -ENOTTY; 820 r = -ENOTTY;
805 } 821 }
@@ -808,6 +824,40 @@ out:
808 return r; 824 return r;
809} 825}
810 826
827static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
828static unsigned long nr_lpids;
829
830long kvmppc_alloc_lpid(void)
831{
832 long lpid;
833
834 do {
835 lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
836 if (lpid >= nr_lpids) {
837 pr_err("%s: No LPIDs free\n", __func__);
838 return -ENOMEM;
839 }
840 } while (test_and_set_bit(lpid, lpid_inuse));
841
842 return lpid;
843}
844
845void kvmppc_claim_lpid(long lpid)
846{
847 set_bit(lpid, lpid_inuse);
848}
849
850void kvmppc_free_lpid(long lpid)
851{
852 clear_bit(lpid, lpid_inuse);
853}
854
855void kvmppc_init_lpid(unsigned long nr_lpids_param)
856{
857 nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
858 memset(lpid_inuse, 0, sizeof(lpid_inuse));
859}
860
811int kvm_arch_init(void *opaque) 861int kvm_arch_init(void *opaque)
812{ 862{
813 return 0; 863 return 0;
diff --git a/arch/powerpc/kvm/timing.h b/arch/powerpc/kvm/timing.h
index 8167d42a776f..bf191e72b2d8 100644
--- a/arch/powerpc/kvm/timing.h
+++ b/arch/powerpc/kvm/timing.h
@@ -93,6 +93,12 @@ static inline void kvmppc_account_exit_stat(struct kvm_vcpu *vcpu, int type)
93 case SIGNAL_EXITS: 93 case SIGNAL_EXITS:
94 vcpu->stat.signal_exits++; 94 vcpu->stat.signal_exits++;
95 break; 95 break;
96 case DBELL_EXITS:
97 vcpu->stat.dbell_exits++;
98 break;
99 case GDBELL_EXITS:
100 vcpu->stat.gdbell_exits++;
101 break;
96 } 102 }
97} 103}
98 104
diff --git a/arch/powerpc/lib/string.S b/arch/powerpc/lib/string.S
index 455881a5563f..093d6316435c 100644
--- a/arch/powerpc/lib/string.S
+++ b/arch/powerpc/lib/string.S
@@ -160,48 +160,3 @@ _GLOBAL(__clear_user)
160 PPC_LONG 1b,91b 160 PPC_LONG 1b,91b
161 PPC_LONG 8b,92b 161 PPC_LONG 8b,92b
162 .text 162 .text
163
164_GLOBAL(__strncpy_from_user)
165 addi r6,r3,-1
166 addi r4,r4,-1
167 cmpwi 0,r5,0
168 beq 2f
169 mtctr r5
1701: lbzu r0,1(r4)
171 cmpwi 0,r0,0
172 stbu r0,1(r6)
173 bdnzf 2,1b /* dec ctr, branch if ctr != 0 && !cr0.eq */
174 beq 3f
1752: addi r6,r6,1
1763: subf r3,r3,r6
177 blr
17899: li r3,-EFAULT
179 blr
180
181 .section __ex_table,"a"
182 PPC_LONG 1b,99b
183 .text
184
185/* r3 = str, r4 = len (> 0), r5 = top (highest addr) */
186_GLOBAL(__strnlen_user)
187 addi r7,r3,-1
188 subf r6,r7,r5 /* top+1 - str */
189 cmplw 0,r4,r6
190 bge 0f
191 mr r6,r4
1920: mtctr r6 /* ctr = min(len, top - str) */
1931: lbzu r0,1(r7) /* get next byte */
194 cmpwi 0,r0,0
195 bdnzf 2,1b /* loop if --ctr != 0 && byte != 0 */
196 addi r7,r7,1
197 subf r3,r3,r7 /* number of bytes we have looked at */
198 beqlr /* return if we found a 0 byte */
199 cmpw 0,r3,r4 /* did we look at all len bytes? */
200 blt 99f /* if not, must have hit top */
201 addi r3,r4,1 /* return len + 1 to indicate no null found */
202 blr
20399: li r3,0 /* bad address, return 0 */
204 blr
205
206 .section __ex_table,"a"
207 PPC_LONG 1b,99b
diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c
index 1d75c92ea8fb..66519d263da7 100644
--- a/arch/powerpc/platforms/cell/spufs/inode.c
+++ b/arch/powerpc/platforms/cell/spufs/inode.c
@@ -151,7 +151,7 @@ static void
151spufs_evict_inode(struct inode *inode) 151spufs_evict_inode(struct inode *inode)
152{ 152{
153 struct spufs_inode_info *ei = SPUFS_I(inode); 153 struct spufs_inode_info *ei = SPUFS_I(inode);
154 end_writeback(inode); 154 clear_inode(inode);
155 if (ei->i_ctx) 155 if (ei->i_ctx)
156 put_spu_context(ei->i_ctx); 156 put_spu_context(ei->i_ctx);
157 if (ei->i_gang) 157 if (ei->i_gang)
diff --git a/arch/s390/hypfs/inode.c b/arch/s390/hypfs/inode.c
index 6a2cb560e968..73dae8b9b77a 100644
--- a/arch/s390/hypfs/inode.c
+++ b/arch/s390/hypfs/inode.c
@@ -115,7 +115,7 @@ static struct inode *hypfs_make_inode(struct super_block *sb, umode_t mode)
115 115
116static void hypfs_evict_inode(struct inode *inode) 116static void hypfs_evict_inode(struct inode *inode)
117{ 117{
118 end_writeback(inode); 118 clear_inode(inode);
119 kfree(inode->i_private); 119 kfree(inode->i_private);
120} 120}
121 121
diff --git a/arch/s390/include/asm/kvm.h b/arch/s390/include/asm/kvm.h
index 96076676e224..bdcbe0f8dd7b 100644
--- a/arch/s390/include/asm/kvm.h
+++ b/arch/s390/include/asm/kvm.h
@@ -52,4 +52,9 @@ struct kvm_sync_regs {
52 __u32 acrs[16]; /* access registers */ 52 __u32 acrs[16]; /* access registers */
53 __u64 crs[16]; /* control registers */ 53 __u64 crs[16]; /* control registers */
54}; 54};
55
56#define KVM_REG_S390_TODPR (KVM_REG_S390 | KVM_REG_SIZE_U32 | 0x1)
57#define KVM_REG_S390_EPOCHDIFF (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x2)
58#define KVM_REG_S390_CPU_TIMER (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x3)
59#define KVM_REG_S390_CLOCK_COMP (KVM_REG_S390 | KVM_REG_SIZE_U64 | 0x4)
55#endif 60#endif
diff --git a/arch/s390/include/asm/kvm_host.h b/arch/s390/include/asm/kvm_host.h
index 7343872890a2..dd17537b9a9d 100644
--- a/arch/s390/include/asm/kvm_host.h
+++ b/arch/s390/include/asm/kvm_host.h
@@ -148,6 +148,7 @@ struct kvm_vcpu_stat {
148 u32 instruction_sigp_restart; 148 u32 instruction_sigp_restart;
149 u32 diagnose_10; 149 u32 diagnose_10;
150 u32 diagnose_44; 150 u32 diagnose_44;
151 u32 diagnose_9c;
151}; 152};
152 153
153struct kvm_s390_io_info { 154struct kvm_s390_io_info {
diff --git a/arch/s390/include/asm/kvm_para.h b/arch/s390/include/asm/kvm_para.h
index 6964db226f83..a98832961035 100644
--- a/arch/s390/include/asm/kvm_para.h
+++ b/arch/s390/include/asm/kvm_para.h
@@ -149,6 +149,11 @@ static inline unsigned int kvm_arch_para_features(void)
149 return 0; 149 return 0;
150} 150}
151 151
152static inline bool kvm_check_and_clear_guest_paused(void)
153{
154 return false;
155}
156
152#endif 157#endif
153 158
154#endif /* __S390_KVM_PARA_H */ 159#endif /* __S390_KVM_PARA_H */
diff --git a/arch/s390/include/asm/sclp.h b/arch/s390/include/asm/sclp.h
index fed7bee650a0..bf238c55740b 100644
--- a/arch/s390/include/asm/sclp.h
+++ b/arch/s390/include/asm/sclp.h
@@ -48,6 +48,7 @@ int sclp_cpu_deconfigure(u8 cpu);
48void sclp_facilities_detect(void); 48void sclp_facilities_detect(void);
49unsigned long long sclp_get_rnmax(void); 49unsigned long long sclp_get_rnmax(void);
50unsigned long long sclp_get_rzm(void); 50unsigned long long sclp_get_rzm(void);
51u8 sclp_get_fac85(void);
51int sclp_sdias_blk_count(void); 52int sclp_sdias_blk_count(void);
52int sclp_sdias_copy(void *dest, int blk_num, int nr_blks); 53int sclp_sdias_copy(void *dest, int blk_num, int nr_blks);
53int sclp_chp_configure(struct chp_id chpid); 54int sclp_chp_configure(struct chp_id chpid);
diff --git a/arch/s390/kvm/diag.c b/arch/s390/kvm/diag.c
index a353f0ea45c2..b23d9ac77dfc 100644
--- a/arch/s390/kvm/diag.c
+++ b/arch/s390/kvm/diag.c
@@ -47,9 +47,30 @@ static int __diag_time_slice_end(struct kvm_vcpu *vcpu)
47{ 47{
48 VCPU_EVENT(vcpu, 5, "%s", "diag time slice end"); 48 VCPU_EVENT(vcpu, 5, "%s", "diag time slice end");
49 vcpu->stat.diagnose_44++; 49 vcpu->stat.diagnose_44++;
50 vcpu_put(vcpu); 50 kvm_vcpu_on_spin(vcpu);
51 yield(); 51 return 0;
52 vcpu_load(vcpu); 52}
53
54static int __diag_time_slice_end_directed(struct kvm_vcpu *vcpu)
55{
56 struct kvm *kvm = vcpu->kvm;
57 struct kvm_vcpu *tcpu;
58 int tid;
59 int i;
60
61 tid = vcpu->run->s.regs.gprs[(vcpu->arch.sie_block->ipa & 0xf0) >> 4];
62 vcpu->stat.diagnose_9c++;
63 VCPU_EVENT(vcpu, 5, "diag time slice end directed to %d", tid);
64
65 if (tid == vcpu->vcpu_id)
66 return 0;
67
68 kvm_for_each_vcpu(i, tcpu, kvm)
69 if (tcpu->vcpu_id == tid) {
70 kvm_vcpu_yield_to(tcpu);
71 break;
72 }
73
53 return 0; 74 return 0;
54} 75}
55 76
@@ -89,6 +110,8 @@ int kvm_s390_handle_diag(struct kvm_vcpu *vcpu)
89 return diag_release_pages(vcpu); 110 return diag_release_pages(vcpu);
90 case 0x44: 111 case 0x44:
91 return __diag_time_slice_end(vcpu); 112 return __diag_time_slice_end(vcpu);
113 case 0x9c:
114 return __diag_time_slice_end_directed(vcpu);
92 case 0x308: 115 case 0x308:
93 return __diag_ipl_functions(vcpu); 116 return __diag_ipl_functions(vcpu);
94 default: 117 default:
diff --git a/arch/s390/kvm/intercept.c b/arch/s390/kvm/intercept.c
index 361456577c6f..979cbe55bf5e 100644
--- a/arch/s390/kvm/intercept.c
+++ b/arch/s390/kvm/intercept.c
@@ -101,6 +101,7 @@ static int handle_lctl(struct kvm_vcpu *vcpu)
101} 101}
102 102
103static intercept_handler_t instruction_handlers[256] = { 103static intercept_handler_t instruction_handlers[256] = {
104 [0x01] = kvm_s390_handle_01,
104 [0x83] = kvm_s390_handle_diag, 105 [0x83] = kvm_s390_handle_diag,
105 [0xae] = kvm_s390_handle_sigp, 106 [0xae] = kvm_s390_handle_sigp,
106 [0xb2] = kvm_s390_handle_b2, 107 [0xb2] = kvm_s390_handle_b2,
diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c
index 217ce44395a4..664766d0c83c 100644
--- a/arch/s390/kvm/kvm-s390.c
+++ b/arch/s390/kvm/kvm-s390.c
@@ -28,6 +28,7 @@
28#include <asm/pgtable.h> 28#include <asm/pgtable.h>
29#include <asm/nmi.h> 29#include <asm/nmi.h>
30#include <asm/switch_to.h> 30#include <asm/switch_to.h>
31#include <asm/sclp.h>
31#include "kvm-s390.h" 32#include "kvm-s390.h"
32#include "gaccess.h" 33#include "gaccess.h"
33 34
@@ -74,6 +75,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
74 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) }, 75 { "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
75 { "diagnose_10", VCPU_STAT(diagnose_10) }, 76 { "diagnose_10", VCPU_STAT(diagnose_10) },
76 { "diagnose_44", VCPU_STAT(diagnose_44) }, 77 { "diagnose_44", VCPU_STAT(diagnose_44) },
78 { "diagnose_9c", VCPU_STAT(diagnose_9c) },
77 { NULL } 79 { NULL }
78}; 80};
79 81
@@ -133,8 +135,16 @@ int kvm_dev_ioctl_check_extension(long ext)
133 case KVM_CAP_S390_UCONTROL: 135 case KVM_CAP_S390_UCONTROL:
134#endif 136#endif
135 case KVM_CAP_SYNC_REGS: 137 case KVM_CAP_SYNC_REGS:
138 case KVM_CAP_ONE_REG:
136 r = 1; 139 r = 1;
137 break; 140 break;
141 case KVM_CAP_NR_VCPUS:
142 case KVM_CAP_MAX_VCPUS:
143 r = KVM_MAX_VCPUS;
144 break;
145 case KVM_CAP_S390_COW:
146 r = sclp_get_fac85() & 0x2;
147 break;
138 default: 148 default:
139 r = 0; 149 r = 0;
140 } 150 }
@@ -423,6 +433,71 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
423 return 0; 433 return 0;
424} 434}
425 435
436int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
437{
438 /* kvm common code refers to this, but never calls it */
439 BUG();
440 return 0;
441}
442
443static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
444 struct kvm_one_reg *reg)
445{
446 int r = -EINVAL;
447
448 switch (reg->id) {
449 case KVM_REG_S390_TODPR:
450 r = put_user(vcpu->arch.sie_block->todpr,
451 (u32 __user *)reg->addr);
452 break;
453 case KVM_REG_S390_EPOCHDIFF:
454 r = put_user(vcpu->arch.sie_block->epoch,
455 (u64 __user *)reg->addr);
456 break;
457 case KVM_REG_S390_CPU_TIMER:
458 r = put_user(vcpu->arch.sie_block->cputm,
459 (u64 __user *)reg->addr);
460 break;
461 case KVM_REG_S390_CLOCK_COMP:
462 r = put_user(vcpu->arch.sie_block->ckc,
463 (u64 __user *)reg->addr);
464 break;
465 default:
466 break;
467 }
468
469 return r;
470}
471
472static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
473 struct kvm_one_reg *reg)
474{
475 int r = -EINVAL;
476
477 switch (reg->id) {
478 case KVM_REG_S390_TODPR:
479 r = get_user(vcpu->arch.sie_block->todpr,
480 (u32 __user *)reg->addr);
481 break;
482 case KVM_REG_S390_EPOCHDIFF:
483 r = get_user(vcpu->arch.sie_block->epoch,
484 (u64 __user *)reg->addr);
485 break;
486 case KVM_REG_S390_CPU_TIMER:
487 r = get_user(vcpu->arch.sie_block->cputm,
488 (u64 __user *)reg->addr);
489 break;
490 case KVM_REG_S390_CLOCK_COMP:
491 r = get_user(vcpu->arch.sie_block->ckc,
492 (u64 __user *)reg->addr);
493 break;
494 default:
495 break;
496 }
497
498 return r;
499}
500
426static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu) 501static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
427{ 502{
428 kvm_s390_vcpu_initial_reset(vcpu); 503 kvm_s390_vcpu_initial_reset(vcpu);
@@ -753,6 +828,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
753 case KVM_S390_INITIAL_RESET: 828 case KVM_S390_INITIAL_RESET:
754 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu); 829 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
755 break; 830 break;
831 case KVM_SET_ONE_REG:
832 case KVM_GET_ONE_REG: {
833 struct kvm_one_reg reg;
834 r = -EFAULT;
835 if (copy_from_user(&reg, argp, sizeof(reg)))
836 break;
837 if (ioctl == KVM_SET_ONE_REG)
838 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
839 else
840 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
841 break;
842 }
756#ifdef CONFIG_KVM_S390_UCONTROL 843#ifdef CONFIG_KVM_S390_UCONTROL
757 case KVM_S390_UCAS_MAP: { 844 case KVM_S390_UCAS_MAP: {
758 struct kvm_s390_ucas_mapping ucasmap; 845 struct kvm_s390_ucas_mapping ucasmap;
diff --git a/arch/s390/kvm/kvm-s390.h b/arch/s390/kvm/kvm-s390.h
index ff28f9d1c9eb..2294377975e8 100644
--- a/arch/s390/kvm/kvm-s390.h
+++ b/arch/s390/kvm/kvm-s390.h
@@ -79,6 +79,7 @@ int kvm_s390_inject_sigp_stop(struct kvm_vcpu *vcpu, int action);
79/* implemented in priv.c */ 79/* implemented in priv.c */
80int kvm_s390_handle_b2(struct kvm_vcpu *vcpu); 80int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
81int kvm_s390_handle_e5(struct kvm_vcpu *vcpu); 81int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
82int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
82 83
83/* implemented in sigp.c */ 84/* implemented in sigp.c */
84int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu); 85int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
diff --git a/arch/s390/kvm/priv.c b/arch/s390/kvm/priv.c
index e5a45dbd26ac..68a6b2ed16bf 100644
--- a/arch/s390/kvm/priv.c
+++ b/arch/s390/kvm/priv.c
@@ -380,3 +380,34 @@ int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
380 return -EOPNOTSUPP; 380 return -EOPNOTSUPP;
381} 381}
382 382
383static int handle_sckpf(struct kvm_vcpu *vcpu)
384{
385 u32 value;
386
387 if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
388 return kvm_s390_inject_program_int(vcpu,
389 PGM_PRIVILEGED_OPERATION);
390
391 if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
392 return kvm_s390_inject_program_int(vcpu,
393 PGM_SPECIFICATION);
394
395 value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
396 vcpu->arch.sie_block->todpr = value;
397
398 return 0;
399}
400
401static intercept_handler_t x01_handlers[256] = {
402 [0x07] = handle_sckpf,
403};
404
405int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
406{
407 intercept_handler_t handler;
408
409 handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
410 if (handler)
411 return handler(vcpu);
412 return -EOPNOTSUPP;
413}
diff --git a/arch/score/include/asm/kvm_para.h b/arch/score/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/score/include/asm/kvm_para.h
@@ -0,0 +1 @@
#include <asm-generic/kvm_para.h>
diff --git a/arch/sh/include/asm/kvm_para.h b/arch/sh/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/sh/include/asm/kvm_para.h
@@ -0,0 +1 @@
#include <asm-generic/kvm_para.h>
diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig
index 15e9e05740da..83bd051754e1 100644
--- a/arch/sparc/Kconfig
+++ b/arch/sparc/Kconfig
@@ -35,6 +35,7 @@ config SPARC
35 select GENERIC_CMOS_UPDATE 35 select GENERIC_CMOS_UPDATE
36 select GENERIC_CLOCKEVENTS 36 select GENERIC_CLOCKEVENTS
37 select GENERIC_STRNCPY_FROM_USER 37 select GENERIC_STRNCPY_FROM_USER
38 select GENERIC_STRNLEN_USER
38 39
39config SPARC32 40config SPARC32
40 def_bool !64BIT 41 def_bool !64BIT
diff --git a/arch/sparc/include/asm/Kbuild b/arch/sparc/include/asm/Kbuild
index 2c2e38821f60..67f83e0a0d68 100644
--- a/arch/sparc/include/asm/Kbuild
+++ b/arch/sparc/include/asm/Kbuild
@@ -21,3 +21,4 @@ generic-y += div64.h
21generic-y += local64.h 21generic-y += local64.h
22generic-y += irq_regs.h 22generic-y += irq_regs.h
23generic-y += local.h 23generic-y += local.h
24generic-y += word-at-a-time.h
diff --git a/arch/sparc/include/asm/kvm_para.h b/arch/sparc/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/sparc/include/asm/kvm_para.h
@@ -0,0 +1 @@
#include <asm-generic/kvm_para.h>
diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
index 59586b57ef1a..53a28dd59f59 100644
--- a/arch/sparc/include/asm/uaccess_32.h
+++ b/arch/sparc/include/asm/uaccess_32.h
@@ -16,6 +16,8 @@
16 16
17#ifndef __ASSEMBLY__ 17#ifndef __ASSEMBLY__
18 18
19#include <asm/processor.h>
20
19#define ARCH_HAS_SORT_EXTABLE 21#define ARCH_HAS_SORT_EXTABLE
20#define ARCH_HAS_SEARCH_EXTABLE 22#define ARCH_HAS_SEARCH_EXTABLE
21 23
@@ -304,24 +306,8 @@ static inline unsigned long clear_user(void __user *addr, unsigned long n)
304 return n; 306 return n;
305} 307}
306 308
307extern long __strlen_user(const char __user *); 309extern __must_check long strlen_user(const char __user *str);
308extern long __strnlen_user(const char __user *, long len); 310extern __must_check long strnlen_user(const char __user *str, long n);
309
310static inline long strlen_user(const char __user *str)
311{
312 if (!access_ok(VERIFY_READ, str, 0))
313 return 0;
314 else
315 return __strlen_user(str);
316}
317
318static inline long strnlen_user(const char __user *str, long len)
319{
320 if (!access_ok(VERIFY_READ, str, 0))
321 return 0;
322 else
323 return __strnlen_user(str, len);
324}
325 311
326#endif /* __ASSEMBLY__ */ 312#endif /* __ASSEMBLY__ */
327 313
diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
index dcdfb89cbf3f..7c831d848b4e 100644
--- a/arch/sparc/include/asm/uaccess_64.h
+++ b/arch/sparc/include/asm/uaccess_64.h
@@ -17,6 +17,8 @@
17 17
18#ifndef __ASSEMBLY__ 18#ifndef __ASSEMBLY__
19 19
20#include <asm/processor.h>
21
20/* 22/*
21 * Sparc64 is segmented, though more like the M68K than the I386. 23 * Sparc64 is segmented, though more like the M68K than the I386.
22 * We use the secondary ASI to address user memory, which references a 24 * We use the secondary ASI to address user memory, which references a
@@ -257,11 +259,9 @@ extern unsigned long __must_check __clear_user(void __user *, unsigned long);
257 259
258#define clear_user __clear_user 260#define clear_user __clear_user
259 261
260extern long __strlen_user(const char __user *); 262extern __must_check long strlen_user(const char __user *str);
261extern long __strnlen_user(const char __user *, long len); 263extern __must_check long strnlen_user(const char __user *str, long n);
262 264
263#define strlen_user __strlen_user
264#define strnlen_user __strnlen_user
265#define __copy_to_user_inatomic ___copy_to_user 265#define __copy_to_user_inatomic ___copy_to_user
266#define __copy_from_user_inatomic ___copy_from_user 266#define __copy_from_user_inatomic ___copy_from_user
267 267
diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
index 943d98dc4cdb..dff4096f3dec 100644
--- a/arch/sparc/lib/Makefile
+++ b/arch/sparc/lib/Makefile
@@ -10,7 +10,6 @@ lib-y += strlen.o
10lib-y += checksum_$(BITS).o 10lib-y += checksum_$(BITS).o
11lib-$(CONFIG_SPARC32) += blockops.o 11lib-$(CONFIG_SPARC32) += blockops.o
12lib-y += memscan_$(BITS).o memcmp.o strncmp_$(BITS).o 12lib-y += memscan_$(BITS).o memcmp.o strncmp_$(BITS).o
13lib-y += strlen_user_$(BITS).o
14lib-$(CONFIG_SPARC32) += divdi3.o udivdi3.o 13lib-$(CONFIG_SPARC32) += divdi3.o udivdi3.o
15lib-$(CONFIG_SPARC32) += copy_user.o locks.o 14lib-$(CONFIG_SPARC32) += copy_user.o locks.o
16lib-$(CONFIG_SPARC64) += atomic_64.o 15lib-$(CONFIG_SPARC64) += atomic_64.o
diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
index 6b278abdb63d..3b31218cafc6 100644
--- a/arch/sparc/lib/ksyms.c
+++ b/arch/sparc/lib/ksyms.c
@@ -15,8 +15,6 @@
15 15
16/* string functions */ 16/* string functions */
17EXPORT_SYMBOL(strlen); 17EXPORT_SYMBOL(strlen);
18EXPORT_SYMBOL(__strlen_user);
19EXPORT_SYMBOL(__strnlen_user);
20EXPORT_SYMBOL(strncmp); 18EXPORT_SYMBOL(strncmp);
21 19
22/* mem* functions */ 20/* mem* functions */
diff --git a/arch/sparc/lib/strlen_user_32.S b/arch/sparc/lib/strlen_user_32.S
deleted file mode 100644
index 8c8a371df3c9..000000000000
--- a/arch/sparc/lib/strlen_user_32.S
+++ /dev/null
@@ -1,109 +0,0 @@
1/* strlen_user.S: Sparc optimized strlen_user code
2 *
3 * Return length of string in userspace including terminating 0
4 * or 0 for error
5 *
6 * Copyright (C) 1991,1996 Free Software Foundation
7 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
8 * Copyright (C) 1996 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
9 */
10
11#define LO_MAGIC 0x01010101
12#define HI_MAGIC 0x80808080
13
1410:
15 ldub [%o0], %o5
16 cmp %o5, 0
17 be 1f
18 add %o0, 1, %o0
19 andcc %o0, 3, %g0
20 be 4f
21 or %o4, %lo(HI_MAGIC), %o3
2211:
23 ldub [%o0], %o5
24 cmp %o5, 0
25 be 2f
26 add %o0, 1, %o0
27 andcc %o0, 3, %g0
28 be 5f
29 sethi %hi(LO_MAGIC), %o4
3012:
31 ldub [%o0], %o5
32 cmp %o5, 0
33 be 3f
34 add %o0, 1, %o0
35 b 13f
36 or %o4, %lo(LO_MAGIC), %o2
371:
38 retl
39 mov 1, %o0
402:
41 retl
42 mov 2, %o0
433:
44 retl
45 mov 3, %o0
46
47 .align 4
48 .global __strlen_user, __strnlen_user
49__strlen_user:
50 sethi %hi(32768), %o1
51__strnlen_user:
52 mov %o1, %g1
53 mov %o0, %o1
54 andcc %o0, 3, %g0
55 bne 10b
56 sethi %hi(HI_MAGIC), %o4
57 or %o4, %lo(HI_MAGIC), %o3
584:
59 sethi %hi(LO_MAGIC), %o4
605:
61 or %o4, %lo(LO_MAGIC), %o2
6213:
63 ld [%o0], %o5
642:
65 sub %o5, %o2, %o4
66 andcc %o4, %o3, %g0
67 bne 82f
68 add %o0, 4, %o0
69 sub %o0, %o1, %g2
7081: cmp %g2, %g1
71 blu 13b
72 mov %o0, %o4
73 ba,a 1f
74
75 /* Check every byte. */
7682: srl %o5, 24, %g5
77 andcc %g5, 0xff, %g0
78 be 1f
79 add %o0, -3, %o4
80 srl %o5, 16, %g5
81 andcc %g5, 0xff, %g0
82 be 1f
83 add %o4, 1, %o4
84 srl %o5, 8, %g5
85 andcc %g5, 0xff, %g0
86 be 1f
87 add %o4, 1, %o4
88 andcc %o5, 0xff, %g0
89 bne 81b
90 sub %o0, %o1, %g2
91
92 add %o4, 1, %o4
931:
94 retl
95 sub %o4, %o1, %o0
96
97 .section .fixup,#alloc,#execinstr
98 .align 4
999:
100 retl
101 clr %o0
102
103 .section __ex_table,#alloc
104 .align 4
105
106 .word 10b, 9b
107 .word 11b, 9b
108 .word 12b, 9b
109 .word 13b, 9b
diff --git a/arch/sparc/lib/strlen_user_64.S b/arch/sparc/lib/strlen_user_64.S
deleted file mode 100644
index c3df71fa4928..000000000000
--- a/arch/sparc/lib/strlen_user_64.S
+++ /dev/null
@@ -1,97 +0,0 @@
1/* strlen_user.S: Sparc64 optimized strlen_user code
2 *
3 * Return length of string in userspace including terminating 0
4 * or 0 for error
5 *
6 * Copyright (C) 1991,1996 Free Software Foundation
7 * Copyright (C) 1996,1999 David S. Miller (davem@redhat.com)
8 * Copyright (C) 1996,1997 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
9 */
10
11#include <linux/linkage.h>
12#include <asm/asi.h>
13
14#define LO_MAGIC 0x01010101
15#define HI_MAGIC 0x80808080
16
17 .align 4
18ENTRY(__strlen_user)
19 sethi %hi(32768), %o1
20ENTRY(__strnlen_user)
21 mov %o1, %g1
22 mov %o0, %o1
23 andcc %o0, 3, %g0
24 be,pt %icc, 9f
25 sethi %hi(HI_MAGIC), %o4
2610: lduba [%o0] %asi, %o5
27 brz,pn %o5, 21f
28 add %o0, 1, %o0
29 andcc %o0, 3, %g0
30 be,pn %icc, 4f
31 or %o4, %lo(HI_MAGIC), %o3
3211: lduba [%o0] %asi, %o5
33 brz,pn %o5, 22f
34 add %o0, 1, %o0
35 andcc %o0, 3, %g0
36 be,pt %icc, 13f
37 srl %o3, 7, %o2
3812: lduba [%o0] %asi, %o5
39 brz,pn %o5, 23f
40 add %o0, 1, %o0
41 ba,pt %icc, 2f
4215: lda [%o0] %asi, %o5
439: or %o4, %lo(HI_MAGIC), %o3
444: srl %o3, 7, %o2
4513: lda [%o0] %asi, %o5
462: sub %o5, %o2, %o4
47 andcc %o4, %o3, %g0
48 bne,pn %icc, 82f
49 add %o0, 4, %o0
50 sub %o0, %o1, %g2
5181: cmp %g2, %g1
52 blu,pt %icc, 13b
53 mov %o0, %o4
54 ba,a,pt %xcc, 1f
55
56 /* Check every byte. */
5782: srl %o5, 24, %g7
58 andcc %g7, 0xff, %g0
59 be,pn %icc, 1f
60 add %o0, -3, %o4
61 srl %o5, 16, %g7
62 andcc %g7, 0xff, %g0
63 be,pn %icc, 1f
64 add %o4, 1, %o4
65 srl %o5, 8, %g7
66 andcc %g7, 0xff, %g0
67 be,pn %icc, 1f
68 add %o4, 1, %o4
69 andcc %o5, 0xff, %g0
70 bne,pt %icc, 81b
71 sub %o0, %o1, %g2
72 add %o4, 1, %o4
731: retl
74 sub %o4, %o1, %o0
7521: retl
76 mov 1, %o0
7722: retl
78 mov 2, %o0
7923: retl
80 mov 3, %o0
81ENDPROC(__strlen_user)
82ENDPROC(__strnlen_user)
83
84 .section .fixup,#alloc,#execinstr
85 .align 4
8630:
87 retl
88 clr %o0
89
90 .section __ex_table,"a"
91 .align 4
92
93 .word 10b, 30b
94 .word 11b, 30b
95 .word 12b, 30b
96 .word 15b, 30b
97 .word 13b, 30b
diff --git a/arch/tile/Kconfig b/arch/tile/Kconfig
index 6ad6219fc47e..fe128816c448 100644
--- a/arch/tile/Kconfig
+++ b/arch/tile/Kconfig
@@ -48,6 +48,14 @@ config NEED_PER_CPU_PAGE_FIRST_CHUNK
48config SYS_SUPPORTS_HUGETLBFS 48config SYS_SUPPORTS_HUGETLBFS
49 def_bool y 49 def_bool y
50 50
51# Support for additional huge page sizes besides HPAGE_SIZE.
52# The software support is currently only present in the TILE-Gx
53# hypervisor. TILEPro in any case does not support page sizes
54# larger than the default HPAGE_SIZE.
55config HUGETLB_SUPER_PAGES
56 depends on HUGETLB_PAGE && TILEGX
57 def_bool y
58
51# FIXME: tilegx can implement a more efficient rwsem. 59# FIXME: tilegx can implement a more efficient rwsem.
52config RWSEM_GENERIC_SPINLOCK 60config RWSEM_GENERIC_SPINLOCK
53 def_bool y 61 def_bool y
@@ -107,16 +115,14 @@ config HVC_TILE
107 select HVC_DRIVER 115 select HVC_DRIVER
108 def_bool y 116 def_bool y
109 117
110# Please note: TILE-Gx support is not yet finalized; this is
111# the preliminary support. TILE-Gx drivers are only provided
112# with the alpha or beta test versions for Tilera customers.
113config TILEGX 118config TILEGX
114 depends on EXPERIMENTAL
115 bool "Building with TILE-Gx (64-bit) compiler and toolchain" 119 bool "Building with TILE-Gx (64-bit) compiler and toolchain"
116 120
121config TILEPRO
122 def_bool !TILEGX
123
117config 64BIT 124config 64BIT
118 depends on TILEGX 125 def_bool TILEGX
119 def_bool y
120 126
121config ARCH_DEFCONFIG 127config ARCH_DEFCONFIG
122 string 128 string
@@ -137,6 +143,31 @@ config NR_CPUS
137 smaller kernel memory footprint results from using a smaller 143 smaller kernel memory footprint results from using a smaller
138 value on chips with fewer tiles. 144 value on chips with fewer tiles.
139 145
146if TILEGX
147
148choice
149 prompt "Kernel page size"
150 default PAGE_SIZE_64KB
151 help
152 This lets you select the page size of the kernel. For best
153 performance on memory-intensive applications, a page size of 64KB
154 is recommended. For workloads involving many small files, many
155 connections, etc., it may be better to select 16KB, which uses
156 memory more efficiently at some cost in TLB performance.
157
158 Note that this option is TILE-Gx specific; currently
159 TILEPro page size is set by rebuilding the hypervisor.
160
161config PAGE_SIZE_16KB
162 bool "16KB"
163
164config PAGE_SIZE_64KB
165 bool "64KB"
166
167endchoice
168
169endif
170
140source "kernel/Kconfig.hz" 171source "kernel/Kconfig.hz"
141 172
142config KEXEC 173config KEXEC
diff --git a/arch/tile/Makefile b/arch/tile/Makefile
index 9520bc5a4b7f..e20b0a0b64a1 100644
--- a/arch/tile/Makefile
+++ b/arch/tile/Makefile
@@ -34,7 +34,12 @@ LIBGCC_PATH := \
34 $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name) 34 $(shell $(CC) $(KBUILD_CFLAGS) $(KCFLAGS) -print-libgcc-file-name)
35 35
36# Provide the path to use for "make defconfig". 36# Provide the path to use for "make defconfig".
37KBUILD_DEFCONFIG := $(ARCH)_defconfig 37# We default to the newer TILE-Gx architecture if only "tile" is given.
38ifeq ($(ARCH),tile)
39 KBUILD_DEFCONFIG := tilegx_defconfig
40else
41 KBUILD_DEFCONFIG := $(ARCH)_defconfig
42endif
38 43
39# Used as a file extension when useful, e.g. head_$(BITS).o 44# Used as a file extension when useful, e.g. head_$(BITS).o
40# Not needed for (e.g.) "$(CC) -m32" since the compiler automatically 45# Not needed for (e.g.) "$(CC) -m32" since the compiler automatically
diff --git a/arch/tile/include/arch/spr_def_32.h b/arch/tile/include/arch/spr_def_32.h
index bbc1f4c924ee..78bbce2fb19a 100644
--- a/arch/tile/include/arch/spr_def_32.h
+++ b/arch/tile/include/arch/spr_def_32.h
@@ -65,6 +65,31 @@
65#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1 65#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
66#define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4 66#define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4
67#define SPR_FAIL 0x4e09 67#define SPR_FAIL 0x4e09
68#define SPR_IDN_AVAIL_EN 0x3e05
69#define SPR_IDN_CA_DATA 0x0b00
70#define SPR_IDN_DATA_AVAIL 0x0b03
71#define SPR_IDN_DEADLOCK_TIMEOUT 0x3406
72#define SPR_IDN_DEMUX_CA_COUNT 0x0a05
73#define SPR_IDN_DEMUX_COUNT_0 0x0a06
74#define SPR_IDN_DEMUX_COUNT_1 0x0a07
75#define SPR_IDN_DEMUX_CTL 0x0a08
76#define SPR_IDN_DEMUX_QUEUE_SEL 0x0a0a
77#define SPR_IDN_DEMUX_STATUS 0x0a0b
78#define SPR_IDN_DEMUX_WRITE_FIFO 0x0a0c
79#define SPR_IDN_DIRECTION_PROTECT 0x2e05
80#define SPR_IDN_PENDING 0x0a0e
81#define SPR_IDN_REFILL_EN 0x0e05
82#define SPR_IDN_SP_FIFO_DATA 0x0a0f
83#define SPR_IDN_SP_FIFO_SEL 0x0a10
84#define SPR_IDN_SP_FREEZE 0x0a11
85#define SPR_IDN_SP_FREEZE__SP_FRZ_MASK 0x1
86#define SPR_IDN_SP_FREEZE__DEMUX_FRZ_MASK 0x2
87#define SPR_IDN_SP_FREEZE__NON_DEST_EXT_MASK 0x4
88#define SPR_IDN_SP_STATE 0x0a12
89#define SPR_IDN_TAG_0 0x0a13
90#define SPR_IDN_TAG_1 0x0a14
91#define SPR_IDN_TAG_VALID 0x0a15
92#define SPR_IDN_TILE_COORD 0x0a16
68#define SPR_INTCTRL_0_STATUS 0x4a07 93#define SPR_INTCTRL_0_STATUS 0x4a07
69#define SPR_INTCTRL_1_STATUS 0x4807 94#define SPR_INTCTRL_1_STATUS 0x4807
70#define SPR_INTCTRL_2_STATUS 0x4607 95#define SPR_INTCTRL_2_STATUS 0x4607
@@ -87,12 +112,36 @@
87#define SPR_INTERRUPT_MASK_SET_1_1 0x480e 112#define SPR_INTERRUPT_MASK_SET_1_1 0x480e
88#define SPR_INTERRUPT_MASK_SET_2_0 0x460c 113#define SPR_INTERRUPT_MASK_SET_2_0 0x460c
89#define SPR_INTERRUPT_MASK_SET_2_1 0x460d 114#define SPR_INTERRUPT_MASK_SET_2_1 0x460d
115#define SPR_MPL_AUX_PERF_COUNT_SET_0 0x6000
116#define SPR_MPL_AUX_PERF_COUNT_SET_1 0x6001
117#define SPR_MPL_AUX_PERF_COUNT_SET_2 0x6002
90#define SPR_MPL_DMA_CPL_SET_0 0x5800 118#define SPR_MPL_DMA_CPL_SET_0 0x5800
91#define SPR_MPL_DMA_CPL_SET_1 0x5801 119#define SPR_MPL_DMA_CPL_SET_1 0x5801
92#define SPR_MPL_DMA_CPL_SET_2 0x5802 120#define SPR_MPL_DMA_CPL_SET_2 0x5802
93#define SPR_MPL_DMA_NOTIFY_SET_0 0x3800 121#define SPR_MPL_DMA_NOTIFY_SET_0 0x3800
94#define SPR_MPL_DMA_NOTIFY_SET_1 0x3801 122#define SPR_MPL_DMA_NOTIFY_SET_1 0x3801
95#define SPR_MPL_DMA_NOTIFY_SET_2 0x3802 123#define SPR_MPL_DMA_NOTIFY_SET_2 0x3802
124#define SPR_MPL_IDN_ACCESS_SET_0 0x0a00
125#define SPR_MPL_IDN_ACCESS_SET_1 0x0a01
126#define SPR_MPL_IDN_ACCESS_SET_2 0x0a02
127#define SPR_MPL_IDN_AVAIL_SET_0 0x3e00
128#define SPR_MPL_IDN_AVAIL_SET_1 0x3e01
129#define SPR_MPL_IDN_AVAIL_SET_2 0x3e02
130#define SPR_MPL_IDN_CA_SET_0 0x3a00
131#define SPR_MPL_IDN_CA_SET_1 0x3a01
132#define SPR_MPL_IDN_CA_SET_2 0x3a02
133#define SPR_MPL_IDN_COMPLETE_SET_0 0x1200
134#define SPR_MPL_IDN_COMPLETE_SET_1 0x1201
135#define SPR_MPL_IDN_COMPLETE_SET_2 0x1202
136#define SPR_MPL_IDN_FIREWALL_SET_0 0x2e00
137#define SPR_MPL_IDN_FIREWALL_SET_1 0x2e01
138#define SPR_MPL_IDN_FIREWALL_SET_2 0x2e02
139#define SPR_MPL_IDN_REFILL_SET_0 0x0e00
140#define SPR_MPL_IDN_REFILL_SET_1 0x0e01
141#define SPR_MPL_IDN_REFILL_SET_2 0x0e02
142#define SPR_MPL_IDN_TIMER_SET_0 0x3400
143#define SPR_MPL_IDN_TIMER_SET_1 0x3401
144#define SPR_MPL_IDN_TIMER_SET_2 0x3402
96#define SPR_MPL_INTCTRL_0_SET_0 0x4a00 145#define SPR_MPL_INTCTRL_0_SET_0 0x4a00
97#define SPR_MPL_INTCTRL_0_SET_1 0x4a01 146#define SPR_MPL_INTCTRL_0_SET_1 0x4a01
98#define SPR_MPL_INTCTRL_0_SET_2 0x4a02 147#define SPR_MPL_INTCTRL_0_SET_2 0x4a02
@@ -102,6 +151,9 @@
102#define SPR_MPL_INTCTRL_2_SET_0 0x4600 151#define SPR_MPL_INTCTRL_2_SET_0 0x4600
103#define SPR_MPL_INTCTRL_2_SET_1 0x4601 152#define SPR_MPL_INTCTRL_2_SET_1 0x4601
104#define SPR_MPL_INTCTRL_2_SET_2 0x4602 153#define SPR_MPL_INTCTRL_2_SET_2 0x4602
154#define SPR_MPL_PERF_COUNT_SET_0 0x4200
155#define SPR_MPL_PERF_COUNT_SET_1 0x4201
156#define SPR_MPL_PERF_COUNT_SET_2 0x4202
105#define SPR_MPL_SN_ACCESS_SET_0 0x0800 157#define SPR_MPL_SN_ACCESS_SET_0 0x0800
106#define SPR_MPL_SN_ACCESS_SET_1 0x0801 158#define SPR_MPL_SN_ACCESS_SET_1 0x0801
107#define SPR_MPL_SN_ACCESS_SET_2 0x0802 159#define SPR_MPL_SN_ACCESS_SET_2 0x0802
@@ -181,6 +233,7 @@
181#define SPR_UDN_DEMUX_STATUS 0x0c0d 233#define SPR_UDN_DEMUX_STATUS 0x0c0d
182#define SPR_UDN_DEMUX_WRITE_FIFO 0x0c0e 234#define SPR_UDN_DEMUX_WRITE_FIFO 0x0c0e
183#define SPR_UDN_DIRECTION_PROTECT 0x3005 235#define SPR_UDN_DIRECTION_PROTECT 0x3005
236#define SPR_UDN_PENDING 0x0c10
184#define SPR_UDN_REFILL_EN 0x1005 237#define SPR_UDN_REFILL_EN 0x1005
185#define SPR_UDN_SP_FIFO_DATA 0x0c11 238#define SPR_UDN_SP_FIFO_DATA 0x0c11
186#define SPR_UDN_SP_FIFO_SEL 0x0c12 239#define SPR_UDN_SP_FIFO_SEL 0x0c12
@@ -195,6 +248,9 @@
195#define SPR_UDN_TAG_3 0x0c18 248#define SPR_UDN_TAG_3 0x0c18
196#define SPR_UDN_TAG_VALID 0x0c19 249#define SPR_UDN_TAG_VALID 0x0c19
197#define SPR_UDN_TILE_COORD 0x0c1a 250#define SPR_UDN_TILE_COORD 0x0c1a
251#define SPR_WATCH_CTL 0x4209
252#define SPR_WATCH_MASK 0x420a
253#define SPR_WATCH_VAL 0x420b
198 254
199#endif /* !defined(__ARCH_SPR_DEF_H__) */ 255#endif /* !defined(__ARCH_SPR_DEF_H__) */
200 256
diff --git a/arch/tile/include/arch/spr_def_64.h b/arch/tile/include/arch/spr_def_64.h
index cd3e5f95d5fd..0da86faa3370 100644
--- a/arch/tile/include/arch/spr_def_64.h
+++ b/arch/tile/include/arch/spr_def_64.h
@@ -52,6 +52,13 @@
52#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1 52#define SPR_EX_CONTEXT_2_1__ICS_RMASK 0x1
53#define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4 53#define SPR_EX_CONTEXT_2_1__ICS_MASK 0x4
54#define SPR_FAIL 0x2707 54#define SPR_FAIL 0x2707
55#define SPR_IDN_AVAIL_EN 0x1a05
56#define SPR_IDN_DATA_AVAIL 0x0a80
57#define SPR_IDN_DEADLOCK_TIMEOUT 0x1806
58#define SPR_IDN_DEMUX_COUNT_0 0x0a05
59#define SPR_IDN_DEMUX_COUNT_1 0x0a06
60#define SPR_IDN_DIRECTION_PROTECT 0x1405
61#define SPR_IDN_PENDING 0x0a08
55#define SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK 0x1 62#define SPR_ILL_TRANS_REASON__I_STREAM_VA_RMASK 0x1
56#define SPR_INTCTRL_0_STATUS 0x2505 63#define SPR_INTCTRL_0_STATUS 0x2505
57#define SPR_INTCTRL_1_STATUS 0x2405 64#define SPR_INTCTRL_1_STATUS 0x2405
@@ -88,9 +95,27 @@
88#define SPR_IPI_MASK_SET_0 0x1f0a 95#define SPR_IPI_MASK_SET_0 0x1f0a
89#define SPR_IPI_MASK_SET_1 0x1e0a 96#define SPR_IPI_MASK_SET_1 0x1e0a
90#define SPR_IPI_MASK_SET_2 0x1d0a 97#define SPR_IPI_MASK_SET_2 0x1d0a
98#define SPR_MPL_AUX_PERF_COUNT_SET_0 0x2100
99#define SPR_MPL_AUX_PERF_COUNT_SET_1 0x2101
100#define SPR_MPL_AUX_PERF_COUNT_SET_2 0x2102
91#define SPR_MPL_AUX_TILE_TIMER_SET_0 0x1700 101#define SPR_MPL_AUX_TILE_TIMER_SET_0 0x1700
92#define SPR_MPL_AUX_TILE_TIMER_SET_1 0x1701 102#define SPR_MPL_AUX_TILE_TIMER_SET_1 0x1701
93#define SPR_MPL_AUX_TILE_TIMER_SET_2 0x1702 103#define SPR_MPL_AUX_TILE_TIMER_SET_2 0x1702
104#define SPR_MPL_IDN_ACCESS_SET_0 0x0a00
105#define SPR_MPL_IDN_ACCESS_SET_1 0x0a01
106#define SPR_MPL_IDN_ACCESS_SET_2 0x0a02
107#define SPR_MPL_IDN_AVAIL_SET_0 0x1a00
108#define SPR_MPL_IDN_AVAIL_SET_1 0x1a01
109#define SPR_MPL_IDN_AVAIL_SET_2 0x1a02
110#define SPR_MPL_IDN_COMPLETE_SET_0 0x0500
111#define SPR_MPL_IDN_COMPLETE_SET_1 0x0501
112#define SPR_MPL_IDN_COMPLETE_SET_2 0x0502
113#define SPR_MPL_IDN_FIREWALL_SET_0 0x1400
114#define SPR_MPL_IDN_FIREWALL_SET_1 0x1401
115#define SPR_MPL_IDN_FIREWALL_SET_2 0x1402
116#define SPR_MPL_IDN_TIMER_SET_0 0x1800
117#define SPR_MPL_IDN_TIMER_SET_1 0x1801
118#define SPR_MPL_IDN_TIMER_SET_2 0x1802
94#define SPR_MPL_INTCTRL_0_SET_0 0x2500 119#define SPR_MPL_INTCTRL_0_SET_0 0x2500
95#define SPR_MPL_INTCTRL_0_SET_1 0x2501 120#define SPR_MPL_INTCTRL_0_SET_1 0x2501
96#define SPR_MPL_INTCTRL_0_SET_2 0x2502 121#define SPR_MPL_INTCTRL_0_SET_2 0x2502
@@ -100,6 +125,21 @@
100#define SPR_MPL_INTCTRL_2_SET_0 0x2300 125#define SPR_MPL_INTCTRL_2_SET_0 0x2300
101#define SPR_MPL_INTCTRL_2_SET_1 0x2301 126#define SPR_MPL_INTCTRL_2_SET_1 0x2301
102#define SPR_MPL_INTCTRL_2_SET_2 0x2302 127#define SPR_MPL_INTCTRL_2_SET_2 0x2302
128#define SPR_MPL_IPI_0 0x1f04
129#define SPR_MPL_IPI_0_SET_0 0x1f00
130#define SPR_MPL_IPI_0_SET_1 0x1f01
131#define SPR_MPL_IPI_0_SET_2 0x1f02
132#define SPR_MPL_IPI_1 0x1e04
133#define SPR_MPL_IPI_1_SET_0 0x1e00
134#define SPR_MPL_IPI_1_SET_1 0x1e01
135#define SPR_MPL_IPI_1_SET_2 0x1e02
136#define SPR_MPL_IPI_2 0x1d04
137#define SPR_MPL_IPI_2_SET_0 0x1d00
138#define SPR_MPL_IPI_2_SET_1 0x1d01
139#define SPR_MPL_IPI_2_SET_2 0x1d02
140#define SPR_MPL_PERF_COUNT_SET_0 0x2000
141#define SPR_MPL_PERF_COUNT_SET_1 0x2001
142#define SPR_MPL_PERF_COUNT_SET_2 0x2002
103#define SPR_MPL_UDN_ACCESS_SET_0 0x0b00 143#define SPR_MPL_UDN_ACCESS_SET_0 0x0b00
104#define SPR_MPL_UDN_ACCESS_SET_1 0x0b01 144#define SPR_MPL_UDN_ACCESS_SET_1 0x0b01
105#define SPR_MPL_UDN_ACCESS_SET_2 0x0b02 145#define SPR_MPL_UDN_ACCESS_SET_2 0x0b02
@@ -167,6 +207,9 @@
167#define SPR_UDN_DEMUX_COUNT_2 0x0b07 207#define SPR_UDN_DEMUX_COUNT_2 0x0b07
168#define SPR_UDN_DEMUX_COUNT_3 0x0b08 208#define SPR_UDN_DEMUX_COUNT_3 0x0b08
169#define SPR_UDN_DIRECTION_PROTECT 0x1505 209#define SPR_UDN_DIRECTION_PROTECT 0x1505
210#define SPR_UDN_PENDING 0x0b0a
211#define SPR_WATCH_MASK 0x200a
212#define SPR_WATCH_VAL 0x200b
170 213
171#endif /* !defined(__ARCH_SPR_DEF_H__) */ 214#endif /* !defined(__ARCH_SPR_DEF_H__) */
172 215
diff --git a/arch/tile/include/asm/Kbuild b/arch/tile/include/asm/Kbuild
index 0bb42642343a..143473e3a0bb 100644
--- a/arch/tile/include/asm/Kbuild
+++ b/arch/tile/include/asm/Kbuild
@@ -2,6 +2,7 @@ include include/asm-generic/Kbuild.asm
2 2
3header-y += ../arch/ 3header-y += ../arch/
4 4
5header-y += cachectl.h
5header-y += ucontext.h 6header-y += ucontext.h
6header-y += hardwall.h 7header-y += hardwall.h
7 8
@@ -21,7 +22,6 @@ generic-y += ipcbuf.h
21generic-y += irq_regs.h 22generic-y += irq_regs.h
22generic-y += kdebug.h 23generic-y += kdebug.h
23generic-y += local.h 24generic-y += local.h
24generic-y += module.h
25generic-y += msgbuf.h 25generic-y += msgbuf.h
26generic-y += mutex.h 26generic-y += mutex.h
27generic-y += param.h 27generic-y += param.h
diff --git a/arch/tile/include/asm/atomic_32.h b/arch/tile/include/asm/atomic_32.h
index 54d1da826f93..e7fb5cfb9597 100644
--- a/arch/tile/include/asm/atomic_32.h
+++ b/arch/tile/include/asm/atomic_32.h
@@ -303,7 +303,14 @@ void __init_atomic_per_cpu(void);
303void __atomic_fault_unlock(int *lock_ptr); 303void __atomic_fault_unlock(int *lock_ptr);
304#endif 304#endif
305 305
306/* Return a pointer to the lock for the given address. */
307int *__atomic_hashed_lock(volatile void *v);
308
306/* Private helper routines in lib/atomic_asm_32.S */ 309/* Private helper routines in lib/atomic_asm_32.S */
310struct __get_user {
311 unsigned long val;
312 int err;
313};
307extern struct __get_user __atomic_cmpxchg(volatile int *p, 314extern struct __get_user __atomic_cmpxchg(volatile int *p,
308 int *lock, int o, int n); 315 int *lock, int o, int n);
309extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n); 316extern struct __get_user __atomic_xchg(volatile int *p, int *lock, int n);
@@ -319,6 +326,9 @@ extern u64 __atomic64_xchg_add(volatile u64 *p, int *lock, u64 n);
319extern u64 __atomic64_xchg_add_unless(volatile u64 *p, 326extern u64 __atomic64_xchg_add_unless(volatile u64 *p,
320 int *lock, u64 o, u64 n); 327 int *lock, u64 o, u64 n);
321 328
329/* Return failure from the atomic wrappers. */
330struct __get_user __atomic_bad_address(int __user *addr);
331
322#endif /* !__ASSEMBLY__ */ 332#endif /* !__ASSEMBLY__ */
323 333
324#endif /* _ASM_TILE_ATOMIC_32_H */ 334#endif /* _ASM_TILE_ATOMIC_32_H */
diff --git a/arch/tile/include/asm/bitops.h b/arch/tile/include/asm/bitops.h
index 16f1fa51fea1..bd186c4eaa50 100644
--- a/arch/tile/include/asm/bitops.h
+++ b/arch/tile/include/asm/bitops.h
@@ -77,6 +77,11 @@ static inline int ffs(int x)
77 return __builtin_ffs(x); 77 return __builtin_ffs(x);
78} 78}
79 79
80static inline int fls64(__u64 w)
81{
82 return (sizeof(__u64) * 8) - __builtin_clzll(w);
83}
84
80/** 85/**
81 * fls - find last set bit in word 86 * fls - find last set bit in word
82 * @x: the word to search 87 * @x: the word to search
@@ -90,12 +95,7 @@ static inline int ffs(int x)
90 */ 95 */
91static inline int fls(int x) 96static inline int fls(int x)
92{ 97{
93 return (sizeof(int) * 8) - __builtin_clz(x); 98 return fls64((unsigned int) x);
94}
95
96static inline int fls64(__u64 w)
97{
98 return (sizeof(__u64) * 8) - __builtin_clzll(w);
99} 99}
100 100
101static inline unsigned int __arch_hweight32(unsigned int w) 101static inline unsigned int __arch_hweight32(unsigned int w)
diff --git a/arch/tile/include/asm/byteorder.h b/arch/tile/include/asm/byteorder.h
index 9558416d578b..fb72ecf49218 100644
--- a/arch/tile/include/asm/byteorder.h
+++ b/arch/tile/include/asm/byteorder.h
@@ -1 +1,21 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#if defined (__BIG_ENDIAN__)
16#include <linux/byteorder/big_endian.h>
17#elif defined (__LITTLE_ENDIAN__)
1#include <linux/byteorder/little_endian.h> 18#include <linux/byteorder/little_endian.h>
19#else
20#error "__BIG_ENDIAN__ or __LITTLE_ENDIAN__ must be defined."
21#endif
diff --git a/arch/tile/include/asm/cachectl.h b/arch/tile/include/asm/cachectl.h
new file mode 100644
index 000000000000..af4c9f9154d1
--- /dev/null
+++ b/arch/tile/include/asm/cachectl.h
@@ -0,0 +1,42 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#ifndef _ASM_TILE_CACHECTL_H
16#define _ASM_TILE_CACHECTL_H
17
18/*
19 * Options for cacheflush system call.
20 *
21 * The ICACHE flush is performed on all cores currently running the
22 * current process's address space. The intent is for user
23 * applications to be able to modify code, invoke the system call,
24 * then allow arbitrary other threads in the same address space to see
25 * the newly-modified code. Passing a length of CHIP_L1I_CACHE_SIZE()
26 * or more invalidates the entire icache on all cores in the address
27 * spaces. (Note: currently this option invalidates the entire icache
28 * regardless of the requested address and length, but we may choose
29 * to honor the arguments at some point.)
30 *
31 * Flush and invalidation of memory can normally be performed with the
32 * __insn_flush(), __insn_inv(), and __insn_finv() instructions from
33 * userspace. The DCACHE option to the system call allows userspace
34 * to flush the entire L1+L2 data cache from the core. In this case,
35 * the address and length arguments are not used. The DCACHE flush is
36 * restricted to the current core, not all cores in the address space.
37 */
38#define ICACHE (1<<0) /* invalidate L1 instruction cache */
39#define DCACHE (1<<1) /* flush and invalidate data cache */
40#define BCACHE (ICACHE|DCACHE) /* flush both caches */
41
42#endif /* _ASM_TILE_CACHECTL_H */
diff --git a/arch/tile/include/asm/compat.h b/arch/tile/include/asm/compat.h
index 4b4b28969a65..69adc08d36a5 100644
--- a/arch/tile/include/asm/compat.h
+++ b/arch/tile/include/asm/compat.h
@@ -242,9 +242,6 @@ long compat_sys_fallocate(int fd, int mode,
242long compat_sys_sched_rr_get_interval(compat_pid_t pid, 242long compat_sys_sched_rr_get_interval(compat_pid_t pid,
243 struct compat_timespec __user *interval); 243 struct compat_timespec __user *interval);
244 244
245/* Tilera Linux syscalls that don't have "compat" versions. */
246#define compat_sys_flush_cache sys_flush_cache
247
248/* These are the intvec_64.S trampolines. */ 245/* These are the intvec_64.S trampolines. */
249long _compat_sys_execve(const char __user *path, 246long _compat_sys_execve(const char __user *path,
250 const compat_uptr_t __user *argv, 247 const compat_uptr_t __user *argv,
diff --git a/arch/tile/include/asm/elf.h b/arch/tile/include/asm/elf.h
index 623a6bb741c1..d16d006d660e 100644
--- a/arch/tile/include/asm/elf.h
+++ b/arch/tile/include/asm/elf.h
@@ -44,7 +44,11 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
44#else 44#else
45#define ELF_CLASS ELFCLASS32 45#define ELF_CLASS ELFCLASS32
46#endif 46#endif
47#ifdef __BIG_ENDIAN__
48#define ELF_DATA ELFDATA2MSB
49#else
47#define ELF_DATA ELFDATA2LSB 50#define ELF_DATA ELFDATA2LSB
51#endif
48 52
49/* 53/*
50 * There seems to be a bug in how compat_binfmt_elf.c works: it 54 * There seems to be a bug in how compat_binfmt_elf.c works: it
@@ -59,6 +63,7 @@ enum { ELF_ARCH = CHIP_ELF_TYPE() };
59 */ 63 */
60#define elf_check_arch(x) \ 64#define elf_check_arch(x) \
61 ((x)->e_ident[EI_CLASS] == ELF_CLASS && \ 65 ((x)->e_ident[EI_CLASS] == ELF_CLASS && \
66 (x)->e_ident[EI_DATA] == ELF_DATA && \
62 (x)->e_machine == CHIP_ELF_TYPE()) 67 (x)->e_machine == CHIP_ELF_TYPE())
63 68
64/* The module loader only handles a few relocation types. */ 69/* The module loader only handles a few relocation types. */
diff --git a/arch/tile/include/asm/futex.h b/arch/tile/include/asm/futex.h
index d03ec124a598..5909ac3d7218 100644
--- a/arch/tile/include/asm/futex.h
+++ b/arch/tile/include/asm/futex.h
@@ -28,29 +28,81 @@
28#include <linux/futex.h> 28#include <linux/futex.h>
29#include <linux/uaccess.h> 29#include <linux/uaccess.h>
30#include <linux/errno.h> 30#include <linux/errno.h>
31#include <asm/atomic.h>
31 32
32extern struct __get_user futex_set(u32 __user *v, int i); 33/*
33extern struct __get_user futex_add(u32 __user *v, int n); 34 * Support macros for futex operations. Do not use these macros directly.
34extern struct __get_user futex_or(u32 __user *v, int n); 35 * They assume "ret", "val", "oparg", and "uaddr" in the lexical context.
35extern struct __get_user futex_andn(u32 __user *v, int n); 36 * __futex_cmpxchg() additionally assumes "oldval".
36extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n); 37 */
38
39#ifdef __tilegx__
40
41#define __futex_asm(OP) \
42 asm("1: {" #OP " %1, %3, %4; movei %0, 0 }\n" \
43 ".pushsection .fixup,\"ax\"\n" \
44 "0: { movei %0, %5; j 9f }\n" \
45 ".section __ex_table,\"a\"\n" \
46 ".quad 1b, 0b\n" \
47 ".popsection\n" \
48 "9:" \
49 : "=r" (ret), "=r" (val), "+m" (*(uaddr)) \
50 : "r" (uaddr), "r" (oparg), "i" (-EFAULT))
51
52#define __futex_set() __futex_asm(exch4)
53#define __futex_add() __futex_asm(fetchadd4)
54#define __futex_or() __futex_asm(fetchor4)
55#define __futex_andn() ({ oparg = ~oparg; __futex_asm(fetchand4); })
56#define __futex_cmpxchg() \
57 ({ __insn_mtspr(SPR_CMPEXCH_VALUE, oldval); __futex_asm(cmpexch4); })
58
59#define __futex_xor() \
60 ({ \
61 u32 oldval, n = oparg; \
62 if ((ret = __get_user(oldval, uaddr)) == 0) { \
63 do { \
64 oparg = oldval ^ n; \
65 __futex_cmpxchg(); \
66 } while (ret == 0 && oldval != val); \
67 } \
68 })
69
70/* No need to prefetch, since the atomic ops go to the home cache anyway. */
71#define __futex_prolog()
37 72
38#ifndef __tilegx__
39extern struct __get_user futex_xor(u32 __user *v, int n);
40#else 73#else
41static inline struct __get_user futex_xor(u32 __user *uaddr, int n) 74
42{ 75#define __futex_call(FN) \
43 struct __get_user asm_ret = __get_user_4(uaddr); 76 { \
44 if (!asm_ret.err) { 77 struct __get_user gu = FN((u32 __force *)uaddr, lock, oparg); \
45 int oldval, newval; 78 val = gu.val; \
46 do { 79 ret = gu.err; \
47 oldval = asm_ret.val;
48 newval = oldval ^ n;
49 asm_ret = futex_cmpxchg(uaddr, oldval, newval);
50 } while (asm_ret.err == 0 && oldval != asm_ret.val);
51 } 80 }
52 return asm_ret; 81
53} 82#define __futex_set() __futex_call(__atomic_xchg)
83#define __futex_add() __futex_call(__atomic_xchg_add)
84#define __futex_or() __futex_call(__atomic_or)
85#define __futex_andn() __futex_call(__atomic_andn)
86#define __futex_xor() __futex_call(__atomic_xor)
87
88#define __futex_cmpxchg() \
89 { \
90 struct __get_user gu = __atomic_cmpxchg((u32 __force *)uaddr, \
91 lock, oldval, oparg); \
92 val = gu.val; \
93 ret = gu.err; \
94 }
95
96/*
97 * Find the lock pointer for the atomic calls to use, and issue a
98 * prefetch to the user address to bring it into cache. Similar to
99 * __atomic_setup(), but we can't do a read into the L1 since it might
100 * fault; instead we do a prefetch into the L2.
101 */
102#define __futex_prolog() \
103 int *lock; \
104 __insn_prefetch(uaddr); \
105 lock = __atomic_hashed_lock((int __force *)uaddr)
54#endif 106#endif
55 107
56static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) 108static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
@@ -59,8 +111,12 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
59 int cmp = (encoded_op >> 24) & 15; 111 int cmp = (encoded_op >> 24) & 15;
60 int oparg = (encoded_op << 8) >> 20; 112 int oparg = (encoded_op << 8) >> 20;
61 int cmparg = (encoded_op << 20) >> 20; 113 int cmparg = (encoded_op << 20) >> 20;
62 int ret; 114 int uninitialized_var(val), ret;
63 struct __get_user asm_ret; 115
116 __futex_prolog();
117
118 /* The 32-bit futex code makes this assumption, so validate it here. */
119 BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
64 120
65 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) 121 if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
66 oparg = 1 << oparg; 122 oparg = 1 << oparg;
@@ -71,46 +127,45 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
71 pagefault_disable(); 127 pagefault_disable();
72 switch (op) { 128 switch (op) {
73 case FUTEX_OP_SET: 129 case FUTEX_OP_SET:
74 asm_ret = futex_set(uaddr, oparg); 130 __futex_set();
75 break; 131 break;
76 case FUTEX_OP_ADD: 132 case FUTEX_OP_ADD:
77 asm_ret = futex_add(uaddr, oparg); 133 __futex_add();
78 break; 134 break;
79 case FUTEX_OP_OR: 135 case FUTEX_OP_OR:
80 asm_ret = futex_or(uaddr, oparg); 136 __futex_or();
81 break; 137 break;
82 case FUTEX_OP_ANDN: 138 case FUTEX_OP_ANDN:
83 asm_ret = futex_andn(uaddr, oparg); 139 __futex_andn();
84 break; 140 break;
85 case FUTEX_OP_XOR: 141 case FUTEX_OP_XOR:
86 asm_ret = futex_xor(uaddr, oparg); 142 __futex_xor();
87 break; 143 break;
88 default: 144 default:
89 asm_ret.err = -ENOSYS; 145 ret = -ENOSYS;
146 break;
90 } 147 }
91 pagefault_enable(); 148 pagefault_enable();
92 149
93 ret = asm_ret.err;
94
95 if (!ret) { 150 if (!ret) {
96 switch (cmp) { 151 switch (cmp) {
97 case FUTEX_OP_CMP_EQ: 152 case FUTEX_OP_CMP_EQ:
98 ret = (asm_ret.val == cmparg); 153 ret = (val == cmparg);
99 break; 154 break;
100 case FUTEX_OP_CMP_NE: 155 case FUTEX_OP_CMP_NE:
101 ret = (asm_ret.val != cmparg); 156 ret = (val != cmparg);
102 break; 157 break;
103 case FUTEX_OP_CMP_LT: 158 case FUTEX_OP_CMP_LT:
104 ret = (asm_ret.val < cmparg); 159 ret = (val < cmparg);
105 break; 160 break;
106 case FUTEX_OP_CMP_GE: 161 case FUTEX_OP_CMP_GE:
107 ret = (asm_ret.val >= cmparg); 162 ret = (val >= cmparg);
108 break; 163 break;
109 case FUTEX_OP_CMP_LE: 164 case FUTEX_OP_CMP_LE:
110 ret = (asm_ret.val <= cmparg); 165 ret = (val <= cmparg);
111 break; 166 break;
112 case FUTEX_OP_CMP_GT: 167 case FUTEX_OP_CMP_GT:
113 ret = (asm_ret.val > cmparg); 168 ret = (val > cmparg);
114 break; 169 break;
115 default: 170 default:
116 ret = -ENOSYS; 171 ret = -ENOSYS;
@@ -120,22 +175,20 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
120} 175}
121 176
122static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, 177static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
123 u32 oldval, u32 newval) 178 u32 oldval, u32 oparg)
124{ 179{
125 struct __get_user asm_ret; 180 int ret, val;
181
182 __futex_prolog();
126 183
127 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) 184 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
128 return -EFAULT; 185 return -EFAULT;
129 186
130 asm_ret = futex_cmpxchg(uaddr, oldval, newval); 187 __futex_cmpxchg();
131 *uval = asm_ret.val;
132 return asm_ret.err;
133}
134 188
135#ifndef __tilegx__ 189 *uval = val;
136/* Return failure from the atomic wrappers. */ 190 return ret;
137struct __get_user __atomic_bad_address(int __user *addr); 191}
138#endif
139 192
140#endif /* !__ASSEMBLY__ */ 193#endif /* !__ASSEMBLY__ */
141 194
diff --git a/arch/tile/include/asm/hardwall.h b/arch/tile/include/asm/hardwall.h
index 2ac422848c7d..47514a58d685 100644
--- a/arch/tile/include/asm/hardwall.h
+++ b/arch/tile/include/asm/hardwall.h
@@ -11,12 +11,14 @@
11 * NON INFRINGEMENT. See the GNU General Public License for 11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details. 12 * more details.
13 * 13 *
14 * Provide methods for the HARDWALL_FILE for accessing the UDN. 14 * Provide methods for access control of per-cpu resources like
15 * UDN, IDN, or IPI.
15 */ 16 */
16 17
17#ifndef _ASM_TILE_HARDWALL_H 18#ifndef _ASM_TILE_HARDWALL_H
18#define _ASM_TILE_HARDWALL_H 19#define _ASM_TILE_HARDWALL_H
19 20
21#include <arch/chip.h>
20#include <linux/ioctl.h> 22#include <linux/ioctl.h>
21 23
22#define HARDWALL_IOCTL_BASE 0xa2 24#define HARDWALL_IOCTL_BASE 0xa2
@@ -24,8 +26,9 @@
24/* 26/*
25 * The HARDWALL_CREATE() ioctl is a macro with a "size" argument. 27 * The HARDWALL_CREATE() ioctl is a macro with a "size" argument.
26 * The resulting ioctl value is passed to the kernel in conjunction 28 * The resulting ioctl value is passed to the kernel in conjunction
27 * with a pointer to a little-endian bitmask of cpus, which must be 29 * with a pointer to a standard kernel bitmask of cpus.
28 * physically in a rectangular configuration on the chip. 30 * For network resources (UDN or IDN) the bitmask must physically
31 * represent a rectangular configuration on the chip.
29 * The "size" is the number of bytes of cpu mask data. 32 * The "size" is the number of bytes of cpu mask data.
30 */ 33 */
31#define _HARDWALL_CREATE 1 34#define _HARDWALL_CREATE 1
@@ -44,13 +47,7 @@
44#define HARDWALL_GET_ID \ 47#define HARDWALL_GET_ID \
45 _IO(HARDWALL_IOCTL_BASE, _HARDWALL_GET_ID) 48 _IO(HARDWALL_IOCTL_BASE, _HARDWALL_GET_ID)
46 49
47#ifndef __KERNEL__ 50#ifdef __KERNEL__
48
49/* This is the canonical name expected by userspace. */
50#define HARDWALL_FILE "/dev/hardwall"
51
52#else
53
54/* /proc hooks for hardwall. */ 51/* /proc hooks for hardwall. */
55struct proc_dir_entry; 52struct proc_dir_entry;
56#ifdef CONFIG_HARDWALL 53#ifdef CONFIG_HARDWALL
@@ -59,7 +56,6 @@ int proc_pid_hardwall(struct task_struct *task, char *buffer);
59#else 56#else
60static inline void proc_tile_hardwall_init(struct proc_dir_entry *root) {} 57static inline void proc_tile_hardwall_init(struct proc_dir_entry *root) {}
61#endif 58#endif
62
63#endif 59#endif
64 60
65#endif /* _ASM_TILE_HARDWALL_H */ 61#endif /* _ASM_TILE_HARDWALL_H */
diff --git a/arch/tile/include/asm/hugetlb.h b/arch/tile/include/asm/hugetlb.h
index d396d1805163..b2042380a5aa 100644
--- a/arch/tile/include/asm/hugetlb.h
+++ b/arch/tile/include/asm/hugetlb.h
@@ -106,4 +106,25 @@ static inline void arch_release_hugepage(struct page *page)
106{ 106{
107} 107}
108 108
109#ifdef CONFIG_HUGETLB_SUPER_PAGES
110static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
111 struct page *page, int writable)
112{
113 size_t pagesize = huge_page_size(hstate_vma(vma));
114 if (pagesize != PUD_SIZE && pagesize != PMD_SIZE)
115 entry = pte_mksuper(entry);
116 return entry;
117}
118#define arch_make_huge_pte arch_make_huge_pte
119
120/* Sizes to scale up page size for PTEs with HV_PTE_SUPER bit. */
121enum {
122 HUGE_SHIFT_PGDIR = 0,
123 HUGE_SHIFT_PMD = 1,
124 HUGE_SHIFT_PAGE = 2,
125 HUGE_SHIFT_ENTRIES
126};
127extern int huge_shift[HUGE_SHIFT_ENTRIES];
128#endif
129
109#endif /* _ASM_TILE_HUGETLB_H */ 130#endif /* _ASM_TILE_HUGETLB_H */
diff --git a/arch/tile/include/asm/irqflags.h b/arch/tile/include/asm/irqflags.h
index 5db0ce54284d..b4e96fef2cf8 100644
--- a/arch/tile/include/asm/irqflags.h
+++ b/arch/tile/include/asm/irqflags.h
@@ -28,10 +28,10 @@
28 */ 28 */
29#if CHIP_HAS_AUX_PERF_COUNTERS() 29#if CHIP_HAS_AUX_PERF_COUNTERS()
30#define LINUX_MASKABLE_INTERRUPTS_HI \ 30#define LINUX_MASKABLE_INTERRUPTS_HI \
31 (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT))) 31 (~(INT_MASK_HI(INT_PERF_COUNT) | INT_MASK_HI(INT_AUX_PERF_COUNT)))
32#else 32#else
33#define LINUX_MASKABLE_INTERRUPTS_HI \ 33#define LINUX_MASKABLE_INTERRUPTS_HI \
34 (~(INT_MASK_HI(INT_PERF_COUNT))) 34 (~(INT_MASK_HI(INT_PERF_COUNT)))
35#endif 35#endif
36 36
37#else 37#else
@@ -90,6 +90,14 @@
90 __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \ 90 __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_0, (unsigned long)(__m)); \
91 __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \ 91 __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K_1, (unsigned long)(__m>>32)); \
92} while (0) 92} while (0)
93#define interrupt_mask_save_mask() \
94 (__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_0) | \
95 (((unsigned long long)__insn_mfspr(SPR_INTERRUPT_MASK_SET_K_1))<<32))
96#define interrupt_mask_restore_mask(mask) do { \
97 unsigned long long __m = (mask); \
98 __insn_mtspr(SPR_INTERRUPT_MASK_K_0, (unsigned long)(__m)); \
99 __insn_mtspr(SPR_INTERRUPT_MASK_K_1, (unsigned long)(__m>>32)); \
100} while (0)
93#else 101#else
94#define interrupt_mask_set(n) \ 102#define interrupt_mask_set(n) \
95 __insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n))) 103 __insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (1UL << (n)))
@@ -101,6 +109,10 @@
101 __insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask)) 109 __insn_mtspr(SPR_INTERRUPT_MASK_SET_K, (mask))
102#define interrupt_mask_reset_mask(mask) \ 110#define interrupt_mask_reset_mask(mask) \
103 __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask)) 111 __insn_mtspr(SPR_INTERRUPT_MASK_RESET_K, (mask))
112#define interrupt_mask_save_mask() \
113 __insn_mfspr(SPR_INTERRUPT_MASK_K)
114#define interrupt_mask_restore_mask(mask) \
115 __insn_mtspr(SPR_INTERRUPT_MASK_K, (mask))
104#endif 116#endif
105 117
106/* 118/*
@@ -122,7 +134,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
122 134
123/* Disable all interrupts, including NMIs. */ 135/* Disable all interrupts, including NMIs. */
124#define arch_local_irq_disable_all() \ 136#define arch_local_irq_disable_all() \
125 interrupt_mask_set_mask(-1UL) 137 interrupt_mask_set_mask(-1ULL)
126 138
127/* Re-enable all maskable interrupts. */ 139/* Re-enable all maskable interrupts. */
128#define arch_local_irq_enable() \ 140#define arch_local_irq_enable() \
@@ -179,7 +191,7 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
179#ifdef __tilegx__ 191#ifdef __tilegx__
180 192
181#if INT_MEM_ERROR != 0 193#if INT_MEM_ERROR != 0
182# error Fix IRQ_DISABLED() macro 194# error Fix IRQS_DISABLED() macro
183#endif 195#endif
184 196
185/* Return 0 or 1 to indicate whether interrupts are currently disabled. */ 197/* Return 0 or 1 to indicate whether interrupts are currently disabled. */
@@ -207,9 +219,10 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
207 mtspr SPR_INTERRUPT_MASK_SET_K, tmp 219 mtspr SPR_INTERRUPT_MASK_SET_K, tmp
208 220
209/* Enable interrupts. */ 221/* Enable interrupts. */
210#define IRQ_ENABLE(tmp0, tmp1) \ 222#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
211 GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \ 223 GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
212 ld tmp0, tmp0; \ 224 ld tmp0, tmp0
225#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
213 mtspr SPR_INTERRUPT_MASK_RESET_K, tmp0 226 mtspr SPR_INTERRUPT_MASK_RESET_K, tmp0
214 227
215#else /* !__tilegx__ */ 228#else /* !__tilegx__ */
@@ -253,17 +266,22 @@ DECLARE_PER_CPU(unsigned long long, interrupts_enabled_mask);
253 mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp 266 mtspr SPR_INTERRUPT_MASK_SET_K_1, tmp
254 267
255/* Enable interrupts. */ 268/* Enable interrupts. */
256#define IRQ_ENABLE(tmp0, tmp1) \ 269#define IRQ_ENABLE_LOAD(tmp0, tmp1) \
257 GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \ 270 GET_INTERRUPTS_ENABLED_MASK_PTR(tmp0); \
258 { \ 271 { \
259 lw tmp0, tmp0; \ 272 lw tmp0, tmp0; \
260 addi tmp1, tmp0, 4 \ 273 addi tmp1, tmp0, 4 \
261 }; \ 274 }; \
262 lw tmp1, tmp1; \ 275 lw tmp1, tmp1
276#define IRQ_ENABLE_APPLY(tmp0, tmp1) \
263 mtspr SPR_INTERRUPT_MASK_RESET_K_0, tmp0; \ 277 mtspr SPR_INTERRUPT_MASK_RESET_K_0, tmp0; \
264 mtspr SPR_INTERRUPT_MASK_RESET_K_1, tmp1 278 mtspr SPR_INTERRUPT_MASK_RESET_K_1, tmp1
265#endif 279#endif
266 280
281#define IRQ_ENABLE(tmp0, tmp1) \
282 IRQ_ENABLE_LOAD(tmp0, tmp1); \
283 IRQ_ENABLE_APPLY(tmp0, tmp1)
284
267/* 285/*
268 * Do the CPU's IRQ-state tracing from assembly code. We call a 286 * Do the CPU's IRQ-state tracing from assembly code. We call a
269 * C function, but almost everywhere we do, we don't mind clobbering 287 * C function, but almost everywhere we do, we don't mind clobbering
diff --git a/arch/tile/include/asm/kexec.h b/arch/tile/include/asm/kexec.h
index c11a6cc73bb8..fc98ccfc98ac 100644
--- a/arch/tile/include/asm/kexec.h
+++ b/arch/tile/include/asm/kexec.h
@@ -19,12 +19,24 @@
19 19
20#include <asm/page.h> 20#include <asm/page.h>
21 21
22#ifndef __tilegx__
22/* Maximum physical address we can use pages from. */ 23/* Maximum physical address we can use pages from. */
23#define KEXEC_SOURCE_MEMORY_LIMIT TASK_SIZE 24#define KEXEC_SOURCE_MEMORY_LIMIT TASK_SIZE
24/* Maximum address we can reach in physical address mode. */ 25/* Maximum address we can reach in physical address mode. */
25#define KEXEC_DESTINATION_MEMORY_LIMIT TASK_SIZE 26#define KEXEC_DESTINATION_MEMORY_LIMIT TASK_SIZE
26/* Maximum address we can use for the control code buffer. */ 27/* Maximum address we can use for the control code buffer. */
27#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE 28#define KEXEC_CONTROL_MEMORY_LIMIT TASK_SIZE
29#else
30/* We need to limit the memory below PGDIR_SIZE since
31 * we only setup page table for [0, PGDIR_SIZE) before final kexec.
32 */
33/* Maximum physical address we can use pages from. */
34#define KEXEC_SOURCE_MEMORY_LIMIT PGDIR_SIZE
35/* Maximum address we can reach in physical address mode. */
36#define KEXEC_DESTINATION_MEMORY_LIMIT PGDIR_SIZE
37/* Maximum address we can use for the control code buffer. */
38#define KEXEC_CONTROL_MEMORY_LIMIT PGDIR_SIZE
39#endif
28 40
29#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE 41#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE
30 42
diff --git a/arch/tile/include/asm/kvm_para.h b/arch/tile/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/tile/include/asm/kvm_para.h
@@ -0,0 +1 @@
#include <asm-generic/kvm_para.h>
diff --git a/arch/tile/include/asm/mmu.h b/arch/tile/include/asm/mmu.h
index 92f94c77b6e4..e2c789096795 100644
--- a/arch/tile/include/asm/mmu.h
+++ b/arch/tile/include/asm/mmu.h
@@ -21,7 +21,7 @@ struct mm_context {
21 * Written under the mmap_sem semaphore; read without the 21 * Written under the mmap_sem semaphore; read without the
22 * semaphore but atomically, but it is conservatively set. 22 * semaphore but atomically, but it is conservatively set.
23 */ 23 */
24 unsigned int priority_cached; 24 unsigned long priority_cached;
25}; 25};
26 26
27typedef struct mm_context mm_context_t; 27typedef struct mm_context mm_context_t;
diff --git a/arch/tile/include/asm/mmu_context.h b/arch/tile/include/asm/mmu_context.h
index 15fb24641120..37f0b741dee7 100644
--- a/arch/tile/include/asm/mmu_context.h
+++ b/arch/tile/include/asm/mmu_context.h
@@ -30,11 +30,15 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
30 return 0; 30 return 0;
31} 31}
32 32
33/* Note that arch/tile/kernel/head.S also calls hv_install_context() */ 33/*
34 * Note that arch/tile/kernel/head_NN.S and arch/tile/mm/migrate_NN.S
35 * also call hv_install_context().
36 */
34static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot) 37static inline void __install_page_table(pgd_t *pgdir, int asid, pgprot_t prot)
35{ 38{
36 /* FIXME: DIRECTIO should not always be set. FIXME. */ 39 /* FIXME: DIRECTIO should not always be set. FIXME. */
37 int rc = hv_install_context(__pa(pgdir), prot, asid, HV_CTX_DIRECTIO); 40 int rc = hv_install_context(__pa(pgdir), prot, asid,
41 HV_CTX_DIRECTIO | CTX_PAGE_FLAG);
38 if (rc < 0) 42 if (rc < 0)
39 panic("hv_install_context failed: %d", rc); 43 panic("hv_install_context failed: %d", rc);
40} 44}
diff --git a/arch/tile/include/asm/module.h b/arch/tile/include/asm/module.h
new file mode 100644
index 000000000000..44ed07ccd3d2
--- /dev/null
+++ b/arch/tile/include/asm/module.h
@@ -0,0 +1,40 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 */
14
15#ifndef _ASM_TILE_MODULE_H
16#define _ASM_TILE_MODULE_H
17
18#include <arch/chip.h>
19
20#include <asm-generic/module.h>
21
22/* We can't use modules built with different page sizes. */
23#if defined(CONFIG_PAGE_SIZE_16KB)
24# define MODULE_PGSZ " 16KB"
25#elif defined(CONFIG_PAGE_SIZE_64KB)
26# define MODULE_PGSZ " 64KB"
27#else
28# define MODULE_PGSZ ""
29#endif
30
31/* We don't really support no-SMP so tag if someone tries. */
32#ifdef CONFIG_SMP
33#define MODULE_NOSMP ""
34#else
35#define MODULE_NOSMP " nosmp"
36#endif
37
38#define MODULE_ARCH_VERMAGIC CHIP_ARCH_NAME MODULE_PGSZ MODULE_NOSMP
39
40#endif /* _ASM_TILE_MODULE_H */
diff --git a/arch/tile/include/asm/page.h b/arch/tile/include/asm/page.h
index db93518fac03..9d9131e5c552 100644
--- a/arch/tile/include/asm/page.h
+++ b/arch/tile/include/asm/page.h
@@ -20,8 +20,17 @@
20#include <arch/chip.h> 20#include <arch/chip.h>
21 21
22/* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */ 22/* PAGE_SHIFT and HPAGE_SHIFT determine the page sizes. */
23#define PAGE_SHIFT HV_LOG2_PAGE_SIZE_SMALL 23#if defined(CONFIG_PAGE_SIZE_16KB)
24#define HPAGE_SHIFT HV_LOG2_PAGE_SIZE_LARGE 24#define PAGE_SHIFT 14
25#define CTX_PAGE_FLAG HV_CTX_PG_SM_16K
26#elif defined(CONFIG_PAGE_SIZE_64KB)
27#define PAGE_SHIFT 16
28#define CTX_PAGE_FLAG HV_CTX_PG_SM_64K
29#else
30#define PAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_SMALL
31#define CTX_PAGE_FLAG 0
32#endif
33#define HPAGE_SHIFT HV_LOG2_DEFAULT_PAGE_SIZE_LARGE
25 34
26#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) 35#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
27#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) 36#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
@@ -78,8 +87,7 @@ typedef HV_PTE pgprot_t;
78/* 87/*
79 * User L2 page tables are managed as one L2 page table per page, 88 * User L2 page tables are managed as one L2 page table per page,
80 * because we use the page allocator for them. This keeps the allocation 89 * because we use the page allocator for them. This keeps the allocation
81 * simple and makes it potentially useful to implement HIGHPTE at some point. 90 * simple, but it's also inefficient, since L2 page tables are much smaller
82 * However, it's also inefficient, since L2 page tables are much smaller
83 * than pages (currently 2KB vs 64KB). So we should revisit this. 91 * than pages (currently 2KB vs 64KB). So we should revisit this.
84 */ 92 */
85typedef struct page *pgtable_t; 93typedef struct page *pgtable_t;
@@ -128,7 +136,7 @@ static inline __attribute_const__ int get_order(unsigned long size)
128 136
129#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) 137#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
130 138
131#define HUGE_MAX_HSTATE 2 139#define HUGE_MAX_HSTATE 6
132 140
133#ifdef CONFIG_HUGETLB_PAGE 141#ifdef CONFIG_HUGETLB_PAGE
134#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA 142#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
diff --git a/arch/tile/include/asm/pgalloc.h b/arch/tile/include/asm/pgalloc.h
index e919c0bdc22d..1b902508b664 100644
--- a/arch/tile/include/asm/pgalloc.h
+++ b/arch/tile/include/asm/pgalloc.h
@@ -19,24 +19,24 @@
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <linux/mmzone.h> 20#include <linux/mmzone.h>
21#include <asm/fixmap.h> 21#include <asm/fixmap.h>
22#include <asm/page.h>
22#include <hv/hypervisor.h> 23#include <hv/hypervisor.h>
23 24
24/* Bits for the size of the second-level page table. */ 25/* Bits for the size of the second-level page table. */
25#define L2_KERNEL_PGTABLE_SHIFT \ 26#define L2_KERNEL_PGTABLE_SHIFT _HV_LOG2_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
26 (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL + HV_LOG2_PTE_SIZE) 27
28/* How big is a kernel L2 page table? */
29#define L2_KERNEL_PGTABLE_SIZE (1UL << L2_KERNEL_PGTABLE_SHIFT)
27 30
28/* We currently allocate user L2 page tables by page (unlike kernel L2s). */ 31/* We currently allocate user L2 page tables by page (unlike kernel L2s). */
29#if L2_KERNEL_PGTABLE_SHIFT < HV_LOG2_PAGE_SIZE_SMALL 32#if L2_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
30#define L2_USER_PGTABLE_SHIFT HV_LOG2_PAGE_SIZE_SMALL 33#define L2_USER_PGTABLE_SHIFT PAGE_SHIFT
31#else 34#else
32#define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT 35#define L2_USER_PGTABLE_SHIFT L2_KERNEL_PGTABLE_SHIFT
33#endif 36#endif
34 37
35/* How many pages do we need, as an "order", for a user L2 page table? */ 38/* How many pages do we need, as an "order", for a user L2 page table? */
36#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - HV_LOG2_PAGE_SIZE_SMALL) 39#define L2_USER_PGTABLE_ORDER (L2_USER_PGTABLE_SHIFT - PAGE_SHIFT)
37
38/* How big is a kernel L2 page table? */
39#define L2_KERNEL_PGTABLE_SIZE (1 << L2_KERNEL_PGTABLE_SHIFT)
40 40
41static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) 41static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
42{ 42{
@@ -50,14 +50,14 @@ static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
50static inline void pmd_populate_kernel(struct mm_struct *mm, 50static inline void pmd_populate_kernel(struct mm_struct *mm,
51 pmd_t *pmd, pte_t *ptep) 51 pmd_t *pmd, pte_t *ptep)
52{ 52{
53 set_pmd(pmd, ptfn_pmd(__pa(ptep) >> HV_LOG2_PAGE_TABLE_ALIGN, 53 set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(__pa(ptep)),
54 __pgprot(_PAGE_PRESENT))); 54 __pgprot(_PAGE_PRESENT)));
55} 55}
56 56
57static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, 57static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
58 pgtable_t page) 58 pgtable_t page)
59{ 59{
60 set_pmd(pmd, ptfn_pmd(HV_PFN_TO_PTFN(page_to_pfn(page)), 60 set_pmd(pmd, ptfn_pmd(HV_CPA_TO_PTFN(PFN_PHYS(page_to_pfn(page))),
61 __pgprot(_PAGE_PRESENT))); 61 __pgprot(_PAGE_PRESENT)));
62} 62}
63 63
@@ -68,8 +68,20 @@ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
68extern pgd_t *pgd_alloc(struct mm_struct *mm); 68extern pgd_t *pgd_alloc(struct mm_struct *mm);
69extern void pgd_free(struct mm_struct *mm, pgd_t *pgd); 69extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
70 70
71extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address); 71extern pgtable_t pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
72extern void pte_free(struct mm_struct *mm, struct page *pte); 72 int order);
73extern void pgtable_free(struct mm_struct *mm, struct page *pte, int order);
74
75static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
76 unsigned long address)
77{
78 return pgtable_alloc_one(mm, address, L2_USER_PGTABLE_ORDER);
79}
80
81static inline void pte_free(struct mm_struct *mm, struct page *pte)
82{
83 pgtable_free(mm, pte, L2_USER_PGTABLE_ORDER);
84}
73 85
74#define pmd_pgtable(pmd) pmd_page(pmd) 86#define pmd_pgtable(pmd) pmd_page(pmd)
75 87
@@ -85,8 +97,13 @@ static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
85 pte_free(mm, virt_to_page(pte)); 97 pte_free(mm, virt_to_page(pte));
86} 98}
87 99
88extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte, 100extern void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
89 unsigned long address); 101 unsigned long address, int order);
102static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
103 unsigned long address)
104{
105 __pgtable_free_tlb(tlb, pte, address, L2_USER_PGTABLE_ORDER);
106}
90 107
91#define check_pgt_cache() do { } while (0) 108#define check_pgt_cache() do { } while (0)
92 109
@@ -104,19 +121,44 @@ void shatter_pmd(pmd_t *pmd);
104void shatter_huge_page(unsigned long addr); 121void shatter_huge_page(unsigned long addr);
105 122
106#ifdef __tilegx__ 123#ifdef __tilegx__
107/* We share a single page allocator for both L1 and L2 page tables. */ 124
108#if HV_L1_SIZE != HV_L2_SIZE
109# error Rework assumption that L1 and L2 page tables are same size.
110#endif
111#define L1_USER_PGTABLE_ORDER L2_USER_PGTABLE_ORDER
112#define pud_populate(mm, pud, pmd) \ 125#define pud_populate(mm, pud, pmd) \
113 pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd)) 126 pmd_populate_kernel((mm), (pmd_t *)(pud), (pte_t *)(pmd))
114#define pmd_alloc_one(mm, addr) \ 127
115 ((pmd_t *)page_to_virt(pte_alloc_one((mm), (addr)))) 128/* Bits for the size of the L1 (intermediate) page table. */
116#define pmd_free(mm, pmdp) \ 129#define L1_KERNEL_PGTABLE_SHIFT _HV_LOG2_L1_SIZE(HPAGE_SHIFT)
117 pte_free((mm), virt_to_page(pmdp)) 130
118#define __pmd_free_tlb(tlb, pmdp, address) \ 131/* How big is a kernel L2 page table? */
119 __pte_free_tlb((tlb), virt_to_page(pmdp), (address)) 132#define L1_KERNEL_PGTABLE_SIZE (1UL << L1_KERNEL_PGTABLE_SHIFT)
133
134/* We currently allocate L1 page tables by page. */
135#if L1_KERNEL_PGTABLE_SHIFT < PAGE_SHIFT
136#define L1_USER_PGTABLE_SHIFT PAGE_SHIFT
137#else
138#define L1_USER_PGTABLE_SHIFT L1_KERNEL_PGTABLE_SHIFT
120#endif 139#endif
121 140
141/* How many pages do we need, as an "order", for an L1 page table? */
142#define L1_USER_PGTABLE_ORDER (L1_USER_PGTABLE_SHIFT - PAGE_SHIFT)
143
144static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
145{
146 struct page *p = pgtable_alloc_one(mm, address, L1_USER_PGTABLE_ORDER);
147 return (pmd_t *)page_to_virt(p);
148}
149
150static inline void pmd_free(struct mm_struct *mm, pmd_t *pmdp)
151{
152 pgtable_free(mm, virt_to_page(pmdp), L1_USER_PGTABLE_ORDER);
153}
154
155static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmdp,
156 unsigned long address)
157{
158 __pgtable_free_tlb(tlb, virt_to_page(pmdp), address,
159 L1_USER_PGTABLE_ORDER);
160}
161
162#endif /* __tilegx__ */
163
122#endif /* _ASM_TILE_PGALLOC_H */ 164#endif /* _ASM_TILE_PGALLOC_H */
diff --git a/arch/tile/include/asm/pgtable.h b/arch/tile/include/asm/pgtable.h
index 67490910774d..73b1a4c9ad03 100644
--- a/arch/tile/include/asm/pgtable.h
+++ b/arch/tile/include/asm/pgtable.h
@@ -27,8 +27,10 @@
27#include <linux/slab.h> 27#include <linux/slab.h>
28#include <linux/list.h> 28#include <linux/list.h>
29#include <linux/spinlock.h> 29#include <linux/spinlock.h>
30#include <linux/pfn.h>
30#include <asm/processor.h> 31#include <asm/processor.h>
31#include <asm/fixmap.h> 32#include <asm/fixmap.h>
33#include <asm/page.h>
32 34
33struct mm_struct; 35struct mm_struct;
34struct vm_area_struct; 36struct vm_area_struct;
@@ -69,6 +71,7 @@ extern void set_page_homes(void);
69 71
70#define _PAGE_PRESENT HV_PTE_PRESENT 72#define _PAGE_PRESENT HV_PTE_PRESENT
71#define _PAGE_HUGE_PAGE HV_PTE_PAGE 73#define _PAGE_HUGE_PAGE HV_PTE_PAGE
74#define _PAGE_SUPER_PAGE HV_PTE_SUPER
72#define _PAGE_READABLE HV_PTE_READABLE 75#define _PAGE_READABLE HV_PTE_READABLE
73#define _PAGE_WRITABLE HV_PTE_WRITABLE 76#define _PAGE_WRITABLE HV_PTE_WRITABLE
74#define _PAGE_EXECUTABLE HV_PTE_EXECUTABLE 77#define _PAGE_EXECUTABLE HV_PTE_EXECUTABLE
@@ -85,6 +88,7 @@ extern void set_page_homes(void);
85#define _PAGE_ALL (\ 88#define _PAGE_ALL (\
86 _PAGE_PRESENT | \ 89 _PAGE_PRESENT | \
87 _PAGE_HUGE_PAGE | \ 90 _PAGE_HUGE_PAGE | \
91 _PAGE_SUPER_PAGE | \
88 _PAGE_READABLE | \ 92 _PAGE_READABLE | \
89 _PAGE_WRITABLE | \ 93 _PAGE_WRITABLE | \
90 _PAGE_EXECUTABLE | \ 94 _PAGE_EXECUTABLE | \
@@ -162,7 +166,7 @@ extern void set_page_homes(void);
162 (pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val } 166 (pgprot_t) { ((oldprot).val & ~_PAGE_ALL) | (newprot).val }
163 167
164/* Just setting the PFN to zero suffices. */ 168/* Just setting the PFN to zero suffices. */
165#define pte_pgprot(x) hv_pte_set_pfn((x), 0) 169#define pte_pgprot(x) hv_pte_set_pa((x), 0)
166 170
167/* 171/*
168 * For PTEs and PDEs, we must clear the Present bit first when 172 * For PTEs and PDEs, we must clear the Present bit first when
@@ -187,6 +191,7 @@ static inline void __pte_clear(pte_t *ptep)
187 * Undefined behaviour if not.. 191 * Undefined behaviour if not..
188 */ 192 */
189#define pte_present hv_pte_get_present 193#define pte_present hv_pte_get_present
194#define pte_mknotpresent hv_pte_clear_present
190#define pte_user hv_pte_get_user 195#define pte_user hv_pte_get_user
191#define pte_read hv_pte_get_readable 196#define pte_read hv_pte_get_readable
192#define pte_dirty hv_pte_get_dirty 197#define pte_dirty hv_pte_get_dirty
@@ -194,6 +199,7 @@ static inline void __pte_clear(pte_t *ptep)
194#define pte_write hv_pte_get_writable 199#define pte_write hv_pte_get_writable
195#define pte_exec hv_pte_get_executable 200#define pte_exec hv_pte_get_executable
196#define pte_huge hv_pte_get_page 201#define pte_huge hv_pte_get_page
202#define pte_super hv_pte_get_super
197#define pte_rdprotect hv_pte_clear_readable 203#define pte_rdprotect hv_pte_clear_readable
198#define pte_exprotect hv_pte_clear_executable 204#define pte_exprotect hv_pte_clear_executable
199#define pte_mkclean hv_pte_clear_dirty 205#define pte_mkclean hv_pte_clear_dirty
@@ -206,6 +212,7 @@ static inline void __pte_clear(pte_t *ptep)
206#define pte_mkyoung hv_pte_set_accessed 212#define pte_mkyoung hv_pte_set_accessed
207#define pte_mkwrite hv_pte_set_writable 213#define pte_mkwrite hv_pte_set_writable
208#define pte_mkhuge hv_pte_set_page 214#define pte_mkhuge hv_pte_set_page
215#define pte_mksuper hv_pte_set_super
209 216
210#define pte_special(pte) 0 217#define pte_special(pte) 0
211#define pte_mkspecial(pte) (pte) 218#define pte_mkspecial(pte) (pte)
@@ -261,7 +268,7 @@ static inline int pte_none(pte_t pte)
261 268
262static inline unsigned long pte_pfn(pte_t pte) 269static inline unsigned long pte_pfn(pte_t pte)
263{ 270{
264 return hv_pte_get_pfn(pte); 271 return PFN_DOWN(hv_pte_get_pa(pte));
265} 272}
266 273
267/* Set or get the remote cache cpu in a pgprot with remote caching. */ 274/* Set or get the remote cache cpu in a pgprot with remote caching. */
@@ -270,7 +277,7 @@ extern int get_remote_cache_cpu(pgprot_t prot);
270 277
271static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) 278static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
272{ 279{
273 return hv_pte_set_pfn(prot, pfn); 280 return hv_pte_set_pa(prot, PFN_PHYS(pfn));
274} 281}
275 282
276/* Support for priority mappings. */ 283/* Support for priority mappings. */
@@ -312,7 +319,7 @@ extern void check_mm_caching(struct mm_struct *prev, struct mm_struct *next);
312 */ 319 */
313static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) 320static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
314{ 321{
315 return pfn_pte(hv_pte_get_pfn(pte), newprot); 322 return pfn_pte(pte_pfn(pte), newprot);
316} 323}
317 324
318/* 325/*
@@ -335,13 +342,8 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
335 */ 342 */
336#define pgd_offset_k(address) pgd_offset(&init_mm, address) 343#define pgd_offset_k(address) pgd_offset(&init_mm, address)
337 344
338#if defined(CONFIG_HIGHPTE)
339extern pte_t *pte_offset_map(pmd_t *, unsigned long address);
340#define pte_unmap(pte) kunmap_atomic(pte)
341#else
342#define pte_offset_map(dir, address) pte_offset_kernel(dir, address) 345#define pte_offset_map(dir, address) pte_offset_kernel(dir, address)
343#define pte_unmap(pte) do { } while (0) 346#define pte_unmap(pte) do { } while (0)
344#endif
345 347
346/* Clear a non-executable kernel PTE and flush it from the TLB. */ 348/* Clear a non-executable kernel PTE and flush it from the TLB. */
347#define kpte_clear_flush(ptep, vaddr) \ 349#define kpte_clear_flush(ptep, vaddr) \
@@ -410,6 +412,46 @@ static inline unsigned long pmd_index(unsigned long address)
410 return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1); 412 return (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
411} 413}
412 414
415#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
416static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
417 unsigned long address,
418 pmd_t *pmdp)
419{
420 return ptep_test_and_clear_young(vma, address, pmdp_ptep(pmdp));
421}
422
423#define __HAVE_ARCH_PMDP_SET_WRPROTECT
424static inline void pmdp_set_wrprotect(struct mm_struct *mm,
425 unsigned long address, pmd_t *pmdp)
426{
427 ptep_set_wrprotect(mm, address, pmdp_ptep(pmdp));
428}
429
430
431#define __HAVE_ARCH_PMDP_GET_AND_CLEAR
432static inline pmd_t pmdp_get_and_clear(struct mm_struct *mm,
433 unsigned long address,
434 pmd_t *pmdp)
435{
436 return pte_pmd(ptep_get_and_clear(mm, address, pmdp_ptep(pmdp)));
437}
438
439static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
440{
441 set_pte(pmdp_ptep(pmdp), pmd_pte(pmdval));
442}
443
444#define set_pmd_at(mm, addr, pmdp, pmdval) __set_pmd(pmdp, pmdval)
445
446/* Create a pmd from a PTFN. */
447static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
448{
449 return pte_pmd(hv_pte_set_ptfn(prot, ptfn));
450}
451
452/* Return the page-table frame number (ptfn) that a pmd_t points at. */
453#define pmd_ptfn(pmd) hv_pte_get_ptfn(pmd_pte(pmd))
454
413/* 455/*
414 * A given kernel pmd_t maps to a specific virtual address (either a 456 * A given kernel pmd_t maps to a specific virtual address (either a
415 * kernel huge page or a kernel pte_t table). Since kernel pte_t 457 * kernel huge page or a kernel pte_t table). Since kernel pte_t
@@ -430,7 +472,48 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
430 * OK for pte_lockptr(), since we just end up with potentially one 472 * OK for pte_lockptr(), since we just end up with potentially one
431 * lock being used for several pte_t arrays. 473 * lock being used for several pte_t arrays.
432 */ 474 */
433#define pmd_page(pmd) pfn_to_page(HV_PTFN_TO_PFN(pmd_ptfn(pmd))) 475#define pmd_page(pmd) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pmd_ptfn(pmd))))
476
477static inline void pmd_clear(pmd_t *pmdp)
478{
479 __pte_clear(pmdp_ptep(pmdp));
480}
481
482#define pmd_mknotpresent(pmd) pte_pmd(pte_mknotpresent(pmd_pte(pmd)))
483#define pmd_young(pmd) pte_young(pmd_pte(pmd))
484#define pmd_mkyoung(pmd) pte_pmd(pte_mkyoung(pmd_pte(pmd)))
485#define pmd_mkold(pmd) pte_pmd(pte_mkold(pmd_pte(pmd)))
486#define pmd_mkwrite(pmd) pte_pmd(pte_mkwrite(pmd_pte(pmd)))
487#define pmd_write(pmd) pte_write(pmd_pte(pmd))
488#define pmd_wrprotect(pmd) pte_pmd(pte_wrprotect(pmd_pte(pmd)))
489#define pmd_mkdirty(pmd) pte_pmd(pte_mkdirty(pmd_pte(pmd)))
490#define pmd_huge_page(pmd) pte_huge(pmd_pte(pmd))
491#define pmd_mkhuge(pmd) pte_pmd(pte_mkhuge(pmd_pte(pmd)))
492#define __HAVE_ARCH_PMD_WRITE
493
494#define pfn_pmd(pfn, pgprot) pte_pmd(pfn_pte((pfn), (pgprot)))
495#define pmd_pfn(pmd) pte_pfn(pmd_pte(pmd))
496#define mk_pmd(page, pgprot) pfn_pmd(page_to_pfn(page), (pgprot))
497
498static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
499{
500 return pfn_pmd(pmd_pfn(pmd), newprot);
501}
502
503#ifdef CONFIG_TRANSPARENT_HUGEPAGE
504#define has_transparent_hugepage() 1
505#define pmd_trans_huge pmd_huge_page
506
507static inline pmd_t pmd_mksplitting(pmd_t pmd)
508{
509 return pte_pmd(hv_pte_set_client2(pmd_pte(pmd)));
510}
511
512static inline int pmd_trans_splitting(pmd_t pmd)
513{
514 return hv_pte_get_client2(pmd_pte(pmd));
515}
516#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
434 517
435/* 518/*
436 * The pte page can be thought of an array like this: pte_t[PTRS_PER_PTE] 519 * The pte page can be thought of an array like this: pte_t[PTRS_PER_PTE]
@@ -448,17 +531,13 @@ static inline pte_t *pte_offset_kernel(pmd_t *pmd, unsigned long address)
448 return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address); 531 return (pte_t *)pmd_page_vaddr(*pmd) + pte_index(address);
449} 532}
450 533
451static inline int pmd_huge_page(pmd_t pmd)
452{
453 return pmd_val(pmd) & _PAGE_HUGE_PAGE;
454}
455
456#include <asm-generic/pgtable.h> 534#include <asm-generic/pgtable.h>
457 535
458/* Support /proc/NN/pgtable API. */ 536/* Support /proc/NN/pgtable API. */
459struct seq_file; 537struct seq_file;
460int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm, 538int arch_proc_pgtable_show(struct seq_file *m, struct mm_struct *mm,
461 unsigned long vaddr, pte_t *ptep, void **datap); 539 unsigned long vaddr, unsigned long pagesize,
540 pte_t *ptep, void **datap);
462 541
463#endif /* !__ASSEMBLY__ */ 542#endif /* !__ASSEMBLY__ */
464 543
diff --git a/arch/tile/include/asm/pgtable_32.h b/arch/tile/include/asm/pgtable_32.h
index 9f98529761fd..4ce4a7a99c24 100644
--- a/arch/tile/include/asm/pgtable_32.h
+++ b/arch/tile/include/asm/pgtable_32.h
@@ -20,11 +20,12 @@
20 * The level-1 index is defined by the huge page size. A PGD is composed 20 * The level-1 index is defined by the huge page size. A PGD is composed
21 * of PTRS_PER_PGD pgd_t's and is the top level of the page table. 21 * of PTRS_PER_PGD pgd_t's and is the top level of the page table.
22 */ 22 */
23#define PGDIR_SHIFT HV_LOG2_PAGE_SIZE_LARGE 23#define PGDIR_SHIFT HPAGE_SHIFT
24#define PGDIR_SIZE HV_PAGE_SIZE_LARGE 24#define PGDIR_SIZE HPAGE_SIZE
25#define PGDIR_MASK (~(PGDIR_SIZE-1)) 25#define PGDIR_MASK (~(PGDIR_SIZE-1))
26#define PTRS_PER_PGD (1 << (32 - PGDIR_SHIFT)) 26#define PTRS_PER_PGD _HV_L1_ENTRIES(HPAGE_SHIFT)
27#define SIZEOF_PGD (PTRS_PER_PGD * sizeof(pgd_t)) 27#define PGD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT)
28#define SIZEOF_PGD _HV_L1_SIZE(HPAGE_SHIFT)
28 29
29/* 30/*
30 * The level-2 index is defined by the difference between the huge 31 * The level-2 index is defined by the difference between the huge
@@ -33,8 +34,9 @@
33 * Note that the hypervisor docs use PTE for what we call pte_t, so 34 * Note that the hypervisor docs use PTE for what we call pte_t, so
34 * this nomenclature is somewhat confusing. 35 * this nomenclature is somewhat confusing.
35 */ 36 */
36#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL)) 37#define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
37#define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t)) 38#define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
39#define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
38 40
39#ifndef __ASSEMBLY__ 41#ifndef __ASSEMBLY__
40 42
@@ -111,24 +113,14 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
111 return pte; 113 return pte;
112} 114}
113 115
114static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval) 116/*
115{ 117 * pmds are wrappers around pgds, which are the same as ptes.
116 set_pte(&pmdp->pud.pgd, pmdval.pud.pgd); 118 * It's often convenient to "cast" back and forth and use the pte methods,
117} 119 * which are the methods supplied by the hypervisor.
118 120 */
119/* Create a pmd from a PTFN. */ 121#define pmd_pte(pmd) ((pmd).pud.pgd)
120static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot) 122#define pmdp_ptep(pmdp) (&(pmdp)->pud.pgd)
121{ 123#define pte_pmd(pte) ((pmd_t){ { (pte) } })
122 return (pmd_t){ { hv_pte_set_ptfn(prot, ptfn) } };
123}
124
125/* Return the page-table frame number (ptfn) that a pmd_t points at. */
126#define pmd_ptfn(pmd) hv_pte_get_ptfn((pmd).pud.pgd)
127
128static inline void pmd_clear(pmd_t *pmdp)
129{
130 __pte_clear(&pmdp->pud.pgd);
131}
132 124
133#endif /* __ASSEMBLY__ */ 125#endif /* __ASSEMBLY__ */
134 126
diff --git a/arch/tile/include/asm/pgtable_64.h b/arch/tile/include/asm/pgtable_64.h
index fd80328523b4..2492fa5478e7 100644
--- a/arch/tile/include/asm/pgtable_64.h
+++ b/arch/tile/include/asm/pgtable_64.h
@@ -21,17 +21,19 @@
21#define PGDIR_SIZE HV_L1_SPAN 21#define PGDIR_SIZE HV_L1_SPAN
22#define PGDIR_MASK (~(PGDIR_SIZE-1)) 22#define PGDIR_MASK (~(PGDIR_SIZE-1))
23#define PTRS_PER_PGD HV_L0_ENTRIES 23#define PTRS_PER_PGD HV_L0_ENTRIES
24#define SIZEOF_PGD (PTRS_PER_PGD * sizeof(pgd_t)) 24#define PGD_INDEX(va) HV_L0_INDEX(va)
25#define SIZEOF_PGD HV_L0_SIZE
25 26
26/* 27/*
27 * The level-1 index is defined by the huge page size. A PMD is composed 28 * The level-1 index is defined by the huge page size. A PMD is composed
28 * of PTRS_PER_PMD pgd_t's and is the middle level of the page table. 29 * of PTRS_PER_PMD pgd_t's and is the middle level of the page table.
29 */ 30 */
30#define PMD_SHIFT HV_LOG2_PAGE_SIZE_LARGE 31#define PMD_SHIFT HPAGE_SHIFT
31#define PMD_SIZE HV_PAGE_SIZE_LARGE 32#define PMD_SIZE HPAGE_SIZE
32#define PMD_MASK (~(PMD_SIZE-1)) 33#define PMD_MASK (~(PMD_SIZE-1))
33#define PTRS_PER_PMD (1 << (PGDIR_SHIFT - PMD_SHIFT)) 34#define PTRS_PER_PMD _HV_L1_ENTRIES(HPAGE_SHIFT)
34#define SIZEOF_PMD (PTRS_PER_PMD * sizeof(pmd_t)) 35#define PMD_INDEX(va) _HV_L1_INDEX(va, HPAGE_SHIFT)
36#define SIZEOF_PMD _HV_L1_SIZE(HPAGE_SHIFT)
35 37
36/* 38/*
37 * The level-2 index is defined by the difference between the huge 39 * The level-2 index is defined by the difference between the huge
@@ -40,17 +42,19 @@
40 * Note that the hypervisor docs use PTE for what we call pte_t, so 42 * Note that the hypervisor docs use PTE for what we call pte_t, so
41 * this nomenclature is somewhat confusing. 43 * this nomenclature is somewhat confusing.
42 */ 44 */
43#define PTRS_PER_PTE (1 << (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL)) 45#define PTRS_PER_PTE _HV_L2_ENTRIES(HPAGE_SHIFT, PAGE_SHIFT)
44#define SIZEOF_PTE (PTRS_PER_PTE * sizeof(pte_t)) 46#define PTE_INDEX(va) _HV_L2_INDEX(va, HPAGE_SHIFT, PAGE_SHIFT)
47#define SIZEOF_PTE _HV_L2_SIZE(HPAGE_SHIFT, PAGE_SHIFT)
45 48
46/* 49/*
47 * Align the vmalloc area to an L2 page table, and leave a guard page 50 * Align the vmalloc area to an L2 page table. Omit guard pages at
48 * at the beginning and end. The vmalloc code also puts in an internal 51 * the beginning and end for simplicity (particularly in the per-cpu
52 * memory allocation code). The vmalloc code puts in an internal
49 * guard page between each allocation. 53 * guard page between each allocation.
50 */ 54 */
51#define _VMALLOC_END HUGE_VMAP_BASE 55#define _VMALLOC_END HUGE_VMAP_BASE
52#define VMALLOC_END (_VMALLOC_END - PAGE_SIZE) 56#define VMALLOC_END _VMALLOC_END
53#define VMALLOC_START (_VMALLOC_START + PAGE_SIZE) 57#define VMALLOC_START _VMALLOC_START
54 58
55#define HUGE_VMAP_END (HUGE_VMAP_BASE + PGDIR_SIZE) 59#define HUGE_VMAP_END (HUGE_VMAP_BASE + PGDIR_SIZE)
56 60
@@ -98,7 +102,7 @@ static inline int pud_bad(pud_t pud)
98 * A pud_t points to a pmd_t array. Since we can have multiple per 102 * A pud_t points to a pmd_t array. Since we can have multiple per
99 * page, we don't have a one-to-one mapping of pud_t's to pages. 103 * page, we don't have a one-to-one mapping of pud_t's to pages.
100 */ 104 */
101#define pud_page(pud) pfn_to_page(HV_PTFN_TO_PFN(pud_ptfn(pud))) 105#define pud_page(pud) pfn_to_page(PFN_DOWN(HV_PTFN_TO_CPA(pud_ptfn(pud))))
102 106
103static inline unsigned long pud_index(unsigned long address) 107static inline unsigned long pud_index(unsigned long address)
104{ 108{
@@ -108,28 +112,6 @@ static inline unsigned long pud_index(unsigned long address)
108#define pmd_offset(pud, address) \ 112#define pmd_offset(pud, address) \
109 ((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address)) 113 ((pmd_t *)pud_page_vaddr(*(pud)) + pmd_index(address))
110 114
111static inline void __set_pmd(pmd_t *pmdp, pmd_t pmdval)
112{
113 set_pte(pmdp, pmdval);
114}
115
116/* Create a pmd from a PTFN and pgprot. */
117static inline pmd_t ptfn_pmd(unsigned long ptfn, pgprot_t prot)
118{
119 return hv_pte_set_ptfn(prot, ptfn);
120}
121
122/* Return the page-table frame number (ptfn) that a pmd_t points at. */
123static inline unsigned long pmd_ptfn(pmd_t pmd)
124{
125 return hv_pte_get_ptfn(pmd);
126}
127
128static inline void pmd_clear(pmd_t *pmdp)
129{
130 __pte_clear(pmdp);
131}
132
133/* Normalize an address to having the correct high bits set. */ 115/* Normalize an address to having the correct high bits set. */
134#define pgd_addr_normalize pgd_addr_normalize 116#define pgd_addr_normalize pgd_addr_normalize
135static inline unsigned long pgd_addr_normalize(unsigned long addr) 117static inline unsigned long pgd_addr_normalize(unsigned long addr)
@@ -170,6 +152,13 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
170 return hv_pte(__insn_exch(&ptep->val, 0UL)); 152 return hv_pte(__insn_exch(&ptep->val, 0UL));
171} 153}
172 154
155/*
156 * pmds are the same as pgds and ptes, so converting is a no-op.
157 */
158#define pmd_pte(pmd) (pmd)
159#define pmdp_ptep(pmdp) (pmdp)
160#define pte_pmd(pte) (pte)
161
173#endif /* __ASSEMBLY__ */ 162#endif /* __ASSEMBLY__ */
174 163
175#endif /* _ASM_TILE_PGTABLE_64_H */ 164#endif /* _ASM_TILE_PGTABLE_64_H */
diff --git a/arch/tile/include/asm/processor.h b/arch/tile/include/asm/processor.h
index 15cd8a4a06ce..8c4dd9ff91eb 100644
--- a/arch/tile/include/asm/processor.h
+++ b/arch/tile/include/asm/processor.h
@@ -76,6 +76,17 @@ struct async_tlb {
76 76
77#ifdef CONFIG_HARDWALL 77#ifdef CONFIG_HARDWALL
78struct hardwall_info; 78struct hardwall_info;
79struct hardwall_task {
80 /* Which hardwall is this task tied to? (or NULL if none) */
81 struct hardwall_info *info;
82 /* Chains this task into the list at info->task_head. */
83 struct list_head list;
84};
85#ifdef __tilepro__
86#define HARDWALL_TYPES 1 /* udn */
87#else
88#define HARDWALL_TYPES 3 /* udn, idn, and ipi */
89#endif
79#endif 90#endif
80 91
81struct thread_struct { 92struct thread_struct {
@@ -116,10 +127,8 @@ struct thread_struct {
116 unsigned long dstream_pf; 127 unsigned long dstream_pf;
117#endif 128#endif
118#ifdef CONFIG_HARDWALL 129#ifdef CONFIG_HARDWALL
119 /* Is this task tied to an activated hardwall? */ 130 /* Hardwall information for various resources. */
120 struct hardwall_info *hardwall; 131 struct hardwall_task hardwall[HARDWALL_TYPES];
121 /* Chains this task into the list at hardwall->list. */
122 struct list_head hardwall_list;
123#endif 132#endif
124#if CHIP_HAS_TILE_DMA() 133#if CHIP_HAS_TILE_DMA()
125 /* Async DMA TLB fault information */ 134 /* Async DMA TLB fault information */
diff --git a/arch/tile/include/asm/setup.h b/arch/tile/include/asm/setup.h
index e58613e0752f..c67eb70ea78e 100644
--- a/arch/tile/include/asm/setup.h
+++ b/arch/tile/include/asm/setup.h
@@ -41,15 +41,15 @@ void restrict_dma_mpls(void);
41#ifdef CONFIG_HARDWALL 41#ifdef CONFIG_HARDWALL
42/* User-level network management functions */ 42/* User-level network management functions */
43void reset_network_state(void); 43void reset_network_state(void);
44void grant_network_mpls(void);
45void restrict_network_mpls(void);
46struct task_struct; 44struct task_struct;
47int hardwall_deactivate(struct task_struct *task); 45void hardwall_switch_tasks(struct task_struct *prev, struct task_struct *next);
46void hardwall_deactivate_all(struct task_struct *task);
47int hardwall_ipi_valid(int cpu);
48 48
49/* Hook hardwall code into changes in affinity. */ 49/* Hook hardwall code into changes in affinity. */
50#define arch_set_cpus_allowed(p, new_mask) do { \ 50#define arch_set_cpus_allowed(p, new_mask) do { \
51 if (p->thread.hardwall && !cpumask_equal(&p->cpus_allowed, new_mask)) \ 51 if (!cpumask_equal(&p->cpus_allowed, new_mask)) \
52 hardwall_deactivate(p); \ 52 hardwall_deactivate_all(p); \
53} while (0) 53} while (0)
54#endif 54#endif
55 55
diff --git a/arch/tile/include/asm/syscalls.h b/arch/tile/include/asm/syscalls.h
index 3b5507c31eae..06f0464cfed9 100644
--- a/arch/tile/include/asm/syscalls.h
+++ b/arch/tile/include/asm/syscalls.h
@@ -43,7 +43,8 @@ long sys32_fadvise64(int fd, u32 offset_lo, u32 offset_hi,
43 u32 len, int advice); 43 u32 len, int advice);
44int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi, 44int sys32_fadvise64_64(int fd, u32 offset_lo, u32 offset_hi,
45 u32 len_lo, u32 len_hi, int advice); 45 u32 len_lo, u32 len_hi, int advice);
46long sys_flush_cache(void); 46long sys_cacheflush(unsigned long addr, unsigned long len,
47 unsigned long flags);
47#ifndef __tilegx__ /* No mmap() in the 32-bit kernel. */ 48#ifndef __tilegx__ /* No mmap() in the 32-bit kernel. */
48#define sys_mmap sys_mmap 49#define sys_mmap sys_mmap
49#endif 50#endif
diff --git a/arch/tile/include/asm/tlbflush.h b/arch/tile/include/asm/tlbflush.h
index 96199d214fb8..dcf91b25a1e5 100644
--- a/arch/tile/include/asm/tlbflush.h
+++ b/arch/tile/include/asm/tlbflush.h
@@ -38,16 +38,11 @@ DECLARE_PER_CPU(int, current_asid);
38/* The hypervisor tells us what ASIDs are available to us. */ 38/* The hypervisor tells us what ASIDs are available to us. */
39extern int min_asid, max_asid; 39extern int min_asid, max_asid;
40 40
41static inline unsigned long hv_page_size(const struct vm_area_struct *vma)
42{
43 return (vma->vm_flags & VM_HUGETLB) ? HPAGE_SIZE : PAGE_SIZE;
44}
45
46/* Pass as vma pointer for non-executable mapping, if no vma available. */ 41/* Pass as vma pointer for non-executable mapping, if no vma available. */
47#define FLUSH_NONEXEC ((const struct vm_area_struct *)-1UL) 42#define FLUSH_NONEXEC ((struct vm_area_struct *)-1UL)
48 43
49/* Flush a single user page on this cpu. */ 44/* Flush a single user page on this cpu. */
50static inline void local_flush_tlb_page(const struct vm_area_struct *vma, 45static inline void local_flush_tlb_page(struct vm_area_struct *vma,
51 unsigned long addr, 46 unsigned long addr,
52 unsigned long page_size) 47 unsigned long page_size)
53{ 48{
@@ -60,7 +55,7 @@ static inline void local_flush_tlb_page(const struct vm_area_struct *vma,
60} 55}
61 56
62/* Flush range of user pages on this cpu. */ 57/* Flush range of user pages on this cpu. */
63static inline void local_flush_tlb_pages(const struct vm_area_struct *vma, 58static inline void local_flush_tlb_pages(struct vm_area_struct *vma,
64 unsigned long addr, 59 unsigned long addr,
65 unsigned long page_size, 60 unsigned long page_size,
66 unsigned long len) 61 unsigned long len)
@@ -117,10 +112,10 @@ extern void flush_tlb_all(void);
117extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); 112extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);
118extern void flush_tlb_current_task(void); 113extern void flush_tlb_current_task(void);
119extern void flush_tlb_mm(struct mm_struct *); 114extern void flush_tlb_mm(struct mm_struct *);
120extern void flush_tlb_page(const struct vm_area_struct *, unsigned long); 115extern void flush_tlb_page(struct vm_area_struct *, unsigned long);
121extern void flush_tlb_page_mm(const struct vm_area_struct *, 116extern void flush_tlb_page_mm(struct vm_area_struct *,
122 struct mm_struct *, unsigned long); 117 struct mm_struct *, unsigned long);
123extern void flush_tlb_range(const struct vm_area_struct *, 118extern void flush_tlb_range(struct vm_area_struct *,
124 unsigned long start, unsigned long end); 119 unsigned long start, unsigned long end);
125 120
126#define flush_tlb() flush_tlb_current_task() 121#define flush_tlb() flush_tlb_current_task()
diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
index ef34d2caa5b1..c3dd275f25e2 100644
--- a/arch/tile/include/asm/uaccess.h
+++ b/arch/tile/include/asm/uaccess.h
@@ -114,45 +114,75 @@ struct exception_table_entry {
114extern int fixup_exception(struct pt_regs *regs); 114extern int fixup_exception(struct pt_regs *regs);
115 115
116/* 116/*
117 * We return the __get_user_N function results in a structure, 117 * Support macros for __get_user().
118 * thus in r0 and r1. If "err" is zero, "val" is the result 118 *
119 * of the read; otherwise, "err" is -EFAULT. 119 * Implementation note: The "case 8" logic of casting to the type of
120 * 120 * the result of subtracting the value from itself is basically a way
121 * We rarely need 8-byte values on a 32-bit architecture, but 121 * of keeping all integer types the same, but casting any pointers to
122 * we size the structure to accommodate. In practice, for the 122 * ptrdiff_t, i.e. also an integer type. This way there are no
123 * the smaller reads, we can zero the high word for free, and 123 * questionable casts seen by the compiler on an ILP32 platform.
124 * the caller will ignore it by virtue of casting anyway. 124 *
125 * Note that __get_user() and __put_user() assume proper alignment.
125 */ 126 */
126struct __get_user {
127 unsigned long long val;
128 int err;
129};
130 127
131/* 128#ifdef __LP64__
132 * FIXME: we should express these as inline extended assembler, since 129#define _ASM_PTR ".quad"
133 * they're fundamentally just a variable dereference and some 130#else
134 * supporting exception_table gunk. Note that (a la i386) we can 131#define _ASM_PTR ".long"
135 * extend the copy_to_user and copy_from_user routines to call into 132#endif
136 * such extended assembler routines, though we will have to use a 133
137 * different return code in that case (1, 2, or 4, rather than -EFAULT). 134#define __get_user_asm(OP, x, ptr, ret) \
138 */ 135 asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n" \
139extern struct __get_user __get_user_1(const void __user *); 136 ".pushsection .fixup,\"ax\"\n" \
140extern struct __get_user __get_user_2(const void __user *); 137 "0: { movei %1, 0; movei %0, %3 }\n" \
141extern struct __get_user __get_user_4(const void __user *); 138 "j 9f\n" \
142extern struct __get_user __get_user_8(const void __user *); 139 ".section __ex_table,\"a\"\n" \
143extern int __put_user_1(long, void __user *); 140 _ASM_PTR " 1b, 0b\n" \
144extern int __put_user_2(long, void __user *); 141 ".popsection\n" \
145extern int __put_user_4(long, void __user *); 142 "9:" \
146extern int __put_user_8(long long, void __user *); 143 : "=r" (ret), "=r" (x) \
147 144 : "r" (ptr), "i" (-EFAULT))
148/* Unimplemented routines to cause linker failures */ 145
149extern struct __get_user __get_user_bad(void); 146#ifdef __tilegx__
150extern int __put_user_bad(void); 147#define __get_user_1(x, ptr, ret) __get_user_asm(ld1u, x, ptr, ret)
148#define __get_user_2(x, ptr, ret) __get_user_asm(ld2u, x, ptr, ret)
149#define __get_user_4(x, ptr, ret) __get_user_asm(ld4u, x, ptr, ret)
150#define __get_user_8(x, ptr, ret) __get_user_asm(ld, x, ptr, ret)
151#else
152#define __get_user_1(x, ptr, ret) __get_user_asm(lb_u, x, ptr, ret)
153#define __get_user_2(x, ptr, ret) __get_user_asm(lh_u, x, ptr, ret)
154#define __get_user_4(x, ptr, ret) __get_user_asm(lw, x, ptr, ret)
155#ifdef __LITTLE_ENDIAN
156#define __lo32(a, b) a
157#define __hi32(a, b) b
158#else
159#define __lo32(a, b) b
160#define __hi32(a, b) a
161#endif
162#define __get_user_8(x, ptr, ret) \
163 ({ \
164 unsigned int __a, __b; \
165 asm volatile("1: { lw %1, %3; addi %2, %3, 4 }\n" \
166 "2: { lw %2, %2; movei %0, 0 }\n" \
167 ".pushsection .fixup,\"ax\"\n" \
168 "0: { movei %1, 0; movei %2, 0 }\n" \
169 "{ movei %0, %4; j 9f }\n" \
170 ".section __ex_table,\"a\"\n" \
171 ".word 1b, 0b\n" \
172 ".word 2b, 0b\n" \
173 ".popsection\n" \
174 "9:" \
175 : "=r" (ret), "=r" (__a), "=&r" (__b) \
176 : "r" (ptr), "i" (-EFAULT)); \
177 (x) = (__typeof(x))(__typeof((x)-(x))) \
178 (((u64)__hi32(__a, __b) << 32) | \
179 __lo32(__a, __b)); \
180 })
181#endif
182
183extern int __get_user_bad(void)
184 __attribute__((warning("sizeof __get_user argument not 1, 2, 4 or 8")));
151 185
152/*
153 * Careful: we have to cast the result to the type of the pointer
154 * for sign reasons.
155 */
156/** 186/**
157 * __get_user: - Get a simple variable from user space, with less checking. 187 * __get_user: - Get a simple variable from user space, with less checking.
158 * @x: Variable to store result. 188 * @x: Variable to store result.
@@ -174,30 +204,62 @@ extern int __put_user_bad(void);
174 * function. 204 * function.
175 */ 205 */
176#define __get_user(x, ptr) \ 206#define __get_user(x, ptr) \
177({ struct __get_user __ret; \ 207 ({ \
178 __typeof__(*(ptr)) const __user *__gu_addr = (ptr); \ 208 int __ret; \
179 __chk_user_ptr(__gu_addr); \ 209 __chk_user_ptr(ptr); \
180 switch (sizeof(*(__gu_addr))) { \ 210 switch (sizeof(*(ptr))) { \
181 case 1: \ 211 case 1: __get_user_1(x, ptr, __ret); break; \
182 __ret = __get_user_1(__gu_addr); \ 212 case 2: __get_user_2(x, ptr, __ret); break; \
183 break; \ 213 case 4: __get_user_4(x, ptr, __ret); break; \
184 case 2: \ 214 case 8: __get_user_8(x, ptr, __ret); break; \
185 __ret = __get_user_2(__gu_addr); \ 215 default: __ret = __get_user_bad(); break; \
186 break; \ 216 } \
187 case 4: \ 217 __ret; \
188 __ret = __get_user_4(__gu_addr); \ 218 })
189 break; \ 219
190 case 8: \ 220/* Support macros for __put_user(). */
191 __ret = __get_user_8(__gu_addr); \ 221
192 break; \ 222#define __put_user_asm(OP, x, ptr, ret) \
193 default: \ 223 asm volatile("1: {" #OP " %1, %2; movei %0, 0 }\n" \
194 __ret = __get_user_bad(); \ 224 ".pushsection .fixup,\"ax\"\n" \
195 break; \ 225 "0: { movei %0, %3; j 9f }\n" \
196 } \ 226 ".section __ex_table,\"a\"\n" \
197 (x) = (__typeof__(*__gu_addr)) (__typeof__(*__gu_addr - *__gu_addr)) \ 227 _ASM_PTR " 1b, 0b\n" \
198 __ret.val; \ 228 ".popsection\n" \
199 __ret.err; \ 229 "9:" \
200}) 230 : "=r" (ret) \
231 : "r" (ptr), "r" (x), "i" (-EFAULT))
232
233#ifdef __tilegx__
234#define __put_user_1(x, ptr, ret) __put_user_asm(st1, x, ptr, ret)
235#define __put_user_2(x, ptr, ret) __put_user_asm(st2, x, ptr, ret)
236#define __put_user_4(x, ptr, ret) __put_user_asm(st4, x, ptr, ret)
237#define __put_user_8(x, ptr, ret) __put_user_asm(st, x, ptr, ret)
238#else
239#define __put_user_1(x, ptr, ret) __put_user_asm(sb, x, ptr, ret)
240#define __put_user_2(x, ptr, ret) __put_user_asm(sh, x, ptr, ret)
241#define __put_user_4(x, ptr, ret) __put_user_asm(sw, x, ptr, ret)
242#define __put_user_8(x, ptr, ret) \
243 ({ \
244 u64 __x = (__typeof((x)-(x)))(x); \
245 int __lo = (int) __x, __hi = (int) (__x >> 32); \
246 asm volatile("1: { sw %1, %2; addi %0, %1, 4 }\n" \
247 "2: { sw %0, %3; movei %0, 0 }\n" \
248 ".pushsection .fixup,\"ax\"\n" \
249 "0: { movei %0, %4; j 9f }\n" \
250 ".section __ex_table,\"a\"\n" \
251 ".word 1b, 0b\n" \
252 ".word 2b, 0b\n" \
253 ".popsection\n" \
254 "9:" \
255 : "=&r" (ret) \
256 : "r" (ptr), "r" (__lo32(__lo, __hi)), \
257 "r" (__hi32(__lo, __hi)), "i" (-EFAULT)); \
258 })
259#endif
260
261extern int __put_user_bad(void)
262 __attribute__((warning("sizeof __put_user argument not 1, 2, 4 or 8")));
201 263
202/** 264/**
203 * __put_user: - Write a simple value into user space, with less checking. 265 * __put_user: - Write a simple value into user space, with less checking.
@@ -217,39 +279,19 @@ extern int __put_user_bad(void);
217 * function. 279 * function.
218 * 280 *
219 * Returns zero on success, or -EFAULT on error. 281 * Returns zero on success, or -EFAULT on error.
220 *
221 * Implementation note: The "case 8" logic of casting to the type of
222 * the result of subtracting the value from itself is basically a way
223 * of keeping all integer types the same, but casting any pointers to
224 * ptrdiff_t, i.e. also an integer type. This way there are no
225 * questionable casts seen by the compiler on an ILP32 platform.
226 */ 282 */
227#define __put_user(x, ptr) \ 283#define __put_user(x, ptr) \
228({ \ 284({ \
229 int __pu_err = 0; \ 285 int __ret; \
230 __typeof__(*(ptr)) __user *__pu_addr = (ptr); \ 286 __chk_user_ptr(ptr); \
231 typeof(*__pu_addr) __pu_val = (x); \ 287 switch (sizeof(*(ptr))) { \
232 __chk_user_ptr(__pu_addr); \ 288 case 1: __put_user_1(x, ptr, __ret); break; \
233 switch (sizeof(__pu_val)) { \ 289 case 2: __put_user_2(x, ptr, __ret); break; \
234 case 1: \ 290 case 4: __put_user_4(x, ptr, __ret); break; \
235 __pu_err = __put_user_1((long)__pu_val, __pu_addr); \ 291 case 8: __put_user_8(x, ptr, __ret); break; \
236 break; \ 292 default: __ret = __put_user_bad(); break; \
237 case 2: \
238 __pu_err = __put_user_2((long)__pu_val, __pu_addr); \
239 break; \
240 case 4: \
241 __pu_err = __put_user_4((long)__pu_val, __pu_addr); \
242 break; \
243 case 8: \
244 __pu_err = \
245 __put_user_8((__typeof__(__pu_val - __pu_val))__pu_val,\
246 __pu_addr); \
247 break; \
248 default: \
249 __pu_err = __put_user_bad(); \
250 break; \
251 } \ 293 } \
252 __pu_err; \ 294 __ret; \
253}) 295})
254 296
255/* 297/*
@@ -378,7 +420,7 @@ static inline unsigned long __must_check copy_from_user(void *to,
378/** 420/**
379 * __copy_in_user() - copy data within user space, with less checking. 421 * __copy_in_user() - copy data within user space, with less checking.
380 * @to: Destination address, in user space. 422 * @to: Destination address, in user space.
381 * @from: Source address, in kernel space. 423 * @from: Source address, in user space.
382 * @n: Number of bytes to copy. 424 * @n: Number of bytes to copy.
383 * 425 *
384 * Context: User context only. This function may sleep. 426 * Context: User context only. This function may sleep.
diff --git a/arch/tile/include/asm/unistd.h b/arch/tile/include/asm/unistd.h
index f70bf1c541f1..a017246ca0ce 100644
--- a/arch/tile/include/asm/unistd.h
+++ b/arch/tile/include/asm/unistd.h
@@ -24,8 +24,8 @@
24#include <asm-generic/unistd.h> 24#include <asm-generic/unistd.h>
25 25
26/* Additional Tilera-specific syscalls. */ 26/* Additional Tilera-specific syscalls. */
27#define __NR_flush_cache (__NR_arch_specific_syscall + 1) 27#define __NR_cacheflush (__NR_arch_specific_syscall + 1)
28__SYSCALL(__NR_flush_cache, sys_flush_cache) 28__SYSCALL(__NR_cacheflush, sys_cacheflush)
29 29
30#ifndef __tilegx__ 30#ifndef __tilegx__
31/* "Fast" syscalls provide atomic support for 32-bit chips. */ 31/* "Fast" syscalls provide atomic support for 32-bit chips. */
diff --git a/arch/tile/include/hv/drv_xgbe_intf.h b/arch/tile/include/hv/drv_xgbe_intf.h
index f13188ac281a..2a20b266d944 100644
--- a/arch/tile/include/hv/drv_xgbe_intf.h
+++ b/arch/tile/include/hv/drv_xgbe_intf.h
@@ -460,7 +460,7 @@ typedef void* lepp_comp_t;
460 * linux's "MAX_SKB_FRAGS", and presumably over-estimates by one, for 460 * linux's "MAX_SKB_FRAGS", and presumably over-estimates by one, for
461 * our page size of exactly 65536. We add one for a "body" fragment. 461 * our page size of exactly 65536. We add one for a "body" fragment.
462 */ 462 */
463#define LEPP_MAX_FRAGS (65536 / HV_PAGE_SIZE_SMALL + 2 + 1) 463#define LEPP_MAX_FRAGS (65536 / HV_DEFAULT_PAGE_SIZE_SMALL + 2 + 1)
464 464
465/** Total number of bytes needed for an lepp_tso_cmd_t. */ 465/** Total number of bytes needed for an lepp_tso_cmd_t. */
466#define LEPP_TSO_CMD_SIZE(num_frags, header_size) \ 466#define LEPP_TSO_CMD_SIZE(num_frags, header_size) \
diff --git a/arch/tile/include/hv/hypervisor.h b/arch/tile/include/hv/hypervisor.h
index 72ec1e972f15..ccd847e2347f 100644
--- a/arch/tile/include/hv/hypervisor.h
+++ b/arch/tile/include/hv/hypervisor.h
@@ -17,8 +17,8 @@
17 * The hypervisor's public API. 17 * The hypervisor's public API.
18 */ 18 */
19 19
20#ifndef _TILE_HV_H 20#ifndef _HV_HV_H
21#define _TILE_HV_H 21#define _HV_HV_H
22 22
23#include <arch/chip.h> 23#include <arch/chip.h>
24 24
@@ -42,25 +42,45 @@
42 */ 42 */
43#define HV_L1_SPAN (__HV_SIZE_ONE << HV_LOG2_L1_SPAN) 43#define HV_L1_SPAN (__HV_SIZE_ONE << HV_LOG2_L1_SPAN)
44 44
45/** The log2 of the size of small pages, in bytes. This value should 45/** The log2 of the initial size of small pages, in bytes.
46 * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL). 46 * See HV_DEFAULT_PAGE_SIZE_SMALL.
47 */ 47 */
48#define HV_LOG2_PAGE_SIZE_SMALL 16 48#define HV_LOG2_DEFAULT_PAGE_SIZE_SMALL 16
49 49
50/** The size of small pages, in bytes. This value should be verified 50/** The initial size of small pages, in bytes. This value should be verified
51 * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL). 51 * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_SMALL).
52 * It may also be modified when installing a new context.
52 */ 53 */
53#define HV_PAGE_SIZE_SMALL (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_SMALL) 54#define HV_DEFAULT_PAGE_SIZE_SMALL \
55 (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_SMALL)
54 56
55/** The log2 of the size of large pages, in bytes. This value should be 57/** The log2 of the initial size of large pages, in bytes.
56 * verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE). 58 * See HV_DEFAULT_PAGE_SIZE_LARGE.
57 */ 59 */
58#define HV_LOG2_PAGE_SIZE_LARGE 24 60#define HV_LOG2_DEFAULT_PAGE_SIZE_LARGE 24
59 61
60/** The size of large pages, in bytes. This value should be verified 62/** The initial size of large pages, in bytes. This value should be verified
61 * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE). 63 * at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_LARGE).
64 * It may also be modified when installing a new context.
62 */ 65 */
63#define HV_PAGE_SIZE_LARGE (__HV_SIZE_ONE << HV_LOG2_PAGE_SIZE_LARGE) 66#define HV_DEFAULT_PAGE_SIZE_LARGE \
67 (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_LARGE)
68
69#if CHIP_VA_WIDTH() > 32
70
71/** The log2 of the initial size of jumbo pages, in bytes.
72 * See HV_DEFAULT_PAGE_SIZE_JUMBO.
73 */
74#define HV_LOG2_DEFAULT_PAGE_SIZE_JUMBO 32
75
76/** The initial size of jumbo pages, in bytes. This value should
77 * be verified at runtime by calling hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO).
78 * It may also be modified when installing a new context.
79 */
80#define HV_DEFAULT_PAGE_SIZE_JUMBO \
81 (__HV_SIZE_ONE << HV_LOG2_DEFAULT_PAGE_SIZE_JUMBO)
82
83#endif
64 84
65/** The log2 of the granularity at which page tables must be aligned; 85/** The log2 of the granularity at which page tables must be aligned;
66 * in other words, the CPA for a page table must have this many zero 86 * in other words, the CPA for a page table must have this many zero
@@ -280,8 +300,11 @@
280#define HV_DISPATCH_GET_IPI_PTE 56 300#define HV_DISPATCH_GET_IPI_PTE 56
281#endif 301#endif
282 302
303/** hv_set_pte_super_shift */
304#define HV_DISPATCH_SET_PTE_SUPER_SHIFT 57
305
283/** One more than the largest dispatch value */ 306/** One more than the largest dispatch value */
284#define _HV_DISPATCH_END 57 307#define _HV_DISPATCH_END 58
285 308
286 309
287#ifndef __ASSEMBLER__ 310#ifndef __ASSEMBLER__
@@ -401,7 +424,18 @@ typedef enum {
401 * that the temperature has hit an upper limit and is no longer being 424 * that the temperature has hit an upper limit and is no longer being
402 * accurately tracked. 425 * accurately tracked.
403 */ 426 */
404 HV_SYSCONF_BOARD_TEMP = 6 427 HV_SYSCONF_BOARD_TEMP = 6,
428
429 /** Legal page size bitmask for hv_install_context().
430 * For example, if 16KB and 64KB small pages are supported,
431 * it would return "HV_CTX_PG_SM_16K | HV_CTX_PG_SM_64K".
432 */
433 HV_SYSCONF_VALID_PAGE_SIZES = 7,
434
435 /** The size of jumbo pages, in bytes.
436 * If no jumbo pages are available, zero will be returned.
437 */
438 HV_SYSCONF_PAGE_SIZE_JUMBO = 8,
405 439
406} HV_SysconfQuery; 440} HV_SysconfQuery;
407 441
@@ -474,7 +508,19 @@ typedef enum {
474 HV_CONFSTR_SWITCH_CONTROL = 14, 508 HV_CONFSTR_SWITCH_CONTROL = 14,
475 509
476 /** Chip revision level. */ 510 /** Chip revision level. */
477 HV_CONFSTR_CHIP_REV = 15 511 HV_CONFSTR_CHIP_REV = 15,
512
513 /** CPU module part number. */
514 HV_CONFSTR_CPUMOD_PART_NUM = 16,
515
516 /** CPU module serial number. */
517 HV_CONFSTR_CPUMOD_SERIAL_NUM = 17,
518
519 /** CPU module revision level. */
520 HV_CONFSTR_CPUMOD_REV = 18,
521
522 /** Human-readable CPU module description. */
523 HV_CONFSTR_CPUMOD_DESC = 19
478 524
479} HV_ConfstrQuery; 525} HV_ConfstrQuery;
480 526
@@ -494,11 +540,16 @@ int hv_confstr(HV_ConfstrQuery query, HV_VirtAddr buf, int len);
494/** Tile coordinate */ 540/** Tile coordinate */
495typedef struct 541typedef struct
496{ 542{
543#ifndef __BIG_ENDIAN__
497 /** X coordinate, relative to supervisor's top-left coordinate */ 544 /** X coordinate, relative to supervisor's top-left coordinate */
498 int x; 545 int x;
499 546
500 /** Y coordinate, relative to supervisor's top-left coordinate */ 547 /** Y coordinate, relative to supervisor's top-left coordinate */
501 int y; 548 int y;
549#else
550 int y;
551 int x;
552#endif
502} HV_Coord; 553} HV_Coord;
503 554
504 555
@@ -649,6 +700,12 @@ void hv_set_rtc(HV_RTCTime time);
649 * new page table does not need to contain any mapping for the 700 * new page table does not need to contain any mapping for the
650 * hv_install_context address itself. 701 * hv_install_context address itself.
651 * 702 *
703 * At most one HV_CTX_PG_SM_* flag may be specified in "flags";
704 * if multiple flags are specified, HV_EINVAL is returned.
705 * Specifying none of the flags results in using the default page size.
706 * All cores participating in a given client must request the same
707 * page size, or the results are undefined.
708 *
652 * @param page_table Root of the page table. 709 * @param page_table Root of the page table.
653 * @param access PTE providing info on how to read the page table. This 710 * @param access PTE providing info on how to read the page table. This
654 * value must be consistent between multiple tiles sharing a page table, 711 * value must be consistent between multiple tiles sharing a page table,
@@ -667,8 +724,36 @@ int hv_install_context(HV_PhysAddr page_table, HV_PTE access, HV_ASID asid,
667#define HV_CTX_DIRECTIO 0x1 /**< Direct I/O requests are accepted from 724#define HV_CTX_DIRECTIO 0x1 /**< Direct I/O requests are accepted from
668 PL0. */ 725 PL0. */
669 726
727#define HV_CTX_PG_SM_4K 0x10 /**< Use 4K small pages, if available. */
728#define HV_CTX_PG_SM_16K 0x20 /**< Use 16K small pages, if available. */
729#define HV_CTX_PG_SM_64K 0x40 /**< Use 64K small pages, if available. */
730#define HV_CTX_PG_SM_MASK 0xf0 /**< Mask of all possible small pages. */
731
670#ifndef __ASSEMBLER__ 732#ifndef __ASSEMBLER__
671 733
734
735/** Set the number of pages ganged together by HV_PTE_SUPER at a
736 * particular level of the page table.
737 *
738 * The current TILE-Gx hardware only supports powers of four
739 * (i.e. log2_count must be a multiple of two), and the requested
740 * "super" page size must be less than the span of the next level in
741 * the page table. The largest size that can be requested is 64GB.
742 *
743 * The shift value is initially "0" for all page table levels,
744 * indicating that the HV_PTE_SUPER bit is effectively ignored.
745 *
746 * If you change the count from one non-zero value to another, the
747 * hypervisor will flush the entire TLB and TSB to avoid confusion.
748 *
749 * @param level Page table level (0, 1, or 2)
750 * @param log2_count Base-2 log of the number of pages to gang together,
751 * i.e. how much to shift left the base page size for the super page size.
752 * @return Zero on success, or a hypervisor error code on failure.
753 */
754int hv_set_pte_super_shift(int level, int log2_count);
755
756
672/** Value returned from hv_inquire_context(). */ 757/** Value returned from hv_inquire_context(). */
673typedef struct 758typedef struct
674{ 759{
@@ -986,8 +1071,13 @@ HV_VirtAddrRange hv_inquire_virtual(int idx);
986/** A range of ASID values. */ 1071/** A range of ASID values. */
987typedef struct 1072typedef struct
988{ 1073{
1074#ifndef __BIG_ENDIAN__
989 HV_ASID start; /**< First ASID in the range. */ 1075 HV_ASID start; /**< First ASID in the range. */
990 unsigned int size; /**< Number of ASIDs. Zero for an invalid range. */ 1076 unsigned int size; /**< Number of ASIDs. Zero for an invalid range. */
1077#else
1078 unsigned int size; /**< Number of ASIDs. Zero for an invalid range. */
1079 HV_ASID start; /**< First ASID in the range. */
1080#endif
991} HV_ASIDRange; 1081} HV_ASIDRange;
992 1082
993/** Returns information about a range of ASIDs. 1083/** Returns information about a range of ASIDs.
@@ -1238,11 +1328,14 @@ HV_Errno hv_set_command_line(HV_VirtAddr buf, int length);
1238 * with the existing priority pages) or "red/black" (if they don't). 1328 * with the existing priority pages) or "red/black" (if they don't).
1239 * The bitmask provides information on which parts of the cache 1329 * The bitmask provides information on which parts of the cache
1240 * have been used for pinned pages so far on this tile; if (1 << N) 1330 * have been used for pinned pages so far on this tile; if (1 << N)
1241 * appears in the bitmask, that indicates that a page has been marked 1331 * appears in the bitmask, that indicates that a 4KB region of the
1242 * "priority" whose PFN equals N, mod 8. 1332 * cache starting at (N * 4KB) is in use by a "priority" page.
1333 * The portion of cache used by a particular page can be computed
1334 * by taking the page's PA, modulo CHIP_L2_CACHE_SIZE(), and setting
1335 * all the "4KB" bits corresponding to the actual page size.
1243 * @param bitmask A bitmap of priority page set values 1336 * @param bitmask A bitmap of priority page set values
1244 */ 1337 */
1245void hv_set_caching(unsigned int bitmask); 1338void hv_set_caching(unsigned long bitmask);
1246 1339
1247 1340
1248/** Zero out a specified number of pages. 1341/** Zero out a specified number of pages.
@@ -1308,6 +1401,7 @@ typedef enum
1308/** Message recipient. */ 1401/** Message recipient. */
1309typedef struct 1402typedef struct
1310{ 1403{
1404#ifndef __BIG_ENDIAN__
1311 /** X coordinate, relative to supervisor's top-left coordinate */ 1405 /** X coordinate, relative to supervisor's top-left coordinate */
1312 unsigned int x:11; 1406 unsigned int x:11;
1313 1407
@@ -1316,6 +1410,11 @@ typedef struct
1316 1410
1317 /** Status of this recipient */ 1411 /** Status of this recipient */
1318 HV_Recip_State state:10; 1412 HV_Recip_State state:10;
1413#else //__BIG_ENDIAN__
1414 HV_Recip_State state:10;
1415 unsigned int y:11;
1416 unsigned int x:11;
1417#endif
1319} HV_Recipient; 1418} HV_Recipient;
1320 1419
1321/** Send a message to a set of recipients. 1420/** Send a message to a set of recipients.
@@ -1851,12 +1950,12 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
1851#define HV_PTE_INDEX_USER 10 /**< Page is user-accessible */ 1950#define HV_PTE_INDEX_USER 10 /**< Page is user-accessible */
1852#define HV_PTE_INDEX_ACCESSED 11 /**< Page has been accessed */ 1951#define HV_PTE_INDEX_ACCESSED 11 /**< Page has been accessed */
1853#define HV_PTE_INDEX_DIRTY 12 /**< Page has been written */ 1952#define HV_PTE_INDEX_DIRTY 12 /**< Page has been written */
1854 /* Bits 13-15 are reserved for 1953 /* Bits 13-14 are reserved for
1855 future use. */ 1954 future use. */
1955#define HV_PTE_INDEX_SUPER 15 /**< Pages ganged together for TLB */
1856#define HV_PTE_INDEX_MODE 16 /**< Page mode; see HV_PTE_MODE_xxx */ 1956#define HV_PTE_INDEX_MODE 16 /**< Page mode; see HV_PTE_MODE_xxx */
1857#define HV_PTE_MODE_BITS 3 /**< Number of bits in mode */ 1957#define HV_PTE_MODE_BITS 3 /**< Number of bits in mode */
1858 /* Bit 19 is reserved for 1958#define HV_PTE_INDEX_CLIENT2 19 /**< Page client state 2 */
1859 future use. */
1860#define HV_PTE_INDEX_LOTAR 20 /**< Page's LOTAR; must be high bits 1959#define HV_PTE_INDEX_LOTAR 20 /**< Page's LOTAR; must be high bits
1861 of word */ 1960 of word */
1862#define HV_PTE_LOTAR_BITS 12 /**< Number of bits in a LOTAR */ 1961#define HV_PTE_LOTAR_BITS 12 /**< Number of bits in a LOTAR */
@@ -1869,15 +1968,6 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
1869 of word */ 1968 of word */
1870#define HV_PTE_PTFN_BITS 29 /**< Number of bits in a PTFN */ 1969#define HV_PTE_PTFN_BITS 29 /**< Number of bits in a PTFN */
1871 1970
1872/** Position of the PFN field within the PTE (subset of the PTFN). */
1873#define HV_PTE_INDEX_PFN (HV_PTE_INDEX_PTFN + (HV_LOG2_PAGE_SIZE_SMALL - \
1874 HV_LOG2_PAGE_TABLE_ALIGN))
1875
1876/** Length of the PFN field within the PTE (subset of the PTFN). */
1877#define HV_PTE_INDEX_PFN_BITS (HV_PTE_INDEX_PTFN_BITS - \
1878 (HV_LOG2_PAGE_SIZE_SMALL - \
1879 HV_LOG2_PAGE_TABLE_ALIGN))
1880
1881/* 1971/*
1882 * Legal values for the PTE's mode field 1972 * Legal values for the PTE's mode field
1883 */ 1973 */
@@ -1957,7 +2047,10 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
1957 2047
1958/** Does this PTE map a page? 2048/** Does this PTE map a page?
1959 * 2049 *
1960 * If this bit is set in the level-1 page table, the entry should be 2050 * If this bit is set in a level-0 page table, the entry should be
2051 * interpreted as a level-2 page table entry mapping a jumbo page.
2052 *
2053 * If this bit is set in a level-1 page table, the entry should be
1961 * interpreted as a level-2 page table entry mapping a large page. 2054 * interpreted as a level-2 page table entry mapping a large page.
1962 * 2055 *
1963 * This bit should not be modified by the client while PRESENT is set, as 2056 * This bit should not be modified by the client while PRESENT is set, as
@@ -1967,6 +2060,18 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
1967 */ 2060 */
1968#define HV_PTE_PAGE (__HV_PTE_ONE << HV_PTE_INDEX_PAGE) 2061#define HV_PTE_PAGE (__HV_PTE_ONE << HV_PTE_INDEX_PAGE)
1969 2062
2063/** Does this PTE implicitly reference multiple pages?
2064 *
2065 * If this bit is set in the page table (either in the level-2 page table,
2066 * or in a higher level page table in conjunction with the PAGE bit)
2067 * then the PTE specifies a range of contiguous pages, not a single page.
2068 * The hv_set_pte_super_shift() allows you to specify the count for
2069 * each level of the page table.
2070 *
2071 * Note: this bit is not supported on TILEPro systems.
2072 */
2073#define HV_PTE_SUPER (__HV_PTE_ONE << HV_PTE_INDEX_SUPER)
2074
1970/** Is this a global (non-ASID) mapping? 2075/** Is this a global (non-ASID) mapping?
1971 * 2076 *
1972 * If this bit is set, the translations established by this PTE will 2077 * If this bit is set, the translations established by this PTE will
@@ -2046,6 +2151,13 @@ int hv_flush_remote(HV_PhysAddr cache_pa, unsigned long cache_control,
2046 */ 2151 */
2047#define HV_PTE_CLIENT1 (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT1) 2152#define HV_PTE_CLIENT1 (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT1)
2048 2153
2154/** Client-private bit in PTE.
2155 *
2156 * This bit is guaranteed not to be inspected or modified by the
2157 * hypervisor.
2158 */
2159#define HV_PTE_CLIENT2 (__HV_PTE_ONE << HV_PTE_INDEX_CLIENT2)
2160
2049/** Non-coherent (NC) bit in PTE. 2161/** Non-coherent (NC) bit in PTE.
2050 * 2162 *
2051 * If this bit is set, the mapping that is set up will be non-coherent 2163 * If this bit is set, the mapping that is set up will be non-coherent
@@ -2178,8 +2290,10 @@ hv_pte_clear_##name(HV_PTE pte) \
2178 */ 2290 */
2179_HV_BIT(present, PRESENT) 2291_HV_BIT(present, PRESENT)
2180_HV_BIT(page, PAGE) 2292_HV_BIT(page, PAGE)
2293_HV_BIT(super, SUPER)
2181_HV_BIT(client0, CLIENT0) 2294_HV_BIT(client0, CLIENT0)
2182_HV_BIT(client1, CLIENT1) 2295_HV_BIT(client1, CLIENT1)
2296_HV_BIT(client2, CLIENT2)
2183_HV_BIT(migrating, MIGRATING) 2297_HV_BIT(migrating, MIGRATING)
2184_HV_BIT(nc, NC) 2298_HV_BIT(nc, NC)
2185_HV_BIT(readable, READABLE) 2299_HV_BIT(readable, READABLE)
@@ -2222,40 +2336,11 @@ hv_pte_set_mode(HV_PTE pte, unsigned int val)
2222 * 2336 *
2223 * This field contains the upper bits of the CPA (client physical 2337 * This field contains the upper bits of the CPA (client physical
2224 * address) of the target page; the complete CPA is this field with 2338 * address) of the target page; the complete CPA is this field with
2225 * HV_LOG2_PAGE_SIZE_SMALL zero bits appended to it. 2339 * HV_LOG2_PAGE_TABLE_ALIGN zero bits appended to it.
2226 *
2227 * For PTEs in a level-1 page table where the Page bit is set, the
2228 * CPA must be aligned modulo the large page size.
2229 */
2230static __inline unsigned int
2231hv_pte_get_pfn(const HV_PTE pte)
2232{
2233 return pte.val >> HV_PTE_INDEX_PFN;
2234}
2235
2236
2237/** Set the page frame number into a PTE. See hv_pte_get_pfn. */
2238static __inline HV_PTE
2239hv_pte_set_pfn(HV_PTE pte, unsigned int val)
2240{
2241 /*
2242 * Note that the use of "PTFN" in the next line is intentional; we
2243 * don't want any garbage lower bits left in that field.
2244 */
2245 pte.val &= ~(((1ULL << HV_PTE_PTFN_BITS) - 1) << HV_PTE_INDEX_PTFN);
2246 pte.val |= (__hv64) val << HV_PTE_INDEX_PFN;
2247 return pte;
2248}
2249
2250/** Get the page table frame number from the PTE.
2251 *
2252 * This field contains the upper bits of the CPA (client physical
2253 * address) of the target page table; the complete CPA is this field with
2254 * with HV_PAGE_TABLE_ALIGN zero bits appended to it.
2255 * 2340 *
2256 * For PTEs in a level-1 page table when the Page bit is not set, the 2341 * For all PTEs in the lowest-level page table, and for all PTEs with
2257 * CPA must be aligned modulo the sticter of HV_PAGE_TABLE_ALIGN and 2342 * the Page bit set in all page tables, the CPA must be aligned modulo
2258 * the level-2 page table size. 2343 * the relevant page size.
2259 */ 2344 */
2260static __inline unsigned long 2345static __inline unsigned long
2261hv_pte_get_ptfn(const HV_PTE pte) 2346hv_pte_get_ptfn(const HV_PTE pte)
@@ -2263,7 +2348,6 @@ hv_pte_get_ptfn(const HV_PTE pte)
2263 return pte.val >> HV_PTE_INDEX_PTFN; 2348 return pte.val >> HV_PTE_INDEX_PTFN;
2264} 2349}
2265 2350
2266
2267/** Set the page table frame number into a PTE. See hv_pte_get_ptfn. */ 2351/** Set the page table frame number into a PTE. See hv_pte_get_ptfn. */
2268static __inline HV_PTE 2352static __inline HV_PTE
2269hv_pte_set_ptfn(HV_PTE pte, unsigned long val) 2353hv_pte_set_ptfn(HV_PTE pte, unsigned long val)
@@ -2273,6 +2357,20 @@ hv_pte_set_ptfn(HV_PTE pte, unsigned long val)
2273 return pte; 2357 return pte;
2274} 2358}
2275 2359
2360/** Get the client physical address from the PTE. See hv_pte_set_ptfn. */
2361static __inline HV_PhysAddr
2362hv_pte_get_pa(const HV_PTE pte)
2363{
2364 return (__hv64) hv_pte_get_ptfn(pte) << HV_LOG2_PAGE_TABLE_ALIGN;
2365}
2366
2367/** Set the client physical address into a PTE. See hv_pte_get_ptfn. */
2368static __inline HV_PTE
2369hv_pte_set_pa(HV_PTE pte, HV_PhysAddr pa)
2370{
2371 return hv_pte_set_ptfn(pte, pa >> HV_LOG2_PAGE_TABLE_ALIGN);
2372}
2373
2276 2374
2277/** Get the remote tile caching this page. 2375/** Get the remote tile caching this page.
2278 * 2376 *
@@ -2308,28 +2406,20 @@ hv_pte_set_lotar(HV_PTE pte, unsigned int val)
2308 2406
2309#endif /* !__ASSEMBLER__ */ 2407#endif /* !__ASSEMBLER__ */
2310 2408
2311/** Converts a client physical address to a pfn. */
2312#define HV_CPA_TO_PFN(p) ((p) >> HV_LOG2_PAGE_SIZE_SMALL)
2313
2314/** Converts a pfn to a client physical address. */
2315#define HV_PFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_SIZE_SMALL)
2316
2317/** Converts a client physical address to a ptfn. */ 2409/** Converts a client physical address to a ptfn. */
2318#define HV_CPA_TO_PTFN(p) ((p) >> HV_LOG2_PAGE_TABLE_ALIGN) 2410#define HV_CPA_TO_PTFN(p) ((p) >> HV_LOG2_PAGE_TABLE_ALIGN)
2319 2411
2320/** Converts a ptfn to a client physical address. */ 2412/** Converts a ptfn to a client physical address. */
2321#define HV_PTFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_TABLE_ALIGN) 2413#define HV_PTFN_TO_CPA(p) (((HV_PhysAddr)(p)) << HV_LOG2_PAGE_TABLE_ALIGN)
2322 2414
2323/** Converts a ptfn to a pfn. */
2324#define HV_PTFN_TO_PFN(p) \
2325 ((p) >> (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN))
2326
2327/** Converts a pfn to a ptfn. */
2328#define HV_PFN_TO_PTFN(p) \
2329 ((p) << (HV_LOG2_PAGE_SIZE_SMALL - HV_LOG2_PAGE_TABLE_ALIGN))
2330
2331#if CHIP_VA_WIDTH() > 32 2415#if CHIP_VA_WIDTH() > 32
2332 2416
2417/*
2418 * Note that we currently do not allow customizing the page size
2419 * of the L0 pages, but fix them at 4GB, so we do not use the
2420 * "_HV_xxx" nomenclature for the L0 macros.
2421 */
2422
2333/** Log number of HV_PTE entries in L0 page table */ 2423/** Log number of HV_PTE entries in L0 page table */
2334#define HV_LOG2_L0_ENTRIES (CHIP_VA_WIDTH() - HV_LOG2_L1_SPAN) 2424#define HV_LOG2_L0_ENTRIES (CHIP_VA_WIDTH() - HV_LOG2_L1_SPAN)
2335 2425
@@ -2359,69 +2449,104 @@ hv_pte_set_lotar(HV_PTE pte, unsigned int val)
2359#endif /* CHIP_VA_WIDTH() > 32 */ 2449#endif /* CHIP_VA_WIDTH() > 32 */
2360 2450
2361/** Log number of HV_PTE entries in L1 page table */ 2451/** Log number of HV_PTE entries in L1 page table */
2362#define HV_LOG2_L1_ENTRIES (HV_LOG2_L1_SPAN - HV_LOG2_PAGE_SIZE_LARGE) 2452#define _HV_LOG2_L1_ENTRIES(log2_page_size_large) \
2453 (HV_LOG2_L1_SPAN - log2_page_size_large)
2363 2454
2364/** Number of HV_PTE entries in L1 page table */ 2455/** Number of HV_PTE entries in L1 page table */
2365#define HV_L1_ENTRIES (1 << HV_LOG2_L1_ENTRIES) 2456#define _HV_L1_ENTRIES(log2_page_size_large) \
2457 (1 << _HV_LOG2_L1_ENTRIES(log2_page_size_large))
2366 2458
2367/** Log size of L1 page table in bytes */ 2459/** Log size of L1 page table in bytes */
2368#define HV_LOG2_L1_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L1_ENTRIES) 2460#define _HV_LOG2_L1_SIZE(log2_page_size_large) \
2461 (HV_LOG2_PTE_SIZE + _HV_LOG2_L1_ENTRIES(log2_page_size_large))
2369 2462
2370/** Size of L1 page table in bytes */ 2463/** Size of L1 page table in bytes */
2371#define HV_L1_SIZE (1 << HV_LOG2_L1_SIZE) 2464#define _HV_L1_SIZE(log2_page_size_large) \
2465 (1 << _HV_LOG2_L1_SIZE(log2_page_size_large))
2372 2466
2373/** Log number of HV_PTE entries in level-2 page table */ 2467/** Log number of HV_PTE entries in level-2 page table */
2374#define HV_LOG2_L2_ENTRIES (HV_LOG2_PAGE_SIZE_LARGE - HV_LOG2_PAGE_SIZE_SMALL) 2468#define _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small) \
2469 (log2_page_size_large - log2_page_size_small)
2375 2470
2376/** Number of HV_PTE entries in level-2 page table */ 2471/** Number of HV_PTE entries in level-2 page table */
2377#define HV_L2_ENTRIES (1 << HV_LOG2_L2_ENTRIES) 2472#define _HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) \
2473 (1 << _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small))
2378 2474
2379/** Log size of level-2 page table in bytes */ 2475/** Log size of level-2 page table in bytes */
2380#define HV_LOG2_L2_SIZE (HV_LOG2_PTE_SIZE + HV_LOG2_L2_ENTRIES) 2476#define _HV_LOG2_L2_SIZE(log2_page_size_large, log2_page_size_small) \
2477 (HV_LOG2_PTE_SIZE + \
2478 _HV_LOG2_L2_ENTRIES(log2_page_size_large, log2_page_size_small))
2381 2479
2382/** Size of level-2 page table in bytes */ 2480/** Size of level-2 page table in bytes */
2383#define HV_L2_SIZE (1 << HV_LOG2_L2_SIZE) 2481#define _HV_L2_SIZE(log2_page_size_large, log2_page_size_small) \
2482 (1 << _HV_LOG2_L2_SIZE(log2_page_size_large, log2_page_size_small))
2384 2483
2385#ifdef __ASSEMBLER__ 2484#ifdef __ASSEMBLER__
2386 2485
2387#if CHIP_VA_WIDTH() > 32 2486#if CHIP_VA_WIDTH() > 32
2388 2487
2389/** Index in L1 for a specific VA */ 2488/** Index in L1 for a specific VA */
2390#define HV_L1_INDEX(va) \ 2489#define _HV_L1_INDEX(va, log2_page_size_large) \
2391 (((va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1)) 2490 (((va) >> log2_page_size_large) & (_HV_L1_ENTRIES(log2_page_size_large) - 1))
2392 2491
2393#else /* CHIP_VA_WIDTH() > 32 */ 2492#else /* CHIP_VA_WIDTH() > 32 */
2394 2493
2395/** Index in L1 for a specific VA */ 2494/** Index in L1 for a specific VA */
2396#define HV_L1_INDEX(va) \ 2495#define _HV_L1_INDEX(va, log2_page_size_large) \
2397 (((va) >> HV_LOG2_PAGE_SIZE_LARGE)) 2496 (((va) >> log2_page_size_large))
2398 2497
2399#endif /* CHIP_VA_WIDTH() > 32 */ 2498#endif /* CHIP_VA_WIDTH() > 32 */
2400 2499
2401/** Index in level-2 page table for a specific VA */ 2500/** Index in level-2 page table for a specific VA */
2402#define HV_L2_INDEX(va) \ 2501#define _HV_L2_INDEX(va, log2_page_size_large, log2_page_size_small) \
2403 (((va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1)) 2502 (((va) >> log2_page_size_small) & \
2503 (_HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) - 1))
2404 2504
2405#else /* __ASSEMBLER __ */ 2505#else /* __ASSEMBLER __ */
2406 2506
2407#if CHIP_VA_WIDTH() > 32 2507#if CHIP_VA_WIDTH() > 32
2408 2508
2409/** Index in L1 for a specific VA */ 2509/** Index in L1 for a specific VA */
2410#define HV_L1_INDEX(va) \ 2510#define _HV_L1_INDEX(va, log2_page_size_large) \
2411 (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE) & (HV_L1_ENTRIES - 1)) 2511 (((HV_VirtAddr)(va) >> log2_page_size_large) & \
2512 (_HV_L1_ENTRIES(log2_page_size_large) - 1))
2412 2513
2413#else /* CHIP_VA_WIDTH() > 32 */ 2514#else /* CHIP_VA_WIDTH() > 32 */
2414 2515
2415/** Index in L1 for a specific VA */ 2516/** Index in L1 for a specific VA */
2416#define HV_L1_INDEX(va) \ 2517#define _HV_L1_INDEX(va, log2_page_size_large) \
2417 (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_LARGE)) 2518 (((HV_VirtAddr)(va) >> log2_page_size_large))
2418 2519
2419#endif /* CHIP_VA_WIDTH() > 32 */ 2520#endif /* CHIP_VA_WIDTH() > 32 */
2420 2521
2421/** Index in level-2 page table for a specific VA */ 2522/** Index in level-2 page table for a specific VA */
2422#define HV_L2_INDEX(va) \ 2523#define _HV_L2_INDEX(va, log2_page_size_large, log2_page_size_small) \
2423 (((HV_VirtAddr)(va) >> HV_LOG2_PAGE_SIZE_SMALL) & (HV_L2_ENTRIES - 1)) 2524 (((HV_VirtAddr)(va) >> log2_page_size_small) & \
2525 (_HV_L2_ENTRIES(log2_page_size_large, log2_page_size_small) - 1))
2424 2526
2425#endif /* __ASSEMBLER __ */ 2527#endif /* __ASSEMBLER __ */
2426 2528
2427#endif /* _TILE_HV_H */ 2529/** Position of the PFN field within the PTE (subset of the PTFN). */
2530#define _HV_PTE_INDEX_PFN(log2_page_size) \
2531 (HV_PTE_INDEX_PTFN + (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
2532
2533/** Length of the PFN field within the PTE (subset of the PTFN). */
2534#define _HV_PTE_INDEX_PFN_BITS(log2_page_size) \
2535 (HV_PTE_INDEX_PTFN_BITS - (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
2536
2537/** Converts a client physical address to a pfn. */
2538#define _HV_CPA_TO_PFN(p, log2_page_size) ((p) >> log2_page_size)
2539
2540/** Converts a pfn to a client physical address. */
2541#define _HV_PFN_TO_CPA(p, log2_page_size) \
2542 (((HV_PhysAddr)(p)) << log2_page_size)
2543
2544/** Converts a ptfn to a pfn. */
2545#define _HV_PTFN_TO_PFN(p, log2_page_size) \
2546 ((p) >> (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
2547
2548/** Converts a pfn to a ptfn. */
2549#define _HV_PFN_TO_PTFN(p, log2_page_size) \
2550 ((p) << (log2_page_size - HV_LOG2_PAGE_TABLE_ALIGN))
2551
2552#endif /* _HV_HV_H */
diff --git a/arch/tile/kernel/Makefile b/arch/tile/kernel/Makefile
index 0d826faf8f35..5de99248d8df 100644
--- a/arch/tile/kernel/Makefile
+++ b/arch/tile/kernel/Makefile
@@ -9,10 +9,9 @@ obj-y := backtrace.o entry.o irq.o messaging.o \
9 intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o 9 intvec_$(BITS).o regs_$(BITS).o tile-desc_$(BITS).o
10 10
11obj-$(CONFIG_HARDWALL) += hardwall.o 11obj-$(CONFIG_HARDWALL) += hardwall.o
12obj-$(CONFIG_TILEGX) += futex_64.o
13obj-$(CONFIG_COMPAT) += compat.o compat_signal.o 12obj-$(CONFIG_COMPAT) += compat.o compat_signal.o
14obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o 13obj-$(CONFIG_SMP) += smpboot.o smp.o tlb.o
15obj-$(CONFIG_MODULES) += module.o 14obj-$(CONFIG_MODULES) += module.o
16obj-$(CONFIG_EARLY_PRINTK) += early_printk.o 15obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
17obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o 16obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel_$(BITS).o
18obj-$(CONFIG_PCI) += pci.o 17obj-$(CONFIG_PCI) += pci.o
diff --git a/arch/tile/kernel/entry.S b/arch/tile/kernel/entry.S
index ec91568df880..133c4b56a99e 100644
--- a/arch/tile/kernel/entry.S
+++ b/arch/tile/kernel/entry.S
@@ -100,8 +100,9 @@ STD_ENTRY(smp_nap)
100 */ 100 */
101STD_ENTRY(_cpu_idle) 101STD_ENTRY(_cpu_idle)
102 movei r1, 1 102 movei r1, 1
103 IRQ_ENABLE_LOAD(r2, r3)
103 mtspr INTERRUPT_CRITICAL_SECTION, r1 104 mtspr INTERRUPT_CRITICAL_SECTION, r1
104 IRQ_ENABLE(r2, r3) /* unmask, but still with ICS set */ 105 IRQ_ENABLE_APPLY(r2, r3) /* unmask, but still with ICS set */
105 mtspr INTERRUPT_CRITICAL_SECTION, zero 106 mtspr INTERRUPT_CRITICAL_SECTION, zero
106 .global _cpu_idle_nap 107 .global _cpu_idle_nap
107_cpu_idle_nap: 108_cpu_idle_nap:
diff --git a/arch/tile/kernel/hardwall.c b/arch/tile/kernel/hardwall.c
index 8c41891aab34..20273ee37deb 100644
--- a/arch/tile/kernel/hardwall.c
+++ b/arch/tile/kernel/hardwall.c
@@ -33,59 +33,157 @@
33 33
34 34
35/* 35/*
36 * This data structure tracks the rectangle data, etc., associated 36 * Implement a per-cpu "hardwall" resource class such as UDN or IPI.
37 * one-to-one with a "struct file *" from opening HARDWALL_FILE. 37 * We use "hardwall" nomenclature throughout for historical reasons.
38 * The lock here controls access to the list data structure as well as
39 * to the items on the list.
40 */
41struct hardwall_type {
42 int index;
43 int is_xdn;
44 int is_idn;
45 int disabled;
46 const char *name;
47 struct list_head list;
48 spinlock_t lock;
49 struct proc_dir_entry *proc_dir;
50};
51
52enum hardwall_index {
53 HARDWALL_UDN = 0,
54#ifndef __tilepro__
55 HARDWALL_IDN = 1,
56 HARDWALL_IPI = 2,
57#endif
58 _HARDWALL_TYPES
59};
60
61static struct hardwall_type hardwall_types[] = {
62 { /* user-space access to UDN */
63 0,
64 1,
65 0,
66 0,
67 "udn",
68 LIST_HEAD_INIT(hardwall_types[HARDWALL_UDN].list),
69 __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_UDN].lock),
70 NULL
71 },
72#ifndef __tilepro__
73 { /* user-space access to IDN */
74 1,
75 1,
76 1,
77 1, /* disabled pending hypervisor support */
78 "idn",
79 LIST_HEAD_INIT(hardwall_types[HARDWALL_IDN].list),
80 __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IDN].lock),
81 NULL
82 },
83 { /* access to user-space IPI */
84 2,
85 0,
86 0,
87 0,
88 "ipi",
89 LIST_HEAD_INIT(hardwall_types[HARDWALL_IPI].list),
90 __SPIN_LOCK_INITIALIZER(hardwall_types[HARDWALL_IPI].lock),
91 NULL
92 },
93#endif
94};
95
96/*
97 * This data structure tracks the cpu data, etc., associated
98 * one-to-one with a "struct file *" from opening a hardwall device file.
38 * Note that the file's private data points back to this structure. 99 * Note that the file's private data points back to this structure.
39 */ 100 */
40struct hardwall_info { 101struct hardwall_info {
41 struct list_head list; /* "rectangles" list */ 102 struct list_head list; /* for hardwall_types.list */
42 struct list_head task_head; /* head of tasks in this hardwall */ 103 struct list_head task_head; /* head of tasks in this hardwall */
43 struct cpumask cpumask; /* cpus in the rectangle */ 104 struct hardwall_type *type; /* type of this resource */
105 struct cpumask cpumask; /* cpus reserved */
106 int id; /* integer id for this hardwall */
107 int teardown_in_progress; /* are we tearing this one down? */
108
109 /* Remaining fields only valid for user-network resources. */
44 int ulhc_x; /* upper left hand corner x coord */ 110 int ulhc_x; /* upper left hand corner x coord */
45 int ulhc_y; /* upper left hand corner y coord */ 111 int ulhc_y; /* upper left hand corner y coord */
46 int width; /* rectangle width */ 112 int width; /* rectangle width */
47 int height; /* rectangle height */ 113 int height; /* rectangle height */
48 int id; /* integer id for this hardwall */ 114#if CHIP_HAS_REV1_XDN()
49 int teardown_in_progress; /* are we tearing this one down? */ 115 atomic_t xdn_pending_count; /* cores in phase 1 of drain */
116#endif
50}; 117};
51 118
52/* Currently allocated hardwall rectangles */
53static LIST_HEAD(rectangles);
54 119
55/* /proc/tile/hardwall */ 120/* /proc/tile/hardwall */
56static struct proc_dir_entry *hardwall_proc_dir; 121static struct proc_dir_entry *hardwall_proc_dir;
57 122
58/* Functions to manage files in /proc/tile/hardwall. */ 123/* Functions to manage files in /proc/tile/hardwall. */
59static void hardwall_add_proc(struct hardwall_info *rect); 124static void hardwall_add_proc(struct hardwall_info *);
60static void hardwall_remove_proc(struct hardwall_info *rect); 125static void hardwall_remove_proc(struct hardwall_info *);
61
62/*
63 * Guard changes to the hardwall data structures.
64 * This could be finer grained (e.g. one lock for the list of hardwall
65 * rectangles, then separate embedded locks for each one's list of tasks),
66 * but there are subtle correctness issues when trying to start with
67 * a task's "hardwall" pointer and lock the correct rectangle's embedded
68 * lock in the presence of a simultaneous deactivation, so it seems
69 * easier to have a single lock, given that none of these data
70 * structures are touched very frequently during normal operation.
71 */
72static DEFINE_SPINLOCK(hardwall_lock);
73 126
74/* Allow disabling UDN access. */ 127/* Allow disabling UDN access. */
75static int udn_disabled;
76static int __init noudn(char *str) 128static int __init noudn(char *str)
77{ 129{
78 pr_info("User-space UDN access is disabled\n"); 130 pr_info("User-space UDN access is disabled\n");
79 udn_disabled = 1; 131 hardwall_types[HARDWALL_UDN].disabled = 1;
80 return 0; 132 return 0;
81} 133}
82early_param("noudn", noudn); 134early_param("noudn", noudn);
83 135
136#ifndef __tilepro__
137/* Allow disabling IDN access. */
138static int __init noidn(char *str)
139{
140 pr_info("User-space IDN access is disabled\n");
141 hardwall_types[HARDWALL_IDN].disabled = 1;
142 return 0;
143}
144early_param("noidn", noidn);
145
146/* Allow disabling IPI access. */
147static int __init noipi(char *str)
148{
149 pr_info("User-space IPI access is disabled\n");
150 hardwall_types[HARDWALL_IPI].disabled = 1;
151 return 0;
152}
153early_param("noipi", noipi);
154#endif
155
84 156
85/* 157/*
86 * Low-level primitives 158 * Low-level primitives for UDN/IDN
87 */ 159 */
88 160
161#ifdef __tilepro__
162#define mtspr_XDN(hwt, name, val) \
163 do { (void)(hwt); __insn_mtspr(SPR_UDN_##name, (val)); } while (0)
164#define mtspr_MPL_XDN(hwt, name, val) \
165 do { (void)(hwt); __insn_mtspr(SPR_MPL_UDN_##name, (val)); } while (0)
166#define mfspr_XDN(hwt, name) \
167 ((void)(hwt), __insn_mfspr(SPR_UDN_##name))
168#else
169#define mtspr_XDN(hwt, name, val) \
170 do { \
171 if ((hwt)->is_idn) \
172 __insn_mtspr(SPR_IDN_##name, (val)); \
173 else \
174 __insn_mtspr(SPR_UDN_##name, (val)); \
175 } while (0)
176#define mtspr_MPL_XDN(hwt, name, val) \
177 do { \
178 if ((hwt)->is_idn) \
179 __insn_mtspr(SPR_MPL_IDN_##name, (val)); \
180 else \
181 __insn_mtspr(SPR_MPL_UDN_##name, (val)); \
182 } while (0)
183#define mfspr_XDN(hwt, name) \
184 ((hwt)->is_idn ? __insn_mfspr(SPR_IDN_##name) : __insn_mfspr(SPR_UDN_##name))
185#endif
186
89/* Set a CPU bit if the CPU is online. */ 187/* Set a CPU bit if the CPU is online. */
90#define cpu_online_set(cpu, dst) do { \ 188#define cpu_online_set(cpu, dst) do { \
91 if (cpu_online(cpu)) \ 189 if (cpu_online(cpu)) \
@@ -101,7 +199,7 @@ static int contains(struct hardwall_info *r, int x, int y)
101} 199}
102 200
103/* Compute the rectangle parameters and validate the cpumask. */ 201/* Compute the rectangle parameters and validate the cpumask. */
104static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask) 202static int check_rectangle(struct hardwall_info *r, struct cpumask *mask)
105{ 203{
106 int x, y, cpu, ulhc, lrhc; 204 int x, y, cpu, ulhc, lrhc;
107 205
@@ -114,8 +212,6 @@ static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
114 r->ulhc_y = cpu_y(ulhc); 212 r->ulhc_y = cpu_y(ulhc);
115 r->width = cpu_x(lrhc) - r->ulhc_x + 1; 213 r->width = cpu_x(lrhc) - r->ulhc_x + 1;
116 r->height = cpu_y(lrhc) - r->ulhc_y + 1; 214 r->height = cpu_y(lrhc) - r->ulhc_y + 1;
117 cpumask_copy(&r->cpumask, mask);
118 r->id = ulhc; /* The ulhc cpu id can be the hardwall id. */
119 215
120 /* Width and height must be positive */ 216 /* Width and height must be positive */
121 if (r->width <= 0 || r->height <= 0) 217 if (r->width <= 0 || r->height <= 0)
@@ -128,7 +224,7 @@ static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
128 return -EINVAL; 224 return -EINVAL;
129 225
130 /* 226 /*
131 * Note that offline cpus can't be drained when this UDN 227 * Note that offline cpus can't be drained when this user network
132 * rectangle eventually closes. We used to detect this 228 * rectangle eventually closes. We used to detect this
133 * situation and print a warning, but it annoyed users and 229 * situation and print a warning, but it annoyed users and
134 * they ignored it anyway, so now we just return without a 230 * they ignored it anyway, so now we just return without a
@@ -137,16 +233,6 @@ static int setup_rectangle(struct hardwall_info *r, struct cpumask *mask)
137 return 0; 233 return 0;
138} 234}
139 235
140/* Do the two given rectangles overlap on any cpu? */
141static int overlaps(struct hardwall_info *a, struct hardwall_info *b)
142{
143 return a->ulhc_x + a->width > b->ulhc_x && /* A not to the left */
144 b->ulhc_x + b->width > a->ulhc_x && /* B not to the left */
145 a->ulhc_y + a->height > b->ulhc_y && /* A not above */
146 b->ulhc_y + b->height > a->ulhc_y; /* B not above */
147}
148
149
150/* 236/*
151 * Hardware management of hardwall setup, teardown, trapping, 237 * Hardware management of hardwall setup, teardown, trapping,
152 * and enabling/disabling PL0 access to the networks. 238 * and enabling/disabling PL0 access to the networks.
@@ -157,23 +243,35 @@ enum direction_protect {
157 N_PROTECT = (1 << 0), 243 N_PROTECT = (1 << 0),
158 E_PROTECT = (1 << 1), 244 E_PROTECT = (1 << 1),
159 S_PROTECT = (1 << 2), 245 S_PROTECT = (1 << 2),
160 W_PROTECT = (1 << 3) 246 W_PROTECT = (1 << 3),
247 C_PROTECT = (1 << 4),
161}; 248};
162 249
163static void enable_firewall_interrupts(void) 250static inline int xdn_which_interrupt(struct hardwall_type *hwt)
251{
252#ifndef __tilepro__
253 if (hwt->is_idn)
254 return INT_IDN_FIREWALL;
255#endif
256 return INT_UDN_FIREWALL;
257}
258
259static void enable_firewall_interrupts(struct hardwall_type *hwt)
164{ 260{
165 arch_local_irq_unmask_now(INT_UDN_FIREWALL); 261 arch_local_irq_unmask_now(xdn_which_interrupt(hwt));
166} 262}
167 263
168static void disable_firewall_interrupts(void) 264static void disable_firewall_interrupts(struct hardwall_type *hwt)
169{ 265{
170 arch_local_irq_mask_now(INT_UDN_FIREWALL); 266 arch_local_irq_mask_now(xdn_which_interrupt(hwt));
171} 267}
172 268
173/* Set up hardwall on this cpu based on the passed hardwall_info. */ 269/* Set up hardwall on this cpu based on the passed hardwall_info. */
174static void hardwall_setup_ipi_func(void *info) 270static void hardwall_setup_func(void *info)
175{ 271{
176 struct hardwall_info *r = info; 272 struct hardwall_info *r = info;
273 struct hardwall_type *hwt = r->type;
274
177 int cpu = smp_processor_id(); 275 int cpu = smp_processor_id();
178 int x = cpu % smp_width; 276 int x = cpu % smp_width;
179 int y = cpu / smp_width; 277 int y = cpu / smp_width;
@@ -187,13 +285,12 @@ static void hardwall_setup_ipi_func(void *info)
187 if (y == r->ulhc_y + r->height - 1) 285 if (y == r->ulhc_y + r->height - 1)
188 bits |= S_PROTECT; 286 bits |= S_PROTECT;
189 BUG_ON(bits == 0); 287 BUG_ON(bits == 0);
190 __insn_mtspr(SPR_UDN_DIRECTION_PROTECT, bits); 288 mtspr_XDN(hwt, DIRECTION_PROTECT, bits);
191 enable_firewall_interrupts(); 289 enable_firewall_interrupts(hwt);
192
193} 290}
194 291
195/* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */ 292/* Set up all cpus on edge of rectangle to enable/disable hardwall SPRs. */
196static void hardwall_setup(struct hardwall_info *r) 293static void hardwall_protect_rectangle(struct hardwall_info *r)
197{ 294{
198 int x, y, cpu, delta; 295 int x, y, cpu, delta;
199 struct cpumask rect_cpus; 296 struct cpumask rect_cpus;
@@ -217,37 +314,50 @@ static void hardwall_setup(struct hardwall_info *r)
217 } 314 }
218 315
219 /* Then tell all the cpus to set up their protection SPR */ 316 /* Then tell all the cpus to set up their protection SPR */
220 on_each_cpu_mask(&rect_cpus, hardwall_setup_ipi_func, r, 1); 317 on_each_cpu_mask(&rect_cpus, hardwall_setup_func, r, 1);
221} 318}
222 319
223void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num) 320void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
224{ 321{
225 struct hardwall_info *rect; 322 struct hardwall_info *rect;
323 struct hardwall_type *hwt;
226 struct task_struct *p; 324 struct task_struct *p;
227 struct siginfo info; 325 struct siginfo info;
228 int x, y;
229 int cpu = smp_processor_id(); 326 int cpu = smp_processor_id();
230 int found_processes; 327 int found_processes;
231 unsigned long flags; 328 unsigned long flags;
232
233 struct pt_regs *old_regs = set_irq_regs(regs); 329 struct pt_regs *old_regs = set_irq_regs(regs);
330
234 irq_enter(); 331 irq_enter();
235 332
333 /* Figure out which network trapped. */
334 switch (fault_num) {
335#ifndef __tilepro__
336 case INT_IDN_FIREWALL:
337 hwt = &hardwall_types[HARDWALL_IDN];
338 break;
339#endif
340 case INT_UDN_FIREWALL:
341 hwt = &hardwall_types[HARDWALL_UDN];
342 break;
343 default:
344 BUG();
345 }
346 BUG_ON(hwt->disabled);
347
236 /* This tile trapped a network access; find the rectangle. */ 348 /* This tile trapped a network access; find the rectangle. */
237 x = cpu % smp_width; 349 spin_lock_irqsave(&hwt->lock, flags);
238 y = cpu / smp_width; 350 list_for_each_entry(rect, &hwt->list, list) {
239 spin_lock_irqsave(&hardwall_lock, flags); 351 if (cpumask_test_cpu(cpu, &rect->cpumask))
240 list_for_each_entry(rect, &rectangles, list) {
241 if (contains(rect, x, y))
242 break; 352 break;
243 } 353 }
244 354
245 /* 355 /*
246 * It shouldn't be possible not to find this cpu on the 356 * It shouldn't be possible not to find this cpu on the
247 * rectangle list, since only cpus in rectangles get hardwalled. 357 * rectangle list, since only cpus in rectangles get hardwalled.
248 * The hardwall is only removed after the UDN is drained. 358 * The hardwall is only removed after the user network is drained.
249 */ 359 */
250 BUG_ON(&rect->list == &rectangles); 360 BUG_ON(&rect->list == &hwt->list);
251 361
252 /* 362 /*
253 * If we already started teardown on this hardwall, don't worry; 363 * If we already started teardown on this hardwall, don't worry;
@@ -255,30 +365,32 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
255 * to quiesce. 365 * to quiesce.
256 */ 366 */
257 if (rect->teardown_in_progress) { 367 if (rect->teardown_in_progress) {
258 pr_notice("cpu %d: detected hardwall violation %#lx" 368 pr_notice("cpu %d: detected %s hardwall violation %#lx"
259 " while teardown already in progress\n", 369 " while teardown already in progress\n",
260 cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT)); 370 cpu, hwt->name,
371 (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
261 goto done; 372 goto done;
262 } 373 }
263 374
264 /* 375 /*
265 * Kill off any process that is activated in this rectangle. 376 * Kill off any process that is activated in this rectangle.
266 * We bypass security to deliver the signal, since it must be 377 * We bypass security to deliver the signal, since it must be
267 * one of the activated processes that generated the UDN 378 * one of the activated processes that generated the user network
268 * message that caused this trap, and all the activated 379 * message that caused this trap, and all the activated
269 * processes shared a single open file so are pretty tightly 380 * processes shared a single open file so are pretty tightly
270 * bound together from a security point of view to begin with. 381 * bound together from a security point of view to begin with.
271 */ 382 */
272 rect->teardown_in_progress = 1; 383 rect->teardown_in_progress = 1;
273 wmb(); /* Ensure visibility of rectangle before notifying processes. */ 384 wmb(); /* Ensure visibility of rectangle before notifying processes. */
274 pr_notice("cpu %d: detected hardwall violation %#lx...\n", 385 pr_notice("cpu %d: detected %s hardwall violation %#lx...\n",
275 cpu, (long) __insn_mfspr(SPR_UDN_DIRECTION_PROTECT)); 386 cpu, hwt->name, (long)mfspr_XDN(hwt, DIRECTION_PROTECT));
276 info.si_signo = SIGILL; 387 info.si_signo = SIGILL;
277 info.si_errno = 0; 388 info.si_errno = 0;
278 info.si_code = ILL_HARDWALL; 389 info.si_code = ILL_HARDWALL;
279 found_processes = 0; 390 found_processes = 0;
280 list_for_each_entry(p, &rect->task_head, thread.hardwall_list) { 391 list_for_each_entry(p, &rect->task_head,
281 BUG_ON(p->thread.hardwall != rect); 392 thread.hardwall[hwt->index].list) {
393 BUG_ON(p->thread.hardwall[hwt->index].info != rect);
282 if (!(p->flags & PF_EXITING)) { 394 if (!(p->flags & PF_EXITING)) {
283 found_processes = 1; 395 found_processes = 1;
284 pr_notice("hardwall: killing %d\n", p->pid); 396 pr_notice("hardwall: killing %d\n", p->pid);
@@ -289,7 +401,7 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
289 pr_notice("hardwall: no associated processes!\n"); 401 pr_notice("hardwall: no associated processes!\n");
290 402
291 done: 403 done:
292 spin_unlock_irqrestore(&hardwall_lock, flags); 404 spin_unlock_irqrestore(&hwt->lock, flags);
293 405
294 /* 406 /*
295 * We have to disable firewall interrupts now, or else when we 407 * We have to disable firewall interrupts now, or else when we
@@ -298,48 +410,87 @@ void __kprobes do_hardwall_trap(struct pt_regs* regs, int fault_num)
298 * haven't yet drained the network, and that would allow packets 410 * haven't yet drained the network, and that would allow packets
299 * to cross out of the hardwall region. 411 * to cross out of the hardwall region.
300 */ 412 */
301 disable_firewall_interrupts(); 413 disable_firewall_interrupts(hwt);
302 414
303 irq_exit(); 415 irq_exit();
304 set_irq_regs(old_regs); 416 set_irq_regs(old_regs);
305} 417}
306 418
307/* Allow access from user space to the UDN. */ 419/* Allow access from user space to the user network. */
308void grant_network_mpls(void) 420void grant_hardwall_mpls(struct hardwall_type *hwt)
309{ 421{
310 __insn_mtspr(SPR_MPL_UDN_ACCESS_SET_0, 1); 422#ifndef __tilepro__
311 __insn_mtspr(SPR_MPL_UDN_AVAIL_SET_0, 1); 423 if (!hwt->is_xdn) {
312 __insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_0, 1); 424 __insn_mtspr(SPR_MPL_IPI_0_SET_0, 1);
313 __insn_mtspr(SPR_MPL_UDN_TIMER_SET_0, 1); 425 return;
426 }
427#endif
428 mtspr_MPL_XDN(hwt, ACCESS_SET_0, 1);
429 mtspr_MPL_XDN(hwt, AVAIL_SET_0, 1);
430 mtspr_MPL_XDN(hwt, COMPLETE_SET_0, 1);
431 mtspr_MPL_XDN(hwt, TIMER_SET_0, 1);
314#if !CHIP_HAS_REV1_XDN() 432#if !CHIP_HAS_REV1_XDN()
315 __insn_mtspr(SPR_MPL_UDN_REFILL_SET_0, 1); 433 mtspr_MPL_XDN(hwt, REFILL_SET_0, 1);
316 __insn_mtspr(SPR_MPL_UDN_CA_SET_0, 1); 434 mtspr_MPL_XDN(hwt, CA_SET_0, 1);
317#endif 435#endif
318} 436}
319 437
320/* Deny access from user space to the UDN. */ 438/* Deny access from user space to the user network. */
321void restrict_network_mpls(void) 439void restrict_hardwall_mpls(struct hardwall_type *hwt)
322{ 440{
323 __insn_mtspr(SPR_MPL_UDN_ACCESS_SET_1, 1); 441#ifndef __tilepro__
324 __insn_mtspr(SPR_MPL_UDN_AVAIL_SET_1, 1); 442 if (!hwt->is_xdn) {
325 __insn_mtspr(SPR_MPL_UDN_COMPLETE_SET_1, 1); 443 __insn_mtspr(SPR_MPL_IPI_0_SET_1, 1);
326 __insn_mtspr(SPR_MPL_UDN_TIMER_SET_1, 1); 444 return;
445 }
446#endif
447 mtspr_MPL_XDN(hwt, ACCESS_SET_1, 1);
448 mtspr_MPL_XDN(hwt, AVAIL_SET_1, 1);
449 mtspr_MPL_XDN(hwt, COMPLETE_SET_1, 1);
450 mtspr_MPL_XDN(hwt, TIMER_SET_1, 1);
327#if !CHIP_HAS_REV1_XDN() 451#if !CHIP_HAS_REV1_XDN()
328 __insn_mtspr(SPR_MPL_UDN_REFILL_SET_1, 1); 452 mtspr_MPL_XDN(hwt, REFILL_SET_1, 1);
329 __insn_mtspr(SPR_MPL_UDN_CA_SET_1, 1); 453 mtspr_MPL_XDN(hwt, CA_SET_1, 1);
330#endif 454#endif
331} 455}
332 456
457/* Restrict or deny as necessary for the task we're switching to. */
458void hardwall_switch_tasks(struct task_struct *prev,
459 struct task_struct *next)
460{
461 int i;
462 for (i = 0; i < HARDWALL_TYPES; ++i) {
463 if (prev->thread.hardwall[i].info != NULL) {
464 if (next->thread.hardwall[i].info == NULL)
465 restrict_hardwall_mpls(&hardwall_types[i]);
466 } else if (next->thread.hardwall[i].info != NULL) {
467 grant_hardwall_mpls(&hardwall_types[i]);
468 }
469 }
470}
471
472/* Does this task have the right to IPI the given cpu? */
473int hardwall_ipi_valid(int cpu)
474{
475#ifdef __tilegx__
476 struct hardwall_info *info =
477 current->thread.hardwall[HARDWALL_IPI].info;
478 return info && cpumask_test_cpu(cpu, &info->cpumask);
479#else
480 return 0;
481#endif
482}
333 483
334/* 484/*
335 * Code to create, activate, deactivate, and destroy hardwall rectangles. 485 * Code to create, activate, deactivate, and destroy hardwall resources.
336 */ 486 */
337 487
338/* Create a hardwall for the given rectangle */ 488/* Create a hardwall for the given resource */
339static struct hardwall_info *hardwall_create( 489static struct hardwall_info *hardwall_create(struct hardwall_type *hwt,
340 size_t size, const unsigned char __user *bits) 490 size_t size,
491 const unsigned char __user *bits)
341{ 492{
342 struct hardwall_info *iter, *rect; 493 struct hardwall_info *iter, *info;
343 struct cpumask mask; 494 struct cpumask mask;
344 unsigned long flags; 495 unsigned long flags;
345 int rc; 496 int rc;
@@ -370,55 +521,62 @@ static struct hardwall_info *hardwall_create(
370 } 521 }
371 } 522 }
372 523
373 /* Allocate a new rectangle optimistically. */ 524 /* Allocate a new hardwall_info optimistically. */
374 rect = kmalloc(sizeof(struct hardwall_info), 525 info = kmalloc(sizeof(struct hardwall_info),
375 GFP_KERNEL | __GFP_ZERO); 526 GFP_KERNEL | __GFP_ZERO);
376 if (rect == NULL) 527 if (info == NULL)
377 return ERR_PTR(-ENOMEM); 528 return ERR_PTR(-ENOMEM);
378 INIT_LIST_HEAD(&rect->task_head); 529 INIT_LIST_HEAD(&info->task_head);
530 info->type = hwt;
379 531
380 /* Compute the rectangle size and validate that it's plausible. */ 532 /* Compute the rectangle size and validate that it's plausible. */
381 rc = setup_rectangle(rect, &mask); 533 cpumask_copy(&info->cpumask, &mask);
382 if (rc != 0) { 534 info->id = find_first_bit(cpumask_bits(&mask), nr_cpumask_bits);
383 kfree(rect); 535 if (hwt->is_xdn) {
384 return ERR_PTR(rc); 536 rc = check_rectangle(info, &mask);
537 if (rc != 0) {
538 kfree(info);
539 return ERR_PTR(rc);
540 }
385 } 541 }
386 542
387 /* Confirm it doesn't overlap and add it to the list. */ 543 /* Confirm it doesn't overlap and add it to the list. */
388 spin_lock_irqsave(&hardwall_lock, flags); 544 spin_lock_irqsave(&hwt->lock, flags);
389 list_for_each_entry(iter, &rectangles, list) { 545 list_for_each_entry(iter, &hwt->list, list) {
390 if (overlaps(iter, rect)) { 546 if (cpumask_intersects(&iter->cpumask, &info->cpumask)) {
391 spin_unlock_irqrestore(&hardwall_lock, flags); 547 spin_unlock_irqrestore(&hwt->lock, flags);
392 kfree(rect); 548 kfree(info);
393 return ERR_PTR(-EBUSY); 549 return ERR_PTR(-EBUSY);
394 } 550 }
395 } 551 }
396 list_add_tail(&rect->list, &rectangles); 552 list_add_tail(&info->list, &hwt->list);
397 spin_unlock_irqrestore(&hardwall_lock, flags); 553 spin_unlock_irqrestore(&hwt->lock, flags);
398 554
399 /* Set up appropriate hardwalling on all affected cpus. */ 555 /* Set up appropriate hardwalling on all affected cpus. */
400 hardwall_setup(rect); 556 if (hwt->is_xdn)
557 hardwall_protect_rectangle(info);
401 558
402 /* Create a /proc/tile/hardwall entry. */ 559 /* Create a /proc/tile/hardwall entry. */
403 hardwall_add_proc(rect); 560 hardwall_add_proc(info);
404 561
405 return rect; 562 return info;
406} 563}
407 564
408/* Activate a given hardwall on this cpu for this process. */ 565/* Activate a given hardwall on this cpu for this process. */
409static int hardwall_activate(struct hardwall_info *rect) 566static int hardwall_activate(struct hardwall_info *info)
410{ 567{
411 int cpu, x, y; 568 int cpu;
412 unsigned long flags; 569 unsigned long flags;
413 struct task_struct *p = current; 570 struct task_struct *p = current;
414 struct thread_struct *ts = &p->thread; 571 struct thread_struct *ts = &p->thread;
572 struct hardwall_type *hwt;
415 573
416 /* Require a rectangle. */ 574 /* Require a hardwall. */
417 if (rect == NULL) 575 if (info == NULL)
418 return -ENODATA; 576 return -ENODATA;
419 577
420 /* Not allowed to activate a rectangle that is being torn down. */ 578 /* Not allowed to activate a hardwall that is being torn down. */
421 if (rect->teardown_in_progress) 579 if (info->teardown_in_progress)
422 return -EINVAL; 580 return -EINVAL;
423 581
424 /* 582 /*
@@ -428,78 +586,87 @@ static int hardwall_activate(struct hardwall_info *rect)
428 if (cpumask_weight(&p->cpus_allowed) != 1) 586 if (cpumask_weight(&p->cpus_allowed) != 1)
429 return -EPERM; 587 return -EPERM;
430 588
431 /* Make sure we are bound to a cpu in this rectangle. */ 589 /* Make sure we are bound to a cpu assigned to this resource. */
432 cpu = smp_processor_id(); 590 cpu = smp_processor_id();
433 BUG_ON(cpumask_first(&p->cpus_allowed) != cpu); 591 BUG_ON(cpumask_first(&p->cpus_allowed) != cpu);
434 x = cpu_x(cpu); 592 if (!cpumask_test_cpu(cpu, &info->cpumask))
435 y = cpu_y(cpu);
436 if (!contains(rect, x, y))
437 return -EINVAL; 593 return -EINVAL;
438 594
439 /* If we are already bound to this hardwall, it's a no-op. */ 595 /* If we are already bound to this hardwall, it's a no-op. */
440 if (ts->hardwall) { 596 hwt = info->type;
441 BUG_ON(ts->hardwall != rect); 597 if (ts->hardwall[hwt->index].info) {
598 BUG_ON(ts->hardwall[hwt->index].info != info);
442 return 0; 599 return 0;
443 } 600 }
444 601
445 /* Success! This process gets to use the user networks on this cpu. */ 602 /* Success! This process gets to use the resource on this cpu. */
446 ts->hardwall = rect; 603 ts->hardwall[hwt->index].info = info;
447 spin_lock_irqsave(&hardwall_lock, flags); 604 spin_lock_irqsave(&hwt->lock, flags);
448 list_add(&ts->hardwall_list, &rect->task_head); 605 list_add(&ts->hardwall[hwt->index].list, &info->task_head);
449 spin_unlock_irqrestore(&hardwall_lock, flags); 606 spin_unlock_irqrestore(&hwt->lock, flags);
450 grant_network_mpls(); 607 grant_hardwall_mpls(hwt);
451 printk(KERN_DEBUG "Pid %d (%s) activated for hardwall: cpu %d\n", 608 printk(KERN_DEBUG "Pid %d (%s) activated for %s hardwall: cpu %d\n",
452 p->pid, p->comm, cpu); 609 p->pid, p->comm, hwt->name, cpu);
453 return 0; 610 return 0;
454} 611}
455 612
456/* 613/*
457 * Deactivate a task's hardwall. Must hold hardwall_lock. 614 * Deactivate a task's hardwall. Must hold lock for hardwall_type.
458 * This method may be called from free_task(), so we don't want to 615 * This method may be called from free_task(), so we don't want to
459 * rely on too many fields of struct task_struct still being valid. 616 * rely on too many fields of struct task_struct still being valid.
460 * We assume the cpus_allowed, pid, and comm fields are still valid. 617 * We assume the cpus_allowed, pid, and comm fields are still valid.
461 */ 618 */
462static void _hardwall_deactivate(struct task_struct *task) 619static void _hardwall_deactivate(struct hardwall_type *hwt,
620 struct task_struct *task)
463{ 621{
464 struct thread_struct *ts = &task->thread; 622 struct thread_struct *ts = &task->thread;
465 623
466 if (cpumask_weight(&task->cpus_allowed) != 1) { 624 if (cpumask_weight(&task->cpus_allowed) != 1) {
467 pr_err("pid %d (%s) releasing networks with" 625 pr_err("pid %d (%s) releasing %s hardwall with"
468 " an affinity mask containing %d cpus!\n", 626 " an affinity mask containing %d cpus!\n",
469 task->pid, task->comm, 627 task->pid, task->comm, hwt->name,
470 cpumask_weight(&task->cpus_allowed)); 628 cpumask_weight(&task->cpus_allowed));
471 BUG(); 629 BUG();
472 } 630 }
473 631
474 BUG_ON(ts->hardwall == NULL); 632 BUG_ON(ts->hardwall[hwt->index].info == NULL);
475 ts->hardwall = NULL; 633 ts->hardwall[hwt->index].info = NULL;
476 list_del(&ts->hardwall_list); 634 list_del(&ts->hardwall[hwt->index].list);
477 if (task == current) 635 if (task == current)
478 restrict_network_mpls(); 636 restrict_hardwall_mpls(hwt);
479} 637}
480 638
481/* Deactivate a task's hardwall. */ 639/* Deactivate a task's hardwall. */
482int hardwall_deactivate(struct task_struct *task) 640static int hardwall_deactivate(struct hardwall_type *hwt,
641 struct task_struct *task)
483{ 642{
484 unsigned long flags; 643 unsigned long flags;
485 int activated; 644 int activated;
486 645
487 spin_lock_irqsave(&hardwall_lock, flags); 646 spin_lock_irqsave(&hwt->lock, flags);
488 activated = (task->thread.hardwall != NULL); 647 activated = (task->thread.hardwall[hwt->index].info != NULL);
489 if (activated) 648 if (activated)
490 _hardwall_deactivate(task); 649 _hardwall_deactivate(hwt, task);
491 spin_unlock_irqrestore(&hardwall_lock, flags); 650 spin_unlock_irqrestore(&hwt->lock, flags);
492 651
493 if (!activated) 652 if (!activated)
494 return -EINVAL; 653 return -EINVAL;
495 654
496 printk(KERN_DEBUG "Pid %d (%s) deactivated for hardwall: cpu %d\n", 655 printk(KERN_DEBUG "Pid %d (%s) deactivated for %s hardwall: cpu %d\n",
497 task->pid, task->comm, smp_processor_id()); 656 task->pid, task->comm, hwt->name, smp_processor_id());
498 return 0; 657 return 0;
499} 658}
500 659
501/* Stop a UDN switch before draining the network. */ 660void hardwall_deactivate_all(struct task_struct *task)
502static void stop_udn_switch(void *ignored) 661{
662 int i;
663 for (i = 0; i < HARDWALL_TYPES; ++i)
664 if (task->thread.hardwall[i].info)
665 hardwall_deactivate(&hardwall_types[i], task);
666}
667
668/* Stop the switch before draining the network. */
669static void stop_xdn_switch(void *arg)
503{ 670{
504#if !CHIP_HAS_REV1_XDN() 671#if !CHIP_HAS_REV1_XDN()
505 /* Freeze the switch and the demux. */ 672 /* Freeze the switch and the demux. */
@@ -507,13 +674,71 @@ static void stop_udn_switch(void *ignored)
507 SPR_UDN_SP_FREEZE__SP_FRZ_MASK | 674 SPR_UDN_SP_FREEZE__SP_FRZ_MASK |
508 SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK | 675 SPR_UDN_SP_FREEZE__DEMUX_FRZ_MASK |
509 SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK); 676 SPR_UDN_SP_FREEZE__NON_DEST_EXT_MASK);
677#else
678 /*
679 * Drop all packets bound for the core or off the edge.
680 * We rely on the normal hardwall protection setup code
681 * to have set the low four bits to trigger firewall interrupts,
682 * and shift those bits up to trigger "drop on send" semantics,
683 * plus adding "drop on send to core" for all switches.
684 * In practice it seems the switches latch the DIRECTION_PROTECT
685 * SPR so they won't start dropping if they're already
686 * delivering the last message to the core, but it doesn't
687 * hurt to enable it here.
688 */
689 struct hardwall_type *hwt = arg;
690 unsigned long protect = mfspr_XDN(hwt, DIRECTION_PROTECT);
691 mtspr_XDN(hwt, DIRECTION_PROTECT, (protect | C_PROTECT) << 5);
510#endif 692#endif
511} 693}
512 694
695static void empty_xdn_demuxes(struct hardwall_type *hwt)
696{
697#ifndef __tilepro__
698 if (hwt->is_idn) {
699 while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 0))
700 (void) __tile_idn0_receive();
701 while (__insn_mfspr(SPR_IDN_DATA_AVAIL) & (1 << 1))
702 (void) __tile_idn1_receive();
703 return;
704 }
705#endif
706 while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0))
707 (void) __tile_udn0_receive();
708 while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
709 (void) __tile_udn1_receive();
710 while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
711 (void) __tile_udn2_receive();
712 while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
713 (void) __tile_udn3_receive();
714}
715
513/* Drain all the state from a stopped switch. */ 716/* Drain all the state from a stopped switch. */
514static void drain_udn_switch(void *ignored) 717static void drain_xdn_switch(void *arg)
515{ 718{
516#if !CHIP_HAS_REV1_XDN() 719 struct hardwall_info *info = arg;
720 struct hardwall_type *hwt = info->type;
721
722#if CHIP_HAS_REV1_XDN()
723 /*
724 * The switches have been configured to drop any messages
725 * destined for cores (or off the edge of the rectangle).
726 * But the current message may continue to be delivered,
727 * so we wait until all the cores have finished any pending
728 * messages before we stop draining.
729 */
730 int pending = mfspr_XDN(hwt, PENDING);
731 while (pending--) {
732 empty_xdn_demuxes(hwt);
733 if (hwt->is_idn)
734 __tile_idn_send(0);
735 else
736 __tile_udn_send(0);
737 }
738 atomic_dec(&info->xdn_pending_count);
739 while (atomic_read(&info->xdn_pending_count))
740 empty_xdn_demuxes(hwt);
741#else
517 int i; 742 int i;
518 int from_tile_words, ca_count; 743 int from_tile_words, ca_count;
519 744
@@ -533,15 +758,7 @@ static void drain_udn_switch(void *ignored)
533 (void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO); 758 (void) __insn_mfspr(SPR_UDN_DEMUX_WRITE_FIFO);
534 759
535 /* Empty out demuxes. */ 760 /* Empty out demuxes. */
536 while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 0)) 761 empty_xdn_demuxes(hwt);
537 (void) __tile_udn0_receive();
538 while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 1))
539 (void) __tile_udn1_receive();
540 while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 2))
541 (void) __tile_udn2_receive();
542 while (__insn_mfspr(SPR_UDN_DATA_AVAIL) & (1 << 3))
543 (void) __tile_udn3_receive();
544 BUG_ON((__insn_mfspr(SPR_UDN_DATA_AVAIL) & 0xF) != 0);
545 762
546 /* Empty out catch all. */ 763 /* Empty out catch all. */
547 ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT); 764 ca_count = __insn_mfspr(SPR_UDN_DEMUX_CA_COUNT);
@@ -563,21 +780,25 @@ static void drain_udn_switch(void *ignored)
563#endif 780#endif
564} 781}
565 782
566/* Reset random UDN state registers at boot up and during hardwall teardown. */ 783/* Reset random XDN state registers at boot up and during hardwall teardown. */
567void reset_network_state(void) 784static void reset_xdn_network_state(struct hardwall_type *hwt)
568{ 785{
569#if !CHIP_HAS_REV1_XDN() 786 if (hwt->disabled)
570 /* Reset UDN coordinates to their standard value */
571 unsigned int cpu = smp_processor_id();
572 unsigned int x = cpu % smp_width;
573 unsigned int y = cpu / smp_width;
574#endif
575
576 if (udn_disabled)
577 return; 787 return;
578 788
789 /* Clear out other random registers so we have a clean slate. */
790 mtspr_XDN(hwt, DIRECTION_PROTECT, 0);
791 mtspr_XDN(hwt, AVAIL_EN, 0);
792 mtspr_XDN(hwt, DEADLOCK_TIMEOUT, 0);
793
579#if !CHIP_HAS_REV1_XDN() 794#if !CHIP_HAS_REV1_XDN()
580 __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7)); 795 /* Reset UDN coordinates to their standard value */
796 {
797 unsigned int cpu = smp_processor_id();
798 unsigned int x = cpu % smp_width;
799 unsigned int y = cpu / smp_width;
800 __insn_mtspr(SPR_UDN_TILE_COORD, (x << 18) | (y << 7));
801 }
581 802
582 /* Set demux tags to predefined values and enable them. */ 803 /* Set demux tags to predefined values and enable them. */
583 __insn_mtspr(SPR_UDN_TAG_VALID, 0xf); 804 __insn_mtspr(SPR_UDN_TAG_VALID, 0xf);
@@ -585,56 +806,50 @@ void reset_network_state(void)
585 __insn_mtspr(SPR_UDN_TAG_1, (1 << 1)); 806 __insn_mtspr(SPR_UDN_TAG_1, (1 << 1));
586 __insn_mtspr(SPR_UDN_TAG_2, (1 << 2)); 807 __insn_mtspr(SPR_UDN_TAG_2, (1 << 2));
587 __insn_mtspr(SPR_UDN_TAG_3, (1 << 3)); 808 __insn_mtspr(SPR_UDN_TAG_3, (1 << 3));
588#endif
589 809
590 /* Clear out other random registers so we have a clean slate. */ 810 /* Set other rev0 random registers to a clean state. */
591 __insn_mtspr(SPR_UDN_AVAIL_EN, 0);
592 __insn_mtspr(SPR_UDN_DEADLOCK_TIMEOUT, 0);
593#if !CHIP_HAS_REV1_XDN()
594 __insn_mtspr(SPR_UDN_REFILL_EN, 0); 811 __insn_mtspr(SPR_UDN_REFILL_EN, 0);
595 __insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0); 812 __insn_mtspr(SPR_UDN_DEMUX_QUEUE_SEL, 0);
596 __insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0); 813 __insn_mtspr(SPR_UDN_SP_FIFO_SEL, 0);
597#endif
598 814
599 /* Start the switch and demux. */ 815 /* Start the switch and demux. */
600#if !CHIP_HAS_REV1_XDN()
601 __insn_mtspr(SPR_UDN_SP_FREEZE, 0); 816 __insn_mtspr(SPR_UDN_SP_FREEZE, 0);
602#endif 817#endif
603} 818}
604 819
605/* Restart a UDN switch after draining. */ 820void reset_network_state(void)
606static void restart_udn_switch(void *ignored)
607{ 821{
608 reset_network_state(); 822 reset_xdn_network_state(&hardwall_types[HARDWALL_UDN]);
609 823#ifndef __tilepro__
610 /* Disable firewall interrupts. */ 824 reset_xdn_network_state(&hardwall_types[HARDWALL_IDN]);
611 __insn_mtspr(SPR_UDN_DIRECTION_PROTECT, 0); 825#endif
612 disable_firewall_interrupts();
613} 826}
614 827
615/* Build a struct cpumask containing all valid tiles in bounding rectangle. */ 828/* Restart an XDN switch after draining. */
616static void fill_mask(struct hardwall_info *r, struct cpumask *result) 829static void restart_xdn_switch(void *arg)
617{ 830{
618 int x, y, cpu; 831 struct hardwall_type *hwt = arg;
619 832
620 cpumask_clear(result); 833#if CHIP_HAS_REV1_XDN()
834 /* One last drain step to avoid races with injection and draining. */
835 empty_xdn_demuxes(hwt);
836#endif
621 837
622 cpu = r->ulhc_y * smp_width + r->ulhc_x; 838 reset_xdn_network_state(hwt);
623 for (y = 0; y < r->height; ++y, cpu += smp_width - r->width) { 839
624 for (x = 0; x < r->width; ++x, ++cpu) 840 /* Disable firewall interrupts. */
625 cpu_online_set(cpu, result); 841 disable_firewall_interrupts(hwt);
626 }
627} 842}
628 843
629/* Last reference to a hardwall is gone, so clear the network. */ 844/* Last reference to a hardwall is gone, so clear the network. */
630static void hardwall_destroy(struct hardwall_info *rect) 845static void hardwall_destroy(struct hardwall_info *info)
631{ 846{
632 struct task_struct *task; 847 struct task_struct *task;
848 struct hardwall_type *hwt;
633 unsigned long flags; 849 unsigned long flags;
634 struct cpumask mask;
635 850
636 /* Make sure this file actually represents a rectangle. */ 851 /* Make sure this file actually represents a hardwall. */
637 if (rect == NULL) 852 if (info == NULL)
638 return; 853 return;
639 854
640 /* 855 /*
@@ -644,39 +859,53 @@ static void hardwall_destroy(struct hardwall_info *rect)
644 * deactivate any remaining tasks before freeing the 859 * deactivate any remaining tasks before freeing the
645 * hardwall_info object itself. 860 * hardwall_info object itself.
646 */ 861 */
647 spin_lock_irqsave(&hardwall_lock, flags); 862 hwt = info->type;
648 list_for_each_entry(task, &rect->task_head, thread.hardwall_list) 863 info->teardown_in_progress = 1;
649 _hardwall_deactivate(task); 864 spin_lock_irqsave(&hwt->lock, flags);
650 spin_unlock_irqrestore(&hardwall_lock, flags); 865 list_for_each_entry(task, &info->task_head,
651 866 thread.hardwall[hwt->index].list)
652 /* Drain the UDN. */ 867 _hardwall_deactivate(hwt, task);
653 printk(KERN_DEBUG "Clearing hardwall rectangle %dx%d %d,%d\n", 868 spin_unlock_irqrestore(&hwt->lock, flags);
654 rect->width, rect->height, rect->ulhc_x, rect->ulhc_y); 869
655 fill_mask(rect, &mask); 870 if (hwt->is_xdn) {
656 on_each_cpu_mask(&mask, stop_udn_switch, NULL, 1); 871 /* Configure the switches for draining the user network. */
657 on_each_cpu_mask(&mask, drain_udn_switch, NULL, 1); 872 printk(KERN_DEBUG
873 "Clearing %s hardwall rectangle %dx%d %d,%d\n",
874 hwt->name, info->width, info->height,
875 info->ulhc_x, info->ulhc_y);
876 on_each_cpu_mask(&info->cpumask, stop_xdn_switch, hwt, 1);
877
878 /* Drain the network. */
879#if CHIP_HAS_REV1_XDN()
880 atomic_set(&info->xdn_pending_count,
881 cpumask_weight(&info->cpumask));
882 on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 0);
883#else
884 on_each_cpu_mask(&info->cpumask, drain_xdn_switch, info, 1);
885#endif
658 886
659 /* Restart switch and disable firewall. */ 887 /* Restart switch and disable firewall. */
660 on_each_cpu_mask(&mask, restart_udn_switch, NULL, 1); 888 on_each_cpu_mask(&info->cpumask, restart_xdn_switch, hwt, 1);
889 }
661 890
662 /* Remove the /proc/tile/hardwall entry. */ 891 /* Remove the /proc/tile/hardwall entry. */
663 hardwall_remove_proc(rect); 892 hardwall_remove_proc(info);
664 893
665 /* Now free the rectangle from the list. */ 894 /* Now free the hardwall from the list. */
666 spin_lock_irqsave(&hardwall_lock, flags); 895 spin_lock_irqsave(&hwt->lock, flags);
667 BUG_ON(!list_empty(&rect->task_head)); 896 BUG_ON(!list_empty(&info->task_head));
668 list_del(&rect->list); 897 list_del(&info->list);
669 spin_unlock_irqrestore(&hardwall_lock, flags); 898 spin_unlock_irqrestore(&hwt->lock, flags);
670 kfree(rect); 899 kfree(info);
671} 900}
672 901
673 902
674static int hardwall_proc_show(struct seq_file *sf, void *v) 903static int hardwall_proc_show(struct seq_file *sf, void *v)
675{ 904{
676 struct hardwall_info *rect = sf->private; 905 struct hardwall_info *info = sf->private;
677 char buf[256]; 906 char buf[256];
678 907
679 int rc = cpulist_scnprintf(buf, sizeof(buf), &rect->cpumask); 908 int rc = cpulist_scnprintf(buf, sizeof(buf), &info->cpumask);
680 buf[rc++] = '\n'; 909 buf[rc++] = '\n';
681 seq_write(sf, buf, rc); 910 seq_write(sf, buf, rc);
682 return 0; 911 return 0;
@@ -695,31 +924,45 @@ static const struct file_operations hardwall_proc_fops = {
695 .release = single_release, 924 .release = single_release,
696}; 925};
697 926
698static void hardwall_add_proc(struct hardwall_info *rect) 927static void hardwall_add_proc(struct hardwall_info *info)
699{ 928{
700 char buf[64]; 929 char buf[64];
701 snprintf(buf, sizeof(buf), "%d", rect->id); 930 snprintf(buf, sizeof(buf), "%d", info->id);
702 proc_create_data(buf, 0444, hardwall_proc_dir, 931 proc_create_data(buf, 0444, info->type->proc_dir,
703 &hardwall_proc_fops, rect); 932 &hardwall_proc_fops, info);
704} 933}
705 934
706static void hardwall_remove_proc(struct hardwall_info *rect) 935static void hardwall_remove_proc(struct hardwall_info *info)
707{ 936{
708 char buf[64]; 937 char buf[64];
709 snprintf(buf, sizeof(buf), "%d", rect->id); 938 snprintf(buf, sizeof(buf), "%d", info->id);
710 remove_proc_entry(buf, hardwall_proc_dir); 939 remove_proc_entry(buf, info->type->proc_dir);
711} 940}
712 941
713int proc_pid_hardwall(struct task_struct *task, char *buffer) 942int proc_pid_hardwall(struct task_struct *task, char *buffer)
714{ 943{
715 struct hardwall_info *rect = task->thread.hardwall; 944 int i;
716 return rect ? sprintf(buffer, "%d\n", rect->id) : 0; 945 int n = 0;
946 for (i = 0; i < HARDWALL_TYPES; ++i) {
947 struct hardwall_info *info = task->thread.hardwall[i].info;
948 if (info)
949 n += sprintf(&buffer[n], "%s: %d\n",
950 info->type->name, info->id);
951 }
952 return n;
717} 953}
718 954
719void proc_tile_hardwall_init(struct proc_dir_entry *root) 955void proc_tile_hardwall_init(struct proc_dir_entry *root)
720{ 956{
721 if (!udn_disabled) 957 int i;
722 hardwall_proc_dir = proc_mkdir("hardwall", root); 958 for (i = 0; i < HARDWALL_TYPES; ++i) {
959 struct hardwall_type *hwt = &hardwall_types[i];
960 if (hwt->disabled)
961 continue;
962 if (hardwall_proc_dir == NULL)
963 hardwall_proc_dir = proc_mkdir("hardwall", root);
964 hwt->proc_dir = proc_mkdir(hwt->name, hardwall_proc_dir);
965 }
723} 966}
724 967
725 968
@@ -729,34 +972,45 @@ void proc_tile_hardwall_init(struct proc_dir_entry *root)
729 972
730static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b) 973static long hardwall_ioctl(struct file *file, unsigned int a, unsigned long b)
731{ 974{
732 struct hardwall_info *rect = file->private_data; 975 struct hardwall_info *info = file->private_data;
976 int minor = iminor(file->f_mapping->host);
977 struct hardwall_type* hwt;
733 978
734 if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE) 979 if (_IOC_TYPE(a) != HARDWALL_IOCTL_BASE)
735 return -EINVAL; 980 return -EINVAL;
736 981
982 BUILD_BUG_ON(HARDWALL_TYPES != _HARDWALL_TYPES);
983 BUILD_BUG_ON(HARDWALL_TYPES !=
984 sizeof(hardwall_types)/sizeof(hardwall_types[0]));
985
986 if (minor < 0 || minor >= HARDWALL_TYPES)
987 return -EINVAL;
988 hwt = &hardwall_types[minor];
989 WARN_ON(info && hwt != info->type);
990
737 switch (_IOC_NR(a)) { 991 switch (_IOC_NR(a)) {
738 case _HARDWALL_CREATE: 992 case _HARDWALL_CREATE:
739 if (udn_disabled) 993 if (hwt->disabled)
740 return -ENOSYS; 994 return -ENOSYS;
741 if (rect != NULL) 995 if (info != NULL)
742 return -EALREADY; 996 return -EALREADY;
743 rect = hardwall_create(_IOC_SIZE(a), 997 info = hardwall_create(hwt, _IOC_SIZE(a),
744 (const unsigned char __user *)b); 998 (const unsigned char __user *)b);
745 if (IS_ERR(rect)) 999 if (IS_ERR(info))
746 return PTR_ERR(rect); 1000 return PTR_ERR(info);
747 file->private_data = rect; 1001 file->private_data = info;
748 return 0; 1002 return 0;
749 1003
750 case _HARDWALL_ACTIVATE: 1004 case _HARDWALL_ACTIVATE:
751 return hardwall_activate(rect); 1005 return hardwall_activate(info);
752 1006
753 case _HARDWALL_DEACTIVATE: 1007 case _HARDWALL_DEACTIVATE:
754 if (current->thread.hardwall != rect) 1008 if (current->thread.hardwall[hwt->index].info != info)
755 return -EINVAL; 1009 return -EINVAL;
756 return hardwall_deactivate(current); 1010 return hardwall_deactivate(hwt, current);
757 1011
758 case _HARDWALL_GET_ID: 1012 case _HARDWALL_GET_ID:
759 return rect ? rect->id : -EINVAL; 1013 return info ? info->id : -EINVAL;
760 1014
761 default: 1015 default:
762 return -EINVAL; 1016 return -EINVAL;
@@ -775,26 +1029,28 @@ static long hardwall_compat_ioctl(struct file *file,
775/* The user process closed the file; revoke access to user networks. */ 1029/* The user process closed the file; revoke access to user networks. */
776static int hardwall_flush(struct file *file, fl_owner_t owner) 1030static int hardwall_flush(struct file *file, fl_owner_t owner)
777{ 1031{
778 struct hardwall_info *rect = file->private_data; 1032 struct hardwall_info *info = file->private_data;
779 struct task_struct *task, *tmp; 1033 struct task_struct *task, *tmp;
780 unsigned long flags; 1034 unsigned long flags;
781 1035
782 if (rect) { 1036 if (info) {
783 /* 1037 /*
784 * NOTE: if multiple threads are activated on this hardwall 1038 * NOTE: if multiple threads are activated on this hardwall
785 * file, the other threads will continue having access to the 1039 * file, the other threads will continue having access to the
786 * UDN until they are context-switched out and back in again. 1040 * user network until they are context-switched out and back
1041 * in again.
787 * 1042 *
788 * NOTE: A NULL files pointer means the task is being torn 1043 * NOTE: A NULL files pointer means the task is being torn
789 * down, so in that case we also deactivate it. 1044 * down, so in that case we also deactivate it.
790 */ 1045 */
791 spin_lock_irqsave(&hardwall_lock, flags); 1046 struct hardwall_type *hwt = info->type;
792 list_for_each_entry_safe(task, tmp, &rect->task_head, 1047 spin_lock_irqsave(&hwt->lock, flags);
793 thread.hardwall_list) { 1048 list_for_each_entry_safe(task, tmp, &info->task_head,
1049 thread.hardwall[hwt->index].list) {
794 if (task->files == owner || task->files == NULL) 1050 if (task->files == owner || task->files == NULL)
795 _hardwall_deactivate(task); 1051 _hardwall_deactivate(hwt, task);
796 } 1052 }
797 spin_unlock_irqrestore(&hardwall_lock, flags); 1053 spin_unlock_irqrestore(&hwt->lock, flags);
798 } 1054 }
799 1055
800 return 0; 1056 return 0;
@@ -824,11 +1080,11 @@ static int __init dev_hardwall_init(void)
824 int rc; 1080 int rc;
825 dev_t dev; 1081 dev_t dev;
826 1082
827 rc = alloc_chrdev_region(&dev, 0, 1, "hardwall"); 1083 rc = alloc_chrdev_region(&dev, 0, HARDWALL_TYPES, "hardwall");
828 if (rc < 0) 1084 if (rc < 0)
829 return rc; 1085 return rc;
830 cdev_init(&hardwall_dev, &dev_hardwall_fops); 1086 cdev_init(&hardwall_dev, &dev_hardwall_fops);
831 rc = cdev_add(&hardwall_dev, dev, 1); 1087 rc = cdev_add(&hardwall_dev, dev, HARDWALL_TYPES);
832 if (rc < 0) 1088 if (rc < 0)
833 return rc; 1089 return rc;
834 1090
diff --git a/arch/tile/kernel/head_32.S b/arch/tile/kernel/head_32.S
index 1a39b7c1c87e..f71bfeeaf1a9 100644
--- a/arch/tile/kernel/head_32.S
+++ b/arch/tile/kernel/head_32.S
@@ -69,7 +69,7 @@ ENTRY(_start)
69 } 69 }
70 { 70 {
71 moveli lr, lo16(1f) 71 moveli lr, lo16(1f)
72 move r5, zero 72 moveli r5, CTX_PAGE_FLAG
73 } 73 }
74 { 74 {
75 auli lr, lr, ha16(1f) 75 auli lr, lr, ha16(1f)
@@ -141,11 +141,11 @@ ENTRY(empty_zero_page)
141 141
142 .macro PTE va, cpa, bits1, no_org=0 142 .macro PTE va, cpa, bits1, no_org=0
143 .ifeq \no_org 143 .ifeq \no_org
144 .org swapper_pg_dir + HV_L1_INDEX(\va) * HV_PTE_SIZE 144 .org swapper_pg_dir + PGD_INDEX(\va) * HV_PTE_SIZE
145 .endif 145 .endif
146 .word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \ 146 .word HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED | \
147 (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) 147 (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE)
148 .word (\bits1) | (HV_CPA_TO_PFN(\cpa) << (HV_PTE_INDEX_PFN - 32)) 148 .word (\bits1) | (HV_CPA_TO_PTFN(\cpa) << (HV_PTE_INDEX_PTFN - 32))
149 .endm 149 .endm
150 150
151__PAGE_ALIGNED_DATA 151__PAGE_ALIGNED_DATA
@@ -166,7 +166,7 @@ ENTRY(swapper_pg_dir)
166 /* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */ 166 /* The true text VAs are mapped as VA = PA + MEM_SV_INTRPT */
167 PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \ 167 PTE MEM_SV_INTRPT, 0, (1 << (HV_PTE_INDEX_READABLE - 32)) | \
168 (1 << (HV_PTE_INDEX_EXECUTABLE - 32)) 168 (1 << (HV_PTE_INDEX_EXECUTABLE - 32))
169 .org swapper_pg_dir + HV_L1_SIZE 169 .org swapper_pg_dir + PGDIR_SIZE
170 END(swapper_pg_dir) 170 END(swapper_pg_dir)
171 171
172 /* 172 /*
diff --git a/arch/tile/kernel/head_64.S b/arch/tile/kernel/head_64.S
index 6bc3a932fe45..f9a2734f7b82 100644
--- a/arch/tile/kernel/head_64.S
+++ b/arch/tile/kernel/head_64.S
@@ -114,7 +114,7 @@ ENTRY(_start)
114 shl16insli r0, r0, hw0(swapper_pg_dir - PAGE_OFFSET) 114 shl16insli r0, r0, hw0(swapper_pg_dir - PAGE_OFFSET)
115 } 115 }
116 { 116 {
117 move r3, zero 117 moveli r3, CTX_PAGE_FLAG
118 j hv_install_context 118 j hv_install_context
119 } 119 }
1201: 1201:
@@ -210,19 +210,19 @@ ENTRY(empty_zero_page)
210 .macro PTE cpa, bits1 210 .macro PTE cpa, bits1
211 .quad HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED |\ 211 .quad HV_PTE_PAGE | HV_PTE_DIRTY | HV_PTE_PRESENT | HV_PTE_ACCESSED |\
212 HV_PTE_GLOBAL | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) |\ 212 HV_PTE_GLOBAL | (HV_PTE_MODE_CACHE_NO_L3 << HV_PTE_INDEX_MODE) |\
213 (\bits1) | (HV_CPA_TO_PFN(\cpa) << HV_PTE_INDEX_PFN) 213 (\bits1) | (HV_CPA_TO_PTFN(\cpa) << HV_PTE_INDEX_PTFN)
214 .endm 214 .endm
215 215
216__PAGE_ALIGNED_DATA 216__PAGE_ALIGNED_DATA
217 .align PAGE_SIZE 217 .align PAGE_SIZE
218ENTRY(swapper_pg_dir) 218ENTRY(swapper_pg_dir)
219 .org swapper_pg_dir + HV_L0_INDEX(PAGE_OFFSET) * HV_PTE_SIZE 219 .org swapper_pg_dir + PGD_INDEX(PAGE_OFFSET) * HV_PTE_SIZE
220.Lsv_data_pmd: 220.Lsv_data_pmd:
221 .quad 0 /* PTE temp_data_pmd - PAGE_OFFSET, 0 */ 221 .quad 0 /* PTE temp_data_pmd - PAGE_OFFSET, 0 */
222 .org swapper_pg_dir + HV_L0_INDEX(MEM_SV_START) * HV_PTE_SIZE 222 .org swapper_pg_dir + PGD_INDEX(MEM_SV_START) * HV_PTE_SIZE
223.Lsv_code_pmd: 223.Lsv_code_pmd:
224 .quad 0 /* PTE temp_code_pmd - PAGE_OFFSET, 0 */ 224 .quad 0 /* PTE temp_code_pmd - PAGE_OFFSET, 0 */
225 .org swapper_pg_dir + HV_L0_SIZE 225 .org swapper_pg_dir + SIZEOF_PGD
226 END(swapper_pg_dir) 226 END(swapper_pg_dir)
227 227
228 .align HV_PAGE_TABLE_ALIGN 228 .align HV_PAGE_TABLE_ALIGN
@@ -233,11 +233,11 @@ ENTRY(temp_data_pmd)
233 * permissions later. 233 * permissions later.
234 */ 234 */
235 .set addr, 0 235 .set addr, 0
236 .rept HV_L1_ENTRIES 236 .rept PTRS_PER_PMD
237 PTE addr, HV_PTE_READABLE | HV_PTE_WRITABLE 237 PTE addr, HV_PTE_READABLE | HV_PTE_WRITABLE
238 .set addr, addr + HV_PAGE_SIZE_LARGE 238 .set addr, addr + HPAGE_SIZE
239 .endr 239 .endr
240 .org temp_data_pmd + HV_L1_SIZE 240 .org temp_data_pmd + SIZEOF_PMD
241 END(temp_data_pmd) 241 END(temp_data_pmd)
242 242
243 .align HV_PAGE_TABLE_ALIGN 243 .align HV_PAGE_TABLE_ALIGN
@@ -248,11 +248,11 @@ ENTRY(temp_code_pmd)
248 * permissions later. 248 * permissions later.
249 */ 249 */
250 .set addr, 0 250 .set addr, 0
251 .rept HV_L1_ENTRIES 251 .rept PTRS_PER_PMD
252 PTE addr, HV_PTE_READABLE | HV_PTE_EXECUTABLE 252 PTE addr, HV_PTE_READABLE | HV_PTE_EXECUTABLE
253 .set addr, addr + HV_PAGE_SIZE_LARGE 253 .set addr, addr + HPAGE_SIZE
254 .endr 254 .endr
255 .org temp_code_pmd + HV_L1_SIZE 255 .org temp_code_pmd + SIZEOF_PMD
256 END(temp_code_pmd) 256 END(temp_code_pmd)
257 257
258 /* 258 /*
diff --git a/arch/tile/kernel/hvglue.lds b/arch/tile/kernel/hvglue.lds
index 2b7cd0a659a9..d44c5a67a1ed 100644
--- a/arch/tile/kernel/hvglue.lds
+++ b/arch/tile/kernel/hvglue.lds
@@ -55,4 +55,5 @@ hv_store_mapping = TEXT_OFFSET + 0x106a0;
55hv_inquire_realpa = TEXT_OFFSET + 0x106c0; 55hv_inquire_realpa = TEXT_OFFSET + 0x106c0;
56hv_flush_all = TEXT_OFFSET + 0x106e0; 56hv_flush_all = TEXT_OFFSET + 0x106e0;
57hv_get_ipi_pte = TEXT_OFFSET + 0x10700; 57hv_get_ipi_pte = TEXT_OFFSET + 0x10700;
58hv_glue_internals = TEXT_OFFSET + 0x10720; 58hv_set_pte_super_shift = TEXT_OFFSET + 0x10720;
59hv_glue_internals = TEXT_OFFSET + 0x10740;
diff --git a/arch/tile/kernel/intvec_64.S b/arch/tile/kernel/intvec_64.S
index 30ae76e50c44..7c06d597ffd0 100644
--- a/arch/tile/kernel/intvec_64.S
+++ b/arch/tile/kernel/intvec_64.S
@@ -220,7 +220,9 @@ intvec_\vecname:
220 * This routine saves just the first four registers, plus the 220 * This routine saves just the first four registers, plus the
221 * stack context so we can do proper backtracing right away, 221 * stack context so we can do proper backtracing right away,
222 * and defers to handle_interrupt to save the rest. 222 * and defers to handle_interrupt to save the rest.
223 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum. 223 * The backtracer needs pc, ex1, lr, sp, r52, and faultnum,
224 * and needs sp set to its final location at the bottom of
225 * the stack frame.
224 */ 226 */
225 addli r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP) 227 addli r0, r0, PTREGS_OFFSET_LR - (PTREGS_SIZE + KSTK_PTREGS_GAP)
226 wh64 r0 /* cache line 7 */ 228 wh64 r0 /* cache line 7 */
@@ -450,23 +452,6 @@ intvec_\vecname:
450 push_reg r5, r52 452 push_reg r5, r52
451 st r52, r4 453 st r52, r4
452 454
453 /* Load tp with our per-cpu offset. */
454#ifdef CONFIG_SMP
455 {
456 mfspr r20, SPR_SYSTEM_SAVE_K_0
457 moveli r21, hw2_last(__per_cpu_offset)
458 }
459 {
460 shl16insli r21, r21, hw1(__per_cpu_offset)
461 bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
462 }
463 shl16insli r21, r21, hw0(__per_cpu_offset)
464 shl3add r20, r20, r21
465 ld tp, r20
466#else
467 move tp, zero
468#endif
469
470 /* 455 /*
471 * If we will be returning to the kernel, we will need to 456 * If we will be returning to the kernel, we will need to
472 * reset the interrupt masks to the state they had before. 457 * reset the interrupt masks to the state they had before.
@@ -489,6 +474,44 @@ intvec_\vecname:
489 .endif 474 .endif
490 st r21, r32 475 st r21, r32
491 476
477 /*
478 * we've captured enough state to the stack (including in
479 * particular our EX_CONTEXT state) that we can now release
480 * the interrupt critical section and replace it with our
481 * standard "interrupts disabled" mask value. This allows
482 * synchronous interrupts (and profile interrupts) to punch
483 * through from this point onwards.
484 *
485 * It's important that no code before this point touch memory
486 * other than our own stack (to keep the invariant that this
487 * is all that gets touched under ICS), and that no code after
488 * this point reference any interrupt-specific SPR, in particular
489 * the EX_CONTEXT_K_ values.
490 */
491 .ifc \function,handle_nmi
492 IRQ_DISABLE_ALL(r20)
493 .else
494 IRQ_DISABLE(r20, r21)
495 .endif
496 mtspr INTERRUPT_CRITICAL_SECTION, zero
497
498 /* Load tp with our per-cpu offset. */
499#ifdef CONFIG_SMP
500 {
501 mfspr r20, SPR_SYSTEM_SAVE_K_0
502 moveli r21, hw2_last(__per_cpu_offset)
503 }
504 {
505 shl16insli r21, r21, hw1(__per_cpu_offset)
506 bfextu r20, r20, 0, LOG2_THREAD_SIZE-1
507 }
508 shl16insli r21, r21, hw0(__per_cpu_offset)
509 shl3add r20, r20, r21
510 ld tp, r20
511#else
512 move tp, zero
513#endif
514
492#ifdef __COLLECT_LINKER_FEEDBACK__ 515#ifdef __COLLECT_LINKER_FEEDBACK__
493 /* 516 /*
494 * Notify the feedback routines that we were in the 517 * Notify the feedback routines that we were in the
@@ -513,21 +536,6 @@ intvec_\vecname:
513#endif 536#endif
514 537
515 /* 538 /*
516 * we've captured enough state to the stack (including in
517 * particular our EX_CONTEXT state) that we can now release
518 * the interrupt critical section and replace it with our
519 * standard "interrupts disabled" mask value. This allows
520 * synchronous interrupts (and profile interrupts) to punch
521 * through from this point onwards.
522 */
523 .ifc \function,handle_nmi
524 IRQ_DISABLE_ALL(r20)
525 .else
526 IRQ_DISABLE(r20, r21)
527 .endif
528 mtspr INTERRUPT_CRITICAL_SECTION, zero
529
530 /*
531 * Prepare the first 256 stack bytes to be rapidly accessible 539 * Prepare the first 256 stack bytes to be rapidly accessible
532 * without having to fetch the background data. 540 * without having to fetch the background data.
533 */ 541 */
@@ -736,9 +744,10 @@ STD_ENTRY(interrupt_return)
736 beqzt r30, .Lrestore_regs 744 beqzt r30, .Lrestore_regs
737 j 3f 745 j 3f
7382: TRACE_IRQS_ON 7462: TRACE_IRQS_ON
747 IRQ_ENABLE_LOAD(r20, r21)
739 movei r0, 1 748 movei r0, 1
740 mtspr INTERRUPT_CRITICAL_SECTION, r0 749 mtspr INTERRUPT_CRITICAL_SECTION, r0
741 IRQ_ENABLE(r20, r21) 750 IRQ_ENABLE_APPLY(r20, r21)
742 beqzt r30, .Lrestore_regs 751 beqzt r30, .Lrestore_regs
7433: 7523:
744 753
@@ -755,7 +764,6 @@ STD_ENTRY(interrupt_return)
755 * that will save some cycles if this turns out to be a syscall. 764 * that will save some cycles if this turns out to be a syscall.
756 */ 765 */
757.Lrestore_regs: 766.Lrestore_regs:
758 FEEDBACK_REENTER(interrupt_return) /* called from elsewhere */
759 767
760 /* 768 /*
761 * Rotate so we have one high bit and one low bit to test. 769 * Rotate so we have one high bit and one low bit to test.
@@ -1249,7 +1257,7 @@ STD_ENTRY(fill_ra_stack)
1249 int_hand INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign 1257 int_hand INT_UNALIGN_DATA, UNALIGN_DATA, int_unalign
1250 int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault 1258 int_hand INT_DTLB_MISS, DTLB_MISS, do_page_fault
1251 int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault 1259 int_hand INT_DTLB_ACCESS, DTLB_ACCESS, do_page_fault
1252 int_hand INT_IDN_FIREWALL, IDN_FIREWALL, bad_intr 1260 int_hand INT_IDN_FIREWALL, IDN_FIREWALL, do_hardwall_trap
1253 int_hand INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap 1261 int_hand INT_UDN_FIREWALL, UDN_FIREWALL, do_hardwall_trap
1254 int_hand INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt 1262 int_hand INT_TILE_TIMER, TILE_TIMER, do_timer_interrupt
1255 int_hand INT_IDN_TIMER, IDN_TIMER, bad_intr 1263 int_hand INT_IDN_TIMER, IDN_TIMER, bad_intr
diff --git a/arch/tile/kernel/machine_kexec.c b/arch/tile/kernel/machine_kexec.c
index 6255f2eab112..f0b54a934712 100644
--- a/arch/tile/kernel/machine_kexec.c
+++ b/arch/tile/kernel/machine_kexec.c
@@ -31,6 +31,8 @@
31#include <asm/pgalloc.h> 31#include <asm/pgalloc.h>
32#include <asm/cacheflush.h> 32#include <asm/cacheflush.h>
33#include <asm/checksum.h> 33#include <asm/checksum.h>
34#include <asm/tlbflush.h>
35#include <asm/homecache.h>
34#include <hv/hypervisor.h> 36#include <hv/hypervisor.h>
35 37
36 38
@@ -222,11 +224,22 @@ struct page *kimage_alloc_pages_arch(gfp_t gfp_mask, unsigned int order)
222 return alloc_pages_node(0, gfp_mask, order); 224 return alloc_pages_node(0, gfp_mask, order);
223} 225}
224 226
227/*
228 * Address range in which pa=va mapping is set in setup_quasi_va_is_pa().
229 * For tilepro, PAGE_OFFSET is used since this is the largest possbile value
230 * for tilepro, while for tilegx, we limit it to entire middle level page
231 * table which we assume has been allocated and is undoubtedly large enough.
232 */
233#ifndef __tilegx__
234#define QUASI_VA_IS_PA_ADDR_RANGE PAGE_OFFSET
235#else
236#define QUASI_VA_IS_PA_ADDR_RANGE PGDIR_SIZE
237#endif
238
225static void setup_quasi_va_is_pa(void) 239static void setup_quasi_va_is_pa(void)
226{ 240{
227 HV_PTE *pgtable;
228 HV_PTE pte; 241 HV_PTE pte;
229 int i; 242 unsigned long i;
230 243
231 /* 244 /*
232 * Flush our TLB to prevent conflicts between the previous contents 245 * Flush our TLB to prevent conflicts between the previous contents
@@ -234,16 +247,22 @@ static void setup_quasi_va_is_pa(void)
234 */ 247 */
235 local_flush_tlb_all(); 248 local_flush_tlb_all();
236 249
237 /* setup VA is PA, at least up to PAGE_OFFSET */ 250 /*
238 251 * setup VA is PA, at least up to QUASI_VA_IS_PA_ADDR_RANGE.
239 pgtable = (HV_PTE *)current->mm->pgd; 252 * Note here we assume that level-1 page table is defined by
253 * HPAGE_SIZE.
254 */
240 pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE); 255 pte = hv_pte(_PAGE_KERNEL | _PAGE_HUGE_PAGE);
241 pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3); 256 pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_NO_L3);
242 257 for (i = 0; i < (QUASI_VA_IS_PA_ADDR_RANGE >> HPAGE_SHIFT); i++) {
243 for (i = 0; i < pgd_index(PAGE_OFFSET); i++) { 258 unsigned long vaddr = i << HPAGE_SHIFT;
259 pgd_t *pgd = pgd_offset(current->mm, vaddr);
260 pud_t *pud = pud_offset(pgd, vaddr);
261 pte_t *ptep = (pte_t *) pmd_offset(pud, vaddr);
244 unsigned long pfn = i << (HPAGE_SHIFT - PAGE_SHIFT); 262 unsigned long pfn = i << (HPAGE_SHIFT - PAGE_SHIFT);
263
245 if (pfn_valid(pfn)) 264 if (pfn_valid(pfn))
246 __set_pte(&pgtable[i], pfn_pte(pfn, pte)); 265 __set_pte(ptep, pfn_pte(pfn, pte));
247 } 266 }
248} 267}
249 268
@@ -251,6 +270,7 @@ static void setup_quasi_va_is_pa(void)
251void machine_kexec(struct kimage *image) 270void machine_kexec(struct kimage *image)
252{ 271{
253 void *reboot_code_buffer; 272 void *reboot_code_buffer;
273 pte_t *ptep;
254 void (*rnk)(unsigned long, void *, unsigned long) 274 void (*rnk)(unsigned long, void *, unsigned long)
255 __noreturn; 275 __noreturn;
256 276
@@ -266,8 +286,10 @@ void machine_kexec(struct kimage *image)
266 */ 286 */
267 homecache_change_page_home(image->control_code_page, 0, 287 homecache_change_page_home(image->control_code_page, 0,
268 smp_processor_id()); 288 smp_processor_id());
269 reboot_code_buffer = vmap(&image->control_code_page, 1, 0, 289 reboot_code_buffer = page_address(image->control_code_page);
270 __pgprot(_PAGE_KERNEL | _PAGE_EXECUTABLE)); 290 BUG_ON(reboot_code_buffer == NULL);
291 ptep = virt_to_pte(NULL, (unsigned long)reboot_code_buffer);
292 __set_pte(ptep, pte_mkexec(*ptep));
271 memcpy(reboot_code_buffer, relocate_new_kernel, 293 memcpy(reboot_code_buffer, relocate_new_kernel,
272 relocate_new_kernel_size); 294 relocate_new_kernel_size);
273 __flush_icache_range( 295 __flush_icache_range(
diff --git a/arch/tile/kernel/module.c b/arch/tile/kernel/module.c
index 98d476920106..001cbfa10ac6 100644
--- a/arch/tile/kernel/module.c
+++ b/arch/tile/kernel/module.c
@@ -159,7 +159,17 @@ int apply_relocate_add(Elf_Shdr *sechdrs,
159 159
160 switch (ELF_R_TYPE(rel[i].r_info)) { 160 switch (ELF_R_TYPE(rel[i].r_info)) {
161 161
162#define MUNGE(func) (*location = ((*location & ~func(-1)) | func(value))) 162#ifdef __LITTLE_ENDIAN
163# define MUNGE(func) \
164 (*location = ((*location & ~func(-1)) | func(value)))
165#else
166/*
167 * Instructions are always little-endian, so when we read them as data,
168 * we have to swap them around before and after modifying them.
169 */
170# define MUNGE(func) \
171 (*location = swab64((swab64(*location) & ~func(-1)) | func(value)))
172#endif
163 173
164#ifndef __tilegx__ 174#ifndef __tilegx__
165 case R_TILE_32: 175 case R_TILE_32:
diff --git a/arch/tile/kernel/proc.c b/arch/tile/kernel/proc.c
index 446a7f52cc11..dafc447b5125 100644
--- a/arch/tile/kernel/proc.c
+++ b/arch/tile/kernel/proc.c
@@ -22,6 +22,7 @@
22#include <linux/proc_fs.h> 22#include <linux/proc_fs.h>
23#include <linux/sysctl.h> 23#include <linux/sysctl.h>
24#include <linux/hardirq.h> 24#include <linux/hardirq.h>
25#include <linux/hugetlb.h>
25#include <linux/mman.h> 26#include <linux/mman.h>
26#include <asm/unaligned.h> 27#include <asm/unaligned.h>
27#include <asm/pgtable.h> 28#include <asm/pgtable.h>
diff --git a/arch/tile/kernel/process.c b/arch/tile/kernel/process.c
index f572c19c4082..ba1023d8a021 100644
--- a/arch/tile/kernel/process.c
+++ b/arch/tile/kernel/process.c
@@ -128,10 +128,10 @@ void arch_release_thread_info(struct thread_info *info)
128 * Calling deactivate here just frees up the data structures. 128 * Calling deactivate here just frees up the data structures.
129 * If the task we're freeing held the last reference to a 129 * If the task we're freeing held the last reference to a
130 * hardwall fd, it would have been released prior to this point 130 * hardwall fd, it would have been released prior to this point
131 * anyway via exit_files(), and "hardwall" would be NULL by now. 131 * anyway via exit_files(), and the hardwall_task.info pointers
132 * would be NULL by now.
132 */ 133 */
133 if (info->task->thread.hardwall) 134 hardwall_deactivate_all(info->task);
134 hardwall_deactivate(info->task);
135#endif 135#endif
136 136
137 if (step_state) { 137 if (step_state) {
@@ -245,7 +245,8 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
245 245
246#ifdef CONFIG_HARDWALL 246#ifdef CONFIG_HARDWALL
247 /* New thread does not own any networks. */ 247 /* New thread does not own any networks. */
248 p->thread.hardwall = NULL; 248 memset(&p->thread.hardwall[0], 0,
249 sizeof(struct hardwall_task) * HARDWALL_TYPES);
249#endif 250#endif
250 251
251 252
@@ -515,12 +516,7 @@ struct task_struct *__sched _switch_to(struct task_struct *prev,
515 516
516#ifdef CONFIG_HARDWALL 517#ifdef CONFIG_HARDWALL
517 /* Enable or disable access to the network registers appropriately. */ 518 /* Enable or disable access to the network registers appropriately. */
518 if (prev->thread.hardwall != NULL) { 519 hardwall_switch_tasks(prev, next);
519 if (next->thread.hardwall == NULL)
520 restrict_network_mpls();
521 } else if (next->thread.hardwall != NULL) {
522 grant_network_mpls();
523 }
524#endif 520#endif
525 521
526 /* 522 /*
diff --git a/arch/tile/kernel/relocate_kernel.S b/arch/tile/kernel/relocate_kernel_32.S
index 010b418515f8..010b418515f8 100644
--- a/arch/tile/kernel/relocate_kernel.S
+++ b/arch/tile/kernel/relocate_kernel_32.S
diff --git a/arch/tile/kernel/relocate_kernel_64.S b/arch/tile/kernel/relocate_kernel_64.S
new file mode 100644
index 000000000000..1c09a4f5a4ea
--- /dev/null
+++ b/arch/tile/kernel/relocate_kernel_64.S
@@ -0,0 +1,260 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * copy new kernel into place and then call hv_reexec
15 *
16 */
17
18#include <linux/linkage.h>
19#include <arch/chip.h>
20#include <asm/page.h>
21#include <hv/hypervisor.h>
22
23#undef RELOCATE_NEW_KERNEL_VERBOSE
24
25STD_ENTRY(relocate_new_kernel)
26
27 move r30, r0 /* page list */
28 move r31, r1 /* address of page we are on */
29 move r32, r2 /* start address of new kernel */
30
31 shrui r1, r1, PAGE_SHIFT
32 addi r1, r1, 1
33 shli sp, r1, PAGE_SHIFT
34 addi sp, sp, -8
35 /* we now have a stack (whether we need one or not) */
36
37 moveli r40, hw2_last(hv_console_putc)
38 shl16insli r40, r40, hw1(hv_console_putc)
39 shl16insli r40, r40, hw0(hv_console_putc)
40
41#ifdef RELOCATE_NEW_KERNEL_VERBOSE
42 moveli r0, 'r'
43 jalr r40
44
45 moveli r0, '_'
46 jalr r40
47
48 moveli r0, 'n'
49 jalr r40
50
51 moveli r0, '_'
52 jalr r40
53
54 moveli r0, 'k'
55 jalr r40
56
57 moveli r0, '\n'
58 jalr r40
59#endif
60
61 /*
62 * Throughout this code r30 is pointer to the element of page
63 * list we are working on.
64 *
65 * Normally we get to the next element of the page list by
66 * incrementing r30 by eight. The exception is if the element
67 * on the page list is an IND_INDIRECTION in which case we use
68 * the element with the low bits masked off as the new value
69 * of r30.
70 *
71 * To get this started, we need the value passed to us (which
72 * will always be an IND_INDIRECTION) in memory somewhere with
73 * r30 pointing at it. To do that, we push the value passed
74 * to us on the stack and make r30 point to it.
75 */
76
77 st sp, r30
78 move r30, sp
79 addi sp, sp, -16
80
81#if CHIP_HAS_CBOX_HOME_MAP()
82 /*
83 * On TILE-GX, we need to flush all tiles' caches, since we may
84 * have been doing hash-for-home caching there. Note that we
85 * must do this _after_ we're completely done modifying any memory
86 * other than our output buffer (which we know is locally cached).
87 * We want the caches to be fully clean when we do the reexec,
88 * because the hypervisor is going to do this flush again at that
89 * point, and we don't want that second flush to overwrite any memory.
90 */
91 {
92 move r0, zero /* cache_pa */
93 moveli r1, hw2_last(HV_FLUSH_EVICT_L2)
94 }
95 {
96 shl16insli r1, r1, hw1(HV_FLUSH_EVICT_L2)
97 movei r2, -1 /* cache_cpumask; -1 means all client tiles */
98 }
99 {
100 shl16insli r1, r1, hw0(HV_FLUSH_EVICT_L2) /* cache_control */
101 move r3, zero /* tlb_va */
102 }
103 {
104 move r4, zero /* tlb_length */
105 move r5, zero /* tlb_pgsize */
106 }
107 {
108 move r6, zero /* tlb_cpumask */
109 move r7, zero /* asids */
110 }
111 {
112 moveli r20, hw2_last(hv_flush_remote)
113 move r8, zero /* asidcount */
114 }
115 shl16insli r20, r20, hw1(hv_flush_remote)
116 shl16insli r20, r20, hw0(hv_flush_remote)
117
118 jalr r20
119#endif
120
121 /* r33 is destination pointer, default to zero */
122
123 moveli r33, 0
124
125.Lloop: ld r10, r30
126
127 andi r9, r10, 0xf /* low 4 bits tell us what type it is */
128 xor r10, r10, r9 /* r10 is now value with low 4 bits stripped */
129
130 cmpeqi r0, r9, 0x1 /* IND_DESTINATION */
131 beqzt r0, .Ltry2
132
133 move r33, r10
134
135#ifdef RELOCATE_NEW_KERNEL_VERBOSE
136 moveli r0, 'd'
137 jalr r40
138#endif
139
140 addi r30, r30, 8
141 j .Lloop
142
143.Ltry2:
144 cmpeqi r0, r9, 0x2 /* IND_INDIRECTION */
145 beqzt r0, .Ltry4
146
147 move r30, r10
148
149#ifdef RELOCATE_NEW_KERNEL_VERBOSE
150 moveli r0, 'i'
151 jalr r40
152#endif
153
154 j .Lloop
155
156.Ltry4:
157 cmpeqi r0, r9, 0x4 /* IND_DONE */
158 beqzt r0, .Ltry8
159
160 mf
161
162#ifdef RELOCATE_NEW_KERNEL_VERBOSE
163 moveli r0, 'D'
164 jalr r40
165 moveli r0, '\n'
166 jalr r40
167#endif
168
169 move r0, r32
170
171 moveli r41, hw2_last(hv_reexec)
172 shl16insli r41, r41, hw1(hv_reexec)
173 shl16insli r41, r41, hw0(hv_reexec)
174
175 jalr r41
176
177 /* we should not get here */
178
179 moveli r0, '?'
180 jalr r40
181 moveli r0, '\n'
182 jalr r40
183
184 j .Lhalt
185
186.Ltry8: cmpeqi r0, r9, 0x8 /* IND_SOURCE */
187 beqz r0, .Lerr /* unknown type */
188
189 /* copy page at r10 to page at r33 */
190
191 move r11, r33
192
193 moveli r0, hw2_last(PAGE_SIZE)
194 shl16insli r0, r0, hw1(PAGE_SIZE)
195 shl16insli r0, r0, hw0(PAGE_SIZE)
196 add r33, r33, r0
197
198 /* copy word at r10 to word at r11 until r11 equals r33 */
199
200 /* We know page size must be multiple of 8, so we can unroll
201 * 8 times safely without any edge case checking.
202 *
203 * Issue a flush of the destination every 8 words to avoid
204 * incoherence when starting the new kernel. (Now this is
205 * just good paranoia because the hv_reexec call will also
206 * take care of this.)
207 */
208
2091:
210 { ld r0, r10; addi r10, r10, 8 }
211 { st r11, r0; addi r11, r11, 8 }
212 { ld r0, r10; addi r10, r10, 8 }
213 { st r11, r0; addi r11, r11, 8 }
214 { ld r0, r10; addi r10, r10, 8 }
215 { st r11, r0; addi r11, r11, 8 }
216 { ld r0, r10; addi r10, r10, 8 }
217 { st r11, r0; addi r11, r11, 8 }
218 { ld r0, r10; addi r10, r10, 8 }
219 { st r11, r0; addi r11, r11, 8 }
220 { ld r0, r10; addi r10, r10, 8 }
221 { st r11, r0; addi r11, r11, 8 }
222 { ld r0, r10; addi r10, r10, 8 }
223 { st r11, r0; addi r11, r11, 8 }
224 { ld r0, r10; addi r10, r10, 8 }
225 { st r11, r0 }
226 { flush r11 ; addi r11, r11, 8 }
227
228 cmpeq r0, r33, r11
229 beqzt r0, 1b
230
231#ifdef RELOCATE_NEW_KERNEL_VERBOSE
232 moveli r0, 's'
233 jalr r40
234#endif
235
236 addi r30, r30, 8
237 j .Lloop
238
239
240.Lerr: moveli r0, 'e'
241 jalr r40
242 moveli r0, 'r'
243 jalr r40
244 moveli r0, 'r'
245 jalr r40
246 moveli r0, '\n'
247 jalr r40
248.Lhalt:
249 moveli r41, hw2_last(hv_halt)
250 shl16insli r41, r41, hw1(hv_halt)
251 shl16insli r41, r41, hw0(hv_halt)
252
253 jalr r41
254 STD_ENDPROC(relocate_new_kernel)
255
256 .section .rodata,"a"
257
258 .globl relocate_new_kernel_size
259relocate_new_kernel_size:
260 .long .Lend_relocate_new_kernel - relocate_new_kernel
diff --git a/arch/tile/kernel/setup.c b/arch/tile/kernel/setup.c
index 98d80eb49ddb..6098ccc59be2 100644
--- a/arch/tile/kernel/setup.c
+++ b/arch/tile/kernel/setup.c
@@ -28,6 +28,7 @@
28#include <linux/highmem.h> 28#include <linux/highmem.h>
29#include <linux/smp.h> 29#include <linux/smp.h>
30#include <linux/timex.h> 30#include <linux/timex.h>
31#include <linux/hugetlb.h>
31#include <asm/setup.h> 32#include <asm/setup.h>
32#include <asm/sections.h> 33#include <asm/sections.h>
33#include <asm/cacheflush.h> 34#include <asm/cacheflush.h>
@@ -49,9 +50,6 @@ char chip_model[64] __write_once;
49struct pglist_data node_data[MAX_NUMNODES] __read_mostly; 50struct pglist_data node_data[MAX_NUMNODES] __read_mostly;
50EXPORT_SYMBOL(node_data); 51EXPORT_SYMBOL(node_data);
51 52
52/* We only create bootmem data on node 0. */
53static bootmem_data_t __initdata node0_bdata;
54
55/* Information on the NUMA nodes that we compute early */ 53/* Information on the NUMA nodes that we compute early */
56unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES]; 54unsigned long __cpuinitdata node_start_pfn[MAX_NUMNODES];
57unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES]; 55unsigned long __cpuinitdata node_end_pfn[MAX_NUMNODES];
@@ -534,37 +532,96 @@ static void __init setup_memory(void)
534#endif 532#endif
535} 533}
536 534
537static void __init setup_bootmem_allocator(void) 535/*
536 * On 32-bit machines, we only put bootmem on the low controller,
537 * since PAs > 4GB can't be used in bootmem. In principle one could
538 * imagine, e.g., multiple 1 GB controllers all of which could support
539 * bootmem, but in practice using controllers this small isn't a
540 * particularly interesting scenario, so we just keep it simple and
541 * use only the first controller for bootmem on 32-bit machines.
542 */
543static inline int node_has_bootmem(int nid)
538{ 544{
539 unsigned long bootmap_size, first_alloc_pfn, last_alloc_pfn; 545#ifdef CONFIG_64BIT
546 return 1;
547#else
548 return nid == 0;
549#endif
550}
540 551
541 /* Provide a node 0 bdata. */ 552static inline unsigned long alloc_bootmem_pfn(int nid,
542 NODE_DATA(0)->bdata = &node0_bdata; 553 unsigned long size,
554 unsigned long goal)
555{
556 void *kva = __alloc_bootmem_node(NODE_DATA(nid), size,
557 PAGE_SIZE, goal);
558 unsigned long pfn = kaddr_to_pfn(kva);
559 BUG_ON(goal && PFN_PHYS(pfn) != goal);
560 return pfn;
561}
543 562
544#ifdef CONFIG_PCI 563static void __init setup_bootmem_allocator_node(int i)
545 /* Don't let boot memory alias the PCI region. */ 564{
546 last_alloc_pfn = min(max_low_pfn, pci_reserve_start_pfn); 565 unsigned long start, end, mapsize, mapstart;
566
567 if (node_has_bootmem(i)) {
568 NODE_DATA(i)->bdata = &bootmem_node_data[i];
569 } else {
570 /* Share controller zero's bdata for now. */
571 NODE_DATA(i)->bdata = &bootmem_node_data[0];
572 return;
573 }
574
575 /* Skip up to after the bss in node 0. */
576 start = (i == 0) ? min_low_pfn : node_start_pfn[i];
577
578 /* Only lowmem, if we're a HIGHMEM build. */
579#ifdef CONFIG_HIGHMEM
580 end = node_lowmem_end_pfn[i];
547#else 581#else
548 last_alloc_pfn = max_low_pfn; 582 end = node_end_pfn[i];
549#endif 583#endif
550 584
551 /* 585 /* No memory here. */
552 * Initialize the boot-time allocator (with low memory only): 586 if (end == start)
553 * The first argument says where to put the bitmap, and the 587 return;
554 * second says where the end of allocatable memory is. 588
555 */ 589 /* Figure out where the bootmem bitmap is located. */
556 bootmap_size = init_bootmem(min_low_pfn, last_alloc_pfn); 590 mapsize = bootmem_bootmap_pages(end - start);
591 if (i == 0) {
592 /* Use some space right before the heap on node 0. */
593 mapstart = start;
594 start += mapsize;
595 } else {
596 /* Allocate bitmap on node 0 to avoid page table issues. */
597 mapstart = alloc_bootmem_pfn(0, PFN_PHYS(mapsize), 0);
598 }
557 599
600 /* Initialize a node. */
601 init_bootmem_node(NODE_DATA(i), mapstart, start, end);
602
603 /* Free all the space back into the allocator. */
604 free_bootmem(PFN_PHYS(start), PFN_PHYS(end - start));
605
606#if defined(CONFIG_PCI)
558 /* 607 /*
559 * Let the bootmem allocator use all the space we've given it 608 * Throw away any memory aliased by the PCI region. FIXME: this
560 * except for its own bitmap. 609 * is a temporary hack to work around bug 10502, and needs to be
610 * fixed properly.
561 */ 611 */
562 first_alloc_pfn = min_low_pfn + PFN_UP(bootmap_size); 612 if (pci_reserve_start_pfn < end && pci_reserve_end_pfn > start)
563 if (first_alloc_pfn >= last_alloc_pfn) 613 reserve_bootmem(PFN_PHYS(pci_reserve_start_pfn),
564 early_panic("Not enough memory on controller 0 for bootmem\n"); 614 PFN_PHYS(pci_reserve_end_pfn -
615 pci_reserve_start_pfn),
616 BOOTMEM_EXCLUSIVE);
617#endif
618}
565 619
566 free_bootmem(PFN_PHYS(first_alloc_pfn), 620static void __init setup_bootmem_allocator(void)
567 PFN_PHYS(last_alloc_pfn - first_alloc_pfn)); 621{
622 int i;
623 for (i = 0; i < MAX_NUMNODES; ++i)
624 setup_bootmem_allocator_node(i);
568 625
569#ifdef CONFIG_KEXEC 626#ifdef CONFIG_KEXEC
570 if (crashk_res.start != crashk_res.end) 627 if (crashk_res.start != crashk_res.end)
@@ -595,14 +652,6 @@ static int __init percpu_size(void)
595 return size; 652 return size;
596} 653}
597 654
598static inline unsigned long alloc_bootmem_pfn(int size, unsigned long goal)
599{
600 void *kva = __alloc_bootmem(size, PAGE_SIZE, goal);
601 unsigned long pfn = kaddr_to_pfn(kva);
602 BUG_ON(goal && PFN_PHYS(pfn) != goal);
603 return pfn;
604}
605
606static void __init zone_sizes_init(void) 655static void __init zone_sizes_init(void)
607{ 656{
608 unsigned long zones_size[MAX_NR_ZONES] = { 0 }; 657 unsigned long zones_size[MAX_NR_ZONES] = { 0 };
@@ -640,21 +689,22 @@ static void __init zone_sizes_init(void)
640 * though, there'll be no lowmem, so we just alloc_bootmem 689 * though, there'll be no lowmem, so we just alloc_bootmem
641 * the memmap. There will be no percpu memory either. 690 * the memmap. There will be no percpu memory either.
642 */ 691 */
643 if (__pfn_to_highbits(start) == 0) { 692 if (i != 0 && cpu_isset(i, isolnodes)) {
644 /* In low PAs, allocate via bootmem. */ 693 node_memmap_pfn[i] =
694 alloc_bootmem_pfn(0, memmap_size, 0);
695 BUG_ON(node_percpu[i] != 0);
696 } else if (node_has_bootmem(start)) {
645 unsigned long goal = 0; 697 unsigned long goal = 0;
646 node_memmap_pfn[i] = 698 node_memmap_pfn[i] =
647 alloc_bootmem_pfn(memmap_size, goal); 699 alloc_bootmem_pfn(i, memmap_size, 0);
648 if (kdata_huge) 700 if (kdata_huge)
649 goal = PFN_PHYS(lowmem_end) - node_percpu[i]; 701 goal = PFN_PHYS(lowmem_end) - node_percpu[i];
650 if (node_percpu[i]) 702 if (node_percpu[i])
651 node_percpu_pfn[i] = 703 node_percpu_pfn[i] =
652 alloc_bootmem_pfn(node_percpu[i], goal); 704 alloc_bootmem_pfn(i, node_percpu[i],
653 } else if (cpu_isset(i, isolnodes)) { 705 goal);
654 node_memmap_pfn[i] = alloc_bootmem_pfn(memmap_size, 0);
655 BUG_ON(node_percpu[i] != 0);
656 } else { 706 } else {
657 /* In high PAs, just reserve some pages. */ 707 /* In non-bootmem zones, just reserve some pages. */
658 node_memmap_pfn[i] = node_free_pfn[i]; 708 node_memmap_pfn[i] = node_free_pfn[i];
659 node_free_pfn[i] += PFN_UP(memmap_size); 709 node_free_pfn[i] += PFN_UP(memmap_size);
660 if (!kdata_huge) { 710 if (!kdata_huge) {
@@ -678,16 +728,9 @@ static void __init zone_sizes_init(void)
678 zones_size[ZONE_NORMAL] = end - start; 728 zones_size[ZONE_NORMAL] = end - start;
679#endif 729#endif
680 730
681 /* 731 /* Take zone metadata from controller 0 if we're isolnode. */
682 * Everyone shares node 0's bootmem allocator, but 732 if (node_isset(i, isolnodes))
683 * we use alloc_remap(), above, to put the actual 733 NODE_DATA(i)->bdata = &bootmem_node_data[0];
684 * struct page array on the individual controllers,
685 * which is most of the data that we actually care about.
686 * We can't place bootmem allocators on the other
687 * controllers since the bootmem allocator can only
688 * operate on 32-bit physical addresses.
689 */
690 NODE_DATA(i)->bdata = NODE_DATA(0)->bdata;
691 734
692 free_area_init_node(i, zones_size, start, NULL); 735 free_area_init_node(i, zones_size, start, NULL);
693 printk(KERN_DEBUG " Normal zone: %ld per-cpu pages\n", 736 printk(KERN_DEBUG " Normal zone: %ld per-cpu pages\n",
@@ -870,6 +913,22 @@ subsys_initcall(topology_init);
870 913
871#endif /* CONFIG_NUMA */ 914#endif /* CONFIG_NUMA */
872 915
916/*
917 * Initialize hugepage support on this cpu. We do this on all cores
918 * early in boot: before argument parsing for the boot cpu, and after
919 * argument parsing but before the init functions run on the secondaries.
920 * So the values we set up here in the hypervisor may be overridden on
921 * the boot cpu as arguments are parsed.
922 */
923static __cpuinit void init_super_pages(void)
924{
925#ifdef CONFIG_HUGETLB_SUPER_PAGES
926 int i;
927 for (i = 0; i < HUGE_SHIFT_ENTRIES; ++i)
928 hv_set_pte_super_shift(i, huge_shift[i]);
929#endif
930}
931
873/** 932/**
874 * setup_cpu() - Do all necessary per-cpu, tile-specific initialization. 933 * setup_cpu() - Do all necessary per-cpu, tile-specific initialization.
875 * @boot: Is this the boot cpu? 934 * @boot: Is this the boot cpu?
@@ -924,6 +983,8 @@ void __cpuinit setup_cpu(int boot)
924 /* Reset the network state on this cpu. */ 983 /* Reset the network state on this cpu. */
925 reset_network_state(); 984 reset_network_state();
926#endif 985#endif
986
987 init_super_pages();
927} 988}
928 989
929#ifdef CONFIG_BLK_DEV_INITRD 990#ifdef CONFIG_BLK_DEV_INITRD
@@ -1412,13 +1473,13 @@ void __init setup_per_cpu_areas(void)
1412 for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) { 1473 for (i = 0; i < size; i += PAGE_SIZE, ++pfn, ++pg) {
1413 1474
1414 /* Update the vmalloc mapping and page home. */ 1475 /* Update the vmalloc mapping and page home. */
1415 pte_t *ptep = 1476 unsigned long addr = (unsigned long)ptr + i;
1416 virt_to_pte(NULL, (unsigned long)ptr + i); 1477 pte_t *ptep = virt_to_pte(NULL, addr);
1417 pte_t pte = *ptep; 1478 pte_t pte = *ptep;
1418 BUG_ON(pfn != pte_pfn(pte)); 1479 BUG_ON(pfn != pte_pfn(pte));
1419 pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3); 1480 pte = hv_pte_set_mode(pte, HV_PTE_MODE_CACHE_TILE_L3);
1420 pte = set_remote_cache_cpu(pte, cpu); 1481 pte = set_remote_cache_cpu(pte, cpu);
1421 set_pte(ptep, pte); 1482 set_pte_at(&init_mm, addr, ptep, pte);
1422 1483
1423 /* Update the lowmem mapping for consistency. */ 1484 /* Update the lowmem mapping for consistency. */
1424 lowmem_va = (unsigned long)pfn_to_kaddr(pfn); 1485 lowmem_va = (unsigned long)pfn_to_kaddr(pfn);
@@ -1431,7 +1492,7 @@ void __init setup_per_cpu_areas(void)
1431 BUG_ON(pte_huge(*ptep)); 1492 BUG_ON(pte_huge(*ptep));
1432 } 1493 }
1433 BUG_ON(pfn != pte_pfn(*ptep)); 1494 BUG_ON(pfn != pte_pfn(*ptep));
1434 set_pte(ptep, pte); 1495 set_pte_at(&init_mm, lowmem_va, ptep, pte);
1435 } 1496 }
1436 } 1497 }
1437 1498
diff --git a/arch/tile/kernel/single_step.c b/arch/tile/kernel/single_step.c
index 89529c9f0605..27742e87e255 100644
--- a/arch/tile/kernel/single_step.c
+++ b/arch/tile/kernel/single_step.c
@@ -172,9 +172,6 @@ static tile_bundle_bits rewrite_load_store_unaligned(
172 return (tilepro_bundle_bits) 0; 172 return (tilepro_bundle_bits) 0;
173 } 173 }
174 174
175#ifndef __LITTLE_ENDIAN
176# error We assume little-endian representation with copy_xx_user size 2 here
177#endif
178 /* Handle unaligned load/store */ 175 /* Handle unaligned load/store */
179 if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) { 176 if (mem_op == MEMOP_LOAD || mem_op == MEMOP_LOAD_POSTINCR) {
180 unsigned short val_16; 177 unsigned short val_16;
@@ -195,8 +192,19 @@ static tile_bundle_bits rewrite_load_store_unaligned(
195 state->update = 1; 192 state->update = 1;
196 } 193 }
197 } else { 194 } else {
195 unsigned short val_16;
198 val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg]; 196 val = (val_reg == TREG_ZERO) ? 0 : regs->regs[val_reg];
199 err = copy_to_user(addr, &val, size); 197 switch (size) {
198 case 2:
199 val_16 = val;
200 err = copy_to_user(addr, &val_16, sizeof(val_16));
201 break;
202 case 4:
203 err = copy_to_user(addr, &val, sizeof(val));
204 break;
205 default:
206 BUG();
207 }
200 } 208 }
201 209
202 if (err) { 210 if (err) {
diff --git a/arch/tile/kernel/smp.c b/arch/tile/kernel/smp.c
index 91da0f721958..cbc73a8b8fe1 100644
--- a/arch/tile/kernel/smp.c
+++ b/arch/tile/kernel/smp.c
@@ -203,7 +203,7 @@ void __init ipi_init(void)
203 if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0) 203 if (hv_get_ipi_pte(tile, KERNEL_PL, &pte) != 0)
204 panic("Failed to initialize IPI for cpu %d\n", cpu); 204 panic("Failed to initialize IPI for cpu %d\n", cpu);
205 205
206 offset = hv_pte_get_pfn(pte) << PAGE_SHIFT; 206 offset = PFN_PHYS(pte_pfn(pte));
207 ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte); 207 ipi_mappings[cpu] = ioremap_prot(offset, PAGE_SIZE, pte);
208 } 208 }
209#endif 209#endif
diff --git a/arch/tile/kernel/sys.c b/arch/tile/kernel/sys.c
index cb44ba7ccd2d..b08095b402d6 100644
--- a/arch/tile/kernel/sys.c
+++ b/arch/tile/kernel/sys.c
@@ -32,11 +32,17 @@
32#include <asm/syscalls.h> 32#include <asm/syscalls.h>
33#include <asm/pgtable.h> 33#include <asm/pgtable.h>
34#include <asm/homecache.h> 34#include <asm/homecache.h>
35#include <asm/cachectl.h>
35#include <arch/chip.h> 36#include <arch/chip.h>
36 37
37SYSCALL_DEFINE0(flush_cache) 38SYSCALL_DEFINE3(cacheflush, unsigned long, addr, unsigned long, len,
39 unsigned long, flags)
38{ 40{
39 homecache_evict(cpumask_of(smp_processor_id())); 41 if (flags & DCACHE)
42 homecache_evict(cpumask_of(smp_processor_id()));
43 if (flags & ICACHE)
44 flush_remote(0, HV_FLUSH_EVICT_L1I, mm_cpumask(current->mm),
45 0, 0, 0, NULL, NULL, 0);
40 return 0; 46 return 0;
41} 47}
42 48
diff --git a/arch/tile/kernel/sysfs.c b/arch/tile/kernel/sysfs.c
index 71ae728e9d0b..e25b0a89c18f 100644
--- a/arch/tile/kernel/sysfs.c
+++ b/arch/tile/kernel/sysfs.c
@@ -93,6 +93,10 @@ HV_CONF_ATTR(mezz_part, HV_CONFSTR_MEZZ_PART_NUM)
93HV_CONF_ATTR(mezz_serial, HV_CONFSTR_MEZZ_SERIAL_NUM) 93HV_CONF_ATTR(mezz_serial, HV_CONFSTR_MEZZ_SERIAL_NUM)
94HV_CONF_ATTR(mezz_revision, HV_CONFSTR_MEZZ_REV) 94HV_CONF_ATTR(mezz_revision, HV_CONFSTR_MEZZ_REV)
95HV_CONF_ATTR(mezz_description, HV_CONFSTR_MEZZ_DESC) 95HV_CONF_ATTR(mezz_description, HV_CONFSTR_MEZZ_DESC)
96HV_CONF_ATTR(cpumod_part, HV_CONFSTR_CPUMOD_PART_NUM)
97HV_CONF_ATTR(cpumod_serial, HV_CONFSTR_CPUMOD_SERIAL_NUM)
98HV_CONF_ATTR(cpumod_revision, HV_CONFSTR_CPUMOD_REV)
99HV_CONF_ATTR(cpumod_description,HV_CONFSTR_CPUMOD_DESC)
96HV_CONF_ATTR(switch_control, HV_CONFSTR_SWITCH_CONTROL) 100HV_CONF_ATTR(switch_control, HV_CONFSTR_SWITCH_CONTROL)
97 101
98static struct attribute *board_attrs[] = { 102static struct attribute *board_attrs[] = {
@@ -104,6 +108,10 @@ static struct attribute *board_attrs[] = {
104 &dev_attr_mezz_serial.attr, 108 &dev_attr_mezz_serial.attr,
105 &dev_attr_mezz_revision.attr, 109 &dev_attr_mezz_revision.attr,
106 &dev_attr_mezz_description.attr, 110 &dev_attr_mezz_description.attr,
111 &dev_attr_cpumod_part.attr,
112 &dev_attr_cpumod_serial.attr,
113 &dev_attr_cpumod_revision.attr,
114 &dev_attr_cpumod_description.attr,
107 &dev_attr_switch_control.attr, 115 &dev_attr_switch_control.attr,
108 NULL 116 NULL
109}; 117};
diff --git a/arch/tile/kernel/tlb.c b/arch/tile/kernel/tlb.c
index a5f241c24cac..3fd54d5bbd4c 100644
--- a/arch/tile/kernel/tlb.c
+++ b/arch/tile/kernel/tlb.c
@@ -15,6 +15,7 @@
15 15
16#include <linux/cpumask.h> 16#include <linux/cpumask.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/hugetlb.h>
18#include <asm/tlbflush.h> 19#include <asm/tlbflush.h>
19#include <asm/homecache.h> 20#include <asm/homecache.h>
20#include <hv/hypervisor.h> 21#include <hv/hypervisor.h>
@@ -49,25 +50,25 @@ void flush_tlb_current_task(void)
49 flush_tlb_mm(current->mm); 50 flush_tlb_mm(current->mm);
50} 51}
51 52
52void flush_tlb_page_mm(const struct vm_area_struct *vma, struct mm_struct *mm, 53void flush_tlb_page_mm(struct vm_area_struct *vma, struct mm_struct *mm,
53 unsigned long va) 54 unsigned long va)
54{ 55{
55 unsigned long size = hv_page_size(vma); 56 unsigned long size = vma_kernel_pagesize(vma);
56 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; 57 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
57 flush_remote(0, cache, mm_cpumask(mm), 58 flush_remote(0, cache, mm_cpumask(mm),
58 va, size, size, mm_cpumask(mm), NULL, 0); 59 va, size, size, mm_cpumask(mm), NULL, 0);
59} 60}
60 61
61void flush_tlb_page(const struct vm_area_struct *vma, unsigned long va) 62void flush_tlb_page(struct vm_area_struct *vma, unsigned long va)
62{ 63{
63 flush_tlb_page_mm(vma, vma->vm_mm, va); 64 flush_tlb_page_mm(vma, vma->vm_mm, va);
64} 65}
65EXPORT_SYMBOL(flush_tlb_page); 66EXPORT_SYMBOL(flush_tlb_page);
66 67
67void flush_tlb_range(const struct vm_area_struct *vma, 68void flush_tlb_range(struct vm_area_struct *vma,
68 unsigned long start, unsigned long end) 69 unsigned long start, unsigned long end)
69{ 70{
70 unsigned long size = hv_page_size(vma); 71 unsigned long size = vma_kernel_pagesize(vma);
71 struct mm_struct *mm = vma->vm_mm; 72 struct mm_struct *mm = vma->vm_mm;
72 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0; 73 int cache = (vma->vm_flags & VM_EXEC) ? HV_FLUSH_EVICT_L1I : 0;
73 flush_remote(0, cache, mm_cpumask(mm), start, end - start, size, 74 flush_remote(0, cache, mm_cpumask(mm), start, end - start, size,
diff --git a/arch/tile/kernel/traps.c b/arch/tile/kernel/traps.c
index 73cff814ac57..5b19a23c8908 100644
--- a/arch/tile/kernel/traps.c
+++ b/arch/tile/kernel/traps.c
@@ -195,6 +195,25 @@ static int special_ill(bundle_bits bundle, int *sigp, int *codep)
195 return 1; 195 return 1;
196} 196}
197 197
198static const char *const int_name[] = {
199 [INT_MEM_ERROR] = "Memory error",
200 [INT_ILL] = "Illegal instruction",
201 [INT_GPV] = "General protection violation",
202 [INT_UDN_ACCESS] = "UDN access",
203 [INT_IDN_ACCESS] = "IDN access",
204#if CHIP_HAS_SN()
205 [INT_SN_ACCESS] = "SN access",
206#endif
207 [INT_SWINT_3] = "Software interrupt 3",
208 [INT_SWINT_2] = "Software interrupt 2",
209 [INT_SWINT_0] = "Software interrupt 0",
210 [INT_UNALIGN_DATA] = "Unaligned data",
211 [INT_DOUBLE_FAULT] = "Double fault",
212#ifdef __tilegx__
213 [INT_ILL_TRANS] = "Illegal virtual address",
214#endif
215};
216
198void __kprobes do_trap(struct pt_regs *regs, int fault_num, 217void __kprobes do_trap(struct pt_regs *regs, int fault_num,
199 unsigned long reason) 218 unsigned long reason)
200{ 219{
@@ -211,10 +230,17 @@ void __kprobes do_trap(struct pt_regs *regs, int fault_num,
211 * current process and hope for the best. 230 * current process and hope for the best.
212 */ 231 */
213 if (!user_mode(regs)) { 232 if (!user_mode(regs)) {
233 const char *name;
214 if (fixup_exception(regs)) /* only UNALIGN_DATA in practice */ 234 if (fixup_exception(regs)) /* only UNALIGN_DATA in practice */
215 return; 235 return;
216 pr_alert("Kernel took bad trap %d at PC %#lx\n", 236 if (fault_num >= 0 &&
217 fault_num, regs->pc); 237 fault_num < sizeof(int_name)/sizeof(int_name[0]) &&
238 int_name[fault_num] != NULL)
239 name = int_name[fault_num];
240 else
241 name = "Unknown interrupt";
242 pr_alert("Kernel took bad trap %d (%s) at PC %#lx\n",
243 fault_num, name, regs->pc);
218 if (fault_num == INT_GPV) 244 if (fault_num == INT_GPV)
219 pr_alert("GPV_REASON is %#lx\n", reason); 245 pr_alert("GPV_REASON is %#lx\n", reason);
220 show_regs(regs); 246 show_regs(regs);
diff --git a/arch/tile/lib/atomic_32.c b/arch/tile/lib/atomic_32.c
index 771b251b409d..f5cada70c3c8 100644
--- a/arch/tile/lib/atomic_32.c
+++ b/arch/tile/lib/atomic_32.c
@@ -18,7 +18,6 @@
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/mm.h> 19#include <linux/mm.h>
20#include <linux/atomic.h> 20#include <linux/atomic.h>
21#include <asm/futex.h>
22#include <arch/chip.h> 21#include <arch/chip.h>
23 22
24/* See <asm/atomic_32.h> */ 23/* See <asm/atomic_32.h> */
@@ -50,7 +49,7 @@ int atomic_locks[PAGE_SIZE / sizeof(int)] __page_aligned_bss;
50 49
51#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ 50#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
52 51
53static inline int *__atomic_hashed_lock(volatile void *v) 52int *__atomic_hashed_lock(volatile void *v)
54{ 53{
55 /* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */ 54 /* NOTE: this code must match "sys_cmpxchg" in kernel/intvec_32.S */
56#if ATOMIC_LOCKS_FOUND_VIA_TABLE() 55#if ATOMIC_LOCKS_FOUND_VIA_TABLE()
@@ -191,47 +190,6 @@ u64 _atomic64_cmpxchg(atomic64_t *v, u64 o, u64 n)
191EXPORT_SYMBOL(_atomic64_cmpxchg); 190EXPORT_SYMBOL(_atomic64_cmpxchg);
192 191
193 192
194static inline int *__futex_setup(int __user *v)
195{
196 /*
197 * Issue a prefetch to the counter to bring it into cache.
198 * As for __atomic_setup, but we can't do a read into the L1
199 * since it might fault; instead we do a prefetch into the L2.
200 */
201 __insn_prefetch(v);
202 return __atomic_hashed_lock((int __force *)v);
203}
204
205struct __get_user futex_set(u32 __user *v, int i)
206{
207 return __atomic_xchg((int __force *)v, __futex_setup(v), i);
208}
209
210struct __get_user futex_add(u32 __user *v, int n)
211{
212 return __atomic_xchg_add((int __force *)v, __futex_setup(v), n);
213}
214
215struct __get_user futex_or(u32 __user *v, int n)
216{
217 return __atomic_or((int __force *)v, __futex_setup(v), n);
218}
219
220struct __get_user futex_andn(u32 __user *v, int n)
221{
222 return __atomic_andn((int __force *)v, __futex_setup(v), n);
223}
224
225struct __get_user futex_xor(u32 __user *v, int n)
226{
227 return __atomic_xor((int __force *)v, __futex_setup(v), n);
228}
229
230struct __get_user futex_cmpxchg(u32 __user *v, int o, int n)
231{
232 return __atomic_cmpxchg((int __force *)v, __futex_setup(v), o, n);
233}
234
235/* 193/*
236 * If any of the atomic or futex routines hit a bad address (not in 194 * If any of the atomic or futex routines hit a bad address (not in
237 * the page tables at kernel PL) this routine is called. The futex 195 * the page tables at kernel PL) this routine is called. The futex
@@ -323,7 +281,4 @@ void __init __init_atomic_per_cpu(void)
323 BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE); 281 BUILD_BUG_ON((PAGE_SIZE >> 3) > ATOMIC_HASH_SIZE);
324 282
325#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */ 283#endif /* ATOMIC_LOCKS_FOUND_VIA_TABLE() */
326
327 /* The futex code makes this assumption, so we validate it here. */
328 BUILD_BUG_ON(sizeof(atomic_t) != sizeof(int));
329} 284}
diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c
index 2a81d32de0da..dd5f0a33fdaf 100644
--- a/arch/tile/lib/exports.c
+++ b/arch/tile/lib/exports.c
@@ -18,14 +18,6 @@
18 18
19/* arch/tile/lib/usercopy.S */ 19/* arch/tile/lib/usercopy.S */
20#include <linux/uaccess.h> 20#include <linux/uaccess.h>
21EXPORT_SYMBOL(__get_user_1);
22EXPORT_SYMBOL(__get_user_2);
23EXPORT_SYMBOL(__get_user_4);
24EXPORT_SYMBOL(__get_user_8);
25EXPORT_SYMBOL(__put_user_1);
26EXPORT_SYMBOL(__put_user_2);
27EXPORT_SYMBOL(__put_user_4);
28EXPORT_SYMBOL(__put_user_8);
29EXPORT_SYMBOL(strnlen_user_asm); 21EXPORT_SYMBOL(strnlen_user_asm);
30EXPORT_SYMBOL(strncpy_from_user_asm); 22EXPORT_SYMBOL(strncpy_from_user_asm);
31EXPORT_SYMBOL(clear_user_asm); 23EXPORT_SYMBOL(clear_user_asm);
diff --git a/arch/tile/lib/memchr_64.c b/arch/tile/lib/memchr_64.c
index 84fdc8d8e735..6f867dbf7c56 100644
--- a/arch/tile/lib/memchr_64.c
+++ b/arch/tile/lib/memchr_64.c
@@ -15,6 +15,7 @@
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#include "string-endian.h"
18 19
19void *memchr(const void *s, int c, size_t n) 20void *memchr(const void *s, int c, size_t n)
20{ 21{
@@ -39,11 +40,8 @@ void *memchr(const void *s, int c, size_t n)
39 40
40 /* Read the first word, but munge it so that bytes before the array 41 /* Read the first word, but munge it so that bytes before the array
41 * will not match goal. 42 * will not match goal.
42 *
43 * Note that this shift count expression works because we know
44 * shift counts are taken mod 64.
45 */ 43 */
46 before_mask = (1ULL << (s_int << 3)) - 1; 44 before_mask = MASK(s_int);
47 v = (*p | before_mask) ^ (goal & before_mask); 45 v = (*p | before_mask) ^ (goal & before_mask);
48 46
49 /* Compute the address of the last byte. */ 47 /* Compute the address of the last byte. */
@@ -65,7 +63,7 @@ void *memchr(const void *s, int c, size_t n)
65 /* We found a match, but it might be in a byte past the end 63 /* We found a match, but it might be in a byte past the end
66 * of the array. 64 * of the array.
67 */ 65 */
68 ret = ((char *)p) + (__insn_ctz(bits) >> 3); 66 ret = ((char *)p) + (CFZ(bits) >> 3);
69 return (ret <= last_byte_ptr) ? ret : NULL; 67 return (ret <= last_byte_ptr) ? ret : NULL;
70} 68}
71EXPORT_SYMBOL(memchr); 69EXPORT_SYMBOL(memchr);
diff --git a/arch/tile/lib/memcpy_64.c b/arch/tile/lib/memcpy_64.c
index 3fab9a6a2bbe..c79b8e7c6828 100644
--- a/arch/tile/lib/memcpy_64.c
+++ b/arch/tile/lib/memcpy_64.c
@@ -15,7 +15,6 @@
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/module.h> 17#include <linux/module.h>
18#define __memcpy memcpy
19/* EXPORT_SYMBOL() is in arch/tile/lib/exports.c since this should be asm. */ 18/* EXPORT_SYMBOL() is in arch/tile/lib/exports.c since this should be asm. */
20 19
21/* Must be 8 bytes in size. */ 20/* Must be 8 bytes in size. */
@@ -188,6 +187,7 @@ int USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n)
188 187
189 /* n != 0 if we get here. Write out any trailing bytes. */ 188 /* n != 0 if we get here. Write out any trailing bytes. */
190 dst1 = (char *)dst8; 189 dst1 = (char *)dst8;
190#ifndef __BIG_ENDIAN__
191 if (n & 4) { 191 if (n & 4) {
192 ST4((uint32_t *)dst1, final); 192 ST4((uint32_t *)dst1, final);
193 dst1 += 4; 193 dst1 += 4;
@@ -202,11 +202,30 @@ int USERCOPY_FUNC(void *__restrict dstv, const void *__restrict srcv, size_t n)
202 } 202 }
203 if (n) 203 if (n)
204 ST1((uint8_t *)dst1, final); 204 ST1((uint8_t *)dst1, final);
205#else
206 if (n & 4) {
207 ST4((uint32_t *)dst1, final >> 32);
208 dst1 += 4;
209 }
210 else
211 {
212 final >>= 32;
213 }
214 if (n & 2) {
215 ST2((uint16_t *)dst1, final >> 16);
216 dst1 += 2;
217 }
218 else
219 {
220 final >>= 16;
221 }
222 if (n & 1)
223 ST1((uint8_t *)dst1, final >> 8);
224#endif
205 225
206 return RETVAL; 226 return RETVAL;
207} 227}
208 228
209
210#ifdef USERCOPY_FUNC 229#ifdef USERCOPY_FUNC
211#undef ST1 230#undef ST1
212#undef ST2 231#undef ST2
diff --git a/arch/tile/lib/memcpy_tile64.c b/arch/tile/lib/memcpy_tile64.c
index b2fe15e01075..3bc4b4e40d93 100644
--- a/arch/tile/lib/memcpy_tile64.c
+++ b/arch/tile/lib/memcpy_tile64.c
@@ -160,7 +160,7 @@ retry_source:
160 break; 160 break;
161 if (get_remote_cache_cpu(src_pte) == smp_processor_id()) 161 if (get_remote_cache_cpu(src_pte) == smp_processor_id())
162 break; 162 break;
163 src_page = pfn_to_page(hv_pte_get_pfn(src_pte)); 163 src_page = pfn_to_page(pte_pfn(src_pte));
164 get_page(src_page); 164 get_page(src_page);
165 if (pte_val(src_pte) != pte_val(*src_ptep)) { 165 if (pte_val(src_pte) != pte_val(*src_ptep)) {
166 put_page(src_page); 166 put_page(src_page);
@@ -168,7 +168,7 @@ retry_source:
168 } 168 }
169 if (pte_huge(src_pte)) { 169 if (pte_huge(src_pte)) {
170 /* Adjust the PTE to correspond to a small page */ 170 /* Adjust the PTE to correspond to a small page */
171 int pfn = hv_pte_get_pfn(src_pte); 171 int pfn = pte_pfn(src_pte);
172 pfn += (((unsigned long)source & (HPAGE_SIZE-1)) 172 pfn += (((unsigned long)source & (HPAGE_SIZE-1))
173 >> PAGE_SHIFT); 173 >> PAGE_SHIFT);
174 src_pte = pfn_pte(pfn, src_pte); 174 src_pte = pfn_pte(pfn, src_pte);
@@ -188,7 +188,7 @@ retry_dest:
188 put_page(src_page); 188 put_page(src_page);
189 break; 189 break;
190 } 190 }
191 dst_page = pfn_to_page(hv_pte_get_pfn(dst_pte)); 191 dst_page = pfn_to_page(pte_pfn(dst_pte));
192 if (dst_page == src_page) { 192 if (dst_page == src_page) {
193 /* 193 /*
194 * Source and dest are on the same page; this 194 * Source and dest are on the same page; this
@@ -206,7 +206,7 @@ retry_dest:
206 } 206 }
207 if (pte_huge(dst_pte)) { 207 if (pte_huge(dst_pte)) {
208 /* Adjust the PTE to correspond to a small page */ 208 /* Adjust the PTE to correspond to a small page */
209 int pfn = hv_pte_get_pfn(dst_pte); 209 int pfn = pte_pfn(dst_pte);
210 pfn += (((unsigned long)dest & (HPAGE_SIZE-1)) 210 pfn += (((unsigned long)dest & (HPAGE_SIZE-1))
211 >> PAGE_SHIFT); 211 >> PAGE_SHIFT);
212 dst_pte = pfn_pte(pfn, dst_pte); 212 dst_pte = pfn_pte(pfn, dst_pte);
diff --git a/arch/tile/lib/strchr_64.c b/arch/tile/lib/strchr_64.c
index 617a9273aaa8..f39f9dc422b0 100644
--- a/arch/tile/lib/strchr_64.c
+++ b/arch/tile/lib/strchr_64.c
@@ -15,8 +15,7 @@
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/module.h> 17#include <linux/module.h>
18 18#include "string-endian.h"
19#undef strchr
20 19
21char *strchr(const char *s, int c) 20char *strchr(const char *s, int c)
22{ 21{
@@ -33,13 +32,9 @@ char *strchr(const char *s, int c)
33 * match neither zero nor goal (we make sure the high bit of each 32 * match neither zero nor goal (we make sure the high bit of each
34 * byte is 1, and the low 7 bits are all the opposite of the goal 33 * byte is 1, and the low 7 bits are all the opposite of the goal
35 * byte). 34 * byte).
36 *
37 * Note that this shift count expression works because we know shift
38 * counts are taken mod 64.
39 */ 35 */
40 const uint64_t before_mask = (1ULL << (s_int << 3)) - 1; 36 const uint64_t before_mask = MASK(s_int);
41 uint64_t v = (*p | before_mask) ^ 37 uint64_t v = (*p | before_mask) ^ (goal & __insn_v1shrui(before_mask, 1));
42 (goal & __insn_v1shrsi(before_mask, 1));
43 38
44 uint64_t zero_matches, goal_matches; 39 uint64_t zero_matches, goal_matches;
45 while (1) { 40 while (1) {
@@ -55,8 +50,8 @@ char *strchr(const char *s, int c)
55 v = *++p; 50 v = *++p;
56 } 51 }
57 52
58 z = __insn_ctz(zero_matches); 53 z = CFZ(zero_matches);
59 g = __insn_ctz(goal_matches); 54 g = CFZ(goal_matches);
60 55
61 /* If we found c before '\0' we got a match. Note that if c == '\0' 56 /* If we found c before '\0' we got a match. Note that if c == '\0'
62 * then g == z, and we correctly return the address of the '\0' 57 * then g == z, and we correctly return the address of the '\0'
diff --git a/arch/tile/lib/string-endian.h b/arch/tile/lib/string-endian.h
new file mode 100644
index 000000000000..c0eed7ce69c3
--- /dev/null
+++ b/arch/tile/lib/string-endian.h
@@ -0,0 +1,33 @@
1/*
2 * Copyright 2011 Tilera Corporation. All Rights Reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation, version 2.
7 *
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
11 * NON INFRINGEMENT. See the GNU General Public License for
12 * more details.
13 *
14 * Provide a mask based on the pointer alignment that
15 * sets up non-zero bytes before the beginning of the string.
16 * The MASK expression works because shift counts are taken mod 64.
17 * Also, specify how to count "first" and "last" bits
18 * when the bits have been read as a word.
19 */
20
21#include <asm/byteorder.h>
22
23#ifdef __LITTLE_ENDIAN
24#define MASK(x) (__insn_shl(1ULL, (x << 3)) - 1)
25#define NULMASK(x) ((2ULL << x) - 1)
26#define CFZ(x) __insn_ctz(x)
27#define REVCZ(x) __insn_clz(x)
28#else
29#define MASK(x) (__insn_shl(-2LL, ((-x << 3) - 1)))
30#define NULMASK(x) (-2LL << (63 - x))
31#define CFZ(x) __insn_clz(x)
32#define REVCZ(x) __insn_ctz(x)
33#endif
diff --git a/arch/tile/lib/strlen_64.c b/arch/tile/lib/strlen_64.c
index 1c92d46202a8..9583fc3361fa 100644
--- a/arch/tile/lib/strlen_64.c
+++ b/arch/tile/lib/strlen_64.c
@@ -15,8 +15,7 @@
15#include <linux/types.h> 15#include <linux/types.h>
16#include <linux/string.h> 16#include <linux/string.h>
17#include <linux/module.h> 17#include <linux/module.h>
18 18#include "string-endian.h"
19#undef strlen
20 19
21size_t strlen(const char *s) 20size_t strlen(const char *s)
22{ 21{
@@ -24,15 +23,13 @@ size_t strlen(const char *s)
24 const uintptr_t s_int = (uintptr_t) s; 23 const uintptr_t s_int = (uintptr_t) s;
25 const uint64_t *p = (const uint64_t *)(s_int & -8); 24 const uint64_t *p = (const uint64_t *)(s_int & -8);
26 25
27 /* Read the first word, but force bytes before the string to be nonzero. 26 /* Read and MASK the first word. */
28 * This expression works because we know shift counts are taken mod 64. 27 uint64_t v = *p | MASK(s_int);
29 */
30 uint64_t v = *p | ((1ULL << (s_int << 3)) - 1);
31 28
32 uint64_t bits; 29 uint64_t bits;
33 while ((bits = __insn_v1cmpeqi(v, 0)) == 0) 30 while ((bits = __insn_v1cmpeqi(v, 0)) == 0)
34 v = *++p; 31 v = *++p;
35 32
36 return ((const char *)p) + (__insn_ctz(bits) >> 3) - s; 33 return ((const char *)p) + (CFZ(bits) >> 3) - s;
37} 34}
38EXPORT_SYMBOL(strlen); 35EXPORT_SYMBOL(strlen);
diff --git a/arch/tile/lib/usercopy_32.S b/arch/tile/lib/usercopy_32.S
index 979f76d83746..b62d002af009 100644
--- a/arch/tile/lib/usercopy_32.S
+++ b/arch/tile/lib/usercopy_32.S
@@ -19,82 +19,6 @@
19 19
20/* Access user memory, but use MMU to avoid propagating kernel exceptions. */ 20/* Access user memory, but use MMU to avoid propagating kernel exceptions. */
21 21
22 .pushsection .fixup,"ax"
23
24get_user_fault:
25 { move r0, zero; move r1, zero }
26 { movei r2, -EFAULT; jrp lr }
27 ENDPROC(get_user_fault)
28
29put_user_fault:
30 { movei r0, -EFAULT; jrp lr }
31 ENDPROC(put_user_fault)
32
33 .popsection
34
35/*
36 * __get_user_N functions take a pointer in r0, and return 0 in r2
37 * on success, with the value in r0; or else -EFAULT in r2.
38 */
39#define __get_user_N(bytes, LOAD) \
40 STD_ENTRY(__get_user_##bytes); \
411: { LOAD r0, r0; move r1, zero; move r2, zero }; \
42 jrp lr; \
43 STD_ENDPROC(__get_user_##bytes); \
44 .pushsection __ex_table,"a"; \
45 .word 1b, get_user_fault; \
46 .popsection
47
48__get_user_N(1, lb_u)
49__get_user_N(2, lh_u)
50__get_user_N(4, lw)
51
52/*
53 * __get_user_8 takes a pointer in r0, and returns 0 in r2
54 * on success, with the value in r0/r1; or else -EFAULT in r2.
55 */
56 STD_ENTRY(__get_user_8);
571: { lw r0, r0; addi r1, r0, 4 };
582: { lw r1, r1; move r2, zero };
59 jrp lr;
60 STD_ENDPROC(__get_user_8);
61 .pushsection __ex_table,"a";
62 .word 1b, get_user_fault;
63 .word 2b, get_user_fault;
64 .popsection
65
66/*
67 * __put_user_N functions take a value in r0 and a pointer in r1,
68 * and return 0 in r0 on success or -EFAULT on failure.
69 */
70#define __put_user_N(bytes, STORE) \
71 STD_ENTRY(__put_user_##bytes); \
721: { STORE r1, r0; move r0, zero }; \
73 jrp lr; \
74 STD_ENDPROC(__put_user_##bytes); \
75 .pushsection __ex_table,"a"; \
76 .word 1b, put_user_fault; \
77 .popsection
78
79__put_user_N(1, sb)
80__put_user_N(2, sh)
81__put_user_N(4, sw)
82
83/*
84 * __put_user_8 takes a value in r0/r1 and a pointer in r2,
85 * and returns 0 in r0 on success or -EFAULT on failure.
86 */
87STD_ENTRY(__put_user_8)
881: { sw r2, r0; addi r2, r2, 4 }
892: { sw r2, r1; move r0, zero }
90 jrp lr
91 STD_ENDPROC(__put_user_8)
92 .pushsection __ex_table,"a"
93 .word 1b, put_user_fault
94 .word 2b, put_user_fault
95 .popsection
96
97
98/* 22/*
99 * strnlen_user_asm takes the pointer in r0, and the length bound in r1. 23 * strnlen_user_asm takes the pointer in r0, and the length bound in r1.
100 * It returns the length, including the terminating NUL, or zero on exception. 24 * It returns the length, including the terminating NUL, or zero on exception.
diff --git a/arch/tile/lib/usercopy_64.S b/arch/tile/lib/usercopy_64.S
index 2ff44f87b78e..adb2dbbc70cd 100644
--- a/arch/tile/lib/usercopy_64.S
+++ b/arch/tile/lib/usercopy_64.S
@@ -19,55 +19,6 @@
19 19
20/* Access user memory, but use MMU to avoid propagating kernel exceptions. */ 20/* Access user memory, but use MMU to avoid propagating kernel exceptions. */
21 21
22 .pushsection .fixup,"ax"
23
24get_user_fault:
25 { movei r1, -EFAULT; move r0, zero }
26 jrp lr
27 ENDPROC(get_user_fault)
28
29put_user_fault:
30 { movei r0, -EFAULT; jrp lr }
31 ENDPROC(put_user_fault)
32
33 .popsection
34
35/*
36 * __get_user_N functions take a pointer in r0, and return 0 in r1
37 * on success, with the value in r0; or else -EFAULT in r1.
38 */
39#define __get_user_N(bytes, LOAD) \
40 STD_ENTRY(__get_user_##bytes); \
411: { LOAD r0, r0; move r1, zero }; \
42 jrp lr; \
43 STD_ENDPROC(__get_user_##bytes); \
44 .pushsection __ex_table,"a"; \
45 .quad 1b, get_user_fault; \
46 .popsection
47
48__get_user_N(1, ld1u)
49__get_user_N(2, ld2u)
50__get_user_N(4, ld4u)
51__get_user_N(8, ld)
52
53/*
54 * __put_user_N functions take a value in r0 and a pointer in r1,
55 * and return 0 in r0 on success or -EFAULT on failure.
56 */
57#define __put_user_N(bytes, STORE) \
58 STD_ENTRY(__put_user_##bytes); \
591: { STORE r1, r0; move r0, zero }; \
60 jrp lr; \
61 STD_ENDPROC(__put_user_##bytes); \
62 .pushsection __ex_table,"a"; \
63 .quad 1b, put_user_fault; \
64 .popsection
65
66__put_user_N(1, st1)
67__put_user_N(2, st2)
68__put_user_N(4, st4)
69__put_user_N(8, st)
70
71/* 22/*
72 * strnlen_user_asm takes the pointer in r0, and the length bound in r1. 23 * strnlen_user_asm takes the pointer in r0, and the length bound in r1.
73 * It returns the length, including the terminating NUL, or zero on exception. 24 * It returns the length, including the terminating NUL, or zero on exception.
diff --git a/arch/tile/mm/fault.c b/arch/tile/mm/fault.c
index 22e58f51ed23..84ce7abbf5af 100644
--- a/arch/tile/mm/fault.c
+++ b/arch/tile/mm/fault.c
@@ -187,7 +187,7 @@ static pgd_t *get_current_pgd(void)
187 HV_Context ctx = hv_inquire_context(); 187 HV_Context ctx = hv_inquire_context();
188 unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT; 188 unsigned long pgd_pfn = ctx.page_table >> PAGE_SHIFT;
189 struct page *pgd_page = pfn_to_page(pgd_pfn); 189 struct page *pgd_page = pfn_to_page(pgd_pfn);
190 BUG_ON(PageHighMem(pgd_page)); /* oops, HIGHPTE? */ 190 BUG_ON(PageHighMem(pgd_page));
191 return (pgd_t *) __va(ctx.page_table); 191 return (pgd_t *) __va(ctx.page_table);
192} 192}
193 193
@@ -273,11 +273,15 @@ static int handle_page_fault(struct pt_regs *regs,
273 int si_code; 273 int si_code;
274 int is_kernel_mode; 274 int is_kernel_mode;
275 pgd_t *pgd; 275 pgd_t *pgd;
276 unsigned int flags;
276 277
277 /* on TILE, protection faults are always writes */ 278 /* on TILE, protection faults are always writes */
278 if (!is_page_fault) 279 if (!is_page_fault)
279 write = 1; 280 write = 1;
280 281
282 flags = (FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
283 (write ? FAULT_FLAG_WRITE : 0));
284
281 is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL); 285 is_kernel_mode = (EX1_PL(regs->ex1) != USER_PL);
282 286
283 tsk = validate_current(); 287 tsk = validate_current();
@@ -382,6 +386,8 @@ static int handle_page_fault(struct pt_regs *regs,
382 vma = NULL; /* happy compiler */ 386 vma = NULL; /* happy compiler */
383 goto bad_area_nosemaphore; 387 goto bad_area_nosemaphore;
384 } 388 }
389
390retry:
385 down_read(&mm->mmap_sem); 391 down_read(&mm->mmap_sem);
386 } 392 }
387 393
@@ -429,7 +435,11 @@ good_area:
429 * make sure we exit gracefully rather than endlessly redo 435 * make sure we exit gracefully rather than endlessly redo
430 * the fault. 436 * the fault.
431 */ 437 */
432 fault = handle_mm_fault(mm, vma, address, write); 438 fault = handle_mm_fault(mm, vma, address, flags);
439
440 if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
441 return 0;
442
433 if (unlikely(fault & VM_FAULT_ERROR)) { 443 if (unlikely(fault & VM_FAULT_ERROR)) {
434 if (fault & VM_FAULT_OOM) 444 if (fault & VM_FAULT_OOM)
435 goto out_of_memory; 445 goto out_of_memory;
@@ -437,10 +447,22 @@ good_area:
437 goto do_sigbus; 447 goto do_sigbus;
438 BUG(); 448 BUG();
439 } 449 }
440 if (fault & VM_FAULT_MAJOR) 450 if (flags & FAULT_FLAG_ALLOW_RETRY) {
441 tsk->maj_flt++; 451 if (fault & VM_FAULT_MAJOR)
442 else 452 tsk->maj_flt++;
443 tsk->min_flt++; 453 else
454 tsk->min_flt++;
455 if (fault & VM_FAULT_RETRY) {
456 flags &= ~FAULT_FLAG_ALLOW_RETRY;
457
458 /*
459 * No need to up_read(&mm->mmap_sem) as we would
460 * have already released it in __lock_page_or_retry
461 * in mm/filemap.c.
462 */
463 goto retry;
464 }
465 }
444 466
445#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC() 467#if CHIP_HAS_TILE_DMA() || CHIP_HAS_SN_PROC()
446 /* 468 /*
diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 499f73770b05..dbcbdf7b8aa8 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -30,6 +30,7 @@
30#include <linux/cache.h> 30#include <linux/cache.h>
31#include <linux/smp.h> 31#include <linux/smp.h>
32#include <linux/module.h> 32#include <linux/module.h>
33#include <linux/hugetlb.h>
33 34
34#include <asm/page.h> 35#include <asm/page.h>
35#include <asm/sections.h> 36#include <asm/sections.h>
diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
index 42cfcba4e1ef..812e2d037972 100644
--- a/arch/tile/mm/hugetlbpage.c
+++ b/arch/tile/mm/hugetlbpage.c
@@ -27,85 +27,161 @@
27#include <linux/mman.h> 27#include <linux/mman.h>
28#include <asm/tlb.h> 28#include <asm/tlb.h>
29#include <asm/tlbflush.h> 29#include <asm/tlbflush.h>
30#include <asm/setup.h>
31
32#ifdef CONFIG_HUGETLB_SUPER_PAGES
33
34/*
35 * Provide an additional huge page size (in addition to the regular default
36 * huge page size) if no "hugepagesz" arguments are specified.
37 * Note that it must be smaller than the default huge page size so
38 * that it's possible to allocate them on demand from the buddy allocator.
39 * You can change this to 64K (on a 16K build), 256K, 1M, or 4M,
40 * or not define it at all.
41 */
42#define ADDITIONAL_HUGE_SIZE (1024 * 1024UL)
43
44/* "Extra" page-size multipliers, one per level of the page table. */
45int huge_shift[HUGE_SHIFT_ENTRIES] = {
46#ifdef ADDITIONAL_HUGE_SIZE
47#define ADDITIONAL_HUGE_SHIFT __builtin_ctzl(ADDITIONAL_HUGE_SIZE / PAGE_SIZE)
48 [HUGE_SHIFT_PAGE] = ADDITIONAL_HUGE_SHIFT
49#endif
50};
51
52/*
53 * This routine is a hybrid of pte_alloc_map() and pte_alloc_kernel().
54 * It assumes that L2 PTEs are never in HIGHMEM (we don't support that).
55 * It locks the user pagetable, and bumps up the mm->nr_ptes field,
56 * but otherwise allocate the page table using the kernel versions.
57 */
58static pte_t *pte_alloc_hugetlb(struct mm_struct *mm, pmd_t *pmd,
59 unsigned long address)
60{
61 pte_t *new;
62
63 if (pmd_none(*pmd)) {
64 new = pte_alloc_one_kernel(mm, address);
65 if (!new)
66 return NULL;
67
68 smp_wmb(); /* See comment in __pte_alloc */
69
70 spin_lock(&mm->page_table_lock);
71 if (likely(pmd_none(*pmd))) { /* Has another populated it ? */
72 mm->nr_ptes++;
73 pmd_populate_kernel(mm, pmd, new);
74 new = NULL;
75 } else
76 VM_BUG_ON(pmd_trans_splitting(*pmd));
77 spin_unlock(&mm->page_table_lock);
78 if (new)
79 pte_free_kernel(mm, new);
80 }
81
82 return pte_offset_kernel(pmd, address);
83}
84#endif
30 85
31pte_t *huge_pte_alloc(struct mm_struct *mm, 86pte_t *huge_pte_alloc(struct mm_struct *mm,
32 unsigned long addr, unsigned long sz) 87 unsigned long addr, unsigned long sz)
33{ 88{
34 pgd_t *pgd; 89 pgd_t *pgd;
35 pud_t *pud; 90 pud_t *pud;
36 pte_t *pte = NULL;
37 91
38 /* We do not yet support multiple huge page sizes. */ 92 addr &= -sz; /* Mask off any low bits in the address. */
39 BUG_ON(sz != PMD_SIZE);
40 93
41 pgd = pgd_offset(mm, addr); 94 pgd = pgd_offset(mm, addr);
42 pud = pud_alloc(mm, pgd, addr); 95 pud = pud_alloc(mm, pgd, addr);
43 if (pud)
44 pte = (pte_t *) pmd_alloc(mm, pud, addr);
45 BUG_ON(pte && !pte_none(*pte) && !pte_huge(*pte));
46 96
47 return pte; 97#ifdef CONFIG_HUGETLB_SUPER_PAGES
98 if (sz >= PGDIR_SIZE) {
99 BUG_ON(sz != PGDIR_SIZE &&
100 sz != PGDIR_SIZE << huge_shift[HUGE_SHIFT_PGDIR]);
101 return (pte_t *)pud;
102 } else {
103 pmd_t *pmd = pmd_alloc(mm, pud, addr);
104 if (sz >= PMD_SIZE) {
105 BUG_ON(sz != PMD_SIZE &&
106 sz != (PMD_SIZE << huge_shift[HUGE_SHIFT_PMD]));
107 return (pte_t *)pmd;
108 }
109 else {
110 if (sz != PAGE_SIZE << huge_shift[HUGE_SHIFT_PAGE])
111 panic("Unexpected page size %#lx\n", sz);
112 return pte_alloc_hugetlb(mm, pmd, addr);
113 }
114 }
115#else
116 BUG_ON(sz != PMD_SIZE);
117 return (pte_t *) pmd_alloc(mm, pud, addr);
118#endif
48} 119}
49 120
50pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr) 121static pte_t *get_pte(pte_t *base, int index, int level)
51{ 122{
52 pgd_t *pgd; 123 pte_t *ptep = base + index;
53 pud_t *pud; 124#ifdef CONFIG_HUGETLB_SUPER_PAGES
54 pmd_t *pmd = NULL; 125 if (!pte_present(*ptep) && huge_shift[level] != 0) {
55 126 unsigned long mask = -1UL << huge_shift[level];
56 pgd = pgd_offset(mm, addr); 127 pte_t *super_ptep = base + (index & mask);
57 if (pgd_present(*pgd)) { 128 pte_t pte = *super_ptep;
58 pud = pud_offset(pgd, addr); 129 if (pte_present(pte) && pte_super(pte))
59 if (pud_present(*pud)) 130 ptep = super_ptep;
60 pmd = pmd_offset(pud, addr);
61 } 131 }
62 return (pte_t *) pmd; 132#endif
133 return ptep;
63} 134}
64 135
65#ifdef HUGETLB_TEST 136pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
66struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
67 int write)
68{ 137{
69 unsigned long start = address; 138 pgd_t *pgd;
70 int length = 1; 139 pud_t *pud;
71 int nr; 140 pmd_t *pmd;
72 struct page *page; 141#ifdef CONFIG_HUGETLB_SUPER_PAGES
73 struct vm_area_struct *vma; 142 pte_t *pte;
74 143#endif
75 vma = find_vma(mm, addr);
76 if (!vma || !is_vm_hugetlb_page(vma))
77 return ERR_PTR(-EINVAL);
78
79 pte = huge_pte_offset(mm, address);
80 144
81 /* hugetlb should be locked, and hence, prefaulted */ 145 /* Get the top-level page table entry. */
82 WARN_ON(!pte || pte_none(*pte)); 146 pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);
147 if (!pgd_present(*pgd))
148 return NULL;
83 149
84 page = &pte_page(*pte)[vpfn % (HPAGE_SIZE/PAGE_SIZE)]; 150 /* We don't have four levels. */
151 pud = pud_offset(pgd, addr);
152#ifndef __PAGETABLE_PUD_FOLDED
153# error support fourth page table level
154#endif
85 155
86 WARN_ON(!PageHead(page)); 156 /* Check for an L0 huge PTE, if we have three levels. */
157#ifndef __PAGETABLE_PMD_FOLDED
158 if (pud_huge(*pud))
159 return (pte_t *)pud;
87 160
88 return page; 161 pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
89} 162 pmd_index(addr), 1);
90 163 if (!pmd_present(*pmd))
91int pmd_huge(pmd_t pmd) 164 return NULL;
92{ 165#else
93 return 0; 166 pmd = pmd_offset(pud, addr);
94} 167#endif
95 168
96int pud_huge(pud_t pud) 169 /* Check for an L1 huge PTE. */
97{ 170 if (pmd_huge(*pmd))
98 return 0; 171 return (pte_t *)pmd;
99} 172
173#ifdef CONFIG_HUGETLB_SUPER_PAGES
174 /* Check for an L2 huge PTE. */
175 pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
176 if (!pte_present(*pte))
177 return NULL;
178 if (pte_super(*pte))
179 return pte;
180#endif
100 181
101struct page *follow_huge_pmd(struct mm_struct *mm, unsigned long address,
102 pmd_t *pmd, int write)
103{
104 return NULL; 182 return NULL;
105} 183}
106 184
107#else
108
109struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address, 185struct page *follow_huge_addr(struct mm_struct *mm, unsigned long address,
110 int write) 186 int write)
111{ 187{
@@ -149,8 +225,6 @@ int huge_pmd_unshare(struct mm_struct *mm, unsigned long *addr, pte_t *ptep)
149 return 0; 225 return 0;
150} 226}
151 227
152#endif
153
154#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA 228#ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
155static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file, 229static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
156 unsigned long addr, unsigned long len, 230 unsigned long addr, unsigned long len,
@@ -322,21 +396,102 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
322 return hugetlb_get_unmapped_area_topdown(file, addr, len, 396 return hugetlb_get_unmapped_area_topdown(file, addr, len,
323 pgoff, flags); 397 pgoff, flags);
324} 398}
399#endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */
325 400
326static __init int setup_hugepagesz(char *opt) 401#ifdef CONFIG_HUGETLB_SUPER_PAGES
402static __init int __setup_hugepagesz(unsigned long ps)
327{ 403{
328 unsigned long ps = memparse(opt, &opt); 404 int log_ps = __builtin_ctzl(ps);
329 if (ps == PMD_SIZE) { 405 int level, base_shift;
330 hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); 406
331 } else if (ps == PUD_SIZE) { 407 if ((1UL << log_ps) != ps || (log_ps & 1) != 0) {
332 hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); 408 pr_warn("Not enabling %ld byte huge pages;"
409 " must be a power of four.\n", ps);
410 return -EINVAL;
411 }
412
413 if (ps > 64*1024*1024*1024UL) {
414 pr_warn("Not enabling %ld MB huge pages;"
415 " largest legal value is 64 GB .\n", ps >> 20);
416 return -EINVAL;
417 } else if (ps >= PUD_SIZE) {
418 static long hv_jpage_size;
419 if (hv_jpage_size == 0)
420 hv_jpage_size = hv_sysconf(HV_SYSCONF_PAGE_SIZE_JUMBO);
421 if (hv_jpage_size != PUD_SIZE) {
422 pr_warn("Not enabling >= %ld MB huge pages:"
423 " hypervisor reports size %ld\n",
424 PUD_SIZE >> 20, hv_jpage_size);
425 return -EINVAL;
426 }
427 level = 0;
428 base_shift = PUD_SHIFT;
429 } else if (ps >= PMD_SIZE) {
430 level = 1;
431 base_shift = PMD_SHIFT;
432 } else if (ps > PAGE_SIZE) {
433 level = 2;
434 base_shift = PAGE_SHIFT;
333 } else { 435 } else {
334 pr_err("hugepagesz: Unsupported page size %lu M\n", 436 pr_err("hugepagesz: huge page size %ld too small\n", ps);
335 ps >> 20); 437 return -EINVAL;
336 return 0;
337 } 438 }
338 return 1; 439
440 if (log_ps != base_shift) {
441 int shift_val = log_ps - base_shift;
442 if (huge_shift[level] != 0) {
443 int old_shift = base_shift + huge_shift[level];
444 pr_warn("Not enabling %ld MB huge pages;"
445 " already have size %ld MB.\n",
446 ps >> 20, (1UL << old_shift) >> 20);
447 return -EINVAL;
448 }
449 if (hv_set_pte_super_shift(level, shift_val) != 0) {
450 pr_warn("Not enabling %ld MB huge pages;"
451 " no hypervisor support.\n", ps >> 20);
452 return -EINVAL;
453 }
454 printk(KERN_DEBUG "Enabled %ld MB huge pages\n", ps >> 20);
455 huge_shift[level] = shift_val;
456 }
457
458 hugetlb_add_hstate(log_ps - PAGE_SHIFT);
459
460 return 0;
461}
462
463static bool saw_hugepagesz;
464
465static __init int setup_hugepagesz(char *opt)
466{
467 if (!saw_hugepagesz) {
468 saw_hugepagesz = true;
469 memset(huge_shift, 0, sizeof(huge_shift));
470 }
471 return __setup_hugepagesz(memparse(opt, NULL));
339} 472}
340__setup("hugepagesz=", setup_hugepagesz); 473__setup("hugepagesz=", setup_hugepagesz);
341 474
342#endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/ 475#ifdef ADDITIONAL_HUGE_SIZE
476/*
477 * Provide an additional huge page size if no "hugepagesz" args are given.
478 * In that case, all the cores have properly set up their hv super_shift
479 * already, but we need to notify the hugetlb code to enable the
480 * new huge page size from the Linux point of view.
481 */
482static __init int add_default_hugepagesz(void)
483{
484 if (!saw_hugepagesz) {
485 BUILD_BUG_ON(ADDITIONAL_HUGE_SIZE >= PMD_SIZE ||
486 ADDITIONAL_HUGE_SIZE <= PAGE_SIZE);
487 BUILD_BUG_ON((PAGE_SIZE << ADDITIONAL_HUGE_SHIFT) !=
488 ADDITIONAL_HUGE_SIZE);
489 BUILD_BUG_ON(ADDITIONAL_HUGE_SHIFT & 1);
490 hugetlb_add_hstate(ADDITIONAL_HUGE_SHIFT);
491 }
492 return 0;
493}
494arch_initcall(add_default_hugepagesz);
495#endif
496
497#endif /* CONFIG_HUGETLB_SUPER_PAGES */
diff --git a/arch/tile/mm/init.c b/arch/tile/mm/init.c
index 6a9d20ddc34f..630dd2ce2afe 100644
--- a/arch/tile/mm/init.c
+++ b/arch/tile/mm/init.c
@@ -82,7 +82,7 @@ static int num_l2_ptes[MAX_NUMNODES];
82 82
83static void init_prealloc_ptes(int node, int pages) 83static void init_prealloc_ptes(int node, int pages)
84{ 84{
85 BUG_ON(pages & (HV_L2_ENTRIES-1)); 85 BUG_ON(pages & (PTRS_PER_PTE - 1));
86 if (pages) { 86 if (pages) {
87 num_l2_ptes[node] = pages; 87 num_l2_ptes[node] = pages;
88 l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t), 88 l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t),
@@ -131,14 +131,9 @@ static void __init assign_pte(pmd_t *pmd, pte_t *page_table)
131 131
132#ifdef __tilegx__ 132#ifdef __tilegx__
133 133
134#if HV_L1_SIZE != HV_L2_SIZE
135# error Rework assumption that L1 and L2 page tables are same size.
136#endif
137
138/* Since pmd_t arrays and pte_t arrays are the same size, just use casts. */
139static inline pmd_t *alloc_pmd(void) 134static inline pmd_t *alloc_pmd(void)
140{ 135{
141 return (pmd_t *)alloc_pte(); 136 return __alloc_bootmem(L1_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
142} 137}
143 138
144static inline void assign_pmd(pud_t *pud, pmd_t *pmd) 139static inline void assign_pmd(pud_t *pud, pmd_t *pmd)
@@ -444,6 +439,7 @@ static pgd_t pgtables[PTRS_PER_PGD]
444 */ 439 */
445static void __init kernel_physical_mapping_init(pgd_t *pgd_base) 440static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
446{ 441{
442 unsigned long long irqmask;
447 unsigned long address, pfn; 443 unsigned long address, pfn;
448 pmd_t *pmd; 444 pmd_t *pmd;
449 pte_t *pte; 445 pte_t *pte;
@@ -633,10 +629,13 @@ static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
633 * - install pgtables[] as the real page table 629 * - install pgtables[] as the real page table
634 * - flush the TLB so the new page table takes effect 630 * - flush the TLB so the new page table takes effect
635 */ 631 */
632 irqmask = interrupt_mask_save_mask();
633 interrupt_mask_set_mask(-1ULL);
636 rc = flush_and_install_context(__pa(pgtables), 634 rc = flush_and_install_context(__pa(pgtables),
637 init_pgprot((unsigned long)pgtables), 635 init_pgprot((unsigned long)pgtables),
638 __get_cpu_var(current_asid), 636 __get_cpu_var(current_asid),
639 cpumask_bits(my_cpu_mask)); 637 cpumask_bits(my_cpu_mask));
638 interrupt_mask_restore_mask(irqmask);
640 BUG_ON(rc != 0); 639 BUG_ON(rc != 0);
641 640
642 /* Copy the page table back to the normal swapper_pg_dir. */ 641 /* Copy the page table back to the normal swapper_pg_dir. */
@@ -699,6 +698,7 @@ static void __init permanent_kmaps_init(pgd_t *pgd_base)
699#endif /* CONFIG_HIGHMEM */ 698#endif /* CONFIG_HIGHMEM */
700 699
701 700
701#ifndef CONFIG_64BIT
702static void __init init_free_pfn_range(unsigned long start, unsigned long end) 702static void __init init_free_pfn_range(unsigned long start, unsigned long end)
703{ 703{
704 unsigned long pfn; 704 unsigned long pfn;
@@ -771,6 +771,7 @@ static void __init set_non_bootmem_pages_init(void)
771 init_free_pfn_range(start, end); 771 init_free_pfn_range(start, end);
772 } 772 }
773} 773}
774#endif
774 775
775/* 776/*
776 * paging_init() sets up the page tables - note that all of lowmem is 777 * paging_init() sets up the page tables - note that all of lowmem is
@@ -807,7 +808,7 @@ void __init paging_init(void)
807 * changing init_mm once we get up and running, and there's no 808 * changing init_mm once we get up and running, and there's no
808 * need for e.g. vmalloc_sync_all(). 809 * need for e.g. vmalloc_sync_all().
809 */ 810 */
810 BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END)); 811 BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END - 1));
811 pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START); 812 pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START);
812 assign_pmd(pud, alloc_pmd()); 813 assign_pmd(pud, alloc_pmd());
813#endif 814#endif
@@ -859,8 +860,10 @@ void __init mem_init(void)
859 /* this will put all bootmem onto the freelists */ 860 /* this will put all bootmem onto the freelists */
860 totalram_pages += free_all_bootmem(); 861 totalram_pages += free_all_bootmem();
861 862
863#ifndef CONFIG_64BIT
862 /* count all remaining LOWMEM and give all HIGHMEM to page allocator */ 864 /* count all remaining LOWMEM and give all HIGHMEM to page allocator */
863 set_non_bootmem_pages_init(); 865 set_non_bootmem_pages_init();
866#endif
864 867
865 codesize = (unsigned long)&_etext - (unsigned long)&_text; 868 codesize = (unsigned long)&_etext - (unsigned long)&_text;
866 datasize = (unsigned long)&_end - (unsigned long)&_sdata; 869 datasize = (unsigned long)&_end - (unsigned long)&_sdata;
diff --git a/arch/tile/mm/migrate.h b/arch/tile/mm/migrate.h
index cd45a0837fa6..91683d97917e 100644
--- a/arch/tile/mm/migrate.h
+++ b/arch/tile/mm/migrate.h
@@ -24,6 +24,9 @@
24/* 24/*
25 * This function is used as a helper when setting up the initial 25 * This function is used as a helper when setting up the initial
26 * page table (swapper_pg_dir). 26 * page table (swapper_pg_dir).
27 *
28 * You must mask ALL interrupts prior to invoking this code, since
29 * you can't legally touch the stack during the cache flush.
27 */ 30 */
28extern int flush_and_install_context(HV_PhysAddr page_table, HV_PTE access, 31extern int flush_and_install_context(HV_PhysAddr page_table, HV_PTE access,
29 HV_ASID asid, 32 HV_ASID asid,
@@ -39,6 +42,9 @@ extern int flush_and_install_context(HV_PhysAddr page_table, HV_PTE access,
39 * 42 *
40 * Note that any non-NULL pointers must not point to the page that 43 * Note that any non-NULL pointers must not point to the page that
41 * is handled by the stack_pte itself. 44 * is handled by the stack_pte itself.
45 *
46 * You must mask ALL interrupts prior to invoking this code, since
47 * you can't legally touch the stack during the cache flush.
42 */ 48 */
43extern int homecache_migrate_stack_and_flush(pte_t stack_pte, unsigned long va, 49extern int homecache_migrate_stack_and_flush(pte_t stack_pte, unsigned long va,
44 size_t length, pte_t *stack_ptep, 50 size_t length, pte_t *stack_ptep,
diff --git a/arch/tile/mm/migrate_32.S b/arch/tile/mm/migrate_32.S
index ac01a7cdf77f..5305814bf187 100644
--- a/arch/tile/mm/migrate_32.S
+++ b/arch/tile/mm/migrate_32.S
@@ -40,8 +40,7 @@
40#define FRAME_R32 16 40#define FRAME_R32 16
41#define FRAME_R33 20 41#define FRAME_R33 20
42#define FRAME_R34 24 42#define FRAME_R34 24
43#define FRAME_R35 28 43#define FRAME_SIZE 28
44#define FRAME_SIZE 32
45 44
46 45
47 46
@@ -66,12 +65,11 @@
66#define r_my_cpumask r5 65#define r_my_cpumask r5
67 66
68/* Locals (callee-save); must not be more than FRAME_xxx above. */ 67/* Locals (callee-save); must not be more than FRAME_xxx above. */
69#define r_save_ics r30 68#define r_context_lo r30
70#define r_context_lo r31 69#define r_context_hi r31
71#define r_context_hi r32 70#define r_access_lo r32
72#define r_access_lo r33 71#define r_access_hi r33
73#define r_access_hi r34 72#define r_asid r34
74#define r_asid r35
75 73
76STD_ENTRY(flush_and_install_context) 74STD_ENTRY(flush_and_install_context)
77 /* 75 /*
@@ -104,11 +102,7 @@ STD_ENTRY(flush_and_install_context)
104 sw r_tmp, r33 102 sw r_tmp, r33
105 addi r_tmp, sp, FRAME_R34 103 addi r_tmp, sp, FRAME_R34
106 } 104 }
107 { 105 sw r_tmp, r34
108 sw r_tmp, r34
109 addi r_tmp, sp, FRAME_R35
110 }
111 sw r_tmp, r35
112 106
113 /* Move some arguments to callee-save registers. */ 107 /* Move some arguments to callee-save registers. */
114 { 108 {
@@ -121,13 +115,6 @@ STD_ENTRY(flush_and_install_context)
121 } 115 }
122 move r_asid, r_asid_in 116 move r_asid, r_asid_in
123 117
124 /* Disable interrupts, since we can't use our stack. */
125 {
126 mfspr r_save_ics, INTERRUPT_CRITICAL_SECTION
127 movei r_tmp, 1
128 }
129 mtspr INTERRUPT_CRITICAL_SECTION, r_tmp
130
131 /* First, flush our L2 cache. */ 118 /* First, flush our L2 cache. */
132 { 119 {
133 move r0, zero /* cache_pa */ 120 move r0, zero /* cache_pa */
@@ -163,7 +150,7 @@ STD_ENTRY(flush_and_install_context)
163 } 150 }
164 { 151 {
165 move r4, r_asid 152 move r4, r_asid
166 movei r5, HV_CTX_DIRECTIO 153 moveli r5, HV_CTX_DIRECTIO | CTX_PAGE_FLAG
167 } 154 }
168 jal hv_install_context 155 jal hv_install_context
169 bnz r0, .Ldone 156 bnz r0, .Ldone
@@ -175,9 +162,6 @@ STD_ENTRY(flush_and_install_context)
175 } 162 }
176 163
177.Ldone: 164.Ldone:
178 /* Reset interrupts back how they were before. */
179 mtspr INTERRUPT_CRITICAL_SECTION, r_save_ics
180
181 /* Restore the callee-saved registers and return. */ 165 /* Restore the callee-saved registers and return. */
182 addli lr, sp, FRAME_SIZE 166 addli lr, sp, FRAME_SIZE
183 { 167 {
@@ -202,10 +186,6 @@ STD_ENTRY(flush_and_install_context)
202 } 186 }
203 { 187 {
204 lw r34, r_tmp 188 lw r34, r_tmp
205 addli r_tmp, sp, FRAME_R35
206 }
207 {
208 lw r35, r_tmp
209 addi sp, sp, FRAME_SIZE 189 addi sp, sp, FRAME_SIZE
210 } 190 }
211 jrp lr 191 jrp lr
diff --git a/arch/tile/mm/migrate_64.S b/arch/tile/mm/migrate_64.S
index e76fea688beb..1d15b10833d1 100644
--- a/arch/tile/mm/migrate_64.S
+++ b/arch/tile/mm/migrate_64.S
@@ -38,8 +38,7 @@
38#define FRAME_R30 16 38#define FRAME_R30 16
39#define FRAME_R31 24 39#define FRAME_R31 24
40#define FRAME_R32 32 40#define FRAME_R32 32
41#define FRAME_R33 40 41#define FRAME_SIZE 40
42#define FRAME_SIZE 48
43 42
44 43
45 44
@@ -60,10 +59,9 @@
60#define r_my_cpumask r3 59#define r_my_cpumask r3
61 60
62/* Locals (callee-save); must not be more than FRAME_xxx above. */ 61/* Locals (callee-save); must not be more than FRAME_xxx above. */
63#define r_save_ics r30 62#define r_context r30
64#define r_context r31 63#define r_access r31
65#define r_access r32 64#define r_asid r32
66#define r_asid r33
67 65
68/* 66/*
69 * Caller-save locals and frame constants are the same as 67 * Caller-save locals and frame constants are the same as
@@ -93,11 +91,7 @@ STD_ENTRY(flush_and_install_context)
93 st r_tmp, r31 91 st r_tmp, r31
94 addi r_tmp, sp, FRAME_R32 92 addi r_tmp, sp, FRAME_R32
95 } 93 }
96 { 94 st r_tmp, r32
97 st r_tmp, r32
98 addi r_tmp, sp, FRAME_R33
99 }
100 st r_tmp, r33
101 95
102 /* Move some arguments to callee-save registers. */ 96 /* Move some arguments to callee-save registers. */
103 { 97 {
@@ -106,13 +100,6 @@ STD_ENTRY(flush_and_install_context)
106 } 100 }
107 move r_asid, r_asid_in 101 move r_asid, r_asid_in
108 102
109 /* Disable interrupts, since we can't use our stack. */
110 {
111 mfspr r_save_ics, INTERRUPT_CRITICAL_SECTION
112 movei r_tmp, 1
113 }
114 mtspr INTERRUPT_CRITICAL_SECTION, r_tmp
115
116 /* First, flush our L2 cache. */ 103 /* First, flush our L2 cache. */
117 { 104 {
118 move r0, zero /* cache_pa */ 105 move r0, zero /* cache_pa */
@@ -147,7 +134,7 @@ STD_ENTRY(flush_and_install_context)
147 } 134 }
148 { 135 {
149 move r2, r_asid 136 move r2, r_asid
150 movei r3, HV_CTX_DIRECTIO 137 moveli r3, HV_CTX_DIRECTIO | CTX_PAGE_FLAG
151 } 138 }
152 jal hv_install_context 139 jal hv_install_context
153 bnez r0, 1f 140 bnez r0, 1f
@@ -158,10 +145,7 @@ STD_ENTRY(flush_and_install_context)
158 jal hv_flush_all 145 jal hv_flush_all
159 } 146 }
160 147
1611: /* Reset interrupts back how they were before. */ 1481: /* Restore the callee-saved registers and return. */
162 mtspr INTERRUPT_CRITICAL_SECTION, r_save_ics
163
164 /* Restore the callee-saved registers and return. */
165 addli lr, sp, FRAME_SIZE 149 addli lr, sp, FRAME_SIZE
166 { 150 {
167 ld lr, lr 151 ld lr, lr
@@ -177,10 +161,6 @@ STD_ENTRY(flush_and_install_context)
177 } 161 }
178 { 162 {
179 ld r32, r_tmp 163 ld r32, r_tmp
180 addli r_tmp, sp, FRAME_R33
181 }
182 {
183 ld r33, r_tmp
184 addi sp, sp, FRAME_SIZE 164 addi sp, sp, FRAME_SIZE
185 } 165 }
186 jrp lr 166 jrp lr
diff --git a/arch/tile/mm/pgtable.c b/arch/tile/mm/pgtable.c
index 2410aa899b3e..345edfed9fcd 100644
--- a/arch/tile/mm/pgtable.c
+++ b/arch/tile/mm/pgtable.c
@@ -132,15 +132,6 @@ void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
132 set_pte_pfn(address, phys >> PAGE_SHIFT, flags); 132 set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
133} 133}
134 134
135#if defined(CONFIG_HIGHPTE)
136pte_t *_pte_offset_map(pmd_t *dir, unsigned long address)
137{
138 pte_t *pte = kmap_atomic(pmd_page(*dir)) +
139 (pmd_ptfn(*dir) << HV_LOG2_PAGE_TABLE_ALIGN) & ~PAGE_MASK;
140 return &pte[pte_index(address)];
141}
142#endif
143
144/** 135/**
145 * shatter_huge_page() - ensure a given address is mapped by a small page. 136 * shatter_huge_page() - ensure a given address is mapped by a small page.
146 * 137 *
@@ -289,33 +280,26 @@ void pgd_free(struct mm_struct *mm, pgd_t *pgd)
289 280
290#define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER) 281#define L2_USER_PGTABLE_PAGES (1 << L2_USER_PGTABLE_ORDER)
291 282
292struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address) 283struct page *pgtable_alloc_one(struct mm_struct *mm, unsigned long address,
284 int order)
293{ 285{
294 gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO; 286 gfp_t flags = GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO;
295 struct page *p; 287 struct page *p;
296#if L2_USER_PGTABLE_ORDER > 0
297 int i; 288 int i;
298#endif
299
300#ifdef CONFIG_HIGHPTE
301 flags |= __GFP_HIGHMEM;
302#endif
303 289
304 p = alloc_pages(flags, L2_USER_PGTABLE_ORDER); 290 p = alloc_pages(flags, L2_USER_PGTABLE_ORDER);
305 if (p == NULL) 291 if (p == NULL)
306 return NULL; 292 return NULL;
307 293
308#if L2_USER_PGTABLE_ORDER > 0
309 /* 294 /*
310 * Make every page have a page_count() of one, not just the first. 295 * Make every page have a page_count() of one, not just the first.
311 * We don't use __GFP_COMP since it doesn't look like it works 296 * We don't use __GFP_COMP since it doesn't look like it works
312 * correctly with tlb_remove_page(). 297 * correctly with tlb_remove_page().
313 */ 298 */
314 for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) { 299 for (i = 1; i < order; ++i) {
315 init_page_count(p+i); 300 init_page_count(p+i);
316 inc_zone_page_state(p+i, NR_PAGETABLE); 301 inc_zone_page_state(p+i, NR_PAGETABLE);
317 } 302 }
318#endif
319 303
320 pgtable_page_ctor(p); 304 pgtable_page_ctor(p);
321 return p; 305 return p;
@@ -326,28 +310,28 @@ struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
326 * process). We have to correct whatever pte_alloc_one() did before 310 * process). We have to correct whatever pte_alloc_one() did before
327 * returning the pages to the allocator. 311 * returning the pages to the allocator.
328 */ 312 */
329void pte_free(struct mm_struct *mm, struct page *p) 313void pgtable_free(struct mm_struct *mm, struct page *p, int order)
330{ 314{
331 int i; 315 int i;
332 316
333 pgtable_page_dtor(p); 317 pgtable_page_dtor(p);
334 __free_page(p); 318 __free_page(p);
335 319
336 for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) { 320 for (i = 1; i < order; ++i) {
337 __free_page(p+i); 321 __free_page(p+i);
338 dec_zone_page_state(p+i, NR_PAGETABLE); 322 dec_zone_page_state(p+i, NR_PAGETABLE);
339 } 323 }
340} 324}
341 325
342void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte, 326void __pgtable_free_tlb(struct mmu_gather *tlb, struct page *pte,
343 unsigned long address) 327 unsigned long address, int order)
344{ 328{
345 int i; 329 int i;
346 330
347 pgtable_page_dtor(pte); 331 pgtable_page_dtor(pte);
348 tlb_remove_page(tlb, pte); 332 tlb_remove_page(tlb, pte);
349 333
350 for (i = 1; i < L2_USER_PGTABLE_PAGES; ++i) { 334 for (i = 1; i < order; ++i) {
351 tlb_remove_page(tlb, pte + i); 335 tlb_remove_page(tlb, pte + i);
352 dec_zone_page_state(pte + i, NR_PAGETABLE); 336 dec_zone_page_state(pte + i, NR_PAGETABLE);
353 } 337 }
@@ -490,7 +474,7 @@ void set_pte(pte_t *ptep, pte_t pte)
490/* Can this mm load a PTE with cached_priority set? */ 474/* Can this mm load a PTE with cached_priority set? */
491static inline int mm_is_priority_cached(struct mm_struct *mm) 475static inline int mm_is_priority_cached(struct mm_struct *mm)
492{ 476{
493 return mm->context.priority_cached; 477 return mm->context.priority_cached != 0;
494} 478}
495 479
496/* 480/*
@@ -500,8 +484,8 @@ static inline int mm_is_priority_cached(struct mm_struct *mm)
500void start_mm_caching(struct mm_struct *mm) 484void start_mm_caching(struct mm_struct *mm)
501{ 485{
502 if (!mm_is_priority_cached(mm)) { 486 if (!mm_is_priority_cached(mm)) {
503 mm->context.priority_cached = -1U; 487 mm->context.priority_cached = -1UL;
504 hv_set_caching(-1U); 488 hv_set_caching(-1UL);
505 } 489 }
506} 490}
507 491
@@ -516,7 +500,7 @@ void start_mm_caching(struct mm_struct *mm)
516 * Presumably we'll come back later and have more luck and clear 500 * Presumably we'll come back later and have more luck and clear
517 * the value then; for now we'll just keep the cache marked for priority. 501 * the value then; for now we'll just keep the cache marked for priority.
518 */ 502 */
519static unsigned int update_priority_cached(struct mm_struct *mm) 503static unsigned long update_priority_cached(struct mm_struct *mm)
520{ 504{
521 if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) { 505 if (mm->context.priority_cached && down_write_trylock(&mm->mmap_sem)) {
522 struct vm_area_struct *vm; 506 struct vm_area_struct *vm;
diff --git a/arch/um/Makefile b/arch/um/Makefile
index 55c0661e2b5d..097091059aaa 100644
--- a/arch/um/Makefile
+++ b/arch/um/Makefile
@@ -121,15 +121,8 @@ LINK_WRAPS = -Wl,--wrap,malloc -Wl,--wrap,free -Wl,--wrap,calloc
121 121
122LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt)) 122LD_FLAGS_CMDLINE = $(foreach opt,$(LDFLAGS),-Wl,$(opt))
123 123
124CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE) 124# Used by link-vmlinux.sh which has special support for um link
125define cmd_vmlinux__ 125export CFLAGS_vmlinux := $(LINK-y) $(LINK_WRAPS) $(LD_FLAGS_CMDLINE)
126 $(CC) $(CFLAGS_vmlinux) -o $@ \
127 -Wl,-T,$(vmlinux-lds) $(vmlinux-init) \
128 -Wl,--start-group $(vmlinux-main) -Wl,--end-group \
129 -lutil \
130 $(filter-out $(vmlinux-lds) $(vmlinux-init) $(vmlinux-main) vmlinux.o \
131 FORCE ,$^) ; rm -f linux
132endef
133 126
134# When cleaning we don't include .config, so we don't include 127# When cleaning we don't include .config, so we don't include
135# TT or skas makefiles and don't clean skas_ptregs.h. 128# TT or skas makefiles and don't clean skas_ptregs.h.
diff --git a/arch/um/include/asm/kvm_para.h b/arch/um/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/um/include/asm/kvm_para.h
@@ -0,0 +1 @@
#include <asm-generic/kvm_para.h>
diff --git a/arch/unicore32/include/asm/kvm_para.h b/arch/unicore32/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/unicore32/include/asm/kvm_para.h
@@ -0,0 +1 @@
#include <asm-generic/kvm_para.h>
diff --git a/arch/x86/Kbuild b/arch/x86/Kbuild
index 0e9dec6cadd1..e5287d8517aa 100644
--- a/arch/x86/Kbuild
+++ b/arch/x86/Kbuild
@@ -1,4 +1,3 @@
1
2obj-$(CONFIG_KVM) += kvm/ 1obj-$(CONFIG_KVM) += kvm/
3 2
4# Xen paravirtualization support 3# Xen paravirtualization support
@@ -7,6 +6,7 @@ obj-$(CONFIG_XEN) += xen/
7# lguest paravirtualization support 6# lguest paravirtualization support
8obj-$(CONFIG_LGUEST_GUEST) += lguest/ 7obj-$(CONFIG_LGUEST_GUEST) += lguest/
9 8
9obj-y += realmode/
10obj-y += kernel/ 10obj-y += kernel/
11obj-y += mm/ 11obj-y += mm/
12 12
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index 66cc380bebf0..d700811785ea 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -32,6 +32,7 @@ config X86
32 select ARCH_WANT_OPTIONAL_GPIOLIB 32 select ARCH_WANT_OPTIONAL_GPIOLIB
33 select ARCH_WANT_FRAME_POINTERS 33 select ARCH_WANT_FRAME_POINTERS
34 select HAVE_DMA_ATTRS 34 select HAVE_DMA_ATTRS
35 select HAVE_DMA_CONTIGUOUS if !SWIOTLB
35 select HAVE_KRETPROBES 36 select HAVE_KRETPROBES
36 select HAVE_OPTPROBES 37 select HAVE_OPTPROBES
37 select HAVE_FTRACE_MCOUNT_RECORD 38 select HAVE_FTRACE_MCOUNT_RECORD
@@ -92,6 +93,8 @@ config X86
92 select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC) 93 select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
93 select GENERIC_TIME_VSYSCALL if X86_64 94 select GENERIC_TIME_VSYSCALL if X86_64
94 select KTIME_SCALAR if X86_32 95 select KTIME_SCALAR if X86_32
96 select GENERIC_STRNCPY_FROM_USER
97 select GENERIC_STRNLEN_USER
95 98
96config INSTRUCTION_DECODER 99config INSTRUCTION_DECODER
97 def_bool (KPROBES || PERF_EVENTS || UPROBES) 100 def_bool (KPROBES || PERF_EVENTS || UPROBES)
diff --git a/arch/x86/include/asm/acpi.h b/arch/x86/include/asm/acpi.h
index 610001d385dd..0c44630d1789 100644
--- a/arch/x86/include/asm/acpi.h
+++ b/arch/x86/include/asm/acpi.h
@@ -29,7 +29,7 @@
29#include <asm/processor.h> 29#include <asm/processor.h>
30#include <asm/mmu.h> 30#include <asm/mmu.h>
31#include <asm/mpspec.h> 31#include <asm/mpspec.h>
32#include <asm/trampoline.h> 32#include <asm/realmode.h>
33 33
34#define COMPILER_DEPENDENT_INT64 long long 34#define COMPILER_DEPENDENT_INT64 long long
35#define COMPILER_DEPENDENT_UINT64 unsigned long long 35#define COMPILER_DEPENDENT_UINT64 unsigned long long
@@ -117,11 +117,8 @@ static inline void acpi_disable_pci(void)
117/* Low-level suspend routine. */ 117/* Low-level suspend routine. */
118extern int acpi_suspend_lowlevel(void); 118extern int acpi_suspend_lowlevel(void);
119 119
120extern const unsigned char acpi_wakeup_code[]; 120/* Physical address to resume after wakeup */
121#define acpi_wakeup_address (__pa(TRAMPOLINE_SYM(acpi_wakeup_code))) 121#define acpi_wakeup_address ((unsigned long)(real_mode_header->wakeup_start))
122
123/* early initialization routine */
124extern void acpi_reserve_wakeup_memory(void);
125 122
126/* 123/*
127 * Check if the CPU can handle C2 and deeper 124 * Check if the CPU can handle C2 and deeper
diff --git a/arch/x86/include/asm/dma-contiguous.h b/arch/x86/include/asm/dma-contiguous.h
new file mode 100644
index 000000000000..c09241659971
--- /dev/null
+++ b/arch/x86/include/asm/dma-contiguous.h
@@ -0,0 +1,13 @@
1#ifndef ASMX86_DMA_CONTIGUOUS_H
2#define ASMX86_DMA_CONTIGUOUS_H
3
4#ifdef __KERNEL__
5
6#include <linux/types.h>
7#include <asm-generic/dma-contiguous.h>
8
9static inline void
10dma_contiguous_early_fixup(phys_addr_t base, unsigned long size) { }
11
12#endif
13#endif
diff --git a/arch/x86/include/asm/dma-mapping.h b/arch/x86/include/asm/dma-mapping.h
index 61c0bd25845a..f7b4c7903e7e 100644
--- a/arch/x86/include/asm/dma-mapping.h
+++ b/arch/x86/include/asm/dma-mapping.h
@@ -13,6 +13,7 @@
13#include <asm/io.h> 13#include <asm/io.h>
14#include <asm/swiotlb.h> 14#include <asm/swiotlb.h>
15#include <asm-generic/dma-coherent.h> 15#include <asm-generic/dma-coherent.h>
16#include <linux/dma-contiguous.h>
16 17
17#ifdef CONFIG_ISA 18#ifdef CONFIG_ISA
18# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24) 19# define ISA_DMA_BIT_MASK DMA_BIT_MASK(24)
@@ -62,6 +63,10 @@ extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
62 dma_addr_t *dma_addr, gfp_t flag, 63 dma_addr_t *dma_addr, gfp_t flag,
63 struct dma_attrs *attrs); 64 struct dma_attrs *attrs);
64 65
66extern void dma_generic_free_coherent(struct device *dev, size_t size,
67 void *vaddr, dma_addr_t dma_addr,
68 struct dma_attrs *attrs);
69
65#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */ 70#ifdef CONFIG_X86_DMA_REMAP /* Platform code defines bridge-specific code */
66extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size); 71extern bool dma_capable(struct device *dev, dma_addr_t addr, size_t size);
67extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr); 72extern dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr);
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index c222e1a1b12a..1ac46c22dd50 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -200,7 +200,7 @@ typedef u32 __attribute__((vector_size(16))) sse128_t;
200 200
201/* Type, address-of, and value of an instruction's operand. */ 201/* Type, address-of, and value of an instruction's operand. */
202struct operand { 202struct operand {
203 enum { OP_REG, OP_MEM, OP_IMM, OP_XMM, OP_NONE } type; 203 enum { OP_REG, OP_MEM, OP_IMM, OP_XMM, OP_MM, OP_NONE } type;
204 unsigned int bytes; 204 unsigned int bytes;
205 union { 205 union {
206 unsigned long orig_val; 206 unsigned long orig_val;
@@ -213,12 +213,14 @@ struct operand {
213 unsigned seg; 213 unsigned seg;
214 } mem; 214 } mem;
215 unsigned xmm; 215 unsigned xmm;
216 unsigned mm;
216 } addr; 217 } addr;
217 union { 218 union {
218 unsigned long val; 219 unsigned long val;
219 u64 val64; 220 u64 val64;
220 char valptr[sizeof(unsigned long) + 2]; 221 char valptr[sizeof(unsigned long) + 2];
221 sse128_t vec_val; 222 sse128_t vec_val;
223 u64 mm_val;
222 }; 224 };
223}; 225};
224 226
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index e5b97be12d2a..db7c1f2709a2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -173,6 +173,9 @@ enum {
173#define DR7_FIXED_1 0x00000400 173#define DR7_FIXED_1 0x00000400
174#define DR7_VOLATILE 0xffff23ff 174#define DR7_VOLATILE 0xffff23ff
175 175
176/* apic attention bits */
177#define KVM_APIC_CHECK_VAPIC 0
178
176/* 179/*
177 * We don't want allocation failures within the mmu code, so we preallocate 180 * We don't want allocation failures within the mmu code, so we preallocate
178 * enough memory for a single page fault in a cache. 181 * enough memory for a single page fault in a cache.
@@ -238,8 +241,6 @@ struct kvm_mmu_page {
238#endif 241#endif
239 242
240 int write_flooding_count; 243 int write_flooding_count;
241
242 struct rcu_head rcu;
243}; 244};
244 245
245struct kvm_pio_request { 246struct kvm_pio_request {
@@ -338,6 +339,7 @@ struct kvm_vcpu_arch {
338 u64 efer; 339 u64 efer;
339 u64 apic_base; 340 u64 apic_base;
340 struct kvm_lapic *apic; /* kernel irqchip context */ 341 struct kvm_lapic *apic; /* kernel irqchip context */
342 unsigned long apic_attention;
341 int32_t apic_arb_prio; 343 int32_t apic_arb_prio;
342 int mp_state; 344 int mp_state;
343 int sipi_vector; 345 int sipi_vector;
@@ -537,8 +539,6 @@ struct kvm_arch {
537 u64 hv_guest_os_id; 539 u64 hv_guest_os_id;
538 u64 hv_hypercall; 540 u64 hv_hypercall;
539 541
540 atomic_t reader_counter;
541
542 #ifdef CONFIG_KVM_MMU_AUDIT 542 #ifdef CONFIG_KVM_MMU_AUDIT
543 int audit_point; 543 int audit_point;
544 #endif 544 #endif
@@ -713,8 +713,9 @@ void kvm_mmu_set_mask_ptes(u64 user_mask, u64 accessed_mask,
713 713
714int kvm_mmu_reset_context(struct kvm_vcpu *vcpu); 714int kvm_mmu_reset_context(struct kvm_vcpu *vcpu);
715void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot); 715void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot);
716int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn, 716void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
717 struct kvm_memory_slot *slot); 717 struct kvm_memory_slot *slot,
718 gfn_t gfn_offset, unsigned long mask);
718void kvm_mmu_zap_all(struct kvm *kvm); 719void kvm_mmu_zap_all(struct kvm *kvm);
719unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm); 720unsigned int kvm_mmu_calculate_mmu_pages(struct kvm *kvm);
720void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages); 721void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages);
diff --git a/arch/x86/include/asm/kvm_para.h b/arch/x86/include/asm/kvm_para.h
index 183922e13de1..63ab1661d00e 100644
--- a/arch/x86/include/asm/kvm_para.h
+++ b/arch/x86/include/asm/kvm_para.h
@@ -95,6 +95,14 @@ struct kvm_vcpu_pv_apf_data {
95extern void kvmclock_init(void); 95extern void kvmclock_init(void);
96extern int kvm_register_clock(char *txt); 96extern int kvm_register_clock(char *txt);
97 97
98#ifdef CONFIG_KVM_CLOCK
99bool kvm_check_and_clear_guest_paused(void);
100#else
101static inline bool kvm_check_and_clear_guest_paused(void)
102{
103 return false;
104}
105#endif /* CONFIG_KVMCLOCK */
98 106
99/* This instruction is vmcall. On non-VT architectures, it will generate a 107/* This instruction is vmcall. On non-VT architectures, it will generate a
100 * trap that we will then rewrite to the appropriate instruction. 108 * trap that we will then rewrite to the appropriate instruction.
@@ -173,14 +181,16 @@ static inline int kvm_para_available(void)
173 if (boot_cpu_data.cpuid_level < 0) 181 if (boot_cpu_data.cpuid_level < 0)
174 return 0; /* So we don't blow up on old processors */ 182 return 0; /* So we don't blow up on old processors */
175 183
176 cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx); 184 if (cpu_has_hypervisor) {
177 memcpy(signature + 0, &ebx, 4); 185 cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
178 memcpy(signature + 4, &ecx, 4); 186 memcpy(signature + 0, &ebx, 4);
179 memcpy(signature + 8, &edx, 4); 187 memcpy(signature + 4, &ecx, 4);
180 signature[12] = 0; 188 memcpy(signature + 8, &edx, 4);
189 signature[12] = 0;
181 190
182 if (strcmp(signature, "KVMKVMKVM") == 0) 191 if (strcmp(signature, "KVMKVMKVM") == 0)
183 return 1; 192 return 1;
193 }
184 194
185 return 0; 195 return 0;
186} 196}
diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
index effff47a3c82..43876f16caf1 100644
--- a/arch/x86/include/asm/pgtable-3level.h
+++ b/arch/x86/include/asm/pgtable-3level.h
@@ -31,6 +31,56 @@ static inline void native_set_pte(pte_t *ptep, pte_t pte)
31 ptep->pte_low = pte.pte_low; 31 ptep->pte_low = pte.pte_low;
32} 32}
33 33
34#define pmd_read_atomic pmd_read_atomic
35/*
36 * pte_offset_map_lock on 32bit PAE kernels was reading the pmd_t with
37 * a "*pmdp" dereference done by gcc. Problem is, in certain places
38 * where pte_offset_map_lock is called, concurrent page faults are
39 * allowed, if the mmap_sem is hold for reading. An example is mincore
40 * vs page faults vs MADV_DONTNEED. On the page fault side
41 * pmd_populate rightfully does a set_64bit, but if we're reading the
42 * pmd_t with a "*pmdp" on the mincore side, a SMP race can happen
43 * because gcc will not read the 64bit of the pmd atomically. To fix
44 * this all places running pmd_offset_map_lock() while holding the
45 * mmap_sem in read mode, shall read the pmdp pointer using this
46 * function to know if the pmd is null nor not, and in turn to know if
47 * they can run pmd_offset_map_lock or pmd_trans_huge or other pmd
48 * operations.
49 *
50 * Without THP if the mmap_sem is hold for reading, the
51 * pmd can only transition from null to not null while pmd_read_atomic runs.
52 * So there's no need of literally reading it atomically.
53 *
54 * With THP if the mmap_sem is hold for reading, the pmd can become
55 * THP or null or point to a pte (and in turn become "stable") at any
56 * time under pmd_read_atomic, so it's mandatory to read it atomically
57 * with cmpxchg8b.
58 */
59#ifndef CONFIG_TRANSPARENT_HUGEPAGE
60static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
61{
62 pmdval_t ret;
63 u32 *tmp = (u32 *)pmdp;
64
65 ret = (pmdval_t) (*tmp);
66 if (ret) {
67 /*
68 * If the low part is null, we must not read the high part
69 * or we can end up with a partial pmd.
70 */
71 smp_rmb();
72 ret |= ((pmdval_t)*(tmp + 1)) << 32;
73 }
74
75 return (pmd_t) { ret };
76}
77#else /* CONFIG_TRANSPARENT_HUGEPAGE */
78static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
79{
80 return (pmd_t) { atomic64_read((atomic64_t *)pmdp) };
81}
82#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
83
34static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) 84static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
35{ 85{
36 set_64bit((unsigned long long *)(ptep), native_pte_val(pte)); 86 set_64bit((unsigned long long *)(ptep), native_pte_val(pte));
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 7745b257f035..39bc5777211a 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -544,13 +544,16 @@ static inline void load_sp0(struct tss_struct *tss,
544 * enable), so that any CPU's that boot up 544 * enable), so that any CPU's that boot up
545 * after us can get the correct flags. 545 * after us can get the correct flags.
546 */ 546 */
547extern unsigned long mmu_cr4_features; 547extern unsigned long mmu_cr4_features;
548extern u32 *trampoline_cr4_features;
548 549
549static inline void set_in_cr4(unsigned long mask) 550static inline void set_in_cr4(unsigned long mask)
550{ 551{
551 unsigned long cr4; 552 unsigned long cr4;
552 553
553 mmu_cr4_features |= mask; 554 mmu_cr4_features |= mask;
555 if (trampoline_cr4_features)
556 *trampoline_cr4_features = mmu_cr4_features;
554 cr4 = read_cr4(); 557 cr4 = read_cr4();
555 cr4 |= mask; 558 cr4 |= mask;
556 write_cr4(cr4); 559 write_cr4(cr4);
@@ -561,6 +564,8 @@ static inline void clear_in_cr4(unsigned long mask)
561 unsigned long cr4; 564 unsigned long cr4;
562 565
563 mmu_cr4_features &= ~mask; 566 mmu_cr4_features &= ~mask;
567 if (trampoline_cr4_features)
568 *trampoline_cr4_features = mmu_cr4_features;
564 cr4 = read_cr4(); 569 cr4 = read_cr4();
565 cr4 &= ~mask; 570 cr4 &= ~mask;
566 write_cr4(cr4); 571 write_cr4(cr4);
diff --git a/arch/x86/include/asm/pvclock-abi.h b/arch/x86/include/asm/pvclock-abi.h
index 35f2d1948ada..6167fd798188 100644
--- a/arch/x86/include/asm/pvclock-abi.h
+++ b/arch/x86/include/asm/pvclock-abi.h
@@ -40,5 +40,6 @@ struct pvclock_wall_clock {
40} __attribute__((__packed__)); 40} __attribute__((__packed__));
41 41
42#define PVCLOCK_TSC_STABLE_BIT (1 << 0) 42#define PVCLOCK_TSC_STABLE_BIT (1 << 0)
43#define PVCLOCK_GUEST_STOPPED (1 << 1)
43#endif /* __ASSEMBLY__ */ 44#endif /* __ASSEMBLY__ */
44#endif /* _ASM_X86_PVCLOCK_ABI_H */ 45#endif /* _ASM_X86_PVCLOCK_ABI_H */
diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
new file mode 100644
index 000000000000..fce3f4ae5bd6
--- /dev/null
+++ b/arch/x86/include/asm/realmode.h
@@ -0,0 +1,62 @@
1#ifndef _ARCH_X86_REALMODE_H
2#define _ARCH_X86_REALMODE_H
3
4#include <linux/types.h>
5#include <asm/io.h>
6
7/* This must match data at realmode.S */
8struct real_mode_header {
9 u32 text_start;
10 u32 ro_end;
11 /* SMP trampoline */
12 u32 trampoline_start;
13 u32 trampoline_status;
14 u32 trampoline_header;
15#ifdef CONFIG_X86_64
16 u32 trampoline_pgd;
17#endif
18 /* ACPI S3 wakeup */
19#ifdef CONFIG_ACPI_SLEEP
20 u32 wakeup_start;
21 u32 wakeup_header;
22#endif
23 /* APM/BIOS reboot */
24#ifdef CONFIG_X86_32
25 u32 machine_real_restart_asm;
26#endif
27};
28
29/* This must match data at trampoline_32/64.S */
30struct trampoline_header {
31#ifdef CONFIG_X86_32
32 u32 start;
33 u16 gdt_pad;
34 u16 gdt_limit;
35 u32 gdt_base;
36#else
37 u64 start;
38 u64 efer;
39 u32 cr4;
40#endif
41};
42
43extern struct real_mode_header *real_mode_header;
44extern unsigned char real_mode_blob_end[];
45
46extern unsigned long init_rsp;
47extern unsigned long initial_code;
48extern unsigned long initial_gs;
49
50extern unsigned char real_mode_blob[];
51extern unsigned char real_mode_relocs[];
52
53#ifdef CONFIG_X86_32
54extern unsigned char startup_32_smp[];
55extern unsigned char boot_gdt[];
56#else
57extern unsigned char secondary_startup_64[];
58#endif
59
60extern void __init setup_real_mode(void);
61
62#endif /* _ARCH_X86_REALMODE_H */
diff --git a/arch/x86/include/asm/sta2x11.h b/arch/x86/include/asm/sta2x11.h
new file mode 100644
index 000000000000..e9d32df89ccc
--- /dev/null
+++ b/arch/x86/include/asm/sta2x11.h
@@ -0,0 +1,12 @@
1/*
2 * Header file for STMicroelectronics ConneXt (STA2X11) IOHub
3 */
4#ifndef __ASM_STA2X11_H
5#define __ASM_STA2X11_H
6
7#include <linux/pci.h>
8
9/* This needs to be called from the MFD to configure its sub-devices */
10struct sta2x11_instance *sta2x11_get_instance(struct pci_dev *pdev);
11
12#endif /* __ASM_STA2X11_H */
diff --git a/arch/x86/include/asm/trampoline.h b/arch/x86/include/asm/trampoline.h
deleted file mode 100644
index feca3118a73b..000000000000
--- a/arch/x86/include/asm/trampoline.h
+++ /dev/null
@@ -1,39 +0,0 @@
1#ifndef _ASM_X86_TRAMPOLINE_H
2#define _ASM_X86_TRAMPOLINE_H
3
4#ifndef __ASSEMBLY__
5
6#include <linux/types.h>
7#include <asm/io.h>
8
9/*
10 * Trampoline 80x86 program as an array. These are in the init rodata
11 * segment, but that's okay, because we only care about the relative
12 * addresses of the symbols.
13 */
14extern const unsigned char x86_trampoline_start [];
15extern const unsigned char x86_trampoline_end [];
16extern unsigned char *x86_trampoline_base;
17
18extern unsigned long init_rsp;
19extern unsigned long initial_code;
20extern unsigned long initial_gs;
21
22extern void __init setup_trampolines(void);
23
24extern const unsigned char trampoline_data[];
25extern const unsigned char trampoline_status[];
26
27#define TRAMPOLINE_SYM(x) \
28 ((void *)(x86_trampoline_base + \
29 ((const unsigned char *)(x) - x86_trampoline_start)))
30
31/* Address of the SMP trampoline */
32static inline unsigned long trampoline_address(void)
33{
34 return virt_to_phys(TRAMPOLINE_SYM(trampoline_data));
35}
36
37#endif /* __ASSEMBLY__ */
38
39#endif /* _ASM_X86_TRAMPOLINE_H */
diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
index 851fe0dc13bc..04cd6882308e 100644
--- a/arch/x86/include/asm/uaccess.h
+++ b/arch/x86/include/asm/uaccess.h
@@ -32,6 +32,7 @@
32 32
33#define segment_eq(a, b) ((a).seg == (b).seg) 33#define segment_eq(a, b) ((a).seg == (b).seg)
34 34
35#define user_addr_max() (current_thread_info()->addr_limit.seg)
35#define __addr_ok(addr) \ 36#define __addr_ok(addr) \
36 ((unsigned long __force)(addr) < \ 37 ((unsigned long __force)(addr) < \
37 (current_thread_info()->addr_limit.seg)) 38 (current_thread_info()->addr_limit.seg))
@@ -565,6 +566,9 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n);
565extern __must_check long 566extern __must_check long
566strncpy_from_user(char *dst, const char __user *src, long count); 567strncpy_from_user(char *dst, const char __user *src, long count);
567 568
569extern __must_check long strlen_user(const char __user *str);
570extern __must_check long strnlen_user(const char __user *str, long n);
571
568/* 572/*
569 * movsl can be slow when source and dest are not both 8-byte aligned 573 * movsl can be slow when source and dest are not both 8-byte aligned
570 */ 574 */
diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
index 8084bc73b18c..576e39bca6ad 100644
--- a/arch/x86/include/asm/uaccess_32.h
+++ b/arch/x86/include/asm/uaccess_32.h
@@ -213,23 +213,6 @@ static inline unsigned long __must_check copy_from_user(void *to,
213 return n; 213 return n;
214} 214}
215 215
216/**
217 * strlen_user: - Get the size of a string in user space.
218 * @str: The string to measure.
219 *
220 * Context: User context only. This function may sleep.
221 *
222 * Get the size of a NUL-terminated string in user space.
223 *
224 * Returns the size of the string INCLUDING the terminating NUL.
225 * On exception, returns 0.
226 *
227 * If there is a limit on the length of a valid string, you may wish to
228 * consider using strnlen_user() instead.
229 */
230#define strlen_user(str) strnlen_user(str, LONG_MAX)
231
232long strnlen_user(const char __user *str, long n);
233unsigned long __must_check clear_user(void __user *mem, unsigned long len); 216unsigned long __must_check clear_user(void __user *mem, unsigned long len);
234unsigned long __must_check __clear_user(void __user *mem, unsigned long len); 217unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
235 218
diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
index fcd4b6f3ef02..8e796fbbf9c6 100644
--- a/arch/x86/include/asm/uaccess_64.h
+++ b/arch/x86/include/asm/uaccess_64.h
@@ -208,9 +208,6 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
208 } 208 }
209} 209}
210 210
211__must_check long strnlen_user(const char __user *str, long n);
212__must_check long __strnlen_user(const char __user *str, long n);
213__must_check long strlen_user(const char __user *str);
214__must_check unsigned long clear_user(void __user *mem, unsigned long len); 211__must_check unsigned long clear_user(void __user *mem, unsigned long len);
215__must_check unsigned long __clear_user(void __user *mem, unsigned long len); 212__must_check unsigned long __clear_user(void __user *mem, unsigned long len);
216 213
diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
index ae03facfadd6..5b238981542a 100644
--- a/arch/x86/include/asm/word-at-a-time.h
+++ b/arch/x86/include/asm/word-at-a-time.h
@@ -10,6 +10,11 @@
10 * bit count instruction, that might be better than the multiply 10 * bit count instruction, that might be better than the multiply
11 * and shift, for example. 11 * and shift, for example.
12 */ 12 */
13struct word_at_a_time {
14 const unsigned long one_bits, high_bits;
15};
16
17#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
13 18
14#ifdef CONFIG_64BIT 19#ifdef CONFIG_64BIT
15 20
@@ -37,10 +42,31 @@ static inline long count_masked_bytes(long mask)
37 42
38#endif 43#endif
39 44
40/* Return the high bit set in the first byte that is a zero */ 45/* Return nonzero if it has a zero */
41static inline unsigned long has_zero(unsigned long a) 46static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
47{
48 unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
49 *bits = mask;
50 return mask;
51}
52
53static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
54{
55 return bits;
56}
57
58static inline unsigned long create_zero_mask(unsigned long bits)
59{
60 bits = (bits - 1) & ~bits;
61 return bits >> 7;
62}
63
64/* The mask we created is directly usable as a bytemask */
65#define zero_bytemask(mask) (mask)
66
67static inline unsigned long find_zero(unsigned long mask)
42{ 68{
43 return ((a - REPEAT_BYTE(0x01)) & ~a) & REPEAT_BYTE(0x80); 69 return count_masked_bytes(mask);
44} 70}
45 71
46/* 72/*
diff --git a/arch/x86/include/asm/xen/events.h b/arch/x86/include/asm/xen/events.h
index 1df35417c412..cc146d51449e 100644
--- a/arch/x86/include/asm/xen/events.h
+++ b/arch/x86/include/asm/xen/events.h
@@ -6,6 +6,7 @@ enum ipi_vector {
6 XEN_CALL_FUNCTION_VECTOR, 6 XEN_CALL_FUNCTION_VECTOR,
7 XEN_CALL_FUNCTION_SINGLE_VECTOR, 7 XEN_CALL_FUNCTION_SINGLE_VECTOR,
8 XEN_SPIN_UNLOCK_VECTOR, 8 XEN_SPIN_UNLOCK_VECTOR,
9 XEN_IRQ_WORK_VECTOR,
9 10
10 XEN_NR_IPIS, 11 XEN_NR_IPIS,
11}; 12};
diff --git a/arch/x86/include/asm/xen/page.h b/arch/x86/include/asm/xen/page.h
index c34f96c2f7a0..93971e841dd5 100644
--- a/arch/x86/include/asm/xen/page.h
+++ b/arch/x86/include/asm/xen/page.h
@@ -44,6 +44,7 @@ extern unsigned long machine_to_phys_nr;
44 44
45extern unsigned long get_phys_to_machine(unsigned long pfn); 45extern unsigned long get_phys_to_machine(unsigned long pfn);
46extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn); 46extern bool set_phys_to_machine(unsigned long pfn, unsigned long mfn);
47extern bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn);
47extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); 48extern bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn);
48extern unsigned long set_phys_range_identity(unsigned long pfn_s, 49extern unsigned long set_phys_range_identity(unsigned long pfn_s,
49 unsigned long pfn_e); 50 unsigned long pfn_e);
diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
index 9bba5b79902b..8215e5652d97 100644
--- a/arch/x86/kernel/Makefile
+++ b/arch/x86/kernel/Makefile
@@ -35,7 +35,6 @@ obj-y += tsc.o io_delay.o rtc.o
35obj-y += pci-iommu_table.o 35obj-y += pci-iommu_table.o
36obj-y += resource.o 36obj-y += resource.o
37 37
38obj-y += trampoline.o trampoline_$(BITS).o
39obj-y += process.o 38obj-y += process.o
40obj-y += i387.o xsave.o 39obj-y += i387.o xsave.o
41obj-y += ptrace.o 40obj-y += ptrace.o
@@ -48,7 +47,6 @@ obj-$(CONFIG_STACKTRACE) += stacktrace.o
48obj-y += cpu/ 47obj-y += cpu/
49obj-y += acpi/ 48obj-y += acpi/
50obj-y += reboot.o 49obj-y += reboot.o
51obj-$(CONFIG_X86_32) += reboot_32.o
52obj-$(CONFIG_X86_MSR) += msr.o 50obj-$(CONFIG_X86_MSR) += msr.o
53obj-$(CONFIG_X86_CPUID) += cpuid.o 51obj-$(CONFIG_X86_CPUID) += cpuid.o
54obj-$(CONFIG_PCI) += early-quirks.o 52obj-$(CONFIG_PCI) += early-quirks.o
diff --git a/arch/x86/kernel/acpi/Makefile b/arch/x86/kernel/acpi/Makefile
index 6f35260bb3ef..163b22581472 100644
--- a/arch/x86/kernel/acpi/Makefile
+++ b/arch/x86/kernel/acpi/Makefile
@@ -1,14 +1,7 @@
1subdir- := realmode
2
3obj-$(CONFIG_ACPI) += boot.o 1obj-$(CONFIG_ACPI) += boot.o
4obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_rm.o wakeup_$(BITS).o 2obj-$(CONFIG_ACPI_SLEEP) += sleep.o wakeup_$(BITS).o
5 3
6ifneq ($(CONFIG_ACPI_PROCESSOR),) 4ifneq ($(CONFIG_ACPI_PROCESSOR),)
7obj-y += cstate.o 5obj-y += cstate.o
8endif 6endif
9 7
10$(obj)/wakeup_rm.o: $(obj)/realmode/wakeup.bin
11
12$(obj)/realmode/wakeup.bin: FORCE
13 $(Q)$(MAKE) $(build)=$(obj)/realmode
14
diff --git a/arch/x86/kernel/acpi/realmode/.gitignore b/arch/x86/kernel/acpi/realmode/.gitignore
deleted file mode 100644
index 58f1f48a58f8..000000000000
--- a/arch/x86/kernel/acpi/realmode/.gitignore
+++ /dev/null
@@ -1,3 +0,0 @@
1wakeup.bin
2wakeup.elf
3wakeup.lds
diff --git a/arch/x86/kernel/acpi/realmode/Makefile b/arch/x86/kernel/acpi/realmode/Makefile
deleted file mode 100644
index 6a564ac67ef5..000000000000
--- a/arch/x86/kernel/acpi/realmode/Makefile
+++ /dev/null
@@ -1,59 +0,0 @@
1#
2# arch/x86/kernel/acpi/realmode/Makefile
3#
4# This file is subject to the terms and conditions of the GNU General Public
5# License. See the file "COPYING" in the main directory of this archive
6# for more details.
7#
8
9always := wakeup.bin
10targets := wakeup.elf wakeup.lds
11
12wakeup-y += wakeup.o wakemain.o video-mode.o copy.o bioscall.o regs.o
13
14# The link order of the video-*.o modules can matter. In particular,
15# video-vga.o *must* be listed first, followed by video-vesa.o.
16# Hardware-specific drivers should follow in the order they should be
17# probed, and video-bios.o should typically be last.
18wakeup-y += video-vga.o
19wakeup-y += video-vesa.o
20wakeup-y += video-bios.o
21
22targets += $(wakeup-y)
23
24bootsrc := $(src)/../../../boot
25
26# ---------------------------------------------------------------------------
27
28# How to compile the 16-bit code. Note we always compile for -march=i386,
29# that way we can complain to the user if the CPU is insufficient.
30# Compile with _SETUP since this is similar to the boot-time setup code.
31KBUILD_CFLAGS := $(LINUXINCLUDE) -g -Os -D_SETUP -D_WAKEUP -D__KERNEL__ \
32 -I$(srctree)/$(bootsrc) \
33 $(cflags-y) \
34 -Wall -Wstrict-prototypes \
35 -march=i386 -mregparm=3 \
36 -include $(srctree)/$(bootsrc)/code16gcc.h \
37 -fno-strict-aliasing -fomit-frame-pointer \
38 $(call cc-option, -ffreestanding) \
39 $(call cc-option, -fno-toplevel-reorder,\
40 $(call cc-option, -fno-unit-at-a-time)) \
41 $(call cc-option, -fno-stack-protector) \
42 $(call cc-option, -mpreferred-stack-boundary=2)
43KBUILD_CFLAGS += $(call cc-option, -m32)
44KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
45GCOV_PROFILE := n
46
47WAKEUP_OBJS = $(addprefix $(obj)/,$(wakeup-y))
48
49LDFLAGS_wakeup.elf := -T
50
51CPPFLAGS_wakeup.lds += -P -C
52
53$(obj)/wakeup.elf: $(obj)/wakeup.lds $(WAKEUP_OBJS) FORCE
54 $(call if_changed,ld)
55
56OBJCOPYFLAGS_wakeup.bin := -O binary
57
58$(obj)/wakeup.bin: $(obj)/wakeup.elf FORCE
59 $(call if_changed,objcopy)
diff --git a/arch/x86/kernel/acpi/realmode/bioscall.S b/arch/x86/kernel/acpi/realmode/bioscall.S
deleted file mode 100644
index f51eb0bb56ce..000000000000
--- a/arch/x86/kernel/acpi/realmode/bioscall.S
+++ /dev/null
@@ -1 +0,0 @@
1#include "../../../boot/bioscall.S"
diff --git a/arch/x86/kernel/acpi/realmode/copy.S b/arch/x86/kernel/acpi/realmode/copy.S
deleted file mode 100644
index dc59ebee69d8..000000000000
--- a/arch/x86/kernel/acpi/realmode/copy.S
+++ /dev/null
@@ -1 +0,0 @@
1#include "../../../boot/copy.S"
diff --git a/arch/x86/kernel/acpi/realmode/regs.c b/arch/x86/kernel/acpi/realmode/regs.c
deleted file mode 100644
index 6206033ba202..000000000000
--- a/arch/x86/kernel/acpi/realmode/regs.c
+++ /dev/null
@@ -1 +0,0 @@
1#include "../../../boot/regs.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-bios.c b/arch/x86/kernel/acpi/realmode/video-bios.c
deleted file mode 100644
index 7deabc144a27..000000000000
--- a/arch/x86/kernel/acpi/realmode/video-bios.c
+++ /dev/null
@@ -1 +0,0 @@
1#include "../../../boot/video-bios.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-mode.c b/arch/x86/kernel/acpi/realmode/video-mode.c
deleted file mode 100644
index 328ad209f113..000000000000
--- a/arch/x86/kernel/acpi/realmode/video-mode.c
+++ /dev/null
@@ -1 +0,0 @@
1#include "../../../boot/video-mode.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-vesa.c b/arch/x86/kernel/acpi/realmode/video-vesa.c
deleted file mode 100644
index 9dbb9672226a..000000000000
--- a/arch/x86/kernel/acpi/realmode/video-vesa.c
+++ /dev/null
@@ -1 +0,0 @@
1#include "../../../boot/video-vesa.c"
diff --git a/arch/x86/kernel/acpi/realmode/video-vga.c b/arch/x86/kernel/acpi/realmode/video-vga.c
deleted file mode 100644
index bcc81255f374..000000000000
--- a/arch/x86/kernel/acpi/realmode/video-vga.c
+++ /dev/null
@@ -1 +0,0 @@
1#include "../../../boot/video-vga.c"
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.lds.S b/arch/x86/kernel/acpi/realmode/wakeup.lds.S
deleted file mode 100644
index d4f8010a5b1b..000000000000
--- a/arch/x86/kernel/acpi/realmode/wakeup.lds.S
+++ /dev/null
@@ -1,62 +0,0 @@
1/*
2 * wakeup.ld
3 *
4 * Linker script for the real-mode wakeup code
5 */
6#undef i386
7#include "wakeup.h"
8
9OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
10OUTPUT_ARCH(i386)
11ENTRY(_start)
12
13SECTIONS
14{
15 . = 0;
16 .jump : {
17 *(.jump)
18 } = 0x90909090
19
20 . = WAKEUP_HEADER_OFFSET;
21 .header : {
22 *(.header)
23 }
24
25 . = ALIGN(16);
26 .text : {
27 *(.text*)
28 } = 0x90909090
29
30 . = ALIGN(16);
31 .rodata : {
32 *(.rodata*)
33 }
34
35 .videocards : {
36 video_cards = .;
37 *(.videocards)
38 video_cards_end = .;
39 }
40
41 . = ALIGN(16);
42 .data : {
43 *(.data*)
44 }
45
46 . = ALIGN(16);
47 .bss : {
48 __bss_start = .;
49 *(.bss)
50 __bss_end = .;
51 }
52
53 .signature : {
54 *(.signature)
55 }
56
57 _end = .;
58
59 /DISCARD/ : {
60 *(.note*)
61 }
62}
diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
index 146a49c763a4..95bf99de9058 100644
--- a/arch/x86/kernel/acpi/sleep.c
+++ b/arch/x86/kernel/acpi/sleep.c
@@ -14,8 +14,9 @@
14#include <asm/desc.h> 14#include <asm/desc.h>
15#include <asm/pgtable.h> 15#include <asm/pgtable.h>
16#include <asm/cacheflush.h> 16#include <asm/cacheflush.h>
17#include <asm/realmode.h>
17 18
18#include "realmode/wakeup.h" 19#include "../../realmode/rm/wakeup.h"
19#include "sleep.h" 20#include "sleep.h"
20 21
21unsigned long acpi_realmode_flags; 22unsigned long acpi_realmode_flags;
@@ -36,13 +37,9 @@ asmlinkage void acpi_enter_s3(void)
36 */ 37 */
37int acpi_suspend_lowlevel(void) 38int acpi_suspend_lowlevel(void)
38{ 39{
39 struct wakeup_header *header; 40 struct wakeup_header *header =
40 /* address in low memory of the wakeup routine. */ 41 (struct wakeup_header *) __va(real_mode_header->wakeup_header);
41 char *acpi_realmode;
42 42
43 acpi_realmode = TRAMPOLINE_SYM(acpi_wakeup_code);
44
45 header = (struct wakeup_header *)(acpi_realmode + WAKEUP_HEADER_OFFSET);
46 if (header->signature != WAKEUP_HEADER_SIGNATURE) { 43 if (header->signature != WAKEUP_HEADER_SIGNATURE) {
47 printk(KERN_ERR "wakeup header does not match\n"); 44 printk(KERN_ERR "wakeup header does not match\n");
48 return -EINVAL; 45 return -EINVAL;
@@ -50,27 +47,6 @@ int acpi_suspend_lowlevel(void)
50 47
51 header->video_mode = saved_video_mode; 48 header->video_mode = saved_video_mode;
52 49
53 header->wakeup_jmp_seg = acpi_wakeup_address >> 4;
54
55 /*
56 * Set up the wakeup GDT. We set these up as Big Real Mode,
57 * that is, with limits set to 4 GB. At least the Lenovo
58 * Thinkpad X61 is known to need this for the video BIOS
59 * initialization quirk to work; this is likely to also
60 * be the case for other laptops or integrated video devices.
61 */
62
63 /* GDT[0]: GDT self-pointer */
64 header->wakeup_gdt[0] =
65 (u64)(sizeof(header->wakeup_gdt) - 1) +
66 ((u64)__pa(&header->wakeup_gdt) << 16);
67 /* GDT[1]: big real mode-like code segment */
68 header->wakeup_gdt[1] =
69 GDT_ENTRY(0x809b, acpi_wakeup_address, 0xfffff);
70 /* GDT[2]: big real mode-like data segment */
71 header->wakeup_gdt[2] =
72 GDT_ENTRY(0x8093, acpi_wakeup_address, 0xfffff);
73
74#ifndef CONFIG_64BIT 50#ifndef CONFIG_64BIT
75 store_gdt((struct desc_ptr *)&header->pmode_gdt); 51 store_gdt((struct desc_ptr *)&header->pmode_gdt);
76 52
@@ -95,7 +71,6 @@ int acpi_suspend_lowlevel(void)
95 header->pmode_cr3 = (u32)__pa(&initial_page_table); 71 header->pmode_cr3 = (u32)__pa(&initial_page_table);
96 saved_magic = 0x12345678; 72 saved_magic = 0x12345678;
97#else /* CONFIG_64BIT */ 73#else /* CONFIG_64BIT */
98 header->trampoline_segment = trampoline_address() >> 4;
99#ifdef CONFIG_SMP 74#ifdef CONFIG_SMP
100 stack_start = (unsigned long)temp_stack + sizeof(temp_stack); 75 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
101 early_gdt_descr.address = 76 early_gdt_descr.address =
diff --git a/arch/x86/kernel/acpi/sleep.h b/arch/x86/kernel/acpi/sleep.h
index d68677a2a010..5653a5791ec9 100644
--- a/arch/x86/kernel/acpi/sleep.h
+++ b/arch/x86/kernel/acpi/sleep.h
@@ -2,8 +2,8 @@
2 * Variables and functions used by the code in sleep.c 2 * Variables and functions used by the code in sleep.c
3 */ 3 */
4 4
5#include <asm/trampoline.h>
6#include <linux/linkage.h> 5#include <linux/linkage.h>
6#include <asm/realmode.h>
7 7
8extern unsigned long saved_video_mode; 8extern unsigned long saved_video_mode;
9extern long saved_magic; 9extern long saved_magic;
diff --git a/arch/x86/kernel/acpi/wakeup_rm.S b/arch/x86/kernel/acpi/wakeup_rm.S
deleted file mode 100644
index 63b8ab524f2c..000000000000
--- a/arch/x86/kernel/acpi/wakeup_rm.S
+++ /dev/null
@@ -1,12 +0,0 @@
1/*
2 * Wrapper script for the realmode binary as a transport object
3 * before copying to low memory.
4 */
5#include <asm/page_types.h>
6
7 .section ".x86_trampoline","a"
8 .balign PAGE_SIZE
9 .globl acpi_wakeup_code
10acpi_wakeup_code:
11 .incbin "arch/x86/kernel/acpi/realmode/wakeup.bin"
12 .size acpi_wakeup_code, .-acpi_wakeup_code
diff --git a/arch/x86/kernel/cpu/mcheck/mce-apei.c b/arch/x86/kernel/cpu/mcheck/mce-apei.c
index 507ea58688e2..cd8b166a1735 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-apei.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-apei.c
@@ -42,7 +42,8 @@ void apei_mce_report_mem_error(int corrected, struct cper_sec_mem_err *mem_err)
42 struct mce m; 42 struct mce m;
43 43
44 /* Only corrected MC is reported */ 44 /* Only corrected MC is reported */
45 if (!corrected) 45 if (!corrected || !(mem_err->validation_bits &
46 CPER_MEM_VALID_PHYSICAL_ADDRESS))
46 return; 47 return;
47 48
48 mce_setup(&m); 49 mce_setup(&m);
diff --git a/arch/x86/kernel/cpu/mcheck/mce-severity.c b/arch/x86/kernel/cpu/mcheck/mce-severity.c
index 0c82091b1652..413c2ced887c 100644
--- a/arch/x86/kernel/cpu/mcheck/mce-severity.c
+++ b/arch/x86/kernel/cpu/mcheck/mce-severity.c
@@ -126,6 +126,16 @@ static struct severity {
126 SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA), 126 SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_DATA),
127 USER 127 USER
128 ), 128 ),
129 MCESEV(
130 KEEP, "HT thread notices Action required: instruction fetch error",
131 SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
132 MCGMASK(MCG_STATUS_EIPV, 0)
133 ),
134 MCESEV(
135 AR, "Action required: instruction fetch error",
136 SER, MASK(MCI_STATUS_OVER|MCI_UC_SAR|MCI_ADDR|MCACOD, MCI_UC_SAR|MCI_ADDR|MCACOD_INSTR),
137 USER
138 ),
129#endif 139#endif
130 MCESEV( 140 MCESEV(
131 PANIC, "Action required: unknown MCACOD", 141 PANIC, "Action required: unknown MCACOD",
@@ -165,15 +175,19 @@ static struct severity {
165}; 175};
166 176
167/* 177/*
168 * If the EIPV bit is set, it means the saved IP is the 178 * If mcgstatus indicated that ip/cs on the stack were
169 * instruction which caused the MCE. 179 * no good, then "m->cs" will be zero and we will have
180 * to assume the worst case (IN_KERNEL) as we actually
181 * have no idea what we were executing when the machine
182 * check hit.
183 * If we do have a good "m->cs" (or a faked one in the
184 * case we were executing in VM86 mode) we can use it to
185 * distinguish an exception taken in user from from one
186 * taken in the kernel.
170 */ 187 */
171static int error_context(struct mce *m) 188static int error_context(struct mce *m)
172{ 189{
173 if (m->mcgstatus & MCG_STATUS_EIPV) 190 return ((m->cs & 3) == 3) ? IN_USER : IN_KERNEL;
174 return (m->ip && (m->cs & 3) == 3) ? IN_USER : IN_KERNEL;
175 /* Unknown, assume kernel */
176 return IN_KERNEL;
177} 191}
178 192
179int mce_severity(struct mce *m, int tolerant, char **msg) 193int mce_severity(struct mce *m, int tolerant, char **msg)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index aaa056f31693..b4180f425fb8 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -437,6 +437,14 @@ static inline void mce_gather_info(struct mce *m, struct pt_regs *regs)
437 if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) { 437 if (m->mcgstatus & (MCG_STATUS_RIPV|MCG_STATUS_EIPV)) {
438 m->ip = regs->ip; 438 m->ip = regs->ip;
439 m->cs = regs->cs; 439 m->cs = regs->cs;
440
441 /*
442 * When in VM86 mode make the cs look like ring 3
443 * always. This is a lie, but it's better than passing
444 * the additional vm86 bit around everywhere.
445 */
446 if (v8086_mode(regs))
447 m->cs |= 3;
440 } 448 }
441 /* Use accurate RIP reporting if available. */ 449 /* Use accurate RIP reporting if available. */
442 if (rip_msr) 450 if (rip_msr)
@@ -641,16 +649,18 @@ EXPORT_SYMBOL_GPL(machine_check_poll);
641 * Do a quick check if any of the events requires a panic. 649 * Do a quick check if any of the events requires a panic.
642 * This decides if we keep the events around or clear them. 650 * This decides if we keep the events around or clear them.
643 */ 651 */
644static int mce_no_way_out(struct mce *m, char **msg) 652static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp)
645{ 653{
646 int i; 654 int i, ret = 0;
647 655
648 for (i = 0; i < banks; i++) { 656 for (i = 0; i < banks; i++) {
649 m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i)); 657 m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
658 if (m->status & MCI_STATUS_VAL)
659 __set_bit(i, validp);
650 if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY) 660 if (mce_severity(m, tolerant, msg) >= MCE_PANIC_SEVERITY)
651 return 1; 661 ret = 1;
652 } 662 }
653 return 0; 663 return ret;
654} 664}
655 665
656/* 666/*
@@ -1013,6 +1023,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
1013 */ 1023 */
1014 int kill_it = 0; 1024 int kill_it = 0;
1015 DECLARE_BITMAP(toclear, MAX_NR_BANKS); 1025 DECLARE_BITMAP(toclear, MAX_NR_BANKS);
1026 DECLARE_BITMAP(valid_banks, MAX_NR_BANKS);
1016 char *msg = "Unknown"; 1027 char *msg = "Unknown";
1017 1028
1018 atomic_inc(&mce_entry); 1029 atomic_inc(&mce_entry);
@@ -1027,7 +1038,8 @@ void do_machine_check(struct pt_regs *regs, long error_code)
1027 final = &__get_cpu_var(mces_seen); 1038 final = &__get_cpu_var(mces_seen);
1028 *final = m; 1039 *final = m;
1029 1040
1030 no_way_out = mce_no_way_out(&m, &msg); 1041 memset(valid_banks, 0, sizeof(valid_banks));
1042 no_way_out = mce_no_way_out(&m, &msg, valid_banks);
1031 1043
1032 barrier(); 1044 barrier();
1033 1045
@@ -1047,6 +1059,8 @@ void do_machine_check(struct pt_regs *regs, long error_code)
1047 order = mce_start(&no_way_out); 1059 order = mce_start(&no_way_out);
1048 for (i = 0; i < banks; i++) { 1060 for (i = 0; i < banks; i++) {
1049 __clear_bit(i, toclear); 1061 __clear_bit(i, toclear);
1062 if (!test_bit(i, valid_banks))
1063 continue;
1050 if (!mce_banks[i].ctl) 1064 if (!mce_banks[i].ctl)
1051 continue; 1065 continue;
1052 1066
diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
index 62d61e9976eb..41857970517f 100644
--- a/arch/x86/kernel/e820.c
+++ b/arch/x86/kernel/e820.c
@@ -113,7 +113,9 @@ static void __init __e820_add_region(struct e820map *e820x, u64 start, u64 size,
113 int x = e820x->nr_map; 113 int x = e820x->nr_map;
114 114
115 if (x >= ARRAY_SIZE(e820x->map)) { 115 if (x >= ARRAY_SIZE(e820x->map)) {
116 printk(KERN_ERR "Ooops! Too many entries in the memory map!\n"); 116 printk(KERN_ERR "e820: too many entries; ignoring [mem %#010llx-%#010llx]\n",
117 (unsigned long long) start,
118 (unsigned long long) (start + size - 1));
117 return; 119 return;
118 } 120 }
119 121
@@ -133,19 +135,19 @@ static void __init e820_print_type(u32 type)
133 switch (type) { 135 switch (type) {
134 case E820_RAM: 136 case E820_RAM:
135 case E820_RESERVED_KERN: 137 case E820_RESERVED_KERN:
136 printk(KERN_CONT "(usable)"); 138 printk(KERN_CONT "usable");
137 break; 139 break;
138 case E820_RESERVED: 140 case E820_RESERVED:
139 printk(KERN_CONT "(reserved)"); 141 printk(KERN_CONT "reserved");
140 break; 142 break;
141 case E820_ACPI: 143 case E820_ACPI:
142 printk(KERN_CONT "(ACPI data)"); 144 printk(KERN_CONT "ACPI data");
143 break; 145 break;
144 case E820_NVS: 146 case E820_NVS:
145 printk(KERN_CONT "(ACPI NVS)"); 147 printk(KERN_CONT "ACPI NVS");
146 break; 148 break;
147 case E820_UNUSABLE: 149 case E820_UNUSABLE:
148 printk(KERN_CONT "(unusable)"); 150 printk(KERN_CONT "unusable");
149 break; 151 break;
150 default: 152 default:
151 printk(KERN_CONT "type %u", type); 153 printk(KERN_CONT "type %u", type);
@@ -158,10 +160,10 @@ void __init e820_print_map(char *who)
158 int i; 160 int i;
159 161
160 for (i = 0; i < e820.nr_map; i++) { 162 for (i = 0; i < e820.nr_map; i++) {
161 printk(KERN_INFO " %s: %016Lx - %016Lx ", who, 163 printk(KERN_INFO "%s: [mem %#018Lx-%#018Lx] ", who,
162 (unsigned long long) e820.map[i].addr, 164 (unsigned long long) e820.map[i].addr,
163 (unsigned long long) 165 (unsigned long long)
164 (e820.map[i].addr + e820.map[i].size)); 166 (e820.map[i].addr + e820.map[i].size - 1));
165 e820_print_type(e820.map[i].type); 167 e820_print_type(e820.map[i].type);
166 printk(KERN_CONT "\n"); 168 printk(KERN_CONT "\n");
167 } 169 }
@@ -428,9 +430,8 @@ static u64 __init __e820_update_range(struct e820map *e820x, u64 start,
428 size = ULLONG_MAX - start; 430 size = ULLONG_MAX - start;
429 431
430 end = start + size; 432 end = start + size;
431 printk(KERN_DEBUG "e820 update range: %016Lx - %016Lx ", 433 printk(KERN_DEBUG "e820: update [mem %#010Lx-%#010Lx] ",
432 (unsigned long long) start, 434 (unsigned long long) start, (unsigned long long) (end - 1));
433 (unsigned long long) end);
434 e820_print_type(old_type); 435 e820_print_type(old_type);
435 printk(KERN_CONT " ==> "); 436 printk(KERN_CONT " ==> ");
436 e820_print_type(new_type); 437 e820_print_type(new_type);
@@ -509,9 +510,8 @@ u64 __init e820_remove_range(u64 start, u64 size, unsigned old_type,
509 size = ULLONG_MAX - start; 510 size = ULLONG_MAX - start;
510 511
511 end = start + size; 512 end = start + size;
512 printk(KERN_DEBUG "e820 remove range: %016Lx - %016Lx ", 513 printk(KERN_DEBUG "e820: remove [mem %#010Lx-%#010Lx] ",
513 (unsigned long long) start, 514 (unsigned long long) start, (unsigned long long) (end - 1));
514 (unsigned long long) end);
515 if (checktype) 515 if (checktype)
516 e820_print_type(old_type); 516 e820_print_type(old_type);
517 printk(KERN_CONT "\n"); 517 printk(KERN_CONT "\n");
@@ -567,7 +567,7 @@ void __init update_e820(void)
567 if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map)) 567 if (sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &nr_map))
568 return; 568 return;
569 e820.nr_map = nr_map; 569 e820.nr_map = nr_map;
570 printk(KERN_INFO "modified physical RAM map:\n"); 570 printk(KERN_INFO "e820: modified physical RAM map:\n");
571 e820_print_map("modified"); 571 e820_print_map("modified");
572} 572}
573static void __init update_e820_saved(void) 573static void __init update_e820_saved(void)
@@ -637,8 +637,8 @@ __init void e820_setup_gap(void)
637 if (!found) { 637 if (!found) {
638 gapstart = (max_pfn << PAGE_SHIFT) + 1024*1024; 638 gapstart = (max_pfn << PAGE_SHIFT) + 1024*1024;
639 printk(KERN_ERR 639 printk(KERN_ERR
640 "PCI: Warning: Cannot find a gap in the 32bit address range\n" 640 "e820: cannot find a gap in the 32bit address range\n"
641 "PCI: Unassigned devices with 32bit resource registers may break!\n"); 641 "e820: PCI devices with unassigned 32bit BARs may break!\n");
642 } 642 }
643#endif 643#endif
644 644
@@ -648,8 +648,8 @@ __init void e820_setup_gap(void)
648 pci_mem_start = gapstart; 648 pci_mem_start = gapstart;
649 649
650 printk(KERN_INFO 650 printk(KERN_INFO
651 "Allocating PCI resources starting at %lx (gap: %lx:%lx)\n", 651 "e820: [mem %#010lx-%#010lx] available for PCI devices\n",
652 pci_mem_start, gapstart, gapsize); 652 gapstart, gapstart + gapsize - 1);
653} 653}
654 654
655/** 655/**
@@ -667,7 +667,7 @@ void __init parse_e820_ext(struct setup_data *sdata)
667 extmap = (struct e820entry *)(sdata->data); 667 extmap = (struct e820entry *)(sdata->data);
668 __append_e820_map(extmap, entries); 668 __append_e820_map(extmap, entries);
669 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); 669 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
670 printk(KERN_INFO "extended physical RAM map:\n"); 670 printk(KERN_INFO "e820: extended physical RAM map:\n");
671 e820_print_map("extended"); 671 e820_print_map("extended");
672} 672}
673 673
@@ -734,7 +734,7 @@ u64 __init early_reserve_e820(u64 size, u64 align)
734 addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE); 734 addr = __memblock_alloc_base(size, align, MEMBLOCK_ALLOC_ACCESSIBLE);
735 if (addr) { 735 if (addr) {
736 e820_update_range_saved(addr, size, E820_RAM, E820_RESERVED); 736 e820_update_range_saved(addr, size, E820_RAM, E820_RESERVED);
737 printk(KERN_INFO "update e820_saved for early_reserve_e820\n"); 737 printk(KERN_INFO "e820: update e820_saved for early_reserve_e820\n");
738 update_e820_saved(); 738 update_e820_saved();
739 } 739 }
740 740
@@ -784,7 +784,7 @@ static unsigned long __init e820_end_pfn(unsigned long limit_pfn, unsigned type)
784 if (last_pfn > max_arch_pfn) 784 if (last_pfn > max_arch_pfn)
785 last_pfn = max_arch_pfn; 785 last_pfn = max_arch_pfn;
786 786
787 printk(KERN_INFO "last_pfn = %#lx max_arch_pfn = %#lx\n", 787 printk(KERN_INFO "e820: last_pfn = %#lx max_arch_pfn = %#lx\n",
788 last_pfn, max_arch_pfn); 788 last_pfn, max_arch_pfn);
789 return last_pfn; 789 return last_pfn;
790} 790}
@@ -888,7 +888,7 @@ void __init finish_e820_parsing(void)
888 early_panic("Invalid user supplied memory map"); 888 early_panic("Invalid user supplied memory map");
889 e820.nr_map = nr; 889 e820.nr_map = nr;
890 890
891 printk(KERN_INFO "user-defined physical RAM map:\n"); 891 printk(KERN_INFO "e820: user-defined physical RAM map:\n");
892 e820_print_map("user"); 892 e820_print_map("user");
893 } 893 }
894} 894}
@@ -996,8 +996,9 @@ void __init e820_reserve_resources_late(void)
996 end = MAX_RESOURCE_SIZE; 996 end = MAX_RESOURCE_SIZE;
997 if (start >= end) 997 if (start >= end)
998 continue; 998 continue;
999 printk(KERN_DEBUG "reserve RAM buffer: %016llx - %016llx ", 999 printk(KERN_DEBUG
1000 start, end); 1000 "e820: reserve RAM buffer [mem %#010llx-%#010llx]\n",
1001 start, end);
1001 reserve_region_with_split(&iomem_resource, start, end, 1002 reserve_region_with_split(&iomem_resource, start, end,
1002 "RAM buffer"); 1003 "RAM buffer");
1003 } 1004 }
@@ -1047,7 +1048,7 @@ void __init setup_memory_map(void)
1047 1048
1048 who = x86_init.resources.memory_setup(); 1049 who = x86_init.resources.memory_setup();
1049 memcpy(&e820_saved, &e820, sizeof(struct e820map)); 1050 memcpy(&e820_saved, &e820, sizeof(struct e820map));
1050 printk(KERN_INFO "BIOS-provided physical RAM map:\n"); 1051 printk(KERN_INFO "e820: BIOS-provided physical RAM map:\n");
1051 e820_print_map(who); 1052 e820_print_map(who);
1052} 1053}
1053 1054
diff --git a/arch/x86/kernel/head32.c b/arch/x86/kernel/head32.c
index 51ff18616d50..c18f59d10101 100644
--- a/arch/x86/kernel/head32.c
+++ b/arch/x86/kernel/head32.c
@@ -14,7 +14,6 @@
14#include <asm/sections.h> 14#include <asm/sections.h>
15#include <asm/e820.h> 15#include <asm/e820.h>
16#include <asm/page.h> 16#include <asm/page.h>
17#include <asm/trampoline.h>
18#include <asm/apic.h> 17#include <asm/apic.h>
19#include <asm/io_apic.h> 18#include <asm/io_apic.h>
20#include <asm/bios_ebda.h> 19#include <asm/bios_ebda.h>
diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
index 3a3b779f41d3..037df57a99ac 100644
--- a/arch/x86/kernel/head64.c
+++ b/arch/x86/kernel/head64.c
@@ -24,7 +24,6 @@
24#include <asm/sections.h> 24#include <asm/sections.h>
25#include <asm/kdebug.h> 25#include <asm/kdebug.h>
26#include <asm/e820.h> 26#include <asm/e820.h>
27#include <asm/trampoline.h>
28#include <asm/bios_ebda.h> 27#include <asm/bios_ebda.h>
29 28
30static void __init zap_identity_mappings(void) 29static void __init zap_identity_mappings(void)
diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
index 463c9797ca6a..d42ab17b7397 100644
--- a/arch/x86/kernel/head_32.S
+++ b/arch/x86/kernel/head_32.S
@@ -274,10 +274,7 @@ num_subarch_entries = (. - subarch_entries) / 4
274 * If cpu hotplug is not supported then this code can go in init section 274 * If cpu hotplug is not supported then this code can go in init section
275 * which will be freed later 275 * which will be freed later
276 */ 276 */
277
278__CPUINIT 277__CPUINIT
279
280#ifdef CONFIG_SMP
281ENTRY(startup_32_smp) 278ENTRY(startup_32_smp)
282 cld 279 cld
283 movl $(__BOOT_DS),%eax 280 movl $(__BOOT_DS),%eax
@@ -288,7 +285,7 @@ ENTRY(startup_32_smp)
288 movl pa(stack_start),%ecx 285 movl pa(stack_start),%ecx
289 movl %eax,%ss 286 movl %eax,%ss
290 leal -__PAGE_OFFSET(%ecx),%esp 287 leal -__PAGE_OFFSET(%ecx),%esp
291#endif /* CONFIG_SMP */ 288
292default_entry: 289default_entry:
293 290
294/* 291/*
diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
index 7a40f2447321..94bf9cc2c7ee 100644
--- a/arch/x86/kernel/head_64.S
+++ b/arch/x86/kernel/head_64.S
@@ -139,10 +139,6 @@ ident_complete:
139 /* Fixup phys_base */ 139 /* Fixup phys_base */
140 addq %rbp, phys_base(%rip) 140 addq %rbp, phys_base(%rip)
141 141
142 /* Fixup trampoline */
143 addq %rbp, trampoline_level4_pgt + 0(%rip)
144 addq %rbp, trampoline_level4_pgt + (511*8)(%rip)
145
146 /* Due to ENTRY(), sometimes the empty space gets filled with 142 /* Due to ENTRY(), sometimes the empty space gets filled with
147 * zeros. Better take a jmp than relying on empty space being 143 * zeros. Better take a jmp than relying on empty space being
148 * filled with 0x90 (nop) 144 * filled with 0x90 (nop)
diff --git a/arch/x86/kernel/kvmclock.c b/arch/x86/kernel/kvmclock.c
index f8492da65bfc..086eb58c6e80 100644
--- a/arch/x86/kernel/kvmclock.c
+++ b/arch/x86/kernel/kvmclock.c
@@ -22,6 +22,7 @@
22#include <asm/msr.h> 22#include <asm/msr.h>
23#include <asm/apic.h> 23#include <asm/apic.h>
24#include <linux/percpu.h> 24#include <linux/percpu.h>
25#include <linux/hardirq.h>
25 26
26#include <asm/x86_init.h> 27#include <asm/x86_init.h>
27#include <asm/reboot.h> 28#include <asm/reboot.h>
@@ -114,6 +115,25 @@ static void kvm_get_preset_lpj(void)
114 preset_lpj = lpj; 115 preset_lpj = lpj;
115} 116}
116 117
118bool kvm_check_and_clear_guest_paused(void)
119{
120 bool ret = false;
121 struct pvclock_vcpu_time_info *src;
122
123 /*
124 * per_cpu() is safe here because this function is only called from
125 * timer functions where preemption is already disabled.
126 */
127 WARN_ON(!in_atomic());
128 src = &__get_cpu_var(hv_clock);
129 if ((src->flags & PVCLOCK_GUEST_STOPPED) != 0) {
130 __this_cpu_and(hv_clock.flags, ~PVCLOCK_GUEST_STOPPED);
131 ret = true;
132 }
133
134 return ret;
135}
136
117static struct clocksource kvm_clock = { 137static struct clocksource kvm_clock = {
118 .name = "kvm-clock", 138 .name = "kvm-clock",
119 .read = kvm_clock_get_cycles, 139 .read = kvm_clock_get_cycles,
diff --git a/arch/x86/kernel/mpparse.c b/arch/x86/kernel/mpparse.c
index b02d4dd6b8a3..d2b56489d70f 100644
--- a/arch/x86/kernel/mpparse.c
+++ b/arch/x86/kernel/mpparse.c
@@ -27,7 +27,6 @@
27#include <asm/proto.h> 27#include <asm/proto.h>
28#include <asm/bios_ebda.h> 28#include <asm/bios_ebda.h>
29#include <asm/e820.h> 29#include <asm/e820.h>
30#include <asm/trampoline.h>
31#include <asm/setup.h> 30#include <asm/setup.h>
32#include <asm/smp.h> 31#include <asm/smp.h>
33 32
@@ -568,8 +567,8 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
568 struct mpf_intel *mpf; 567 struct mpf_intel *mpf;
569 unsigned long mem; 568 unsigned long mem;
570 569
571 apic_printk(APIC_VERBOSE, "Scan SMP from %p for %ld bytes.\n", 570 apic_printk(APIC_VERBOSE, "Scan for SMP in [mem %#010lx-%#010lx]\n",
572 bp, length); 571 base, base + length - 1);
573 BUILD_BUG_ON(sizeof(*mpf) != 16); 572 BUILD_BUG_ON(sizeof(*mpf) != 16);
574 573
575 while (length > 0) { 574 while (length > 0) {
@@ -584,8 +583,10 @@ static int __init smp_scan_config(unsigned long base, unsigned long length)
584#endif 583#endif
585 mpf_found = mpf; 584 mpf_found = mpf;
586 585
587 printk(KERN_INFO "found SMP MP-table at [%p] %llx\n", 586 printk(KERN_INFO "found SMP MP-table at [mem %#010llx-%#010llx] mapped at [%p]\n",
588 mpf, (u64)virt_to_phys(mpf)); 587 (unsigned long long) virt_to_phys(mpf),
588 (unsigned long long) virt_to_phys(mpf) +
589 sizeof(*mpf) - 1, mpf);
589 590
590 mem = virt_to_phys(mpf); 591 mem = virt_to_phys(mpf);
591 memblock_reserve(mem, sizeof(*mpf)); 592 memblock_reserve(mem, sizeof(*mpf));
diff --git a/arch/x86/kernel/pci-dma.c b/arch/x86/kernel/pci-dma.c
index 3003250ac51d..62c9457ccd2f 100644
--- a/arch/x86/kernel/pci-dma.c
+++ b/arch/x86/kernel/pci-dma.c
@@ -100,14 +100,18 @@ void *dma_generic_alloc_coherent(struct device *dev, size_t size,
100 struct dma_attrs *attrs) 100 struct dma_attrs *attrs)
101{ 101{
102 unsigned long dma_mask; 102 unsigned long dma_mask;
103 struct page *page; 103 struct page *page = NULL;
104 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
104 dma_addr_t addr; 105 dma_addr_t addr;
105 106
106 dma_mask = dma_alloc_coherent_mask(dev, flag); 107 dma_mask = dma_alloc_coherent_mask(dev, flag);
107 108
108 flag |= __GFP_ZERO; 109 flag |= __GFP_ZERO;
109again: 110again:
110 page = alloc_pages_node(dev_to_node(dev), flag, get_order(size)); 111 if (!(flag & GFP_ATOMIC))
112 page = dma_alloc_from_contiguous(dev, count, get_order(size));
113 if (!page)
114 page = alloc_pages_node(dev_to_node(dev), flag, get_order(size));
111 if (!page) 115 if (!page)
112 return NULL; 116 return NULL;
113 117
@@ -127,6 +131,16 @@ again:
127 return page_address(page); 131 return page_address(page);
128} 132}
129 133
134void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
135 dma_addr_t dma_addr, struct dma_attrs *attrs)
136{
137 unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
138 struct page *page = virt_to_page(vaddr);
139
140 if (!dma_release_from_contiguous(dev, page, count))
141 free_pages((unsigned long)vaddr, get_order(size));
142}
143
130/* 144/*
131 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel 145 * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
132 * parameter documentation. 146 * parameter documentation.
diff --git a/arch/x86/kernel/pci-nommu.c b/arch/x86/kernel/pci-nommu.c
index f96050685b46..871be4a84c7d 100644
--- a/arch/x86/kernel/pci-nommu.c
+++ b/arch/x86/kernel/pci-nommu.c
@@ -74,12 +74,6 @@ static int nommu_map_sg(struct device *hwdev, struct scatterlist *sg,
74 return nents; 74 return nents;
75} 75}
76 76
77static void nommu_free_coherent(struct device *dev, size_t size, void *vaddr,
78 dma_addr_t dma_addr, struct dma_attrs *attrs)
79{
80 free_pages((unsigned long)vaddr, get_order(size));
81}
82
83static void nommu_sync_single_for_device(struct device *dev, 77static void nommu_sync_single_for_device(struct device *dev,
84 dma_addr_t addr, size_t size, 78 dma_addr_t addr, size_t size,
85 enum dma_data_direction dir) 79 enum dma_data_direction dir)
@@ -97,7 +91,7 @@ static void nommu_sync_sg_for_device(struct device *dev,
97 91
98struct dma_map_ops nommu_dma_ops = { 92struct dma_map_ops nommu_dma_ops = {
99 .alloc = dma_generic_alloc_coherent, 93 .alloc = dma_generic_alloc_coherent,
100 .free = nommu_free_coherent, 94 .free = dma_generic_free_coherent,
101 .map_sg = nommu_map_sg, 95 .map_sg = nommu_map_sg,
102 .map_page = nommu_map_page, 96 .map_page = nommu_map_page,
103 .sync_single_for_device = nommu_sync_single_for_device, 97 .sync_single_for_device = nommu_sync_single_for_device,
diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
index 77215c23fba1..79c45af81604 100644
--- a/arch/x86/kernel/reboot.c
+++ b/arch/x86/kernel/reboot.c
@@ -24,6 +24,7 @@
24#ifdef CONFIG_X86_32 24#ifdef CONFIG_X86_32
25# include <linux/ctype.h> 25# include <linux/ctype.h>
26# include <linux/mc146818rtc.h> 26# include <linux/mc146818rtc.h>
27# include <asm/realmode.h>
27#else 28#else
28# include <asm/x86_init.h> 29# include <asm/x86_init.h>
29#endif 30#endif
@@ -156,15 +157,10 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
156 return 0; 157 return 0;
157} 158}
158 159
159extern const unsigned char machine_real_restart_asm[];
160extern const u64 machine_real_restart_gdt[3];
161
162void machine_real_restart(unsigned int type) 160void machine_real_restart(unsigned int type)
163{ 161{
164 void *restart_va; 162 void (*restart_lowmem)(unsigned int) = (void (*)(unsigned int))
165 unsigned long restart_pa; 163 real_mode_header->machine_real_restart_asm;
166 void (*restart_lowmem)(unsigned int);
167 u64 *lowmem_gdt;
168 164
169 local_irq_disable(); 165 local_irq_disable();
170 166
@@ -195,21 +191,6 @@ void machine_real_restart(unsigned int type)
195 * too. */ 191 * too. */
196 *((unsigned short *)0x472) = reboot_mode; 192 *((unsigned short *)0x472) = reboot_mode;
197 193
198 /* Patch the GDT in the low memory trampoline */
199 lowmem_gdt = TRAMPOLINE_SYM(machine_real_restart_gdt);
200
201 restart_va = TRAMPOLINE_SYM(machine_real_restart_asm);
202 restart_pa = virt_to_phys(restart_va);
203 restart_lowmem = (void (*)(unsigned int))restart_pa;
204
205 /* GDT[0]: GDT self-pointer */
206 lowmem_gdt[0] =
207 (u64)(sizeof(machine_real_restart_gdt) - 1) +
208 ((u64)virt_to_phys(lowmem_gdt) << 16);
209 /* GDT[1]: 64K real mode code segment */
210 lowmem_gdt[1] =
211 GDT_ENTRY(0x009b, restart_pa, 0xffff);
212
213 /* Jump to the identity-mapped low memory code */ 194 /* Jump to the identity-mapped low memory code */
214 restart_lowmem(type); 195 restart_lowmem(type);
215} 196}
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 366c688d619e..16be6dc14db1 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -49,6 +49,7 @@
49#include <asm/pci-direct.h> 49#include <asm/pci-direct.h>
50#include <linux/init_ohci1394_dma.h> 50#include <linux/init_ohci1394_dma.h>
51#include <linux/kvm_para.h> 51#include <linux/kvm_para.h>
52#include <linux/dma-contiguous.h>
52 53
53#include <linux/errno.h> 54#include <linux/errno.h>
54#include <linux/kernel.h> 55#include <linux/kernel.h>
@@ -72,7 +73,7 @@
72 73
73#include <asm/mtrr.h> 74#include <asm/mtrr.h>
74#include <asm/apic.h> 75#include <asm/apic.h>
75#include <asm/trampoline.h> 76#include <asm/realmode.h>
76#include <asm/e820.h> 77#include <asm/e820.h>
77#include <asm/mpspec.h> 78#include <asm/mpspec.h>
78#include <asm/setup.h> 79#include <asm/setup.h>
@@ -333,8 +334,8 @@ static void __init relocate_initrd(void)
333 memblock_reserve(ramdisk_here, area_size); 334 memblock_reserve(ramdisk_here, area_size);
334 initrd_start = ramdisk_here + PAGE_OFFSET; 335 initrd_start = ramdisk_here + PAGE_OFFSET;
335 initrd_end = initrd_start + ramdisk_size; 336 initrd_end = initrd_start + ramdisk_size;
336 printk(KERN_INFO "Allocated new RAMDISK: %08llx - %08llx\n", 337 printk(KERN_INFO "Allocated new RAMDISK: [mem %#010llx-%#010llx]\n",
337 ramdisk_here, ramdisk_here + ramdisk_size); 338 ramdisk_here, ramdisk_here + ramdisk_size - 1);
338 339
339 q = (char *)initrd_start; 340 q = (char *)initrd_start;
340 341
@@ -365,8 +366,8 @@ static void __init relocate_initrd(void)
365 /* high pages is not converted by early_res_to_bootmem */ 366 /* high pages is not converted by early_res_to_bootmem */
366 ramdisk_image = boot_params.hdr.ramdisk_image; 367 ramdisk_image = boot_params.hdr.ramdisk_image;
367 ramdisk_size = boot_params.hdr.ramdisk_size; 368 ramdisk_size = boot_params.hdr.ramdisk_size;
368 printk(KERN_INFO "Move RAMDISK from %016llx - %016llx to" 369 printk(KERN_INFO "Move RAMDISK from [mem %#010llx-%#010llx] to"
369 " %08llx - %08llx\n", 370 " [mem %#010llx-%#010llx]\n",
370 ramdisk_image, ramdisk_image + ramdisk_size - 1, 371 ramdisk_image, ramdisk_image + ramdisk_size - 1,
371 ramdisk_here, ramdisk_here + ramdisk_size - 1); 372 ramdisk_here, ramdisk_here + ramdisk_size - 1);
372} 373}
@@ -391,8 +392,8 @@ static void __init reserve_initrd(void)
391 ramdisk_size, end_of_lowmem>>1); 392 ramdisk_size, end_of_lowmem>>1);
392 } 393 }
393 394
394 printk(KERN_INFO "RAMDISK: %08llx - %08llx\n", ramdisk_image, 395 printk(KERN_INFO "RAMDISK: [mem %#010llx-%#010llx]\n", ramdisk_image,
395 ramdisk_end); 396 ramdisk_end - 1);
396 397
397 398
398 if (ramdisk_end <= end_of_lowmem) { 399 if (ramdisk_end <= end_of_lowmem) {
@@ -905,10 +906,10 @@ void __init setup_arch(char **cmdline_p)
905 setup_bios_corruption_check(); 906 setup_bios_corruption_check();
906#endif 907#endif
907 908
908 printk(KERN_DEBUG "initial memory mapped : 0 - %08lx\n", 909 printk(KERN_DEBUG "initial memory mapped: [mem 0x00000000-%#010lx]\n",
909 max_pfn_mapped<<PAGE_SHIFT); 910 (max_pfn_mapped<<PAGE_SHIFT) - 1);
910 911
911 setup_trampolines(); 912 setup_real_mode();
912 913
913 init_gbpages(); 914 init_gbpages();
914 915
@@ -925,6 +926,7 @@ void __init setup_arch(char **cmdline_p)
925 } 926 }
926#endif 927#endif
927 memblock.current_limit = get_max_mapped(); 928 memblock.current_limit = get_max_mapped();
929 dma_contiguous_reserve(0);
928 930
929 /* 931 /*
930 * NOTE: On x86-32, only from this point on, fixmaps are ready for use. 932 * NOTE: On x86-32, only from this point on, fixmaps are ready for use.
@@ -966,6 +968,8 @@ void __init setup_arch(char **cmdline_p)
966 if (boot_cpu_data.cpuid_level >= 0) { 968 if (boot_cpu_data.cpuid_level >= 0) {
967 /* A CPU has %cr4 if and only if it has CPUID */ 969 /* A CPU has %cr4 if and only if it has CPUID */
968 mmu_cr4_features = read_cr4(); 970 mmu_cr4_features = read_cr4();
971 if (trampoline_cr4_features)
972 *trampoline_cr4_features = mmu_cr4_features;
969 } 973 }
970 974
971#ifdef CONFIG_X86_32 975#ifdef CONFIG_X86_32
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 433529e29be4..f56f96da77f5 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -57,7 +57,7 @@
57#include <asm/nmi.h> 57#include <asm/nmi.h>
58#include <asm/irq.h> 58#include <asm/irq.h>
59#include <asm/idle.h> 59#include <asm/idle.h>
60#include <asm/trampoline.h> 60#include <asm/realmode.h>
61#include <asm/cpu.h> 61#include <asm/cpu.h>
62#include <asm/numa.h> 62#include <asm/numa.h>
63#include <asm/pgtable.h> 63#include <asm/pgtable.h>
@@ -73,6 +73,8 @@
73#include <asm/smpboot_hooks.h> 73#include <asm/smpboot_hooks.h>
74#include <asm/i8259.h> 74#include <asm/i8259.h>
75 75
76#include <asm/realmode.h>
77
76/* State of each CPU */ 78/* State of each CPU */
77DEFINE_PER_CPU(int, cpu_state) = { 0 }; 79DEFINE_PER_CPU(int, cpu_state) = { 0 };
78 80
@@ -660,8 +662,12 @@ static void __cpuinit announce_cpu(int cpu, int apicid)
660 */ 662 */
661static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle) 663static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
662{ 664{
665 volatile u32 *trampoline_status =
666 (volatile u32 *) __va(real_mode_header->trampoline_status);
667 /* start_ip had better be page-aligned! */
668 unsigned long start_ip = real_mode_header->trampoline_start;
669
663 unsigned long boot_error = 0; 670 unsigned long boot_error = 0;
664 unsigned long start_ip;
665 int timeout; 671 int timeout;
666 672
667 alternatives_smp_switch(1); 673 alternatives_smp_switch(1);
@@ -684,9 +690,6 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
684 initial_code = (unsigned long)start_secondary; 690 initial_code = (unsigned long)start_secondary;
685 stack_start = idle->thread.sp; 691 stack_start = idle->thread.sp;
686 692
687 /* start_ip had better be page-aligned! */
688 start_ip = trampoline_address();
689
690 /* So we see what's up */ 693 /* So we see what's up */
691 announce_cpu(cpu, apicid); 694 announce_cpu(cpu, apicid);
692 695
@@ -749,8 +752,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
749 pr_debug("CPU%d: has booted.\n", cpu); 752 pr_debug("CPU%d: has booted.\n", cpu);
750 } else { 753 } else {
751 boot_error = 1; 754 boot_error = 1;
752 if (*(volatile u32 *)TRAMPOLINE_SYM(trampoline_status) 755 if (*trampoline_status == 0xA5A5A5A5)
753 == 0xA5A5A5A5)
754 /* trampoline started but...? */ 756 /* trampoline started but...? */
755 pr_err("CPU%d: Stuck ??\n", cpu); 757 pr_err("CPU%d: Stuck ??\n", cpu);
756 else 758 else
@@ -776,7 +778,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
776 } 778 }
777 779
778 /* mark "stuck" area as not stuck */ 780 /* mark "stuck" area as not stuck */
779 *(volatile u32 *)TRAMPOLINE_SYM(trampoline_status) = 0; 781 *trampoline_status = 0;
780 782
781 if (get_uv_system_type() != UV_NON_UNIQUE_APIC) { 783 if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
782 /* 784 /*
diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
index 6410744ac5cb..f84fe00fad48 100644
--- a/arch/x86/kernel/tboot.c
+++ b/arch/x86/kernel/tboot.c
@@ -32,7 +32,7 @@
32#include <linux/mm.h> 32#include <linux/mm.h>
33#include <linux/tboot.h> 33#include <linux/tboot.h>
34 34
35#include <asm/trampoline.h> 35#include <asm/realmode.h>
36#include <asm/processor.h> 36#include <asm/processor.h>
37#include <asm/bootparam.h> 37#include <asm/bootparam.h>
38#include <asm/pgtable.h> 38#include <asm/pgtable.h>
@@ -44,7 +44,7 @@
44#include <asm/e820.h> 44#include <asm/e820.h>
45#include <asm/io.h> 45#include <asm/io.h>
46 46
47#include "acpi/realmode/wakeup.h" 47#include "../realmode/rm/wakeup.h"
48 48
49/* Global pointer to shared data; NULL means no measured launch. */ 49/* Global pointer to shared data; NULL means no measured launch. */
50struct tboot *tboot __read_mostly; 50struct tboot *tboot __read_mostly;
@@ -201,7 +201,8 @@ static int tboot_setup_sleep(void)
201 add_mac_region(e820.map[i].addr, e820.map[i].size); 201 add_mac_region(e820.map[i].addr, e820.map[i].size);
202 } 202 }
203 203
204 tboot->acpi_sinfo.kernel_s3_resume_vector = acpi_wakeup_address; 204 tboot->acpi_sinfo.kernel_s3_resume_vector =
205 real_mode_header->wakeup_start;
205 206
206 return 0; 207 return 0;
207} 208}
diff --git a/arch/x86/kernel/trampoline.c b/arch/x86/kernel/trampoline.c
deleted file mode 100644
index a73b61055ad6..000000000000
--- a/arch/x86/kernel/trampoline.c
+++ /dev/null
@@ -1,42 +0,0 @@
1#include <linux/io.h>
2#include <linux/memblock.h>
3
4#include <asm/trampoline.h>
5#include <asm/cacheflush.h>
6#include <asm/pgtable.h>
7
8unsigned char *x86_trampoline_base;
9
10void __init setup_trampolines(void)
11{
12 phys_addr_t mem;
13 size_t size = PAGE_ALIGN(x86_trampoline_end - x86_trampoline_start);
14
15 /* Has to be in very low memory so we can execute real-mode AP code. */
16 mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
17 if (!mem)
18 panic("Cannot allocate trampoline\n");
19
20 x86_trampoline_base = __va(mem);
21 memblock_reserve(mem, size);
22
23 printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
24 x86_trampoline_base, (unsigned long long)mem, size);
25
26 memcpy(x86_trampoline_base, x86_trampoline_start, size);
27}
28
29/*
30 * setup_trampolines() gets called very early, to guarantee the
31 * availability of low memory. This is before the proper kernel page
32 * tables are set up, so we cannot set page permissions in that
33 * function. Thus, we use an arch_initcall instead.
34 */
35static int __init configure_trampolines(void)
36{
37 size_t size = PAGE_ALIGN(x86_trampoline_end - x86_trampoline_start);
38
39 set_memory_x((unsigned long)x86_trampoline_base, size >> PAGE_SHIFT);
40 return 0;
41}
42arch_initcall(configure_trampolines);
diff --git a/arch/x86/kernel/trampoline_32.S b/arch/x86/kernel/trampoline_32.S
deleted file mode 100644
index 451c0a7ef7fd..000000000000
--- a/arch/x86/kernel/trampoline_32.S
+++ /dev/null
@@ -1,83 +0,0 @@
1/*
2 *
3 * Trampoline.S Derived from Setup.S by Linus Torvalds
4 *
5 * 4 Jan 1997 Michael Chastain: changed to gnu as.
6 *
7 * This is only used for booting secondary CPUs in SMP machine
8 *
9 * Entry: CS:IP point to the start of our code, we are
10 * in real mode with no stack, but the rest of the
11 * trampoline page to make our stack and everything else
12 * is a mystery.
13 *
14 * We jump into arch/x86/kernel/head_32.S.
15 *
16 * On entry to trampoline_data, the processor is in real mode
17 * with 16-bit addressing and 16-bit data. CS has some value
18 * and IP is zero. Thus, data addresses need to be absolute
19 * (no relocation) and are taken with regard to r_base.
20 *
21 * If you work on this file, check the object module with
22 * objdump --reloc to make sure there are no relocation
23 * entries except for:
24 *
25 * TYPE VALUE
26 * R_386_32 startup_32_smp
27 * R_386_32 boot_gdt
28 */
29
30#include <linux/linkage.h>
31#include <linux/init.h>
32#include <asm/segment.h>
33#include <asm/page_types.h>
34
35#ifdef CONFIG_SMP
36
37 .section ".x86_trampoline","a"
38 .balign PAGE_SIZE
39 .code16
40
41ENTRY(trampoline_data)
42r_base = .
43 wbinvd # Needed for NUMA-Q should be harmless for others
44 mov %cs, %ax # Code and data in the same place
45 mov %ax, %ds
46
47 cli # We should be safe anyway
48
49 movl $0xA5A5A5A5, trampoline_status - r_base
50 # write marker for master knows we're running
51
52 /* GDT tables in non default location kernel can be beyond 16MB and
53 * lgdt will not be able to load the address as in real mode default
54 * operand size is 16bit. Use lgdtl instead to force operand size
55 * to 32 bit.
56 */
57
58 lidtl boot_idt_descr - r_base # load idt with 0, 0
59 lgdtl boot_gdt_descr - r_base # load gdt with whatever is appropriate
60
61 xor %ax, %ax
62 inc %ax # protected mode (PE) bit
63 lmsw %ax # into protected mode
64 # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
65 ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
66
67 # These need to be in the same 64K segment as the above;
68 # hence we don't use the boot_gdt_descr defined in head.S
69boot_gdt_descr:
70 .word __BOOT_DS + 7 # gdt limit
71 .long boot_gdt - __PAGE_OFFSET # gdt base
72
73boot_idt_descr:
74 .word 0 # idt limit = 0
75 .long 0 # idt base = 0L
76
77ENTRY(trampoline_status)
78 .long 0
79
80.globl trampoline_end
81trampoline_end:
82
83#endif /* CONFIG_SMP */
diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
index 0f703f10901a..22a1530146a8 100644
--- a/arch/x86/kernel/vmlinux.lds.S
+++ b/arch/x86/kernel/vmlinux.lds.S
@@ -197,18 +197,6 @@ SECTIONS
197 197
198 INIT_DATA_SECTION(16) 198 INIT_DATA_SECTION(16)
199 199
200 /*
201 * Code and data for a variety of lowlevel trampolines, to be
202 * copied into base memory (< 1 MiB) during initialization.
203 * Since it is copied early, the main copy can be discarded
204 * afterwards.
205 */
206 .x86_trampoline : AT(ADDR(.x86_trampoline) - LOAD_OFFSET) {
207 x86_trampoline_start = .;
208 *(.x86_trampoline)
209 x86_trampoline_end = .;
210 }
211
212 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { 200 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
213 __x86_cpu_dev_start = .; 201 __x86_cpu_dev_start = .;
214 *(.x86_cpu_dev.init) 202 *(.x86_cpu_dev.init)
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 1a7fe868f375..a28f338843ea 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -36,6 +36,7 @@ config KVM
36 select TASKSTATS 36 select TASKSTATS
37 select TASK_DELAY_ACCT 37 select TASK_DELAY_ACCT
38 select PERF_EVENTS 38 select PERF_EVENTS
39 select HAVE_KVM_MSI
39 ---help--- 40 ---help---
40 Support hosting fully virtualized guest machines using hardware 41 Support hosting fully virtualized guest machines using hardware
41 virtualization extensions. You will need a fairly recent 42 virtualization extensions. You will need a fairly recent
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 9fed5bedaad6..7df1c6d839fb 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -247,7 +247,8 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
247 247
248 /* cpuid 7.0.ebx */ 248 /* cpuid 7.0.ebx */
249 const u32 kvm_supported_word9_x86_features = 249 const u32 kvm_supported_word9_x86_features =
250 F(FSGSBASE) | F(BMI1) | F(AVX2) | F(SMEP) | F(BMI2) | F(ERMS); 250 F(FSGSBASE) | F(BMI1) | F(HLE) | F(AVX2) | F(SMEP) |
251 F(BMI2) | F(ERMS) | F(RTM);
251 252
252 /* all calls to cpuid_count() should be made on the same cpu */ 253 /* all calls to cpuid_count() should be made on the same cpu */
253 get_cpu(); 254 get_cpu();
@@ -397,7 +398,7 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
397 case KVM_CPUID_SIGNATURE: { 398 case KVM_CPUID_SIGNATURE: {
398 char signature[12] = "KVMKVMKVM\0\0"; 399 char signature[12] = "KVMKVMKVM\0\0";
399 u32 *sigptr = (u32 *)signature; 400 u32 *sigptr = (u32 *)signature;
400 entry->eax = 0; 401 entry->eax = KVM_CPUID_FEATURES;
401 entry->ebx = sigptr[0]; 402 entry->ebx = sigptr[0];
402 entry->ecx = sigptr[1]; 403 entry->ecx = sigptr[1];
403 entry->edx = sigptr[2]; 404 entry->edx = sigptr[2];
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 83756223f8aa..f95d242ee9f7 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -142,6 +142,10 @@
142#define Src2FS (OpFS << Src2Shift) 142#define Src2FS (OpFS << Src2Shift)
143#define Src2GS (OpGS << Src2Shift) 143#define Src2GS (OpGS << Src2Shift)
144#define Src2Mask (OpMask << Src2Shift) 144#define Src2Mask (OpMask << Src2Shift)
145#define Mmx ((u64)1 << 40) /* MMX Vector instruction */
146#define Aligned ((u64)1 << 41) /* Explicitly aligned (e.g. MOVDQA) */
147#define Unaligned ((u64)1 << 42) /* Explicitly unaligned (e.g. MOVDQU) */
148#define Avx ((u64)1 << 43) /* Advanced Vector Extensions */
145 149
146#define X2(x...) x, x 150#define X2(x...) x, x
147#define X3(x...) X2(x), x 151#define X3(x...) X2(x), x
@@ -557,6 +561,29 @@ static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
557 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg); 561 ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
558} 562}
559 563
564/*
565 * x86 defines three classes of vector instructions: explicitly
566 * aligned, explicitly unaligned, and the rest, which change behaviour
567 * depending on whether they're AVX encoded or not.
568 *
569 * Also included is CMPXCHG16B which is not a vector instruction, yet it is
570 * subject to the same check.
571 */
572static bool insn_aligned(struct x86_emulate_ctxt *ctxt, unsigned size)
573{
574 if (likely(size < 16))
575 return false;
576
577 if (ctxt->d & Aligned)
578 return true;
579 else if (ctxt->d & Unaligned)
580 return false;
581 else if (ctxt->d & Avx)
582 return false;
583 else
584 return true;
585}
586
560static int __linearize(struct x86_emulate_ctxt *ctxt, 587static int __linearize(struct x86_emulate_ctxt *ctxt,
561 struct segmented_address addr, 588 struct segmented_address addr,
562 unsigned size, bool write, bool fetch, 589 unsigned size, bool write, bool fetch,
@@ -621,6 +648,8 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
621 } 648 }
622 if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8) 649 if (fetch ? ctxt->mode != X86EMUL_MODE_PROT64 : ctxt->ad_bytes != 8)
623 la &= (u32)-1; 650 la &= (u32)-1;
651 if (insn_aligned(ctxt, size) && ((la & (size - 1)) != 0))
652 return emulate_gp(ctxt, 0);
624 *linear = la; 653 *linear = la;
625 return X86EMUL_CONTINUE; 654 return X86EMUL_CONTINUE;
626bad: 655bad:
@@ -859,6 +888,40 @@ static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
859 ctxt->ops->put_fpu(ctxt); 888 ctxt->ops->put_fpu(ctxt);
860} 889}
861 890
891static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
892{
893 ctxt->ops->get_fpu(ctxt);
894 switch (reg) {
895 case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
896 case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
897 case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
898 case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
899 case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
900 case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
901 case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
902 case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
903 default: BUG();
904 }
905 ctxt->ops->put_fpu(ctxt);
906}
907
908static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
909{
910 ctxt->ops->get_fpu(ctxt);
911 switch (reg) {
912 case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
913 case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
914 case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
915 case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
916 case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
917 case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
918 case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
919 case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
920 default: BUG();
921 }
922 ctxt->ops->put_fpu(ctxt);
923}
924
862static void decode_register_operand(struct x86_emulate_ctxt *ctxt, 925static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
863 struct operand *op) 926 struct operand *op)
864{ 927{
@@ -875,6 +938,13 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
875 read_sse_reg(ctxt, &op->vec_val, reg); 938 read_sse_reg(ctxt, &op->vec_val, reg);
876 return; 939 return;
877 } 940 }
941 if (ctxt->d & Mmx) {
942 reg &= 7;
943 op->type = OP_MM;
944 op->bytes = 8;
945 op->addr.mm = reg;
946 return;
947 }
878 948
879 op->type = OP_REG; 949 op->type = OP_REG;
880 if (ctxt->d & ByteOp) { 950 if (ctxt->d & ByteOp) {
@@ -902,7 +972,6 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
902 ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REG.B */ 972 ctxt->modrm_rm = base_reg = (ctxt->rex_prefix & 1) << 3; /* REG.B */
903 } 973 }
904 974
905 ctxt->modrm = insn_fetch(u8, ctxt);
906 ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6; 975 ctxt->modrm_mod |= (ctxt->modrm & 0xc0) >> 6;
907 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3; 976 ctxt->modrm_reg |= (ctxt->modrm & 0x38) >> 3;
908 ctxt->modrm_rm |= (ctxt->modrm & 0x07); 977 ctxt->modrm_rm |= (ctxt->modrm & 0x07);
@@ -920,6 +989,12 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
920 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm); 989 read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
921 return rc; 990 return rc;
922 } 991 }
992 if (ctxt->d & Mmx) {
993 op->type = OP_MM;
994 op->bytes = 8;
995 op->addr.xmm = ctxt->modrm_rm & 7;
996 return rc;
997 }
923 fetch_register_operand(op); 998 fetch_register_operand(op);
924 return rc; 999 return rc;
925 } 1000 }
@@ -1387,6 +1462,9 @@ static int writeback(struct x86_emulate_ctxt *ctxt)
1387 case OP_XMM: 1462 case OP_XMM:
1388 write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm); 1463 write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
1389 break; 1464 break;
1465 case OP_MM:
1466 write_mmx_reg(ctxt, &ctxt->dst.mm_val, ctxt->dst.addr.mm);
1467 break;
1390 case OP_NONE: 1468 case OP_NONE:
1391 /* no writeback */ 1469 /* no writeback */
1392 break; 1470 break;
@@ -2790,7 +2868,7 @@ static int em_rdpmc(struct x86_emulate_ctxt *ctxt)
2790 2868
2791static int em_mov(struct x86_emulate_ctxt *ctxt) 2869static int em_mov(struct x86_emulate_ctxt *ctxt)
2792{ 2870{
2793 ctxt->dst.val = ctxt->src.val; 2871 memcpy(ctxt->dst.valptr, ctxt->src.valptr, ctxt->op_bytes);
2794 return X86EMUL_CONTINUE; 2872 return X86EMUL_CONTINUE;
2795} 2873}
2796 2874
@@ -2870,12 +2948,6 @@ static int em_mov_sreg_rm(struct x86_emulate_ctxt *ctxt)
2870 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg); 2948 return load_segment_descriptor(ctxt, sel, ctxt->modrm_reg);
2871} 2949}
2872 2950
2873static int em_movdqu(struct x86_emulate_ctxt *ctxt)
2874{
2875 memcpy(&ctxt->dst.vec_val, &ctxt->src.vec_val, ctxt->op_bytes);
2876 return X86EMUL_CONTINUE;
2877}
2878
2879static int em_invlpg(struct x86_emulate_ctxt *ctxt) 2951static int em_invlpg(struct x86_emulate_ctxt *ctxt)
2880{ 2952{
2881 int rc; 2953 int rc;
@@ -3061,35 +3133,13 @@ static int em_btc(struct x86_emulate_ctxt *ctxt)
3061 3133
3062static int em_bsf(struct x86_emulate_ctxt *ctxt) 3134static int em_bsf(struct x86_emulate_ctxt *ctxt)
3063{ 3135{
3064 u8 zf; 3136 emulate_2op_SrcV_nobyte(ctxt, "bsf");
3065
3066 __asm__ ("bsf %2, %0; setz %1"
3067 : "=r"(ctxt->dst.val), "=q"(zf)
3068 : "r"(ctxt->src.val));
3069
3070 ctxt->eflags &= ~X86_EFLAGS_ZF;
3071 if (zf) {
3072 ctxt->eflags |= X86_EFLAGS_ZF;
3073 /* Disable writeback. */
3074 ctxt->dst.type = OP_NONE;
3075 }
3076 return X86EMUL_CONTINUE; 3137 return X86EMUL_CONTINUE;
3077} 3138}
3078 3139
3079static int em_bsr(struct x86_emulate_ctxt *ctxt) 3140static int em_bsr(struct x86_emulate_ctxt *ctxt)
3080{ 3141{
3081 u8 zf; 3142 emulate_2op_SrcV_nobyte(ctxt, "bsr");
3082
3083 __asm__ ("bsr %2, %0; setz %1"
3084 : "=r"(ctxt->dst.val), "=q"(zf)
3085 : "r"(ctxt->src.val));
3086
3087 ctxt->eflags &= ~X86_EFLAGS_ZF;
3088 if (zf) {
3089 ctxt->eflags |= X86_EFLAGS_ZF;
3090 /* Disable writeback. */
3091 ctxt->dst.type = OP_NONE;
3092 }
3093 return X86EMUL_CONTINUE; 3143 return X86EMUL_CONTINUE;
3094} 3144}
3095 3145
@@ -3286,8 +3336,8 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3286 .check_perm = (_p) } 3336 .check_perm = (_p) }
3287#define N D(0) 3337#define N D(0)
3288#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) } 3338#define EXT(_f, _e) { .flags = ((_f) | RMExt), .u.group = (_e) }
3289#define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) } 3339#define G(_f, _g) { .flags = ((_f) | Group | ModRM), .u.group = (_g) }
3290#define GD(_f, _g) { .flags = ((_f) | GroupDual), .u.gdual = (_g) } 3340#define GD(_f, _g) { .flags = ((_f) | GroupDual | ModRM), .u.gdual = (_g) }
3291#define I(_f, _e) { .flags = (_f), .u.execute = (_e) } 3341#define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
3292#define II(_f, _e, _i) \ 3342#define II(_f, _e, _i) \
3293 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i } 3343 { .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
@@ -3307,25 +3357,25 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
3307 I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e) 3357 I2bv(((_f) & ~Lock) | DstAcc | SrcImm, _e)
3308 3358
3309static struct opcode group7_rm1[] = { 3359static struct opcode group7_rm1[] = {
3310 DI(SrcNone | ModRM | Priv, monitor), 3360 DI(SrcNone | Priv, monitor),
3311 DI(SrcNone | ModRM | Priv, mwait), 3361 DI(SrcNone | Priv, mwait),
3312 N, N, N, N, N, N, 3362 N, N, N, N, N, N,
3313}; 3363};
3314 3364
3315static struct opcode group7_rm3[] = { 3365static struct opcode group7_rm3[] = {
3316 DIP(SrcNone | ModRM | Prot | Priv, vmrun, check_svme_pa), 3366 DIP(SrcNone | Prot | Priv, vmrun, check_svme_pa),
3317 II(SrcNone | ModRM | Prot | VendorSpecific, em_vmmcall, vmmcall), 3367 II(SrcNone | Prot | VendorSpecific, em_vmmcall, vmmcall),
3318 DIP(SrcNone | ModRM | Prot | Priv, vmload, check_svme_pa), 3368 DIP(SrcNone | Prot | Priv, vmload, check_svme_pa),
3319 DIP(SrcNone | ModRM | Prot | Priv, vmsave, check_svme_pa), 3369 DIP(SrcNone | Prot | Priv, vmsave, check_svme_pa),
3320 DIP(SrcNone | ModRM | Prot | Priv, stgi, check_svme), 3370 DIP(SrcNone | Prot | Priv, stgi, check_svme),
3321 DIP(SrcNone | ModRM | Prot | Priv, clgi, check_svme), 3371 DIP(SrcNone | Prot | Priv, clgi, check_svme),
3322 DIP(SrcNone | ModRM | Prot | Priv, skinit, check_svme), 3372 DIP(SrcNone | Prot | Priv, skinit, check_svme),
3323 DIP(SrcNone | ModRM | Prot | Priv, invlpga, check_svme), 3373 DIP(SrcNone | Prot | Priv, invlpga, check_svme),
3324}; 3374};
3325 3375
3326static struct opcode group7_rm7[] = { 3376static struct opcode group7_rm7[] = {
3327 N, 3377 N,
3328 DIP(SrcNone | ModRM, rdtscp, check_rdtsc), 3378 DIP(SrcNone, rdtscp, check_rdtsc),
3329 N, N, N, N, N, N, 3379 N, N, N, N, N, N,
3330}; 3380};
3331 3381
@@ -3341,81 +3391,86 @@ static struct opcode group1[] = {
3341}; 3391};
3342 3392
3343static struct opcode group1A[] = { 3393static struct opcode group1A[] = {
3344 I(DstMem | SrcNone | ModRM | Mov | Stack, em_pop), N, N, N, N, N, N, N, 3394 I(DstMem | SrcNone | Mov | Stack, em_pop), N, N, N, N, N, N, N,
3345}; 3395};
3346 3396
3347static struct opcode group3[] = { 3397static struct opcode group3[] = {
3348 I(DstMem | SrcImm | ModRM, em_test), 3398 I(DstMem | SrcImm, em_test),
3349 I(DstMem | SrcImm | ModRM, em_test), 3399 I(DstMem | SrcImm, em_test),
3350 I(DstMem | SrcNone | ModRM | Lock, em_not), 3400 I(DstMem | SrcNone | Lock, em_not),
3351 I(DstMem | SrcNone | ModRM | Lock, em_neg), 3401 I(DstMem | SrcNone | Lock, em_neg),
3352 I(SrcMem | ModRM, em_mul_ex), 3402 I(SrcMem, em_mul_ex),
3353 I(SrcMem | ModRM, em_imul_ex), 3403 I(SrcMem, em_imul_ex),
3354 I(SrcMem | ModRM, em_div_ex), 3404 I(SrcMem, em_div_ex),
3355 I(SrcMem | ModRM, em_idiv_ex), 3405 I(SrcMem, em_idiv_ex),
3356}; 3406};
3357 3407
3358static struct opcode group4[] = { 3408static struct opcode group4[] = {
3359 I(ByteOp | DstMem | SrcNone | ModRM | Lock, em_grp45), 3409 I(ByteOp | DstMem | SrcNone | Lock, em_grp45),
3360 I(ByteOp | DstMem | SrcNone | ModRM | Lock, em_grp45), 3410 I(ByteOp | DstMem | SrcNone | Lock, em_grp45),
3361 N, N, N, N, N, N, 3411 N, N, N, N, N, N,
3362}; 3412};
3363 3413
3364static struct opcode group5[] = { 3414static struct opcode group5[] = {
3365 I(DstMem | SrcNone | ModRM | Lock, em_grp45), 3415 I(DstMem | SrcNone | Lock, em_grp45),
3366 I(DstMem | SrcNone | ModRM | Lock, em_grp45), 3416 I(DstMem | SrcNone | Lock, em_grp45),
3367 I(SrcMem | ModRM | Stack, em_grp45), 3417 I(SrcMem | Stack, em_grp45),
3368 I(SrcMemFAddr | ModRM | ImplicitOps | Stack, em_call_far), 3418 I(SrcMemFAddr | ImplicitOps | Stack, em_call_far),
3369 I(SrcMem | ModRM | Stack, em_grp45), 3419 I(SrcMem | Stack, em_grp45),
3370 I(SrcMemFAddr | ModRM | ImplicitOps, em_grp45), 3420 I(SrcMemFAddr | ImplicitOps, em_grp45),
3371 I(SrcMem | ModRM | Stack, em_grp45), N, 3421 I(SrcMem | Stack, em_grp45), N,
3372}; 3422};
3373 3423
3374static struct opcode group6[] = { 3424static struct opcode group6[] = {
3375 DI(ModRM | Prot, sldt), 3425 DI(Prot, sldt),
3376 DI(ModRM | Prot, str), 3426 DI(Prot, str),
3377 DI(ModRM | Prot | Priv, lldt), 3427 DI(Prot | Priv, lldt),
3378 DI(ModRM | Prot | Priv, ltr), 3428 DI(Prot | Priv, ltr),
3379 N, N, N, N, 3429 N, N, N, N,
3380}; 3430};
3381 3431
3382static struct group_dual group7 = { { 3432static struct group_dual group7 = { {
3383 DI(ModRM | Mov | DstMem | Priv, sgdt), 3433 DI(Mov | DstMem | Priv, sgdt),
3384 DI(ModRM | Mov | DstMem | Priv, sidt), 3434 DI(Mov | DstMem | Priv, sidt),
3385 II(ModRM | SrcMem | Priv, em_lgdt, lgdt), 3435 II(SrcMem | Priv, em_lgdt, lgdt),
3386 II(ModRM | SrcMem | Priv, em_lidt, lidt), 3436 II(SrcMem | Priv, em_lidt, lidt),
3387 II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N, 3437 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3388 II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), 3438 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3389 II(SrcMem | ModRM | ByteOp | Priv | NoAccess, em_invlpg, invlpg), 3439 II(SrcMem | ByteOp | Priv | NoAccess, em_invlpg, invlpg),
3390}, { 3440}, {
3391 I(SrcNone | ModRM | Priv | VendorSpecific, em_vmcall), 3441 I(SrcNone | Priv | VendorSpecific, em_vmcall),
3392 EXT(0, group7_rm1), 3442 EXT(0, group7_rm1),
3393 N, EXT(0, group7_rm3), 3443 N, EXT(0, group7_rm3),
3394 II(SrcNone | ModRM | DstMem | Mov, em_smsw, smsw), N, 3444 II(SrcNone | DstMem | Mov, em_smsw, smsw), N,
3395 II(SrcMem16 | ModRM | Mov | Priv, em_lmsw, lmsw), EXT(0, group7_rm7), 3445 II(SrcMem16 | Mov | Priv, em_lmsw, lmsw),
3446 EXT(0, group7_rm7),
3396} }; 3447} };
3397 3448
3398static struct opcode group8[] = { 3449static struct opcode group8[] = {
3399 N, N, N, N, 3450 N, N, N, N,
3400 I(DstMem | SrcImmByte | ModRM, em_bt), 3451 I(DstMem | SrcImmByte, em_bt),
3401 I(DstMem | SrcImmByte | ModRM | Lock | PageTable, em_bts), 3452 I(DstMem | SrcImmByte | Lock | PageTable, em_bts),
3402 I(DstMem | SrcImmByte | ModRM | Lock, em_btr), 3453 I(DstMem | SrcImmByte | Lock, em_btr),
3403 I(DstMem | SrcImmByte | ModRM | Lock | PageTable, em_btc), 3454 I(DstMem | SrcImmByte | Lock | PageTable, em_btc),
3404}; 3455};
3405 3456
3406static struct group_dual group9 = { { 3457static struct group_dual group9 = { {
3407 N, I(DstMem64 | ModRM | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N, 3458 N, I(DstMem64 | Lock | PageTable, em_cmpxchg8b), N, N, N, N, N, N,
3408}, { 3459}, {
3409 N, N, N, N, N, N, N, N, 3460 N, N, N, N, N, N, N, N,
3410} }; 3461} };
3411 3462
3412static struct opcode group11[] = { 3463static struct opcode group11[] = {
3413 I(DstMem | SrcImm | ModRM | Mov | PageTable, em_mov), 3464 I(DstMem | SrcImm | Mov | PageTable, em_mov),
3414 X7(D(Undefined)), 3465 X7(D(Undefined)),
3415}; 3466};
3416 3467
3417static struct gprefix pfx_0f_6f_0f_7f = { 3468static struct gprefix pfx_0f_6f_0f_7f = {
3418 N, N, N, I(Sse, em_movdqu), 3469 I(Mmx, em_mov), I(Sse | Aligned, em_mov), N, I(Sse | Unaligned, em_mov),
3470};
3471
3472static struct gprefix pfx_vmovntpx = {
3473 I(0, em_mov), N, N, N,
3419}; 3474};
3420 3475
3421static struct opcode opcode_table[256] = { 3476static struct opcode opcode_table[256] = {
@@ -3464,10 +3519,10 @@ static struct opcode opcode_table[256] = {
3464 /* 0x70 - 0x7F */ 3519 /* 0x70 - 0x7F */
3465 X16(D(SrcImmByte)), 3520 X16(D(SrcImmByte)),
3466 /* 0x80 - 0x87 */ 3521 /* 0x80 - 0x87 */
3467 G(ByteOp | DstMem | SrcImm | ModRM | Group, group1), 3522 G(ByteOp | DstMem | SrcImm, group1),
3468 G(DstMem | SrcImm | ModRM | Group, group1), 3523 G(DstMem | SrcImm, group1),
3469 G(ByteOp | DstMem | SrcImm | ModRM | No64 | Group, group1), 3524 G(ByteOp | DstMem | SrcImm | No64, group1),
3470 G(DstMem | SrcImmByte | ModRM | Group, group1), 3525 G(DstMem | SrcImmByte, group1),
3471 I2bv(DstMem | SrcReg | ModRM, em_test), 3526 I2bv(DstMem | SrcReg | ModRM, em_test),
3472 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg), 3527 I2bv(DstMem | SrcReg | ModRM | Lock | PageTable, em_xchg),
3473 /* 0x88 - 0x8F */ 3528 /* 0x88 - 0x8F */
@@ -3549,7 +3604,8 @@ static struct opcode twobyte_table[256] = {
3549 IIP(ModRM | SrcMem | Priv | Op3264, em_cr_write, cr_write, check_cr_write), 3604 IIP(ModRM | SrcMem | Priv | Op3264, em_cr_write, cr_write, check_cr_write),
3550 IIP(ModRM | SrcMem | Priv | Op3264, em_dr_write, dr_write, check_dr_write), 3605 IIP(ModRM | SrcMem | Priv | Op3264, em_dr_write, dr_write, check_dr_write),
3551 N, N, N, N, 3606 N, N, N, N,
3552 N, N, N, N, N, N, N, N, 3607 N, N, N, GP(ModRM | DstMem | SrcReg | Sse | Mov | Aligned, &pfx_vmovntpx),
3608 N, N, N, N,
3553 /* 0x30 - 0x3F */ 3609 /* 0x30 - 0x3F */
3554 II(ImplicitOps | Priv, em_wrmsr, wrmsr), 3610 II(ImplicitOps | Priv, em_wrmsr, wrmsr),
3555 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc), 3611 IIP(ImplicitOps, em_rdtsc, rdtsc, check_rdtsc),
@@ -3897,17 +3953,16 @@ done_prefixes:
3897 } 3953 }
3898 ctxt->d = opcode.flags; 3954 ctxt->d = opcode.flags;
3899 3955
3956 if (ctxt->d & ModRM)
3957 ctxt->modrm = insn_fetch(u8, ctxt);
3958
3900 while (ctxt->d & GroupMask) { 3959 while (ctxt->d & GroupMask) {
3901 switch (ctxt->d & GroupMask) { 3960 switch (ctxt->d & GroupMask) {
3902 case Group: 3961 case Group:
3903 ctxt->modrm = insn_fetch(u8, ctxt);
3904 --ctxt->_eip;
3905 goffset = (ctxt->modrm >> 3) & 7; 3962 goffset = (ctxt->modrm >> 3) & 7;
3906 opcode = opcode.u.group[goffset]; 3963 opcode = opcode.u.group[goffset];
3907 break; 3964 break;
3908 case GroupDual: 3965 case GroupDual:
3909 ctxt->modrm = insn_fetch(u8, ctxt);
3910 --ctxt->_eip;
3911 goffset = (ctxt->modrm >> 3) & 7; 3966 goffset = (ctxt->modrm >> 3) & 7;
3912 if ((ctxt->modrm >> 6) == 3) 3967 if ((ctxt->modrm >> 6) == 3)
3913 opcode = opcode.u.gdual->mod3[goffset]; 3968 opcode = opcode.u.gdual->mod3[goffset];
@@ -3960,6 +4015,8 @@ done_prefixes:
3960 4015
3961 if (ctxt->d & Sse) 4016 if (ctxt->d & Sse)
3962 ctxt->op_bytes = 16; 4017 ctxt->op_bytes = 16;
4018 else if (ctxt->d & Mmx)
4019 ctxt->op_bytes = 8;
3963 4020
3964 /* ModRM and SIB bytes. */ 4021 /* ModRM and SIB bytes. */
3965 if (ctxt->d & ModRM) { 4022 if (ctxt->d & ModRM) {
@@ -4030,6 +4087,35 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
4030 return false; 4087 return false;
4031} 4088}
4032 4089
4090static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
4091{
4092 bool fault = false;
4093
4094 ctxt->ops->get_fpu(ctxt);
4095 asm volatile("1: fwait \n\t"
4096 "2: \n\t"
4097 ".pushsection .fixup,\"ax\" \n\t"
4098 "3: \n\t"
4099 "movb $1, %[fault] \n\t"
4100 "jmp 2b \n\t"
4101 ".popsection \n\t"
4102 _ASM_EXTABLE(1b, 3b)
4103 : [fault]"+qm"(fault));
4104 ctxt->ops->put_fpu(ctxt);
4105
4106 if (unlikely(fault))
4107 return emulate_exception(ctxt, MF_VECTOR, 0, false);
4108
4109 return X86EMUL_CONTINUE;
4110}
4111
4112static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
4113 struct operand *op)
4114{
4115 if (op->type == OP_MM)
4116 read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
4117}
4118
4033int x86_emulate_insn(struct x86_emulate_ctxt *ctxt) 4119int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4034{ 4120{
4035 struct x86_emulate_ops *ops = ctxt->ops; 4121 struct x86_emulate_ops *ops = ctxt->ops;
@@ -4054,18 +4140,31 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
4054 goto done; 4140 goto done;
4055 } 4141 }
4056 4142
4057 if ((ctxt->d & Sse) 4143 if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
4058 && ((ops->get_cr(ctxt, 0) & X86_CR0_EM) 4144 || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4059 || !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
4060 rc = emulate_ud(ctxt); 4145 rc = emulate_ud(ctxt);
4061 goto done; 4146 goto done;
4062 } 4147 }
4063 4148
4064 if ((ctxt->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) { 4149 if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
4065 rc = emulate_nm(ctxt); 4150 rc = emulate_nm(ctxt);
4066 goto done; 4151 goto done;
4067 } 4152 }
4068 4153
4154 if (ctxt->d & Mmx) {
4155 rc = flush_pending_x87_faults(ctxt);
4156 if (rc != X86EMUL_CONTINUE)
4157 goto done;
4158 /*
4159 * Now that we know the fpu is exception safe, we can fetch
4160 * operands from it.
4161 */
4162 fetch_possible_mmx_operand(ctxt, &ctxt->src);
4163 fetch_possible_mmx_operand(ctxt, &ctxt->src2);
4164 if (!(ctxt->d & Mov))
4165 fetch_possible_mmx_operand(ctxt, &ctxt->dst);
4166 }
4167
4069 if (unlikely(ctxt->guest_mode) && ctxt->intercept) { 4168 if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
4070 rc = emulator_check_intercept(ctxt, ctxt->intercept, 4169 rc = emulator_check_intercept(ctxt, ctxt->intercept,
4071 X86_ICPT_PRE_EXCEPT); 4170 X86_ICPT_PRE_EXCEPT);
diff --git a/arch/x86/kvm/i8254.c b/arch/x86/kvm/i8254.c
index d68f99df690c..adba28f88d1a 100644
--- a/arch/x86/kvm/i8254.c
+++ b/arch/x86/kvm/i8254.c
@@ -34,7 +34,6 @@
34 34
35#include <linux/kvm_host.h> 35#include <linux/kvm_host.h>
36#include <linux/slab.h> 36#include <linux/slab.h>
37#include <linux/workqueue.h>
38 37
39#include "irq.h" 38#include "irq.h"
40#include "i8254.h" 39#include "i8254.h"
@@ -249,7 +248,7 @@ static void kvm_pit_ack_irq(struct kvm_irq_ack_notifier *kian)
249 /* in this case, we had multiple outstanding pit interrupts 248 /* in this case, we had multiple outstanding pit interrupts
250 * that we needed to inject. Reinject 249 * that we needed to inject. Reinject
251 */ 250 */
252 queue_work(ps->pit->wq, &ps->pit->expired); 251 queue_kthread_work(&ps->pit->worker, &ps->pit->expired);
253 ps->irq_ack = 1; 252 ps->irq_ack = 1;
254 spin_unlock(&ps->inject_lock); 253 spin_unlock(&ps->inject_lock);
255} 254}
@@ -270,7 +269,7 @@ void __kvm_migrate_pit_timer(struct kvm_vcpu *vcpu)
270static void destroy_pit_timer(struct kvm_pit *pit) 269static void destroy_pit_timer(struct kvm_pit *pit)
271{ 270{
272 hrtimer_cancel(&pit->pit_state.pit_timer.timer); 271 hrtimer_cancel(&pit->pit_state.pit_timer.timer);
273 cancel_work_sync(&pit->expired); 272 flush_kthread_work(&pit->expired);
274} 273}
275 274
276static bool kpit_is_periodic(struct kvm_timer *ktimer) 275static bool kpit_is_periodic(struct kvm_timer *ktimer)
@@ -284,7 +283,7 @@ static struct kvm_timer_ops kpit_ops = {
284 .is_periodic = kpit_is_periodic, 283 .is_periodic = kpit_is_periodic,
285}; 284};
286 285
287static void pit_do_work(struct work_struct *work) 286static void pit_do_work(struct kthread_work *work)
288{ 287{
289 struct kvm_pit *pit = container_of(work, struct kvm_pit, expired); 288 struct kvm_pit *pit = container_of(work, struct kvm_pit, expired);
290 struct kvm *kvm = pit->kvm; 289 struct kvm *kvm = pit->kvm;
@@ -328,7 +327,7 @@ static enum hrtimer_restart pit_timer_fn(struct hrtimer *data)
328 327
329 if (ktimer->reinject || !atomic_read(&ktimer->pending)) { 328 if (ktimer->reinject || !atomic_read(&ktimer->pending)) {
330 atomic_inc(&ktimer->pending); 329 atomic_inc(&ktimer->pending);
331 queue_work(pt->wq, &pt->expired); 330 queue_kthread_work(&pt->worker, &pt->expired);
332 } 331 }
333 332
334 if (ktimer->t_ops->is_periodic(ktimer)) { 333 if (ktimer->t_ops->is_periodic(ktimer)) {
@@ -353,7 +352,7 @@ static void create_pit_timer(struct kvm *kvm, u32 val, int is_period)
353 352
354 /* TODO The new value only affected after the retriggered */ 353 /* TODO The new value only affected after the retriggered */
355 hrtimer_cancel(&pt->timer); 354 hrtimer_cancel(&pt->timer);
356 cancel_work_sync(&ps->pit->expired); 355 flush_kthread_work(&ps->pit->expired);
357 pt->period = interval; 356 pt->period = interval;
358 ps->is_periodic = is_period; 357 ps->is_periodic = is_period;
359 358
@@ -669,6 +668,8 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
669{ 668{
670 struct kvm_pit *pit; 669 struct kvm_pit *pit;
671 struct kvm_kpit_state *pit_state; 670 struct kvm_kpit_state *pit_state;
671 struct pid *pid;
672 pid_t pid_nr;
672 int ret; 673 int ret;
673 674
674 pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL); 675 pit = kzalloc(sizeof(struct kvm_pit), GFP_KERNEL);
@@ -685,14 +686,20 @@ struct kvm_pit *kvm_create_pit(struct kvm *kvm, u32 flags)
685 mutex_lock(&pit->pit_state.lock); 686 mutex_lock(&pit->pit_state.lock);
686 spin_lock_init(&pit->pit_state.inject_lock); 687 spin_lock_init(&pit->pit_state.inject_lock);
687 688
688 pit->wq = create_singlethread_workqueue("kvm-pit-wq"); 689 pid = get_pid(task_tgid(current));
689 if (!pit->wq) { 690 pid_nr = pid_vnr(pid);
691 put_pid(pid);
692
693 init_kthread_worker(&pit->worker);
694 pit->worker_task = kthread_run(kthread_worker_fn, &pit->worker,
695 "kvm-pit/%d", pid_nr);
696 if (IS_ERR(pit->worker_task)) {
690 mutex_unlock(&pit->pit_state.lock); 697 mutex_unlock(&pit->pit_state.lock);
691 kvm_free_irq_source_id(kvm, pit->irq_source_id); 698 kvm_free_irq_source_id(kvm, pit->irq_source_id);
692 kfree(pit); 699 kfree(pit);
693 return NULL; 700 return NULL;
694 } 701 }
695 INIT_WORK(&pit->expired, pit_do_work); 702 init_kthread_work(&pit->expired, pit_do_work);
696 703
697 kvm->arch.vpit = pit; 704 kvm->arch.vpit = pit;
698 pit->kvm = kvm; 705 pit->kvm = kvm;
@@ -736,7 +743,7 @@ fail:
736 kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier); 743 kvm_unregister_irq_mask_notifier(kvm, 0, &pit->mask_notifier);
737 kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier); 744 kvm_unregister_irq_ack_notifier(kvm, &pit_state->irq_ack_notifier);
738 kvm_free_irq_source_id(kvm, pit->irq_source_id); 745 kvm_free_irq_source_id(kvm, pit->irq_source_id);
739 destroy_workqueue(pit->wq); 746 kthread_stop(pit->worker_task);
740 kfree(pit); 747 kfree(pit);
741 return NULL; 748 return NULL;
742} 749}
@@ -756,10 +763,10 @@ void kvm_free_pit(struct kvm *kvm)
756 mutex_lock(&kvm->arch.vpit->pit_state.lock); 763 mutex_lock(&kvm->arch.vpit->pit_state.lock);
757 timer = &kvm->arch.vpit->pit_state.pit_timer.timer; 764 timer = &kvm->arch.vpit->pit_state.pit_timer.timer;
758 hrtimer_cancel(timer); 765 hrtimer_cancel(timer);
759 cancel_work_sync(&kvm->arch.vpit->expired); 766 flush_kthread_work(&kvm->arch.vpit->expired);
767 kthread_stop(kvm->arch.vpit->worker_task);
760 kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id); 768 kvm_free_irq_source_id(kvm, kvm->arch.vpit->irq_source_id);
761 mutex_unlock(&kvm->arch.vpit->pit_state.lock); 769 mutex_unlock(&kvm->arch.vpit->pit_state.lock);
762 destroy_workqueue(kvm->arch.vpit->wq);
763 kfree(kvm->arch.vpit); 770 kfree(kvm->arch.vpit);
764 } 771 }
765} 772}
diff --git a/arch/x86/kvm/i8254.h b/arch/x86/kvm/i8254.h
index 51a97426e791..fdf40425ea1d 100644
--- a/arch/x86/kvm/i8254.h
+++ b/arch/x86/kvm/i8254.h
@@ -1,6 +1,8 @@
1#ifndef __I8254_H 1#ifndef __I8254_H
2#define __I8254_H 2#define __I8254_H
3 3
4#include <linux/kthread.h>
5
4#include "iodev.h" 6#include "iodev.h"
5 7
6struct kvm_kpit_channel_state { 8struct kvm_kpit_channel_state {
@@ -39,8 +41,9 @@ struct kvm_pit {
39 struct kvm_kpit_state pit_state; 41 struct kvm_kpit_state pit_state;
40 int irq_source_id; 42 int irq_source_id;
41 struct kvm_irq_mask_notifier mask_notifier; 43 struct kvm_irq_mask_notifier mask_notifier;
42 struct workqueue_struct *wq; 44 struct kthread_worker worker;
43 struct work_struct expired; 45 struct task_struct *worker_task;
46 struct kthread_work expired;
44}; 47};
45 48
46#define KVM_PIT_BASE_ADDRESS 0x40 49#define KVM_PIT_BASE_ADDRESS 0x40
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 858432287ab6..93c15743f1ee 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -92,6 +92,11 @@ static inline int apic_test_and_clear_vector(int vec, void *bitmap)
92 return test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); 92 return test_and_clear_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
93} 93}
94 94
95static inline int apic_test_vector(int vec, void *bitmap)
96{
97 return test_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
98}
99
95static inline void apic_set_vector(int vec, void *bitmap) 100static inline void apic_set_vector(int vec, void *bitmap)
96{ 101{
97 set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec)); 102 set_bit(VEC_POS(vec), (bitmap) + REG_POS(vec));
@@ -480,7 +485,6 @@ int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2)
480static void apic_set_eoi(struct kvm_lapic *apic) 485static void apic_set_eoi(struct kvm_lapic *apic)
481{ 486{
482 int vector = apic_find_highest_isr(apic); 487 int vector = apic_find_highest_isr(apic);
483 int trigger_mode;
484 /* 488 /*
485 * Not every write EOI will has corresponding ISR, 489 * Not every write EOI will has corresponding ISR,
486 * one example is when Kernel check timer on setup_IO_APIC 490 * one example is when Kernel check timer on setup_IO_APIC
@@ -491,12 +495,15 @@ static void apic_set_eoi(struct kvm_lapic *apic)
491 apic_clear_vector(vector, apic->regs + APIC_ISR); 495 apic_clear_vector(vector, apic->regs + APIC_ISR);
492 apic_update_ppr(apic); 496 apic_update_ppr(apic);
493 497
494 if (apic_test_and_clear_vector(vector, apic->regs + APIC_TMR)) 498 if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI) &&
495 trigger_mode = IOAPIC_LEVEL_TRIG; 499 kvm_ioapic_handles_vector(apic->vcpu->kvm, vector)) {
496 else 500 int trigger_mode;
497 trigger_mode = IOAPIC_EDGE_TRIG; 501 if (apic_test_vector(vector, apic->regs + APIC_TMR))
498 if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI)) 502 trigger_mode = IOAPIC_LEVEL_TRIG;
503 else
504 trigger_mode = IOAPIC_EDGE_TRIG;
499 kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode); 505 kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
506 }
500 kvm_make_request(KVM_REQ_EVENT, apic->vcpu); 507 kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
501} 508}
502 509
@@ -1081,6 +1088,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
1081 apic_update_ppr(apic); 1088 apic_update_ppr(apic);
1082 1089
1083 vcpu->arch.apic_arb_prio = 0; 1090 vcpu->arch.apic_arb_prio = 0;
1091 vcpu->arch.apic_attention = 0;
1084 1092
1085 apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr=" 1093 apic_debug(KERN_INFO "%s: vcpu=%p, id=%d, base_msr="
1086 "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__, 1094 "0x%016" PRIx64 ", base_address=0x%0lx.\n", __func__,
@@ -1280,7 +1288,7 @@ void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu)
1280 u32 data; 1288 u32 data;
1281 void *vapic; 1289 void *vapic;
1282 1290
1283 if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr) 1291 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
1284 return; 1292 return;
1285 1293
1286 vapic = kmap_atomic(vcpu->arch.apic->vapic_page); 1294 vapic = kmap_atomic(vcpu->arch.apic->vapic_page);
@@ -1297,7 +1305,7 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
1297 struct kvm_lapic *apic; 1305 struct kvm_lapic *apic;
1298 void *vapic; 1306 void *vapic;
1299 1307
1300 if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr) 1308 if (!test_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention))
1301 return; 1309 return;
1302 1310
1303 apic = vcpu->arch.apic; 1311 apic = vcpu->arch.apic;
@@ -1317,10 +1325,11 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu)
1317 1325
1318void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) 1326void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr)
1319{ 1327{
1320 if (!irqchip_in_kernel(vcpu->kvm))
1321 return;
1322
1323 vcpu->arch.apic->vapic_addr = vapic_addr; 1328 vcpu->arch.apic->vapic_addr = vapic_addr;
1329 if (vapic_addr)
1330 __set_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
1331 else
1332 __clear_bit(KVM_APIC_CHECK_VAPIC, &vcpu->arch.apic_attention);
1324} 1333}
1325 1334
1326int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data) 1335int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4cb164268846..72102e0ab7cb 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -135,8 +135,6 @@ module_param(dbg, bool, 0644);
135#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \ 135#define PT64_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | PT_USER_MASK \
136 | PT64_NX_MASK) 136 | PT64_NX_MASK)
137 137
138#define PTE_LIST_EXT 4
139
140#define ACC_EXEC_MASK 1 138#define ACC_EXEC_MASK 1
141#define ACC_WRITE_MASK PT_WRITABLE_MASK 139#define ACC_WRITE_MASK PT_WRITABLE_MASK
142#define ACC_USER_MASK PT_USER_MASK 140#define ACC_USER_MASK PT_USER_MASK
@@ -151,6 +149,9 @@ module_param(dbg, bool, 0644);
151 149
152#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level) 150#define SHADOW_PT_INDEX(addr, level) PT64_INDEX(addr, level)
153 151
152/* make pte_list_desc fit well in cache line */
153#define PTE_LIST_EXT 3
154
154struct pte_list_desc { 155struct pte_list_desc {
155 u64 *sptes[PTE_LIST_EXT]; 156 u64 *sptes[PTE_LIST_EXT];
156 struct pte_list_desc *more; 157 struct pte_list_desc *more;
@@ -550,19 +551,29 @@ static u64 mmu_spte_get_lockless(u64 *sptep)
550 551
551static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu) 552static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
552{ 553{
553 rcu_read_lock(); 554 /*
554 atomic_inc(&vcpu->kvm->arch.reader_counter); 555 * Prevent page table teardown by making any free-er wait during
555 556 * kvm_flush_remote_tlbs() IPI to all active vcpus.
556 /* Increase the counter before walking shadow page table */ 557 */
557 smp_mb__after_atomic_inc(); 558 local_irq_disable();
559 vcpu->mode = READING_SHADOW_PAGE_TABLES;
560 /*
561 * Make sure a following spte read is not reordered ahead of the write
562 * to vcpu->mode.
563 */
564 smp_mb();
558} 565}
559 566
560static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu) 567static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
561{ 568{
562 /* Decrease the counter after walking shadow page table finished */ 569 /*
563 smp_mb__before_atomic_dec(); 570 * Make sure the write to vcpu->mode is not reordered in front of
564 atomic_dec(&vcpu->kvm->arch.reader_counter); 571 * reads to sptes. If it does, kvm_commit_zap_page() can see us
565 rcu_read_unlock(); 572 * OUTSIDE_GUEST_MODE and proceed to free the shadow page table.
573 */
574 smp_mb();
575 vcpu->mode = OUTSIDE_GUEST_MODE;
576 local_irq_enable();
566} 577}
567 578
568static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache, 579static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
@@ -841,32 +852,6 @@ static int pte_list_add(struct kvm_vcpu *vcpu, u64 *spte,
841 return count; 852 return count;
842} 853}
843 854
844static u64 *pte_list_next(unsigned long *pte_list, u64 *spte)
845{
846 struct pte_list_desc *desc;
847 u64 *prev_spte;
848 int i;
849
850 if (!*pte_list)
851 return NULL;
852 else if (!(*pte_list & 1)) {
853 if (!spte)
854 return (u64 *)*pte_list;
855 return NULL;
856 }
857 desc = (struct pte_list_desc *)(*pte_list & ~1ul);
858 prev_spte = NULL;
859 while (desc) {
860 for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i) {
861 if (prev_spte == spte)
862 return desc->sptes[i];
863 prev_spte = desc->sptes[i];
864 }
865 desc = desc->more;
866 }
867 return NULL;
868}
869
870static void 855static void
871pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc, 856pte_list_desc_remove_entry(unsigned long *pte_list, struct pte_list_desc *desc,
872 int i, struct pte_list_desc *prev_desc) 857 int i, struct pte_list_desc *prev_desc)
@@ -987,11 +972,6 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
987 return pte_list_add(vcpu, spte, rmapp); 972 return pte_list_add(vcpu, spte, rmapp);
988} 973}
989 974
990static u64 *rmap_next(unsigned long *rmapp, u64 *spte)
991{
992 return pte_list_next(rmapp, spte);
993}
994
995static void rmap_remove(struct kvm *kvm, u64 *spte) 975static void rmap_remove(struct kvm *kvm, u64 *spte)
996{ 976{
997 struct kvm_mmu_page *sp; 977 struct kvm_mmu_page *sp;
@@ -1004,106 +984,201 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
1004 pte_list_remove(spte, rmapp); 984 pte_list_remove(spte, rmapp);
1005} 985}
1006 986
987/*
988 * Used by the following functions to iterate through the sptes linked by a
989 * rmap. All fields are private and not assumed to be used outside.
990 */
991struct rmap_iterator {
992 /* private fields */
993 struct pte_list_desc *desc; /* holds the sptep if not NULL */
994 int pos; /* index of the sptep */
995};
996
997/*
998 * Iteration must be started by this function. This should also be used after
999 * removing/dropping sptes from the rmap link because in such cases the
1000 * information in the itererator may not be valid.
1001 *
1002 * Returns sptep if found, NULL otherwise.
1003 */
1004static u64 *rmap_get_first(unsigned long rmap, struct rmap_iterator *iter)
1005{
1006 if (!rmap)
1007 return NULL;
1008
1009 if (!(rmap & 1)) {
1010 iter->desc = NULL;
1011 return (u64 *)rmap;
1012 }
1013
1014 iter->desc = (struct pte_list_desc *)(rmap & ~1ul);
1015 iter->pos = 0;
1016 return iter->desc->sptes[iter->pos];
1017}
1018
1019/*
1020 * Must be used with a valid iterator: e.g. after rmap_get_first().
1021 *
1022 * Returns sptep if found, NULL otherwise.
1023 */
1024static u64 *rmap_get_next(struct rmap_iterator *iter)
1025{
1026 if (iter->desc) {
1027 if (iter->pos < PTE_LIST_EXT - 1) {
1028 u64 *sptep;
1029
1030 ++iter->pos;
1031 sptep = iter->desc->sptes[iter->pos];
1032 if (sptep)
1033 return sptep;
1034 }
1035
1036 iter->desc = iter->desc->more;
1037
1038 if (iter->desc) {
1039 iter->pos = 0;
1040 /* desc->sptes[0] cannot be NULL */
1041 return iter->desc->sptes[iter->pos];
1042 }
1043 }
1044
1045 return NULL;
1046}
1047
1007static void drop_spte(struct kvm *kvm, u64 *sptep) 1048static void drop_spte(struct kvm *kvm, u64 *sptep)
1008{ 1049{
1009 if (mmu_spte_clear_track_bits(sptep)) 1050 if (mmu_spte_clear_track_bits(sptep))
1010 rmap_remove(kvm, sptep); 1051 rmap_remove(kvm, sptep);
1011} 1052}
1012 1053
1013int kvm_mmu_rmap_write_protect(struct kvm *kvm, u64 gfn, 1054static int __rmap_write_protect(struct kvm *kvm, unsigned long *rmapp, int level)
1014 struct kvm_memory_slot *slot)
1015{ 1055{
1016 unsigned long *rmapp; 1056 u64 *sptep;
1017 u64 *spte; 1057 struct rmap_iterator iter;
1018 int i, write_protected = 0; 1058 int write_protected = 0;
1019 1059
1020 rmapp = __gfn_to_rmap(gfn, PT_PAGE_TABLE_LEVEL, slot); 1060 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
1021 spte = rmap_next(rmapp, NULL); 1061 BUG_ON(!(*sptep & PT_PRESENT_MASK));
1022 while (spte) { 1062 rmap_printk("rmap_write_protect: spte %p %llx\n", sptep, *sptep);
1023 BUG_ON(!(*spte & PT_PRESENT_MASK)); 1063
1024 rmap_printk("rmap_write_protect: spte %p %llx\n", spte, *spte); 1064 if (!is_writable_pte(*sptep)) {
1025 if (is_writable_pte(*spte)) { 1065 sptep = rmap_get_next(&iter);
1026 mmu_spte_update(spte, *spte & ~PT_WRITABLE_MASK); 1066 continue;
1027 write_protected = 1;
1028 } 1067 }
1029 spte = rmap_next(rmapp, spte);
1030 }
1031 1068
1032 /* check for huge page mappings */ 1069 if (level == PT_PAGE_TABLE_LEVEL) {
1033 for (i = PT_DIRECTORY_LEVEL; 1070 mmu_spte_update(sptep, *sptep & ~PT_WRITABLE_MASK);
1034 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) { 1071 sptep = rmap_get_next(&iter);
1035 rmapp = __gfn_to_rmap(gfn, i, slot); 1072 } else {
1036 spte = rmap_next(rmapp, NULL); 1073 BUG_ON(!is_large_pte(*sptep));
1037 while (spte) { 1074 drop_spte(kvm, sptep);
1038 BUG_ON(!(*spte & PT_PRESENT_MASK)); 1075 --kvm->stat.lpages;
1039 BUG_ON(!is_large_pte(*spte)); 1076 sptep = rmap_get_first(*rmapp, &iter);
1040 pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
1041 if (is_writable_pte(*spte)) {
1042 drop_spte(kvm, spte);
1043 --kvm->stat.lpages;
1044 spte = NULL;
1045 write_protected = 1;
1046 }
1047 spte = rmap_next(rmapp, spte);
1048 } 1077 }
1078
1079 write_protected = 1;
1049 } 1080 }
1050 1081
1051 return write_protected; 1082 return write_protected;
1052} 1083}
1053 1084
1085/**
1086 * kvm_mmu_write_protect_pt_masked - write protect selected PT level pages
1087 * @kvm: kvm instance
1088 * @slot: slot to protect
1089 * @gfn_offset: start of the BITS_PER_LONG pages we care about
1090 * @mask: indicates which pages we should protect
1091 *
1092 * Used when we do not need to care about huge page mappings: e.g. during dirty
1093 * logging we do not have any such mappings.
1094 */
1095void kvm_mmu_write_protect_pt_masked(struct kvm *kvm,
1096 struct kvm_memory_slot *slot,
1097 gfn_t gfn_offset, unsigned long mask)
1098{
1099 unsigned long *rmapp;
1100
1101 while (mask) {
1102 rmapp = &slot->rmap[gfn_offset + __ffs(mask)];
1103 __rmap_write_protect(kvm, rmapp, PT_PAGE_TABLE_LEVEL);
1104
1105 /* clear the first set bit */
1106 mask &= mask - 1;
1107 }
1108}
1109
1054static int rmap_write_protect(struct kvm *kvm, u64 gfn) 1110static int rmap_write_protect(struct kvm *kvm, u64 gfn)
1055{ 1111{
1056 struct kvm_memory_slot *slot; 1112 struct kvm_memory_slot *slot;
1113 unsigned long *rmapp;
1114 int i;
1115 int write_protected = 0;
1057 1116
1058 slot = gfn_to_memslot(kvm, gfn); 1117 slot = gfn_to_memslot(kvm, gfn);
1059 return kvm_mmu_rmap_write_protect(kvm, gfn, slot); 1118
1119 for (i = PT_PAGE_TABLE_LEVEL;
1120 i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
1121 rmapp = __gfn_to_rmap(gfn, i, slot);
1122 write_protected |= __rmap_write_protect(kvm, rmapp, i);
1123 }
1124
1125 return write_protected;
1060} 1126}
1061 1127
1062static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp, 1128static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
1063 unsigned long data) 1129 unsigned long data)
1064{ 1130{
1065 u64 *spte; 1131 u64 *sptep;
1132 struct rmap_iterator iter;
1066 int need_tlb_flush = 0; 1133 int need_tlb_flush = 0;
1067 1134
1068 while ((spte = rmap_next(rmapp, NULL))) { 1135 while ((sptep = rmap_get_first(*rmapp, &iter))) {
1069 BUG_ON(!(*spte & PT_PRESENT_MASK)); 1136 BUG_ON(!(*sptep & PT_PRESENT_MASK));
1070 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", spte, *spte); 1137 rmap_printk("kvm_rmap_unmap_hva: spte %p %llx\n", sptep, *sptep);
1071 drop_spte(kvm, spte); 1138
1139 drop_spte(kvm, sptep);
1072 need_tlb_flush = 1; 1140 need_tlb_flush = 1;
1073 } 1141 }
1142
1074 return need_tlb_flush; 1143 return need_tlb_flush;
1075} 1144}
1076 1145
1077static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp, 1146static int kvm_set_pte_rmapp(struct kvm *kvm, unsigned long *rmapp,
1078 unsigned long data) 1147 unsigned long data)
1079{ 1148{
1149 u64 *sptep;
1150 struct rmap_iterator iter;
1080 int need_flush = 0; 1151 int need_flush = 0;
1081 u64 *spte, new_spte; 1152 u64 new_spte;
1082 pte_t *ptep = (pte_t *)data; 1153 pte_t *ptep = (pte_t *)data;
1083 pfn_t new_pfn; 1154 pfn_t new_pfn;
1084 1155
1085 WARN_ON(pte_huge(*ptep)); 1156 WARN_ON(pte_huge(*ptep));
1086 new_pfn = pte_pfn(*ptep); 1157 new_pfn = pte_pfn(*ptep);
1087 spte = rmap_next(rmapp, NULL); 1158
1088 while (spte) { 1159 for (sptep = rmap_get_first(*rmapp, &iter); sptep;) {
1089 BUG_ON(!is_shadow_present_pte(*spte)); 1160 BUG_ON(!is_shadow_present_pte(*sptep));
1090 rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", spte, *spte); 1161 rmap_printk("kvm_set_pte_rmapp: spte %p %llx\n", sptep, *sptep);
1162
1091 need_flush = 1; 1163 need_flush = 1;
1164
1092 if (pte_write(*ptep)) { 1165 if (pte_write(*ptep)) {
1093 drop_spte(kvm, spte); 1166 drop_spte(kvm, sptep);
1094 spte = rmap_next(rmapp, NULL); 1167 sptep = rmap_get_first(*rmapp, &iter);
1095 } else { 1168 } else {
1096 new_spte = *spte &~ (PT64_BASE_ADDR_MASK); 1169 new_spte = *sptep & ~PT64_BASE_ADDR_MASK;
1097 new_spte |= (u64)new_pfn << PAGE_SHIFT; 1170 new_spte |= (u64)new_pfn << PAGE_SHIFT;
1098 1171
1099 new_spte &= ~PT_WRITABLE_MASK; 1172 new_spte &= ~PT_WRITABLE_MASK;
1100 new_spte &= ~SPTE_HOST_WRITEABLE; 1173 new_spte &= ~SPTE_HOST_WRITEABLE;
1101 new_spte &= ~shadow_accessed_mask; 1174 new_spte &= ~shadow_accessed_mask;
1102 mmu_spte_clear_track_bits(spte); 1175
1103 mmu_spte_set(spte, new_spte); 1176 mmu_spte_clear_track_bits(sptep);
1104 spte = rmap_next(rmapp, spte); 1177 mmu_spte_set(sptep, new_spte);
1178 sptep = rmap_get_next(&iter);
1105 } 1179 }
1106 } 1180 }
1181
1107 if (need_flush) 1182 if (need_flush)
1108 kvm_flush_remote_tlbs(kvm); 1183 kvm_flush_remote_tlbs(kvm);
1109 1184
@@ -1162,7 +1237,8 @@ void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
1162static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, 1237static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
1163 unsigned long data) 1238 unsigned long data)
1164{ 1239{
1165 u64 *spte; 1240 u64 *sptep;
1241 struct rmap_iterator iter;
1166 int young = 0; 1242 int young = 0;
1167 1243
1168 /* 1244 /*
@@ -1175,25 +1251,24 @@ static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
1175 if (!shadow_accessed_mask) 1251 if (!shadow_accessed_mask)
1176 return kvm_unmap_rmapp(kvm, rmapp, data); 1252 return kvm_unmap_rmapp(kvm, rmapp, data);
1177 1253
1178 spte = rmap_next(rmapp, NULL); 1254 for (sptep = rmap_get_first(*rmapp, &iter); sptep;
1179 while (spte) { 1255 sptep = rmap_get_next(&iter)) {
1180 int _young; 1256 BUG_ON(!(*sptep & PT_PRESENT_MASK));
1181 u64 _spte = *spte; 1257
1182 BUG_ON(!(_spte & PT_PRESENT_MASK)); 1258 if (*sptep & PT_ACCESSED_MASK) {
1183 _young = _spte & PT_ACCESSED_MASK;
1184 if (_young) {
1185 young = 1; 1259 young = 1;
1186 clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)spte); 1260 clear_bit(PT_ACCESSED_SHIFT, (unsigned long *)sptep);
1187 } 1261 }
1188 spte = rmap_next(rmapp, spte);
1189 } 1262 }
1263
1190 return young; 1264 return young;
1191} 1265}
1192 1266
1193static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp, 1267static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
1194 unsigned long data) 1268 unsigned long data)
1195{ 1269{
1196 u64 *spte; 1270 u64 *sptep;
1271 struct rmap_iterator iter;
1197 int young = 0; 1272 int young = 0;
1198 1273
1199 /* 1274 /*
@@ -1204,16 +1279,14 @@ static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
1204 if (!shadow_accessed_mask) 1279 if (!shadow_accessed_mask)
1205 goto out; 1280 goto out;
1206 1281
1207 spte = rmap_next(rmapp, NULL); 1282 for (sptep = rmap_get_first(*rmapp, &iter); sptep;
1208 while (spte) { 1283 sptep = rmap_get_next(&iter)) {
1209 u64 _spte = *spte; 1284 BUG_ON(!(*sptep & PT_PRESENT_MASK));
1210 BUG_ON(!(_spte & PT_PRESENT_MASK)); 1285
1211 young = _spte & PT_ACCESSED_MASK; 1286 if (*sptep & PT_ACCESSED_MASK) {
1212 if (young) {
1213 young = 1; 1287 young = 1;
1214 break; 1288 break;
1215 } 1289 }
1216 spte = rmap_next(rmapp, spte);
1217 } 1290 }
1218out: 1291out:
1219 return young; 1292 return young;
@@ -1865,10 +1938,11 @@ static void kvm_mmu_put_page(struct kvm_mmu_page *sp, u64 *parent_pte)
1865 1938
1866static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp) 1939static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
1867{ 1940{
1868 u64 *parent_pte; 1941 u64 *sptep;
1942 struct rmap_iterator iter;
1869 1943
1870 while ((parent_pte = pte_list_next(&sp->parent_ptes, NULL))) 1944 while ((sptep = rmap_get_first(sp->parent_ptes, &iter)))
1871 drop_parent_pte(sp, parent_pte); 1945 drop_parent_pte(sp, sptep);
1872} 1946}
1873 1947
1874static int mmu_zap_unsync_children(struct kvm *kvm, 1948static int mmu_zap_unsync_children(struct kvm *kvm,
@@ -1925,30 +1999,6 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
1925 return ret; 1999 return ret;
1926} 2000}
1927 2001
1928static void kvm_mmu_isolate_pages(struct list_head *invalid_list)
1929{
1930 struct kvm_mmu_page *sp;
1931
1932 list_for_each_entry(sp, invalid_list, link)
1933 kvm_mmu_isolate_page(sp);
1934}
1935
1936static void free_pages_rcu(struct rcu_head *head)
1937{
1938 struct kvm_mmu_page *next, *sp;
1939
1940 sp = container_of(head, struct kvm_mmu_page, rcu);
1941 while (sp) {
1942 if (!list_empty(&sp->link))
1943 next = list_first_entry(&sp->link,
1944 struct kvm_mmu_page, link);
1945 else
1946 next = NULL;
1947 kvm_mmu_free_page(sp);
1948 sp = next;
1949 }
1950}
1951
1952static void kvm_mmu_commit_zap_page(struct kvm *kvm, 2002static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1953 struct list_head *invalid_list) 2003 struct list_head *invalid_list)
1954{ 2004{
@@ -1957,17 +2007,17 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1957 if (list_empty(invalid_list)) 2007 if (list_empty(invalid_list))
1958 return; 2008 return;
1959 2009
1960 kvm_flush_remote_tlbs(kvm); 2010 /*
1961 2011 * wmb: make sure everyone sees our modifications to the page tables
1962 if (atomic_read(&kvm->arch.reader_counter)) { 2012 * rmb: make sure we see changes to vcpu->mode
1963 kvm_mmu_isolate_pages(invalid_list); 2013 */
1964 sp = list_first_entry(invalid_list, struct kvm_mmu_page, link); 2014 smp_mb();
1965 list_del_init(invalid_list);
1966 2015
1967 trace_kvm_mmu_delay_free_pages(sp); 2016 /*
1968 call_rcu(&sp->rcu, free_pages_rcu); 2017 * Wait for all vcpus to exit guest mode and/or lockless shadow
1969 return; 2018 * page table walks.
1970 } 2019 */
2020 kvm_flush_remote_tlbs(kvm);
1971 2021
1972 do { 2022 do {
1973 sp = list_first_entry(invalid_list, struct kvm_mmu_page, link); 2023 sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
@@ -1975,7 +2025,6 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
1975 kvm_mmu_isolate_page(sp); 2025 kvm_mmu_isolate_page(sp);
1976 kvm_mmu_free_page(sp); 2026 kvm_mmu_free_page(sp);
1977 } while (!list_empty(invalid_list)); 2027 } while (!list_empty(invalid_list));
1978
1979} 2028}
1980 2029
1981/* 2030/*
@@ -3554,7 +3603,7 @@ static bool detect_write_flooding(struct kvm_mmu_page *sp)
3554 * Skip write-flooding detected for the sp whose level is 1, because 3603 * Skip write-flooding detected for the sp whose level is 1, because
3555 * it can become unsync, then the guest page is not write-protected. 3604 * it can become unsync, then the guest page is not write-protected.
3556 */ 3605 */
3557 if (sp->role.level == 1) 3606 if (sp->role.level == PT_PAGE_TABLE_LEVEL)
3558 return false; 3607 return false;
3559 3608
3560 return ++sp->write_flooding_count >= 3; 3609 return ++sp->write_flooding_count >= 3;
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index 715da5a19a5b..7d7d0b9e23eb 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -192,7 +192,8 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
192{ 192{
193 struct kvm_memory_slot *slot; 193 struct kvm_memory_slot *slot;
194 unsigned long *rmapp; 194 unsigned long *rmapp;
195 u64 *spte; 195 u64 *sptep;
196 struct rmap_iterator iter;
196 197
197 if (sp->role.direct || sp->unsync || sp->role.invalid) 198 if (sp->role.direct || sp->unsync || sp->role.invalid)
198 return; 199 return;
@@ -200,13 +201,12 @@ static void audit_write_protection(struct kvm *kvm, struct kvm_mmu_page *sp)
200 slot = gfn_to_memslot(kvm, sp->gfn); 201 slot = gfn_to_memslot(kvm, sp->gfn);
201 rmapp = &slot->rmap[sp->gfn - slot->base_gfn]; 202 rmapp = &slot->rmap[sp->gfn - slot->base_gfn];
202 203
203 spte = rmap_next(rmapp, NULL); 204 for (sptep = rmap_get_first(*rmapp, &iter); sptep;
204 while (spte) { 205 sptep = rmap_get_next(&iter)) {
205 if (is_writable_pte(*spte)) 206 if (is_writable_pte(*sptep))
206 audit_printk(kvm, "shadow page has writable " 207 audit_printk(kvm, "shadow page has writable "
207 "mappings: gfn %llx role %x\n", 208 "mappings: gfn %llx role %x\n",
208 sp->gfn, sp->role.word); 209 sp->gfn, sp->role.word);
209 spte = rmap_next(rmapp, spte);
210 } 210 }
211} 211}
212 212
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index df5a70311be8..34f970937ef1 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -658,7 +658,7 @@ static gpa_t FNAME(get_level1_sp_gpa)(struct kvm_mmu_page *sp)
658{ 658{
659 int offset = 0; 659 int offset = 0;
660 660
661 WARN_ON(sp->role.level != 1); 661 WARN_ON(sp->role.level != PT_PAGE_TABLE_LEVEL);
662 662
663 if (PTTYPE == 32) 663 if (PTTYPE == 32)
664 offset = sp->role.quadrant << PT64_LEVEL_BITS; 664 offset = sp->role.quadrant << PT64_LEVEL_BITS;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e334389e1c75..f75af406b268 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -22,6 +22,7 @@
22#include "x86.h" 22#include "x86.h"
23 23
24#include <linux/module.h> 24#include <linux/module.h>
25#include <linux/mod_devicetable.h>
25#include <linux/kernel.h> 26#include <linux/kernel.h>
26#include <linux/vmalloc.h> 27#include <linux/vmalloc.h>
27#include <linux/highmem.h> 28#include <linux/highmem.h>
@@ -42,6 +43,12 @@
42MODULE_AUTHOR("Qumranet"); 43MODULE_AUTHOR("Qumranet");
43MODULE_LICENSE("GPL"); 44MODULE_LICENSE("GPL");
44 45
46static const struct x86_cpu_id svm_cpu_id[] = {
47 X86_FEATURE_MATCH(X86_FEATURE_SVM),
48 {}
49};
50MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);
51
45#define IOPM_ALLOC_ORDER 2 52#define IOPM_ALLOC_ORDER 2
46#define MSRPM_ALLOC_ORDER 1 53#define MSRPM_ALLOC_ORDER 1
47 54
@@ -3240,6 +3247,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
3240 svm_clear_vintr(svm); 3247 svm_clear_vintr(svm);
3241 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK; 3248 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
3242 mark_dirty(svm->vmcb, VMCB_INTR); 3249 mark_dirty(svm->vmcb, VMCB_INTR);
3250 ++svm->vcpu.stat.irq_window_exits;
3243 /* 3251 /*
3244 * If the user space waits to inject interrupts, exit as soon as 3252 * If the user space waits to inject interrupts, exit as soon as
3245 * possible 3253 * possible
@@ -3247,7 +3255,6 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
3247 if (!irqchip_in_kernel(svm->vcpu.kvm) && 3255 if (!irqchip_in_kernel(svm->vcpu.kvm) &&
3248 kvm_run->request_interrupt_window && 3256 kvm_run->request_interrupt_window &&
3249 !kvm_cpu_has_interrupt(&svm->vcpu)) { 3257 !kvm_cpu_has_interrupt(&svm->vcpu)) {
3250 ++svm->vcpu.stat.irq_window_exits;
3251 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN; 3258 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
3252 return 0; 3259 return 0;
3253 } 3260 }
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 4ff0ab9bc3c8..32eb58866292 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -27,6 +27,7 @@
27#include <linux/highmem.h> 27#include <linux/highmem.h>
28#include <linux/sched.h> 28#include <linux/sched.h>
29#include <linux/moduleparam.h> 29#include <linux/moduleparam.h>
30#include <linux/mod_devicetable.h>
30#include <linux/ftrace_event.h> 31#include <linux/ftrace_event.h>
31#include <linux/slab.h> 32#include <linux/slab.h>
32#include <linux/tboot.h> 33#include <linux/tboot.h>
@@ -51,6 +52,12 @@
51MODULE_AUTHOR("Qumranet"); 52MODULE_AUTHOR("Qumranet");
52MODULE_LICENSE("GPL"); 53MODULE_LICENSE("GPL");
53 54
55static const struct x86_cpu_id vmx_cpu_id[] = {
56 X86_FEATURE_MATCH(X86_FEATURE_VMX),
57 {}
58};
59MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);
60
54static bool __read_mostly enable_vpid = 1; 61static bool __read_mostly enable_vpid = 1;
55module_param_named(vpid, enable_vpid, bool, 0444); 62module_param_named(vpid, enable_vpid, bool, 0444);
56 63
@@ -386,6 +393,9 @@ struct vcpu_vmx {
386 struct { 393 struct {
387 int loaded; 394 int loaded;
388 u16 fs_sel, gs_sel, ldt_sel; 395 u16 fs_sel, gs_sel, ldt_sel;
396#ifdef CONFIG_X86_64
397 u16 ds_sel, es_sel;
398#endif
389 int gs_ldt_reload_needed; 399 int gs_ldt_reload_needed;
390 int fs_reload_needed; 400 int fs_reload_needed;
391 } host_state; 401 } host_state;
@@ -1411,6 +1421,11 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
1411 } 1421 }
1412 1422
1413#ifdef CONFIG_X86_64 1423#ifdef CONFIG_X86_64
1424 savesegment(ds, vmx->host_state.ds_sel);
1425 savesegment(es, vmx->host_state.es_sel);
1426#endif
1427
1428#ifdef CONFIG_X86_64
1414 vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE)); 1429 vmcs_writel(HOST_FS_BASE, read_msr(MSR_FS_BASE));
1415 vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE)); 1430 vmcs_writel(HOST_GS_BASE, read_msr(MSR_GS_BASE));
1416#else 1431#else
@@ -1450,6 +1465,19 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
1450 } 1465 }
1451 if (vmx->host_state.fs_reload_needed) 1466 if (vmx->host_state.fs_reload_needed)
1452 loadsegment(fs, vmx->host_state.fs_sel); 1467 loadsegment(fs, vmx->host_state.fs_sel);
1468#ifdef CONFIG_X86_64
1469 if (unlikely(vmx->host_state.ds_sel | vmx->host_state.es_sel)) {
1470 loadsegment(ds, vmx->host_state.ds_sel);
1471 loadsegment(es, vmx->host_state.es_sel);
1472 }
1473#else
1474 /*
1475 * The sysexit path does not restore ds/es, so we must set them to
1476 * a reasonable value ourselves.
1477 */
1478 loadsegment(ds, __USER_DS);
1479 loadsegment(es, __USER_DS);
1480#endif
1453 reload_tss(); 1481 reload_tss();
1454#ifdef CONFIG_X86_64 1482#ifdef CONFIG_X86_64
1455 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base); 1483 wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
@@ -3633,8 +3661,18 @@ static void vmx_set_constant_host_state(void)
3633 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */ 3661 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
3634 3662
3635 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */ 3663 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
3664#ifdef CONFIG_X86_64
3665 /*
3666 * Load null selectors, so we can avoid reloading them in
3667 * __vmx_load_host_state(), in case userspace uses the null selectors
3668 * too (the expected case).
3669 */
3670 vmcs_write16(HOST_DS_SELECTOR, 0);
3671 vmcs_write16(HOST_ES_SELECTOR, 0);
3672#else
3636 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 3673 vmcs_write16(HOST_DS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
3637 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 3674 vmcs_write16(HOST_ES_SELECTOR, __KERNEL_DS); /* 22.2.4 */
3675#endif
3638 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */ 3676 vmcs_write16(HOST_SS_SELECTOR, __KERNEL_DS); /* 22.2.4 */
3639 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */ 3677 vmcs_write16(HOST_TR_SELECTOR, GDT_ENTRY_TSS*8); /* 22.2.4 */
3640 3678
@@ -6256,7 +6294,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
6256 } 6294 }
6257 } 6295 }
6258 6296
6259 asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS));
6260 vmx->loaded_vmcs->launched = 1; 6297 vmx->loaded_vmcs->launched = 1;
6261 6298
6262 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON); 6299 vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
@@ -6343,7 +6380,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
6343 return &vmx->vcpu; 6380 return &vmx->vcpu;
6344 6381
6345free_vmcs: 6382free_vmcs:
6346 free_vmcs(vmx->loaded_vmcs->vmcs); 6383 free_loaded_vmcs(vmx->loaded_vmcs);
6347free_msrs: 6384free_msrs:
6348 kfree(vmx->guest_msrs); 6385 kfree(vmx->guest_msrs);
6349uninit_vcpu: 6386uninit_vcpu:
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 185a2b823a2d..be6d54929fa7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2147,6 +2147,7 @@ int kvm_dev_ioctl_check_extension(long ext)
2147 case KVM_CAP_ASYNC_PF: 2147 case KVM_CAP_ASYNC_PF:
2148 case KVM_CAP_GET_TSC_KHZ: 2148 case KVM_CAP_GET_TSC_KHZ:
2149 case KVM_CAP_PCI_2_3: 2149 case KVM_CAP_PCI_2_3:
2150 case KVM_CAP_KVMCLOCK_CTRL:
2150 r = 1; 2151 r = 1;
2151 break; 2152 break;
2152 case KVM_CAP_COALESCED_MMIO: 2153 case KVM_CAP_COALESCED_MMIO:
@@ -2597,6 +2598,23 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
2597 return r; 2598 return r;
2598} 2599}
2599 2600
2601/*
2602 * kvm_set_guest_paused() indicates to the guest kernel that it has been
2603 * stopped by the hypervisor. This function will be called from the host only.
2604 * EINVAL is returned when the host attempts to set the flag for a guest that
2605 * does not support pv clocks.
2606 */
2607static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
2608{
2609 struct pvclock_vcpu_time_info *src = &vcpu->arch.hv_clock;
2610 if (!vcpu->arch.time_page)
2611 return -EINVAL;
2612 src->flags |= PVCLOCK_GUEST_STOPPED;
2613 mark_page_dirty(vcpu->kvm, vcpu->arch.time >> PAGE_SHIFT);
2614 kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
2615 return 0;
2616}
2617
2600long kvm_arch_vcpu_ioctl(struct file *filp, 2618long kvm_arch_vcpu_ioctl(struct file *filp,
2601 unsigned int ioctl, unsigned long arg) 2619 unsigned int ioctl, unsigned long arg)
2602{ 2620{
@@ -2873,6 +2891,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
2873 r = vcpu->arch.virtual_tsc_khz; 2891 r = vcpu->arch.virtual_tsc_khz;
2874 goto out; 2892 goto out;
2875 } 2893 }
2894 case KVM_KVMCLOCK_CTRL: {
2895 r = kvm_set_guest_paused(vcpu);
2896 goto out;
2897 }
2876 default: 2898 default:
2877 r = -EINVAL; 2899 r = -EINVAL;
2878 } 2900 }
@@ -3045,57 +3067,32 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
3045} 3067}
3046 3068
3047/** 3069/**
3048 * write_protect_slot - write protect a slot for dirty logging 3070 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
3049 * @kvm: the kvm instance 3071 * @kvm: kvm instance
3050 * @memslot: the slot we protect 3072 * @log: slot id and address to which we copy the log
3051 * @dirty_bitmap: the bitmap indicating which pages are dirty
3052 * @nr_dirty_pages: the number of dirty pages
3053 * 3073 *
3054 * We have two ways to find all sptes to protect: 3074 * We need to keep it in mind that VCPU threads can write to the bitmap
3055 * 1. Use kvm_mmu_slot_remove_write_access() which walks all shadow pages and 3075 * concurrently. So, to avoid losing data, we keep the following order for
3056 * checks ones that have a spte mapping a page in the slot. 3076 * each bit:
3057 * 2. Use kvm_mmu_rmap_write_protect() for each gfn found in the bitmap.
3058 * 3077 *
3059 * Generally speaking, if there are not so many dirty pages compared to the 3078 * 1. Take a snapshot of the bit and clear it if needed.
3060 * number of shadow pages, we should use the latter. 3079 * 2. Write protect the corresponding page.
3080 * 3. Flush TLB's if needed.
3081 * 4. Copy the snapshot to the userspace.
3061 * 3082 *
3062 * Note that letting others write into a page marked dirty in the old bitmap 3083 * Between 2 and 3, the guest may write to the page using the remaining TLB
3063 * by using the remaining tlb entry is not a problem. That page will become 3084 * entry. This is not a problem because the page will be reported dirty at
3064 * write protected again when we flush the tlb and then be reported dirty to 3085 * step 4 using the snapshot taken before and step 3 ensures that successive
3065 * the user space by copying the old bitmap. 3086 * writes will be logged for the next call.
3066 */
3067static void write_protect_slot(struct kvm *kvm,
3068 struct kvm_memory_slot *memslot,
3069 unsigned long *dirty_bitmap,
3070 unsigned long nr_dirty_pages)
3071{
3072 spin_lock(&kvm->mmu_lock);
3073
3074 /* Not many dirty pages compared to # of shadow pages. */
3075 if (nr_dirty_pages < kvm->arch.n_used_mmu_pages) {
3076 unsigned long gfn_offset;
3077
3078 for_each_set_bit(gfn_offset, dirty_bitmap, memslot->npages) {
3079 unsigned long gfn = memslot->base_gfn + gfn_offset;
3080
3081 kvm_mmu_rmap_write_protect(kvm, gfn, memslot);
3082 }
3083 kvm_flush_remote_tlbs(kvm);
3084 } else
3085 kvm_mmu_slot_remove_write_access(kvm, memslot->id);
3086
3087 spin_unlock(&kvm->mmu_lock);
3088}
3089
3090/*
3091 * Get (and clear) the dirty memory log for a memory slot.
3092 */ 3087 */
3093int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, 3088int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
3094 struct kvm_dirty_log *log)
3095{ 3089{
3096 int r; 3090 int r;
3097 struct kvm_memory_slot *memslot; 3091 struct kvm_memory_slot *memslot;
3098 unsigned long n, nr_dirty_pages; 3092 unsigned long n, i;
3093 unsigned long *dirty_bitmap;
3094 unsigned long *dirty_bitmap_buffer;
3095 bool is_dirty = false;
3099 3096
3100 mutex_lock(&kvm->slots_lock); 3097 mutex_lock(&kvm->slots_lock);
3101 3098
@@ -3104,49 +3101,42 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
3104 goto out; 3101 goto out;
3105 3102
3106 memslot = id_to_memslot(kvm->memslots, log->slot); 3103 memslot = id_to_memslot(kvm->memslots, log->slot);
3104
3105 dirty_bitmap = memslot->dirty_bitmap;
3107 r = -ENOENT; 3106 r = -ENOENT;
3108 if (!memslot->dirty_bitmap) 3107 if (!dirty_bitmap)
3109 goto out; 3108 goto out;
3110 3109
3111 n = kvm_dirty_bitmap_bytes(memslot); 3110 n = kvm_dirty_bitmap_bytes(memslot);
3112 nr_dirty_pages = memslot->nr_dirty_pages;
3113 3111
3114 /* If nothing is dirty, don't bother messing with page tables. */ 3112 dirty_bitmap_buffer = dirty_bitmap + n / sizeof(long);
3115 if (nr_dirty_pages) { 3113 memset(dirty_bitmap_buffer, 0, n);
3116 struct kvm_memslots *slots, *old_slots;
3117 unsigned long *dirty_bitmap, *dirty_bitmap_head;
3118 3114
3119 dirty_bitmap = memslot->dirty_bitmap; 3115 spin_lock(&kvm->mmu_lock);
3120 dirty_bitmap_head = memslot->dirty_bitmap_head;
3121 if (dirty_bitmap == dirty_bitmap_head)
3122 dirty_bitmap_head += n / sizeof(long);
3123 memset(dirty_bitmap_head, 0, n);
3124 3116
3125 r = -ENOMEM; 3117 for (i = 0; i < n / sizeof(long); i++) {
3126 slots = kmemdup(kvm->memslots, sizeof(*kvm->memslots), GFP_KERNEL); 3118 unsigned long mask;
3127 if (!slots) 3119 gfn_t offset;
3128 goto out;
3129 3120
3130 memslot = id_to_memslot(slots, log->slot); 3121 if (!dirty_bitmap[i])
3131 memslot->nr_dirty_pages = 0; 3122 continue;
3132 memslot->dirty_bitmap = dirty_bitmap_head;
3133 update_memslots(slots, NULL);
3134 3123
3135 old_slots = kvm->memslots; 3124 is_dirty = true;
3136 rcu_assign_pointer(kvm->memslots, slots);
3137 synchronize_srcu_expedited(&kvm->srcu);
3138 kfree(old_slots);
3139 3125
3140 write_protect_slot(kvm, memslot, dirty_bitmap, nr_dirty_pages); 3126 mask = xchg(&dirty_bitmap[i], 0);
3127 dirty_bitmap_buffer[i] = mask;
3141 3128
3142 r = -EFAULT; 3129 offset = i * BITS_PER_LONG;
3143 if (copy_to_user(log->dirty_bitmap, dirty_bitmap, n)) 3130 kvm_mmu_write_protect_pt_masked(kvm, memslot, offset, mask);
3144 goto out;
3145 } else {
3146 r = -EFAULT;
3147 if (clear_user(log->dirty_bitmap, n))
3148 goto out;
3149 } 3131 }
3132 if (is_dirty)
3133 kvm_flush_remote_tlbs(kvm);
3134
3135 spin_unlock(&kvm->mmu_lock);
3136
3137 r = -EFAULT;
3138 if (copy_to_user(log->dirty_bitmap, dirty_bitmap_buffer, n))
3139 goto out;
3150 3140
3151 r = 0; 3141 r = 0;
3152out: 3142out:
@@ -3728,9 +3718,8 @@ struct read_write_emulator_ops {
3728static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes) 3718static int read_prepare(struct kvm_vcpu *vcpu, void *val, int bytes)
3729{ 3719{
3730 if (vcpu->mmio_read_completed) { 3720 if (vcpu->mmio_read_completed) {
3731 memcpy(val, vcpu->mmio_data, bytes);
3732 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes, 3721 trace_kvm_mmio(KVM_TRACE_MMIO_READ, bytes,
3733 vcpu->mmio_phys_addr, *(u64 *)val); 3722 vcpu->mmio_fragments[0].gpa, *(u64 *)val);
3734 vcpu->mmio_read_completed = 0; 3723 vcpu->mmio_read_completed = 0;
3735 return 1; 3724 return 1;
3736 } 3725 }
@@ -3766,8 +3755,9 @@ static int read_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
3766static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa, 3755static int write_exit_mmio(struct kvm_vcpu *vcpu, gpa_t gpa,
3767 void *val, int bytes) 3756 void *val, int bytes)
3768{ 3757{
3769 memcpy(vcpu->mmio_data, val, bytes); 3758 struct kvm_mmio_fragment *frag = &vcpu->mmio_fragments[0];
3770 memcpy(vcpu->run->mmio.data, vcpu->mmio_data, 8); 3759
3760 memcpy(vcpu->run->mmio.data, frag->data, frag->len);
3771 return X86EMUL_CONTINUE; 3761 return X86EMUL_CONTINUE;
3772} 3762}
3773 3763
@@ -3794,10 +3784,7 @@ static int emulator_read_write_onepage(unsigned long addr, void *val,
3794 gpa_t gpa; 3784 gpa_t gpa;
3795 int handled, ret; 3785 int handled, ret;
3796 bool write = ops->write; 3786 bool write = ops->write;
3797 3787 struct kvm_mmio_fragment *frag;
3798 if (ops->read_write_prepare &&
3799 ops->read_write_prepare(vcpu, val, bytes))
3800 return X86EMUL_CONTINUE;
3801 3788
3802 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write); 3789 ret = vcpu_mmio_gva_to_gpa(vcpu, addr, &gpa, exception, write);
3803 3790
@@ -3823,15 +3810,19 @@ mmio:
3823 bytes -= handled; 3810 bytes -= handled;
3824 val += handled; 3811 val += handled;
3825 3812
3826 vcpu->mmio_needed = 1; 3813 while (bytes) {
3827 vcpu->run->exit_reason = KVM_EXIT_MMIO; 3814 unsigned now = min(bytes, 8U);
3828 vcpu->run->mmio.phys_addr = vcpu->mmio_phys_addr = gpa;
3829 vcpu->mmio_size = bytes;
3830 vcpu->run->mmio.len = min(vcpu->mmio_size, 8);
3831 vcpu->run->mmio.is_write = vcpu->mmio_is_write = write;
3832 vcpu->mmio_index = 0;
3833 3815
3834 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes); 3816 frag = &vcpu->mmio_fragments[vcpu->mmio_nr_fragments++];
3817 frag->gpa = gpa;
3818 frag->data = val;
3819 frag->len = now;
3820
3821 gpa += now;
3822 val += now;
3823 bytes -= now;
3824 }
3825 return X86EMUL_CONTINUE;
3835} 3826}
3836 3827
3837int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr, 3828int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
@@ -3840,10 +3831,18 @@ int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
3840 struct read_write_emulator_ops *ops) 3831 struct read_write_emulator_ops *ops)
3841{ 3832{
3842 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt); 3833 struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
3834 gpa_t gpa;
3835 int rc;
3836
3837 if (ops->read_write_prepare &&
3838 ops->read_write_prepare(vcpu, val, bytes))
3839 return X86EMUL_CONTINUE;
3840
3841 vcpu->mmio_nr_fragments = 0;
3843 3842
3844 /* Crossing a page boundary? */ 3843 /* Crossing a page boundary? */
3845 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) { 3844 if (((addr + bytes - 1) ^ addr) & PAGE_MASK) {
3846 int rc, now; 3845 int now;
3847 3846
3848 now = -addr & ~PAGE_MASK; 3847 now = -addr & ~PAGE_MASK;
3849 rc = emulator_read_write_onepage(addr, val, now, exception, 3848 rc = emulator_read_write_onepage(addr, val, now, exception,
@@ -3856,8 +3855,25 @@ int emulator_read_write(struct x86_emulate_ctxt *ctxt, unsigned long addr,
3856 bytes -= now; 3855 bytes -= now;
3857 } 3856 }
3858 3857
3859 return emulator_read_write_onepage(addr, val, bytes, exception, 3858 rc = emulator_read_write_onepage(addr, val, bytes, exception,
3860 vcpu, ops); 3859 vcpu, ops);
3860 if (rc != X86EMUL_CONTINUE)
3861 return rc;
3862
3863 if (!vcpu->mmio_nr_fragments)
3864 return rc;
3865
3866 gpa = vcpu->mmio_fragments[0].gpa;
3867
3868 vcpu->mmio_needed = 1;
3869 vcpu->mmio_cur_fragment = 0;
3870
3871 vcpu->run->mmio.len = vcpu->mmio_fragments[0].len;
3872 vcpu->run->mmio.is_write = vcpu->mmio_is_write = ops->write;
3873 vcpu->run->exit_reason = KVM_EXIT_MMIO;
3874 vcpu->run->mmio.phys_addr = gpa;
3875
3876 return ops->read_write_exit_mmio(vcpu, gpa, val, bytes);
3861} 3877}
3862 3878
3863static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt, 3879static int emulator_read_emulated(struct x86_emulate_ctxt *ctxt,
@@ -5263,10 +5279,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
5263 kvm_deliver_pmi(vcpu); 5279 kvm_deliver_pmi(vcpu);
5264 } 5280 }
5265 5281
5266 r = kvm_mmu_reload(vcpu);
5267 if (unlikely(r))
5268 goto out;
5269
5270 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) { 5282 if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
5271 inject_pending_event(vcpu); 5283 inject_pending_event(vcpu);
5272 5284
@@ -5282,6 +5294,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
5282 } 5294 }
5283 } 5295 }
5284 5296
5297 r = kvm_mmu_reload(vcpu);
5298 if (unlikely(r)) {
5299 kvm_x86_ops->cancel_injection(vcpu);
5300 goto out;
5301 }
5302
5285 preempt_disable(); 5303 preempt_disable();
5286 5304
5287 kvm_x86_ops->prepare_guest_switch(vcpu); 5305 kvm_x86_ops->prepare_guest_switch(vcpu);
@@ -5456,33 +5474,55 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
5456 return r; 5474 return r;
5457} 5475}
5458 5476
5477/*
5478 * Implements the following, as a state machine:
5479 *
5480 * read:
5481 * for each fragment
5482 * write gpa, len
5483 * exit
5484 * copy data
5485 * execute insn
5486 *
5487 * write:
5488 * for each fragment
5489 * write gpa, len
5490 * copy data
5491 * exit
5492 */
5459static int complete_mmio(struct kvm_vcpu *vcpu) 5493static int complete_mmio(struct kvm_vcpu *vcpu)
5460{ 5494{
5461 struct kvm_run *run = vcpu->run; 5495 struct kvm_run *run = vcpu->run;
5496 struct kvm_mmio_fragment *frag;
5462 int r; 5497 int r;
5463 5498
5464 if (!(vcpu->arch.pio.count || vcpu->mmio_needed)) 5499 if (!(vcpu->arch.pio.count || vcpu->mmio_needed))
5465 return 1; 5500 return 1;
5466 5501
5467 if (vcpu->mmio_needed) { 5502 if (vcpu->mmio_needed) {
5468 vcpu->mmio_needed = 0; 5503 /* Complete previous fragment */
5504 frag = &vcpu->mmio_fragments[vcpu->mmio_cur_fragment++];
5469 if (!vcpu->mmio_is_write) 5505 if (!vcpu->mmio_is_write)
5470 memcpy(vcpu->mmio_data + vcpu->mmio_index, 5506 memcpy(frag->data, run->mmio.data, frag->len);
5471 run->mmio.data, 8); 5507 if (vcpu->mmio_cur_fragment == vcpu->mmio_nr_fragments) {
5472 vcpu->mmio_index += 8; 5508 vcpu->mmio_needed = 0;
5473 if (vcpu->mmio_index < vcpu->mmio_size) { 5509 if (vcpu->mmio_is_write)
5474 run->exit_reason = KVM_EXIT_MMIO; 5510 return 1;
5475 run->mmio.phys_addr = vcpu->mmio_phys_addr + vcpu->mmio_index; 5511 vcpu->mmio_read_completed = 1;
5476 memcpy(run->mmio.data, vcpu->mmio_data + vcpu->mmio_index, 8); 5512 goto done;
5477 run->mmio.len = min(vcpu->mmio_size - vcpu->mmio_index, 8);
5478 run->mmio.is_write = vcpu->mmio_is_write;
5479 vcpu->mmio_needed = 1;
5480 return 0;
5481 } 5513 }
5514 /* Initiate next fragment */
5515 ++frag;
5516 run->exit_reason = KVM_EXIT_MMIO;
5517 run->mmio.phys_addr = frag->gpa;
5482 if (vcpu->mmio_is_write) 5518 if (vcpu->mmio_is_write)
5483 return 1; 5519 memcpy(run->mmio.data, frag->data, frag->len);
5484 vcpu->mmio_read_completed = 1; 5520 run->mmio.len = frag->len;
5521 run->mmio.is_write = vcpu->mmio_is_write;
5522 return 0;
5523
5485 } 5524 }
5525done:
5486 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); 5526 vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
5487 r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE); 5527 r = emulate_instruction(vcpu, EMULTYPE_NO_DECODE);
5488 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx); 5528 srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -6399,21 +6439,9 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
6399 kvm_cpu_has_interrupt(vcpu)); 6439 kvm_cpu_has_interrupt(vcpu));
6400} 6440}
6401 6441
6402void kvm_vcpu_kick(struct kvm_vcpu *vcpu) 6442int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
6403{ 6443{
6404 int me; 6444 return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
6405 int cpu = vcpu->cpu;
6406
6407 if (waitqueue_active(&vcpu->wq)) {
6408 wake_up_interruptible(&vcpu->wq);
6409 ++vcpu->stat.halt_wakeup;
6410 }
6411
6412 me = get_cpu();
6413 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
6414 if (kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE)
6415 smp_send_reschedule(cpu);
6416 put_cpu();
6417} 6445}
6418 6446
6419int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu) 6447int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index cb80c293cdd8..3d1134ddb885 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -64,7 +64,7 @@ static inline int is_pse(struct kvm_vcpu *vcpu)
64 64
65static inline int is_paging(struct kvm_vcpu *vcpu) 65static inline int is_paging(struct kvm_vcpu *vcpu)
66{ 66{
67 return kvm_read_cr0_bits(vcpu, X86_CR0_PG); 67 return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
68} 68}
69 69
70static inline u32 bit(int bitno) 70static inline u32 bit(int bitno)
diff --git a/arch/x86/lib/usercopy.c b/arch/x86/lib/usercopy.c
index 2e4e4b02c37a..f61ee67ec00f 100644
--- a/arch/x86/lib/usercopy.c
+++ b/arch/x86/lib/usercopy.c
@@ -43,100 +43,3 @@ copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
43 return len; 43 return len;
44} 44}
45EXPORT_SYMBOL_GPL(copy_from_user_nmi); 45EXPORT_SYMBOL_GPL(copy_from_user_nmi);
46
47/*
48 * Do a strncpy, return length of string without final '\0'.
49 * 'count' is the user-supplied count (return 'count' if we
50 * hit it), 'max' is the address space maximum (and we return
51 * -EFAULT if we hit it).
52 */
53static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
54{
55 long res = 0;
56
57 /*
58 * Truncate 'max' to the user-specified limit, so that
59 * we only have one limit we need to check in the loop
60 */
61 if (max > count)
62 max = count;
63
64 while (max >= sizeof(unsigned long)) {
65 unsigned long c, mask;
66
67 /* Fall back to byte-at-a-time if we get a page fault */
68 if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
69 break;
70 mask = has_zero(c);
71 if (mask) {
72 mask = (mask - 1) & ~mask;
73 mask >>= 7;
74 *(unsigned long *)(dst+res) = c & mask;
75 return res + count_masked_bytes(mask);
76 }
77 *(unsigned long *)(dst+res) = c;
78 res += sizeof(unsigned long);
79 max -= sizeof(unsigned long);
80 }
81
82 while (max) {
83 char c;
84
85 if (unlikely(__get_user(c,src+res)))
86 return -EFAULT;
87 dst[res] = c;
88 if (!c)
89 return res;
90 res++;
91 max--;
92 }
93
94 /*
95 * Uhhuh. We hit 'max'. But was that the user-specified maximum
96 * too? If so, that's ok - we got as much as the user asked for.
97 */
98 if (res >= count)
99 return res;
100
101 /*
102 * Nope: we hit the address space limit, and we still had more
103 * characters the caller would have wanted. That's an EFAULT.
104 */
105 return -EFAULT;
106}
107
108/**
109 * strncpy_from_user: - Copy a NUL terminated string from userspace.
110 * @dst: Destination address, in kernel space. This buffer must be at
111 * least @count bytes long.
112 * @src: Source address, in user space.
113 * @count: Maximum number of bytes to copy, including the trailing NUL.
114 *
115 * Copies a NUL-terminated string from userspace to kernel space.
116 *
117 * On success, returns the length of the string (not including the trailing
118 * NUL).
119 *
120 * If access to userspace fails, returns -EFAULT (some data may have been
121 * copied).
122 *
123 * If @count is smaller than the length of the string, copies @count bytes
124 * and returns @count.
125 */
126long
127strncpy_from_user(char *dst, const char __user *src, long count)
128{
129 unsigned long max_addr, src_addr;
130
131 if (unlikely(count <= 0))
132 return 0;
133
134 max_addr = current_thread_info()->addr_limit.seg;
135 src_addr = (unsigned long)src;
136 if (likely(src_addr < max_addr)) {
137 unsigned long max = max_addr - src_addr;
138 return do_strncpy_from_user(dst, src, count, max);
139 }
140 return -EFAULT;
141}
142EXPORT_SYMBOL(strncpy_from_user);
diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
index 883b216c60b2..1781b2f950e2 100644
--- a/arch/x86/lib/usercopy_32.c
+++ b/arch/x86/lib/usercopy_32.c
@@ -95,47 +95,6 @@ __clear_user(void __user *to, unsigned long n)
95} 95}
96EXPORT_SYMBOL(__clear_user); 96EXPORT_SYMBOL(__clear_user);
97 97
98/**
99 * strnlen_user: - Get the size of a string in user space.
100 * @s: The string to measure.
101 * @n: The maximum valid length
102 *
103 * Get the size of a NUL-terminated string in user space.
104 *
105 * Returns the size of the string INCLUDING the terminating NUL.
106 * On exception, returns 0.
107 * If the string is too long, returns a value greater than @n.
108 */
109long strnlen_user(const char __user *s, long n)
110{
111 unsigned long mask = -__addr_ok(s);
112 unsigned long res, tmp;
113
114 might_fault();
115
116 __asm__ __volatile__(
117 " testl %0, %0\n"
118 " jz 3f\n"
119 " andl %0,%%ecx\n"
120 "0: repne; scasb\n"
121 " setne %%al\n"
122 " subl %%ecx,%0\n"
123 " addl %0,%%eax\n"
124 "1:\n"
125 ".section .fixup,\"ax\"\n"
126 "2: xorl %%eax,%%eax\n"
127 " jmp 1b\n"
128 "3: movb $1,%%al\n"
129 " jmp 1b\n"
130 ".previous\n"
131 _ASM_EXTABLE(0b,2b)
132 :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp)
133 :"0" (n), "1" (s), "2" (0), "3" (mask)
134 :"cc");
135 return res & mask;
136}
137EXPORT_SYMBOL(strnlen_user);
138
139#ifdef CONFIG_X86_INTEL_USERCOPY 98#ifdef CONFIG_X86_INTEL_USERCOPY
140static unsigned long 99static unsigned long
141__copy_user_intel(void __user *to, const void *from, unsigned long size) 100__copy_user_intel(void __user *to, const void *from, unsigned long size)
diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
index 0d0326f388c0..e5b130bc2d0e 100644
--- a/arch/x86/lib/usercopy_64.c
+++ b/arch/x86/lib/usercopy_64.c
@@ -52,54 +52,6 @@ unsigned long clear_user(void __user *to, unsigned long n)
52} 52}
53EXPORT_SYMBOL(clear_user); 53EXPORT_SYMBOL(clear_user);
54 54
55/*
56 * Return the size of a string (including the ending 0)
57 *
58 * Return 0 on exception, a value greater than N if too long
59 */
60
61long __strnlen_user(const char __user *s, long n)
62{
63 long res = 0;
64 char c;
65
66 while (1) {
67 if (res>n)
68 return n+1;
69 if (__get_user(c, s))
70 return 0;
71 if (!c)
72 return res+1;
73 res++;
74 s++;
75 }
76}
77EXPORT_SYMBOL(__strnlen_user);
78
79long strnlen_user(const char __user *s, long n)
80{
81 if (!access_ok(VERIFY_READ, s, 1))
82 return 0;
83 return __strnlen_user(s, n);
84}
85EXPORT_SYMBOL(strnlen_user);
86
87long strlen_user(const char __user *s)
88{
89 long res = 0;
90 char c;
91
92 for (;;) {
93 if (get_user(c, s))
94 return 0;
95 if (!c)
96 return res+1;
97 res++;
98 s++;
99 }
100}
101EXPORT_SYMBOL(strlen_user);
102
103unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len) 55unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
104{ 56{
105 if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) { 57 if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 319b6f2fb8b9..97141c26a13a 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -84,8 +84,9 @@ static void __init find_early_table_space(struct map_range *mr, unsigned long en
84 pgt_buf_end = pgt_buf_start; 84 pgt_buf_end = pgt_buf_start;
85 pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT); 85 pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT);
86 86
87 printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n", 87 printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n",
88 end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT); 88 end - 1, pgt_buf_start << PAGE_SHIFT,
89 (pgt_buf_top << PAGE_SHIFT) - 1);
89} 90}
90 91
91void __init native_pagetable_reserve(u64 start, u64 end) 92void __init native_pagetable_reserve(u64 start, u64 end)
@@ -132,7 +133,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
132 int nr_range, i; 133 int nr_range, i;
133 int use_pse, use_gbpages; 134 int use_pse, use_gbpages;
134 135
135 printk(KERN_INFO "init_memory_mapping: %016lx-%016lx\n", start, end); 136 printk(KERN_INFO "init_memory_mapping: [mem %#010lx-%#010lx]\n",
137 start, end - 1);
136 138
137#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK) 139#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KMEMCHECK)
138 /* 140 /*
@@ -251,8 +253,8 @@ unsigned long __init_refok init_memory_mapping(unsigned long start,
251 } 253 }
252 254
253 for (i = 0; i < nr_range; i++) 255 for (i = 0; i < nr_range; i++)
254 printk(KERN_DEBUG " %010lx - %010lx page %s\n", 256 printk(KERN_DEBUG " [mem %#010lx-%#010lx] page %s\n",
255 mr[i].start, mr[i].end, 257 mr[i].start, mr[i].end - 1,
256 (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":( 258 (mr[i].page_size_mask & (1<<PG_LEVEL_1G))?"1G":(
257 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k")); 259 (mr[i].page_size_mask & (1<<PG_LEVEL_2M))?"2M":"4k"));
258 260
@@ -350,8 +352,8 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
350 * create a kernel page fault: 352 * create a kernel page fault:
351 */ 353 */
352#ifdef CONFIG_DEBUG_PAGEALLOC 354#ifdef CONFIG_DEBUG_PAGEALLOC
353 printk(KERN_INFO "debug: unmapping init memory %08lx..%08lx\n", 355 printk(KERN_INFO "debug: unmapping init [mem %#010lx-%#010lx]\n",
354 begin, end); 356 begin, end - 1);
355 set_memory_np(begin, (end - begin) >> PAGE_SHIFT); 357 set_memory_np(begin, (end - begin) >> PAGE_SHIFT);
356#else 358#else
357 /* 359 /*
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 19d3fa08b119..2d125be1bae9 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -141,8 +141,8 @@ static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
141 141
142 /* whine about and ignore invalid blks */ 142 /* whine about and ignore invalid blks */
143 if (start > end || nid < 0 || nid >= MAX_NUMNODES) { 143 if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
144 pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n", 144 pr_warning("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
145 nid, start, end); 145 nid, start, end - 1);
146 return 0; 146 return 0;
147 } 147 }
148 148
@@ -210,8 +210,8 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
210 210
211 start = roundup(start, ZONE_ALIGN); 211 start = roundup(start, ZONE_ALIGN);
212 212
213 printk(KERN_INFO "Initmem setup node %d %016Lx-%016Lx\n", 213 printk(KERN_INFO "Initmem setup node %d [mem %#010Lx-%#010Lx]\n",
214 nid, start, end); 214 nid, start, end - 1);
215 215
216 /* 216 /*
217 * Allocate node data. Try remap allocator first, node-local 217 * Allocate node data. Try remap allocator first, node-local
@@ -232,7 +232,7 @@ static void __init setup_node_data(int nid, u64 start, u64 end)
232 } 232 }
233 233
234 /* report and initialize */ 234 /* report and initialize */
235 printk(KERN_INFO " NODE_DATA [%016Lx - %016Lx]%s\n", 235 printk(KERN_INFO " NODE_DATA [mem %#010Lx-%#010Lx]%s\n",
236 nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : ""); 236 nd_pa, nd_pa + nd_size - 1, remapped ? " (remapped)" : "");
237 tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); 237 tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
238 if (!remapped && tnid != nid) 238 if (!remapped && tnid != nid)
@@ -291,14 +291,14 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
291 */ 291 */
292 if (bi->end > bj->start && bi->start < bj->end) { 292 if (bi->end > bj->start && bi->start < bj->end) {
293 if (bi->nid != bj->nid) { 293 if (bi->nid != bj->nid) {
294 pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n", 294 pr_err("NUMA: node %d [mem %#010Lx-%#010Lx] overlaps with node %d [mem %#010Lx-%#010Lx]\n",
295 bi->nid, bi->start, bi->end, 295 bi->nid, bi->start, bi->end - 1,
296 bj->nid, bj->start, bj->end); 296 bj->nid, bj->start, bj->end - 1);
297 return -EINVAL; 297 return -EINVAL;
298 } 298 }
299 pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n", 299 pr_warning("NUMA: Warning: node %d [mem %#010Lx-%#010Lx] overlaps with itself [mem %#010Lx-%#010Lx]\n",
300 bi->nid, bi->start, bi->end, 300 bi->nid, bi->start, bi->end - 1,
301 bj->start, bj->end); 301 bj->start, bj->end - 1);
302 } 302 }
303 303
304 /* 304 /*
@@ -320,9 +320,9 @@ int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
320 } 320 }
321 if (k < mi->nr_blks) 321 if (k < mi->nr_blks)
322 continue; 322 continue;
323 printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%Lx,%Lx)\n", 323 printk(KERN_INFO "NUMA: Node %d [mem %#010Lx-%#010Lx] + [mem %#010Lx-%#010Lx] -> [mem %#010Lx-%#010Lx]\n",
324 bi->nid, bi->start, bi->end, bj->start, bj->end, 324 bi->nid, bi->start, bi->end - 1, bj->start,
325 start, end); 325 bj->end - 1, start, end - 1);
326 bi->start = start; 326 bi->start = start;
327 bi->end = end; 327 bi->end = end;
328 numa_remove_memblk_from(j--, mi); 328 numa_remove_memblk_from(j--, mi);
@@ -616,8 +616,8 @@ static int __init dummy_numa_init(void)
616{ 616{
617 printk(KERN_INFO "%s\n", 617 printk(KERN_INFO "%s\n",
618 numa_off ? "NUMA turned off" : "No NUMA configuration found"); 618 numa_off ? "NUMA turned off" : "No NUMA configuration found");
619 printk(KERN_INFO "Faking a node at %016Lx-%016Lx\n", 619 printk(KERN_INFO "Faking a node at [mem %#018Lx-%#018Lx]\n",
620 0LLU, PFN_PHYS(max_pfn)); 620 0LLU, PFN_PHYS(max_pfn) - 1);
621 621
622 node_set(0, numa_nodes_parsed); 622 node_set(0, numa_nodes_parsed);
623 numa_add_memblk(0, 0, PFN_PHYS(max_pfn)); 623 numa_add_memblk(0, 0, PFN_PHYS(max_pfn));
diff --git a/arch/x86/mm/numa_emulation.c b/arch/x86/mm/numa_emulation.c
index 871dd8868170..dbbbb47260cc 100644
--- a/arch/x86/mm/numa_emulation.c
+++ b/arch/x86/mm/numa_emulation.c
@@ -68,8 +68,8 @@ static int __init emu_setup_memblk(struct numa_meminfo *ei,
68 numa_remove_memblk_from(phys_blk, pi); 68 numa_remove_memblk_from(phys_blk, pi);
69 } 69 }
70 70
71 printk(KERN_INFO "Faking node %d at %016Lx-%016Lx (%LuMB)\n", nid, 71 printk(KERN_INFO "Faking node %d at [mem %#018Lx-%#018Lx] (%LuMB)\n",
72 eb->start, eb->end, (eb->end - eb->start) >> 20); 72 nid, eb->start, eb->end - 1, (eb->end - eb->start) >> 20);
73 return 0; 73 return 0;
74} 74}
75 75
diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
index bea6e573e02b..3d68ef6d2266 100644
--- a/arch/x86/mm/pat.c
+++ b/arch/x86/mm/pat.c
@@ -225,9 +225,8 @@ static int reserve_ram_pages_type(u64 start, u64 end, unsigned long req_type,
225 page = pfn_to_page(pfn); 225 page = pfn_to_page(pfn);
226 type = get_page_memtype(page); 226 type = get_page_memtype(page);
227 if (type != -1) { 227 if (type != -1) {
228 printk(KERN_INFO "reserve_ram_pages_type failed " 228 printk(KERN_INFO "reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%lx, req 0x%lx\n",
229 "0x%Lx-0x%Lx, track 0x%lx, req 0x%lx\n", 229 start, end - 1, type, req_type);
230 start, end, type, req_type);
231 if (new_type) 230 if (new_type)
232 *new_type = type; 231 *new_type = type;
233 232
@@ -330,9 +329,9 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
330 329
331 err = rbt_memtype_check_insert(new, new_type); 330 err = rbt_memtype_check_insert(new, new_type);
332 if (err) { 331 if (err) {
333 printk(KERN_INFO "reserve_memtype failed 0x%Lx-0x%Lx, " 332 printk(KERN_INFO "reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
334 "track %s, req %s\n", 333 start, end - 1,
335 start, end, cattr_name(new->type), cattr_name(req_type)); 334 cattr_name(new->type), cattr_name(req_type));
336 kfree(new); 335 kfree(new);
337 spin_unlock(&memtype_lock); 336 spin_unlock(&memtype_lock);
338 337
@@ -341,8 +340,8 @@ int reserve_memtype(u64 start, u64 end, unsigned long req_type,
341 340
342 spin_unlock(&memtype_lock); 341 spin_unlock(&memtype_lock);
343 342
344 dprintk("reserve_memtype added 0x%Lx-0x%Lx, track %s, req %s, ret %s\n", 343 dprintk("reserve_memtype added [mem %#010Lx-%#010Lx], track %s, req %s, ret %s\n",
345 start, end, cattr_name(new->type), cattr_name(req_type), 344 start, end - 1, cattr_name(new->type), cattr_name(req_type),
346 new_type ? cattr_name(*new_type) : "-"); 345 new_type ? cattr_name(*new_type) : "-");
347 346
348 return err; 347 return err;
@@ -376,14 +375,14 @@ int free_memtype(u64 start, u64 end)
376 spin_unlock(&memtype_lock); 375 spin_unlock(&memtype_lock);
377 376
378 if (!entry) { 377 if (!entry) {
379 printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n", 378 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
380 current->comm, current->pid, start, end); 379 current->comm, current->pid, start, end - 1);
381 return -EINVAL; 380 return -EINVAL;
382 } 381 }
383 382
384 kfree(entry); 383 kfree(entry);
385 384
386 dprintk("free_memtype request 0x%Lx-0x%Lx\n", start, end); 385 dprintk("free_memtype request [mem %#010Lx-%#010Lx]\n", start, end - 1);
387 386
388 return 0; 387 return 0;
389} 388}
@@ -507,9 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
507 506
508 while (cursor < to) { 507 while (cursor < to) {
509 if (!devmem_is_allowed(pfn)) { 508 if (!devmem_is_allowed(pfn)) {
510 printk(KERN_INFO 509 printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
511 "Program %s tried to access /dev/mem between %Lx->%Lx.\n", 510 current->comm, from, to - 1);
512 current->comm, from, to);
513 return 0; 511 return 0;
514 } 512 }
515 cursor += PAGE_SIZE; 513 cursor += PAGE_SIZE;
@@ -570,12 +568,11 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
570 size; 568 size;
571 569
572 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) { 570 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
573 printk(KERN_INFO 571 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
574 "%s:%d ioremap_change_attr failed %s " 572 "for [mem %#010Lx-%#010Lx]\n",
575 "for %Lx-%Lx\n",
576 current->comm, current->pid, 573 current->comm, current->pid,
577 cattr_name(flags), 574 cattr_name(flags),
578 base, (unsigned long long)(base + size)); 575 base, (unsigned long long)(base + size-1));
579 return -EINVAL; 576 return -EINVAL;
580 } 577 }
581 return 0; 578 return 0;
@@ -607,12 +604,11 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
607 604
608 flags = lookup_memtype(paddr); 605 flags = lookup_memtype(paddr);
609 if (want_flags != flags) { 606 if (want_flags != flags) {
610 printk(KERN_WARNING 607 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
611 "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n",
612 current->comm, current->pid, 608 current->comm, current->pid,
613 cattr_name(want_flags), 609 cattr_name(want_flags),
614 (unsigned long long)paddr, 610 (unsigned long long)paddr,
615 (unsigned long long)(paddr + size), 611 (unsigned long long)(paddr + size - 1),
616 cattr_name(flags)); 612 cattr_name(flags));
617 *vma_prot = __pgprot((pgprot_val(*vma_prot) & 613 *vma_prot = __pgprot((pgprot_val(*vma_prot) &
618 (~_PAGE_CACHE_MASK)) | 614 (~_PAGE_CACHE_MASK)) |
@@ -630,11 +626,11 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
630 !is_new_memtype_allowed(paddr, size, want_flags, flags)) { 626 !is_new_memtype_allowed(paddr, size, want_flags, flags)) {
631 free_memtype(paddr, paddr + size); 627 free_memtype(paddr, paddr + size);
632 printk(KERN_ERR "%s:%d map pfn expected mapping type %s" 628 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
633 " for %Lx-%Lx, got %s\n", 629 " for [mem %#010Lx-%#010Lx], got %s\n",
634 current->comm, current->pid, 630 current->comm, current->pid,
635 cattr_name(want_flags), 631 cattr_name(want_flags),
636 (unsigned long long)paddr, 632 (unsigned long long)paddr,
637 (unsigned long long)(paddr + size), 633 (unsigned long long)(paddr + size - 1),
638 cattr_name(flags)); 634 cattr_name(flags));
639 return -EINVAL; 635 return -EINVAL;
640 } 636 }
diff --git a/arch/x86/mm/srat.c b/arch/x86/mm/srat.c
index efb5b4b93711..732af3a96183 100644
--- a/arch/x86/mm/srat.c
+++ b/arch/x86/mm/srat.c
@@ -176,8 +176,9 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
176 return; 176 return;
177 } 177 }
178 178
179 printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm, 179 printk(KERN_INFO "SRAT: Node %u PXM %u [mem %#010Lx-%#010Lx]\n",
180 start, end); 180 node, pxm,
181 (unsigned long long) start, (unsigned long long) end - 1);
181} 182}
182 183
183void __init acpi_numa_arch_fixup(void) {} 184void __init acpi_numa_arch_fixup(void) {}
diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c
index 7415aa927913..56ab74989cf1 100644
--- a/arch/x86/pci/xen.c
+++ b/arch/x86/pci/xen.c
@@ -64,6 +64,10 @@ static int xen_register_pirq(u32 gsi, int gsi_override, int triggering,
64 int shareable = 0; 64 int shareable = 0;
65 char *name; 65 char *name;
66 66
67 irq = xen_irq_from_gsi(gsi);
68 if (irq > 0)
69 return irq;
70
67 if (set_pirq) 71 if (set_pirq)
68 pirq = gsi; 72 pirq = gsi;
69 73
diff --git a/arch/x86/realmode/Makefile b/arch/x86/realmode/Makefile
new file mode 100644
index 000000000000..94f7fbe97b08
--- /dev/null
+++ b/arch/x86/realmode/Makefile
@@ -0,0 +1,18 @@
1#
2# arch/x86/realmode/Makefile
3#
4# This file is subject to the terms and conditions of the GNU General Public
5# License. See the file "COPYING" in the main directory of this archive
6# for more details.
7#
8#
9
10subdir- := rm
11
12obj-y += init.o
13obj-y += rmpiggy.o
14
15$(obj)/rmpiggy.o: $(obj)/rm/realmode.bin
16
17$(obj)/rm/realmode.bin: FORCE
18 $(Q)$(MAKE) $(build)=$(obj)/rm $@
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
new file mode 100644
index 000000000000..cbca565af5bd
--- /dev/null
+++ b/arch/x86/realmode/init.c
@@ -0,0 +1,115 @@
1#include <linux/io.h>
2#include <linux/memblock.h>
3
4#include <asm/cacheflush.h>
5#include <asm/pgtable.h>
6#include <asm/realmode.h>
7
8struct real_mode_header *real_mode_header;
9u32 *trampoline_cr4_features;
10
11void __init setup_real_mode(void)
12{
13 phys_addr_t mem;
14 u16 real_mode_seg;
15 u32 *rel;
16 u32 count;
17 u32 *ptr;
18 u16 *seg;
19 int i;
20 unsigned char *base;
21 struct trampoline_header *trampoline_header;
22 size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
23#ifdef CONFIG_X86_64
24 u64 *trampoline_pgd;
25 u64 efer;
26#endif
27
28 /* Has to be in very low memory so we can execute real-mode AP code. */
29 mem = memblock_find_in_range(0, 1<<20, size, PAGE_SIZE);
30 if (!mem)
31 panic("Cannot allocate trampoline\n");
32
33 base = __va(mem);
34 memblock_reserve(mem, size);
35 real_mode_header = (struct real_mode_header *) base;
36 printk(KERN_DEBUG "Base memory trampoline at [%p] %llx size %zu\n",
37 base, (unsigned long long)mem, size);
38
39 memcpy(base, real_mode_blob, size);
40
41 real_mode_seg = __pa(base) >> 4;
42 rel = (u32 *) real_mode_relocs;
43
44 /* 16-bit segment relocations. */
45 count = rel[0];
46 rel = &rel[1];
47 for (i = 0; i < count; i++) {
48 seg = (u16 *) (base + rel[i]);
49 *seg = real_mode_seg;
50 }
51
52 /* 32-bit linear relocations. */
53 count = rel[i];
54 rel = &rel[i + 1];
55 for (i = 0; i < count; i++) {
56 ptr = (u32 *) (base + rel[i]);
57 *ptr += __pa(base);
58 }
59
60 /* Must be perfomed *after* relocation. */
61 trampoline_header = (struct trampoline_header *)
62 __va(real_mode_header->trampoline_header);
63
64#ifdef CONFIG_X86_32
65 trampoline_header->start = __pa(startup_32_smp);
66 trampoline_header->gdt_limit = __BOOT_DS + 7;
67 trampoline_header->gdt_base = __pa(boot_gdt);
68#else
69 /*
70 * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
71 * so we need to mask it out.
72 */
73 rdmsrl(MSR_EFER, efer);
74 trampoline_header->efer = efer & ~EFER_LMA;
75
76 trampoline_header->start = (u64) secondary_startup_64;
77 trampoline_cr4_features = &trampoline_header->cr4;
78 *trampoline_cr4_features = read_cr4();
79
80 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
81 trampoline_pgd[0] = __pa(level3_ident_pgt) + _KERNPG_TABLE;
82 trampoline_pgd[511] = __pa(level3_kernel_pgt) + _KERNPG_TABLE;
83#endif
84}
85
86/*
87 * set_real_mode_permissions() gets called very early, to guarantee the
88 * availability of low memory. This is before the proper kernel page
89 * tables are set up, so we cannot set page permissions in that
90 * function. Thus, we use an arch_initcall instead.
91 */
92static int __init set_real_mode_permissions(void)
93{
94 unsigned char *base = (unsigned char *) real_mode_header;
95 size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
96
97 size_t ro_size =
98 PAGE_ALIGN(real_mode_header->ro_end) -
99 __pa(base);
100
101 size_t text_size =
102 PAGE_ALIGN(real_mode_header->ro_end) -
103 real_mode_header->text_start;
104
105 unsigned long text_start =
106 (unsigned long) __va(real_mode_header->text_start);
107
108 set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
109 set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
110 set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
111
112 return 0;
113}
114
115arch_initcall(set_real_mode_permissions);
diff --git a/arch/x86/realmode/rm/.gitignore b/arch/x86/realmode/rm/.gitignore
new file mode 100644
index 000000000000..b6ed3a2555cb
--- /dev/null
+++ b/arch/x86/realmode/rm/.gitignore
@@ -0,0 +1,3 @@
1pasyms.h
2realmode.lds
3realmode.relocs
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
new file mode 100644
index 000000000000..5b84a2d30888
--- /dev/null
+++ b/arch/x86/realmode/rm/Makefile
@@ -0,0 +1,82 @@
1#
2# arch/x86/realmode/Makefile
3#
4# This file is subject to the terms and conditions of the GNU General Public
5# License. See the file "COPYING" in the main directory of this archive
6# for more details.
7#
8#
9
10always := realmode.bin realmode.relocs
11
12wakeup-objs := wakeup_asm.o wakemain.o video-mode.o
13wakeup-objs += copy.o bioscall.o regs.o
14# The link order of the video-*.o modules can matter. In particular,
15# video-vga.o *must* be listed first, followed by video-vesa.o.
16# Hardware-specific drivers should follow in the order they should be
17# probed, and video-bios.o should typically be last.
18wakeup-objs += video-vga.o
19wakeup-objs += video-vesa.o
20wakeup-objs += video-bios.o
21
22realmode-y += header.o
23realmode-y += trampoline_$(BITS).o
24realmode-y += stack.o
25realmode-$(CONFIG_X86_32) += reboot_32.o
26realmode-$(CONFIG_ACPI_SLEEP) += $(wakeup-objs)
27
28targets += $(realmode-y)
29
30REALMODE_OBJS = $(addprefix $(obj)/,$(realmode-y))
31
32sed-pasyms := -n -r -e 's/^([0-9a-fA-F]+) [ABCDGRSTVW] (.+)$$/pa_\2 = \2;/p'
33
34quiet_cmd_pasyms = PASYMS $@
35 cmd_pasyms = $(NM) $(filter-out FORCE,$^) | \
36 sed $(sed-pasyms) | sort | uniq > $@
37
38targets += pasyms.h
39$(obj)/pasyms.h: $(REALMODE_OBJS) FORCE
40 $(call if_changed,pasyms)
41
42targets += realmode.lds
43$(obj)/realmode.lds: $(obj)/pasyms.h
44
45LDFLAGS_realmode.elf := --emit-relocs -T
46CPPFLAGS_realmode.lds += -P -C -I$(obj)
47
48targets += realmode.elf
49$(obj)/realmode.elf: $(obj)/realmode.lds $(REALMODE_OBJS) FORCE
50 $(call if_changed,ld)
51
52OBJCOPYFLAGS_realmode.bin := -O binary
53
54targets += realmode.bin
55$(obj)/realmode.bin: $(obj)/realmode.elf $(obj)/realmode.relocs
56 $(call if_changed,objcopy)
57
58quiet_cmd_relocs = RELOCS $@
59 cmd_relocs = arch/x86/tools/relocs --realmode $< > $@
60
61targets += realmode.relocs
62$(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
63 $(call if_changed,relocs)
64
65# ---------------------------------------------------------------------------
66
67# How to compile the 16-bit code. Note we always compile for -march=i386,
68# that way we can complain to the user if the CPU is insufficient.
69KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
70 -I$(srctree)/arch/x86/boot \
71 -DDISABLE_BRANCH_PROFILING \
72 -Wall -Wstrict-prototypes \
73 -march=i386 -mregparm=3 \
74 -include $(srctree)/$(src)/../../boot/code16gcc.h \
75 -fno-strict-aliasing -fomit-frame-pointer \
76 $(call cc-option, -ffreestanding) \
77 $(call cc-option, -fno-toplevel-reorder,\
78 $(call cc-option, -fno-unit-at-a-time)) \
79 $(call cc-option, -fno-stack-protector) \
80 $(call cc-option, -mpreferred-stack-boundary=2)
81KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
82GCOV_PROFILE := n
diff --git a/arch/x86/realmode/rm/bioscall.S b/arch/x86/realmode/rm/bioscall.S
new file mode 100644
index 000000000000..16162d197918
--- /dev/null
+++ b/arch/x86/realmode/rm/bioscall.S
@@ -0,0 +1 @@
#include "../../boot/bioscall.S"
diff --git a/arch/x86/realmode/rm/copy.S b/arch/x86/realmode/rm/copy.S
new file mode 100644
index 000000000000..b785e6f38fdd
--- /dev/null
+++ b/arch/x86/realmode/rm/copy.S
@@ -0,0 +1 @@
#include "../../boot/copy.S"
diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
new file mode 100644
index 000000000000..fadf48378ada
--- /dev/null
+++ b/arch/x86/realmode/rm/header.S
@@ -0,0 +1,41 @@
1/*
2 * Real-mode blob header; this should match realmode.h and be
3 * readonly; for mutable data instead add pointers into the .data
4 * or .bss sections as appropriate.
5 */
6
7#include <linux/linkage.h>
8#include <asm/page_types.h>
9
10#include "realmode.h"
11
12 .section ".header", "a"
13
14 .balign 16
15GLOBAL(real_mode_header)
16 .long pa_text_start
17 .long pa_ro_end
18 /* SMP trampoline */
19 .long pa_trampoline_start
20 .long pa_trampoline_status
21 .long pa_trampoline_header
22#ifdef CONFIG_X86_64
23 .long pa_trampoline_pgd;
24#endif
25 /* ACPI S3 wakeup */
26#ifdef CONFIG_ACPI_SLEEP
27 .long pa_wakeup_start
28 .long pa_wakeup_header
29#endif
30 /* APM/BIOS reboot */
31#ifdef CONFIG_X86_32
32 .long pa_machine_real_restart_asm
33#endif
34END(real_mode_header)
35
36 /* End signature, used to verify integrity */
37 .section ".signature","a"
38 .balign 4
39GLOBAL(end_signature)
40 .long REALMODE_END_SIGNATURE
41END(end_signature)
diff --git a/arch/x86/realmode/rm/realmode.h b/arch/x86/realmode/rm/realmode.h
new file mode 100644
index 000000000000..d74cff6350ed
--- /dev/null
+++ b/arch/x86/realmode/rm/realmode.h
@@ -0,0 +1,21 @@
1#ifndef ARCH_X86_REALMODE_RM_REALMODE_H
2#define ARCH_X86_REALMODE_RM_REALMODE_H
3
4#ifdef __ASSEMBLY__
5
6/*
7 * 16-bit ljmpw to the real_mode_seg
8 *
9 * This must be open-coded since gas will choke on using a
10 * relocatable symbol for the segment portion.
11 */
12#define LJMPW_RM(to) .byte 0xea ; .word (to), real_mode_seg
13
14#endif /* __ASSEMBLY__ */
15
16/*
17 * Signature at the end of the realmode region
18 */
19#define REALMODE_END_SIGNATURE 0x65a22c82
20
21#endif /* ARCH_X86_REALMODE_RM_REALMODE_H */
diff --git a/arch/x86/realmode/rm/realmode.lds.S b/arch/x86/realmode/rm/realmode.lds.S
new file mode 100644
index 000000000000..86b2e8d6b1f1
--- /dev/null
+++ b/arch/x86/realmode/rm/realmode.lds.S
@@ -0,0 +1,76 @@
1/*
2 * realmode.lds.S
3 *
4 * Linker script for the real-mode code
5 */
6
7#include <asm/page_types.h>
8
9#undef i386
10
11OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
12OUTPUT_ARCH(i386)
13
14SECTIONS
15{
16 real_mode_seg = 0;
17
18 . = 0;
19 .header : {
20 pa_real_mode_base = .;
21 *(.header)
22 }
23
24 . = ALIGN(4);
25 .rodata : {
26 *(.rodata)
27 *(.rodata.*)
28 . = ALIGN(16);
29 video_cards = .;
30 *(.videocards)
31 video_cards_end = .;
32 }
33
34 . = ALIGN(PAGE_SIZE);
35 pa_text_start = .;
36 .text : {
37 *(.text)
38 *(.text.*)
39 }
40
41 .text32 : {
42 *(.text32)
43 *(.text32.*)
44 }
45
46 .text64 : {
47 *(.text64)
48 *(.text64.*)
49 }
50 pa_ro_end = .;
51
52 . = ALIGN(PAGE_SIZE);
53 .data : {
54 *(.data)
55 *(.data.*)
56 }
57
58 . = ALIGN(128);
59 .bss : {
60 *(.bss*)
61 }
62
63 /* End signature for integrity checking */
64 . = ALIGN(4);
65 .signature : {
66 *(.signature)
67 }
68
69 /DISCARD/ : {
70 *(.note*)
71 *(.debug*)
72 *(.eh_frame*)
73 }
74
75#include "pasyms.h"
76}
diff --git a/arch/x86/kernel/reboot_32.S b/arch/x86/realmode/rm/reboot_32.S
index 1d5c46df0d78..114044876b3d 100644
--- a/arch/x86/kernel/reboot_32.S
+++ b/arch/x86/realmode/rm/reboot_32.S
@@ -2,6 +2,7 @@
2#include <linux/init.h> 2#include <linux/init.h>
3#include <asm/segment.h> 3#include <asm/segment.h>
4#include <asm/page_types.h> 4#include <asm/page_types.h>
5#include "realmode.h"
5 6
6/* 7/*
7 * The following code and data reboots the machine by switching to real 8 * The following code and data reboots the machine by switching to real
@@ -13,34 +14,20 @@
13 * 14 *
14 * This code is called with the restart type (0 = BIOS, 1 = APM) in %eax. 15 * This code is called with the restart type (0 = BIOS, 1 = APM) in %eax.
15 */ 16 */
16 .section ".x86_trampoline","a" 17 .section ".text32", "ax"
17 .balign 16
18 .code32 18 .code32
19ENTRY(machine_real_restart_asm)
20r_base = .
21 /* Get our own relocated address */
22 call 1f
231: popl %ebx
24 subl $(1b - r_base), %ebx
25
26 /* Compute the equivalent real-mode segment */
27 movl %ebx, %ecx
28 shrl $4, %ecx
29
30 /* Patch post-real-mode segment jump */
31 movw (dispatch_table - r_base)(%ebx,%eax,2),%ax
32 movw %ax, (101f - r_base)(%ebx)
33 movw %cx, (102f - r_base)(%ebx)
34 19
20 .balign 16
21ENTRY(machine_real_restart_asm)
35 /* Set up the IDT for real mode. */ 22 /* Set up the IDT for real mode. */
36 lidtl (machine_real_restart_idt - r_base)(%ebx) 23 lidtl pa_machine_real_restart_idt
37 24
38 /* 25 /*
39 * Set up a GDT from which we can load segment descriptors for real 26 * Set up a GDT from which we can load segment descriptors for real
40 * mode. The GDT is not used in real mode; it is just needed here to 27 * mode. The GDT is not used in real mode; it is just needed here to
41 * prepare the descriptors. 28 * prepare the descriptors.
42 */ 29 */
43 lgdtl (machine_real_restart_gdt - r_base)(%ebx) 30 lgdtl pa_machine_real_restart_gdt
44 31
45 /* 32 /*
46 * Load the data segment registers with 16-bit compatible values 33 * Load the data segment registers with 16-bit compatible values
@@ -51,7 +38,7 @@ r_base = .
51 movl %ecx, %fs 38 movl %ecx, %fs
52 movl %ecx, %gs 39 movl %ecx, %gs
53 movl %ecx, %ss 40 movl %ecx, %ss
54 ljmpl $8, $1f - r_base 41 ljmpw $8, $1f
55 42
56/* 43/*
57 * This is 16-bit protected mode code to disable paging and the cache, 44 * This is 16-bit protected mode code to disable paging and the cache,
@@ -76,27 +63,29 @@ r_base = .
76 * 63 *
77 * Most of this work is probably excessive, but it is what is tested. 64 * Most of this work is probably excessive, but it is what is tested.
78 */ 65 */
66 .text
79 .code16 67 .code16
68
69 .balign 16
70machine_real_restart_asm16:
801: 711:
81 xorl %ecx, %ecx 72 xorl %ecx, %ecx
82 movl %cr0, %eax 73 movl %cr0, %edx
83 andl $0x00000011, %eax 74 andl $0x00000011, %edx
84 orl $0x60000000, %eax 75 orl $0x60000000, %edx
85 movl %eax, %cr0 76 movl %edx, %cr0
86 movl %ecx, %cr3 77 movl %ecx, %cr3
87 movl %cr0, %edx 78 movl %cr0, %edx
88 andl $0x60000000, %edx /* If no cache bits -> no wbinvd */ 79 testl $0x60000000, %edx /* If no cache bits -> no wbinvd */
89 jz 2f 80 jz 2f
90 wbinvd 81 wbinvd
912: 822:
92 andb $0x10, %al 83 andb $0x10, %dl
93 movl %eax, %cr0 84 movl %edx, %cr0
94 .byte 0xea /* ljmpw */ 85 LJMPW_RM(3f)
95101: .word 0 /* Offset */ 863:
96102: .word 0 /* Segment */ 87 andw %ax, %ax
97 88 jz bios
98bios:
99 ljmpw $0xf000, $0xfff0
100 89
101apm: 90apm:
102 movw $0x1000, %ax 91 movw $0x1000, %ax
@@ -106,26 +95,34 @@ apm:
106 movw $0x0001, %bx 95 movw $0x0001, %bx
107 movw $0x0003, %cx 96 movw $0x0003, %cx
108 int $0x15 97 int $0x15
98 /* This should never return... */
109 99
110END(machine_real_restart_asm) 100bios:
101 ljmpw $0xf000, $0xfff0
111 102
112 .balign 16 103 .section ".rodata", "a"
113 /* These must match <asm/reboot.h */
114dispatch_table:
115 .word bios - r_base
116 .word apm - r_base
117END(dispatch_table)
118 104
119 .balign 16 105 .balign 16
120machine_real_restart_idt: 106GLOBAL(machine_real_restart_idt)
121 .word 0xffff /* Length - real mode default value */ 107 .word 0xffff /* Length - real mode default value */
122 .long 0 /* Base - real mode default value */ 108 .long 0 /* Base - real mode default value */
123END(machine_real_restart_idt) 109END(machine_real_restart_idt)
124 110
125 .balign 16 111 .balign 16
126ENTRY(machine_real_restart_gdt) 112GLOBAL(machine_real_restart_gdt)
127 .quad 0 /* Self-pointer, filled in by PM code */ 113 /* Self-pointer */
128 .quad 0 /* 16-bit code segment, filled in by PM code */ 114 .word 0xffff /* Length - real mode default value */
115 .long pa_machine_real_restart_gdt
116 .word 0
117
118 /*
119 * 16-bit code segment pointing to real_mode_seg
120 * Selector value 8
121 */
122 .word 0xffff /* Limit */
123 .long 0x9b000000 + pa_real_mode_base
124 .word 0
125
129 /* 126 /*
130 * 16-bit data segment with the selector value 16 = 0x10 and 127 * 16-bit data segment with the selector value 16 = 0x10 and
131 * base value 0x100; since this is consistent with real mode 128 * base value 0x100; since this is consistent with real mode
diff --git a/arch/x86/realmode/rm/regs.c b/arch/x86/realmode/rm/regs.c
new file mode 100644
index 000000000000..fbb15b9f9ca9
--- /dev/null
+++ b/arch/x86/realmode/rm/regs.c
@@ -0,0 +1 @@
#include "../../boot/regs.c"
diff --git a/arch/x86/realmode/rm/stack.S b/arch/x86/realmode/rm/stack.S
new file mode 100644
index 000000000000..867ae87adfae
--- /dev/null
+++ b/arch/x86/realmode/rm/stack.S
@@ -0,0 +1,19 @@
1/*
2 * Common heap and stack allocations
3 */
4
5#include <linux/linkage.h>
6
7 .data
8GLOBAL(HEAP)
9 .long rm_heap
10GLOBAL(heap_end)
11 .long rm_stack
12
13 .bss
14 .balign 16
15GLOBAL(rm_heap)
16 .space 2048
17GLOBAL(rm_stack)
18 .space 2048
19GLOBAL(rm_stack_end)
diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
new file mode 100644
index 000000000000..c1b2791183e7
--- /dev/null
+++ b/arch/x86/realmode/rm/trampoline_32.S
@@ -0,0 +1,74 @@
1/*
2 *
3 * Trampoline.S Derived from Setup.S by Linus Torvalds
4 *
5 * 4 Jan 1997 Michael Chastain: changed to gnu as.
6 *
7 * This is only used for booting secondary CPUs in SMP machine
8 *
9 * Entry: CS:IP point to the start of our code, we are
10 * in real mode with no stack, but the rest of the
11 * trampoline page to make our stack and everything else
12 * is a mystery.
13 *
14 * We jump into arch/x86/kernel/head_32.S.
15 *
16 * On entry to trampoline_start, the processor is in real mode
17 * with 16-bit addressing and 16-bit data. CS has some value
18 * and IP is zero. Thus, we load CS to the physical segment
19 * of the real mode code before doing anything further.
20 */
21
22#include <linux/linkage.h>
23#include <linux/init.h>
24#include <asm/segment.h>
25#include <asm/page_types.h>
26#include "realmode.h"
27
28 .text
29 .code16
30
31 .balign PAGE_SIZE
32ENTRY(trampoline_start)
33 wbinvd # Needed for NUMA-Q should be harmless for others
34
35 LJMPW_RM(1f)
361:
37 mov %cs, %ax # Code and data in the same place
38 mov %ax, %ds
39
40 cli # We should be safe anyway
41
42 movl tr_start, %eax # where we need to go
43
44 movl $0xA5A5A5A5, trampoline_status
45 # write marker for master knows we're running
46
47 /*
48 * GDT tables in non default location kernel can be beyond 16MB and
49 * lgdt will not be able to load the address as in real mode default
50 * operand size is 16bit. Use lgdtl instead to force operand size
51 * to 32 bit.
52 */
53 lidtl tr_idt # load idt with 0, 0
54 lgdtl tr_gdt # load gdt with whatever is appropriate
55
56 movw $1, %dx # protected mode (PE) bit
57 lmsw %dx # into protected mode
58
59 ljmpl $__BOOT_CS, $pa_startup_32
60
61 .section ".text32","ax"
62 .code32
63ENTRY(startup_32) # note: also used from wakeup_asm.S
64 jmp *%eax
65
66 .bss
67 .balign 8
68GLOBAL(trampoline_header)
69 tr_start: .space 4
70 tr_gdt_pad: .space 2
71 tr_gdt: .space 6
72END(trampoline_header)
73
74#include "trampoline_common.S"
diff --git a/arch/x86/kernel/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
index 09ff51799e96..bb360dc39d21 100644
--- a/arch/x86/kernel/trampoline_64.S
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -5,12 +5,12 @@
5 * 4 Jan 1997 Michael Chastain: changed to gnu as. 5 * 4 Jan 1997 Michael Chastain: changed to gnu as.
6 * 15 Sept 2005 Eric Biederman: 64bit PIC support 6 * 15 Sept 2005 Eric Biederman: 64bit PIC support
7 * 7 *
8 * Entry: CS:IP point to the start of our code, we are 8 * Entry: CS:IP point to the start of our code, we are
9 * in real mode with no stack, but the rest of the 9 * in real mode with no stack, but the rest of the
10 * trampoline page to make our stack and everything else 10 * trampoline page to make our stack and everything else
11 * is a mystery. 11 * is a mystery.
12 * 12 *
13 * On entry to trampoline_data, the processor is in real mode 13 * On entry to trampoline_start, the processor is in real mode
14 * with 16-bit addressing and 16-bit data. CS has some value 14 * with 16-bit addressing and 16-bit data. CS has some value
15 * and IP is zero. Thus, data addresses need to be absolute 15 * and IP is zero. Thus, data addresses need to be absolute
16 * (no relocation) and are taken with regard to r_base. 16 * (no relocation) and are taken with regard to r_base.
@@ -31,43 +31,33 @@
31#include <asm/msr.h> 31#include <asm/msr.h>
32#include <asm/segment.h> 32#include <asm/segment.h>
33#include <asm/processor-flags.h> 33#include <asm/processor-flags.h>
34#include "realmode.h"
34 35
35 .section ".x86_trampoline","a" 36 .text
36 .balign PAGE_SIZE
37 .code16 37 .code16
38 38
39ENTRY(trampoline_data) 39 .balign PAGE_SIZE
40r_base = . 40ENTRY(trampoline_start)
41 cli # We should be safe anyway 41 cli # We should be safe anyway
42 wbinvd 42 wbinvd
43
44 LJMPW_RM(1f)
451:
43 mov %cs, %ax # Code and data in the same place 46 mov %cs, %ax # Code and data in the same place
44 mov %ax, %ds 47 mov %ax, %ds
45 mov %ax, %es 48 mov %ax, %es
46 mov %ax, %ss 49 mov %ax, %ss
47 50
51 movl $0xA5A5A5A5, trampoline_status
52 # write marker for master knows we're running
48 53
49 movl $0xA5A5A5A5, trampoline_status - r_base 54 # Setup stack
50 # write marker for master knows we're running 55 movl $rm_stack_end, %esp
51
52 # Setup stack
53 movw $(trampoline_stack_end - r_base), %sp
54 56
55 call verify_cpu # Verify the cpu supports long mode 57 call verify_cpu # Verify the cpu supports long mode
56 testl %eax, %eax # Check for return code 58 testl %eax, %eax # Check for return code
57 jnz no_longmode 59 jnz no_longmode
58 60
59 mov %cs, %ax
60 movzx %ax, %esi # Find the 32bit trampoline location
61 shll $4, %esi
62
63 # Fixup the absolute vectors
64 leal (startup_32 - r_base)(%esi), %eax
65 movl %eax, startup_32_vector - r_base
66 leal (startup_64 - r_base)(%esi), %eax
67 movl %eax, startup_64_vector - r_base
68 leal (tgdt - r_base)(%esi), %eax
69 movl %eax, (tgdt + 2 - r_base)
70
71 /* 61 /*
72 * GDT tables in non default location kernel can be beyond 16MB and 62 * GDT tables in non default location kernel can be beyond 16MB and
73 * lgdt will not be able to load the address as in real mode default 63 * lgdt will not be able to load the address as in real mode default
@@ -75,36 +65,49 @@ r_base = .
75 * to 32 bit. 65 * to 32 bit.
76 */ 66 */
77 67
78 lidtl tidt - r_base # load idt with 0, 0 68 lidtl tr_idt # load idt with 0, 0
79 lgdtl tgdt - r_base # load gdt with whatever is appropriate 69 lgdtl tr_gdt # load gdt with whatever is appropriate
70
71 movw $__KERNEL_DS, %dx # Data segment descriptor
80 72
81 mov $X86_CR0_PE, %ax # protected mode (PE) bit 73 # Enable protected mode
82 lmsw %ax # into protected mode 74 movl $X86_CR0_PE, %eax # protected mode (PE) bit
75 movl %eax, %cr0 # into protected mode
83 76
84 # flush prefetch and jump to startup_32 77 # flush prefetch and jump to startup_32
85 ljmpl *(startup_32_vector - r_base) 78 ljmpl $__KERNEL32_CS, $pa_startup_32
86 79
80no_longmode:
81 hlt
82 jmp no_longmode
83#include "../kernel/verify_cpu.S"
84
85 .section ".text32","ax"
87 .code32 86 .code32
88 .balign 4 87 .balign 4
89startup_32: 88ENTRY(startup_32)
90 movl $__KERNEL_DS, %eax # Initialize the %ds segment register 89 movl %edx, %ss
91 movl %eax, %ds 90 addl $pa_real_mode_base, %esp
92 91 movl %edx, %ds
93 movl $X86_CR4_PAE, %eax 92 movl %edx, %es
93 movl %edx, %fs
94 movl %edx, %gs
95
96 movl pa_tr_cr4, %eax
94 movl %eax, %cr4 # Enable PAE mode 97 movl %eax, %cr4 # Enable PAE mode
95 98
96 # Setup trampoline 4 level pagetables 99 # Setup trampoline 4 level pagetables
97 leal (trampoline_level4_pgt - r_base)(%esi), %eax 100 movl $pa_trampoline_pgd, %eax
98 movl %eax, %cr3 101 movl %eax, %cr3
99 102
103 # Set up EFER
104 movl pa_tr_efer, %eax
105 movl pa_tr_efer + 4, %edx
100 movl $MSR_EFER, %ecx 106 movl $MSR_EFER, %ecx
101 movl $(1 << _EFER_LME), %eax # Enable Long Mode
102 xorl %edx, %edx
103 wrmsr 107 wrmsr
104 108
105 # Enable paging and in turn activate Long Mode 109 # Enable paging and in turn activate Long Mode
106 # Enable protected mode 110 movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
107 movl $(X86_CR0_PG | X86_CR0_PE), %eax
108 movl %eax, %cr0 111 movl %eax, %cr0
109 112
110 /* 113 /*
@@ -113,59 +116,38 @@ startup_32:
113 * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use 116 * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use
114 * the new gdt/idt that has __KERNEL_CS with CS.L = 1. 117 * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
115 */ 118 */
116 ljmp *(startup_64_vector - r_base)(%esi) 119 ljmpl $__KERNEL_CS, $pa_startup_64
117 120
121 .section ".text64","ax"
118 .code64 122 .code64
119 .balign 4 123 .balign 4
120startup_64: 124ENTRY(startup_64)
121 # Now jump into the kernel using virtual addresses 125 # Now jump into the kernel using virtual addresses
122 movq $secondary_startup_64, %rax 126 jmpq *tr_start(%rip)
123 jmp *%rax
124
125 .code16
126no_longmode:
127 hlt
128 jmp no_longmode
129#include "verify_cpu.S"
130
131 .balign 4
132 # Careful these need to be in the same 64K segment as the above;
133tidt:
134 .word 0 # idt limit = 0
135 .word 0, 0 # idt base = 0L
136 127
128 .section ".rodata","a"
137 # Duplicate the global descriptor table 129 # Duplicate the global descriptor table
138 # so the kernel can live anywhere 130 # so the kernel can live anywhere
139 .balign 4 131 .balign 16
140tgdt: 132 .globl tr_gdt
141 .short tgdt_end - tgdt # gdt limit 133tr_gdt:
142 .long tgdt - r_base 134 .short tr_gdt_end - tr_gdt - 1 # gdt limit
143 .short 0 135 .long pa_tr_gdt
136 .short 0
144 .quad 0x00cf9b000000ffff # __KERNEL32_CS 137 .quad 0x00cf9b000000ffff # __KERNEL32_CS
145 .quad 0x00af9b000000ffff # __KERNEL_CS 138 .quad 0x00af9b000000ffff # __KERNEL_CS
146 .quad 0x00cf93000000ffff # __KERNEL_DS 139 .quad 0x00cf93000000ffff # __KERNEL_DS
147tgdt_end: 140tr_gdt_end:
148 141
149 .balign 4 142 .bss
150startup_32_vector: 143 .balign PAGE_SIZE
151 .long startup_32 - r_base 144GLOBAL(trampoline_pgd) .space PAGE_SIZE
152 .word __KERNEL32_CS, 0
153 145
154 .balign 4 146 .balign 8
155startup_64_vector: 147GLOBAL(trampoline_header)
156 .long startup_64 - r_base 148 tr_start: .space 8
157 .word __KERNEL_CS, 0 149 GLOBAL(tr_efer) .space 8
150 GLOBAL(tr_cr4) .space 4
151END(trampoline_header)
158 152
159 .balign 4 153#include "trampoline_common.S"
160ENTRY(trampoline_status)
161 .long 0
162
163trampoline_stack:
164 .org 0x1000
165trampoline_stack_end:
166ENTRY(trampoline_level4_pgt)
167 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
168 .fill 510,8,0
169 .quad level3_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
170
171ENTRY(trampoline_end)
diff --git a/arch/x86/realmode/rm/trampoline_common.S b/arch/x86/realmode/rm/trampoline_common.S
new file mode 100644
index 000000000000..b1ecdb9692ad
--- /dev/null
+++ b/arch/x86/realmode/rm/trampoline_common.S
@@ -0,0 +1,7 @@
1 .section ".rodata","a"
2 .balign 16
3tr_idt: .fill 1, 6, 0
4
5 .bss
6 .balign 4
7GLOBAL(trampoline_status) .space 4
diff --git a/arch/x86/realmode/rm/video-bios.c b/arch/x86/realmode/rm/video-bios.c
new file mode 100644
index 000000000000..848b25aaf11b
--- /dev/null
+++ b/arch/x86/realmode/rm/video-bios.c
@@ -0,0 +1 @@
#include "../../boot/video-bios.c"
diff --git a/arch/x86/realmode/rm/video-mode.c b/arch/x86/realmode/rm/video-mode.c
new file mode 100644
index 000000000000..2a98b7e2368b
--- /dev/null
+++ b/arch/x86/realmode/rm/video-mode.c
@@ -0,0 +1 @@
#include "../../boot/video-mode.c"
diff --git a/arch/x86/realmode/rm/video-vesa.c b/arch/x86/realmode/rm/video-vesa.c
new file mode 100644
index 000000000000..413edddb51e5
--- /dev/null
+++ b/arch/x86/realmode/rm/video-vesa.c
@@ -0,0 +1 @@
#include "../../boot/video-vesa.c"
diff --git a/arch/x86/realmode/rm/video-vga.c b/arch/x86/realmode/rm/video-vga.c
new file mode 100644
index 000000000000..3085f5c9d288
--- /dev/null
+++ b/arch/x86/realmode/rm/video-vga.c
@@ -0,0 +1 @@
#include "../../boot/video-vga.c"
diff --git a/arch/x86/kernel/acpi/realmode/wakemain.c b/arch/x86/realmode/rm/wakemain.c
index 883962d9eef2..91405d515ec6 100644
--- a/arch/x86/kernel/acpi/realmode/wakemain.c
+++ b/arch/x86/realmode/rm/wakemain.c
@@ -65,7 +65,8 @@ void main(void)
65{ 65{
66 /* Kill machine if structures are wrong */ 66 /* Kill machine if structures are wrong */
67 if (wakeup_header.real_magic != 0x12345678) 67 if (wakeup_header.real_magic != 0x12345678)
68 while (1); 68 while (1)
69 ;
69 70
70 if (wakeup_header.realmode_flags & 4) 71 if (wakeup_header.realmode_flags & 4)
71 send_morse("...-"); 72 send_morse("...-");
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.h b/arch/x86/realmode/rm/wakeup.h
index 97a29e1430e3..9317e0042f24 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.h
+++ b/arch/x86/realmode/rm/wakeup.h
@@ -12,9 +12,8 @@
12/* This must match data at wakeup.S */ 12/* This must match data at wakeup.S */
13struct wakeup_header { 13struct wakeup_header {
14 u16 video_mode; /* Video mode number */ 14 u16 video_mode; /* Video mode number */
15 u16 _jmp1; /* ljmpl opcode, 32-bit only */
16 u32 pmode_entry; /* Protected mode resume point, 32-bit only */ 15 u32 pmode_entry; /* Protected mode resume point, 32-bit only */
17 u16 _jmp2; /* CS value, 32-bit only */ 16 u16 pmode_cs;
18 u32 pmode_cr0; /* Protected mode cr0 */ 17 u32 pmode_cr0; /* Protected mode cr0 */
19 u32 pmode_cr3; /* Protected mode cr3 */ 18 u32 pmode_cr3; /* Protected mode cr3 */
20 u32 pmode_cr4; /* Protected mode cr4 */ 19 u32 pmode_cr4; /* Protected mode cr4 */
@@ -26,12 +25,6 @@ struct wakeup_header {
26 u32 pmode_behavior; /* Wakeup routine behavior flags */ 25 u32 pmode_behavior; /* Wakeup routine behavior flags */
27 u32 realmode_flags; 26 u32 realmode_flags;
28 u32 real_magic; 27 u32 real_magic;
29 u16 trampoline_segment; /* segment with trampoline code, 64-bit only */
30 u8 _pad1;
31 u8 wakeup_jmp;
32 u16 wakeup_jmp_off;
33 u16 wakeup_jmp_seg;
34 u64 wakeup_gdt[3];
35 u32 signature; /* To check we have correct structure */ 28 u32 signature; /* To check we have correct structure */
36} __attribute__((__packed__)); 29} __attribute__((__packed__));
37 30
@@ -40,7 +33,6 @@ extern struct wakeup_header wakeup_header;
40 33
41#define WAKEUP_HEADER_OFFSET 8 34#define WAKEUP_HEADER_OFFSET 8
42#define WAKEUP_HEADER_SIGNATURE 0x51ee1111 35#define WAKEUP_HEADER_SIGNATURE 0x51ee1111
43#define WAKEUP_END_SIGNATURE 0x65a22c82
44 36
45/* Wakeup behavior bits */ 37/* Wakeup behavior bits */
46#define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE 0 38#define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE 0
diff --git a/arch/x86/kernel/acpi/realmode/wakeup.S b/arch/x86/realmode/rm/wakeup_asm.S
index b4fd836e4053..8905166b0bbb 100644
--- a/arch/x86/kernel/acpi/realmode/wakeup.S
+++ b/arch/x86/realmode/rm/wakeup_asm.S
@@ -1,50 +1,47 @@
1/* 1/*
2 * ACPI wakeup real mode startup stub 2 * ACPI wakeup real mode startup stub
3 */ 3 */
4#include <linux/linkage.h>
4#include <asm/segment.h> 5#include <asm/segment.h>
5#include <asm/msr-index.h> 6#include <asm/msr-index.h>
6#include <asm/page_types.h> 7#include <asm/page_types.h>
7#include <asm/pgtable_types.h> 8#include <asm/pgtable_types.h>
8#include <asm/processor-flags.h> 9#include <asm/processor-flags.h>
10#include "realmode.h"
9#include "wakeup.h" 11#include "wakeup.h"
10 12
11 .code16 13 .code16
12 .section ".jump", "ax"
13 .globl _start
14_start:
15 cli
16 jmp wakeup_code
17 14
18/* This should match the structure in wakeup.h */ 15/* This should match the structure in wakeup.h */
19 .section ".header", "a" 16 .section ".data", "aw"
20 .globl wakeup_header 17
21wakeup_header: 18 .balign 16
22video_mode: .short 0 /* Video mode number */ 19GLOBAL(wakeup_header)
23pmode_return: .byte 0x66, 0xea /* ljmpl */ 20 video_mode: .short 0 /* Video mode number */
24 .long 0 /* offset goes here */ 21 pmode_entry: .long 0
25 .short __KERNEL_CS 22 pmode_cs: .short __KERNEL_CS
26pmode_cr0: .long 0 /* Saved %cr0 */ 23 pmode_cr0: .long 0 /* Saved %cr0 */
27pmode_cr3: .long 0 /* Saved %cr3 */ 24 pmode_cr3: .long 0 /* Saved %cr3 */
28pmode_cr4: .long 0 /* Saved %cr4 */ 25 pmode_cr4: .long 0 /* Saved %cr4 */
29pmode_efer: .quad 0 /* Saved EFER */ 26 pmode_efer: .quad 0 /* Saved EFER */
30pmode_gdt: .quad 0 27 pmode_gdt: .quad 0
31pmode_misc_en: .quad 0 /* Saved MISC_ENABLE MSR */ 28 pmode_misc_en: .quad 0 /* Saved MISC_ENABLE MSR */
32pmode_behavior: .long 0 /* Wakeup behavior flags */ 29 pmode_behavior: .long 0 /* Wakeup behavior flags */
33realmode_flags: .long 0 30 realmode_flags: .long 0
34real_magic: .long 0 31 real_magic: .long 0
35trampoline_segment: .word 0 32 signature: .long WAKEUP_HEADER_SIGNATURE
36_pad1: .byte 0 33END(wakeup_header)
37wakeup_jmp: .byte 0xea /* ljmpw */
38wakeup_jmp_off: .word 3f
39wakeup_jmp_seg: .word 0
40wakeup_gdt: .quad 0, 0, 0
41signature: .long WAKEUP_HEADER_SIGNATURE
42 34
43 .text 35 .text
44 .code16 36 .code16
45wakeup_code: 37
38 .balign 16
39ENTRY(wakeup_start)
40 cli
46 cld 41 cld
47 42
43 LJMPW_RM(3f)
443:
48 /* Apparently some dimwit BIOS programmers don't know how to 45 /* Apparently some dimwit BIOS programmers don't know how to
49 program a PM to RM transition, and we might end up here with 46 program a PM to RM transition, and we might end up here with
50 junk in the data segment descriptor registers. The only way 47 junk in the data segment descriptor registers. The only way
@@ -54,8 +51,7 @@ wakeup_code:
54 movl %cr0, %eax 51 movl %cr0, %eax
55 orb $X86_CR0_PE, %al 52 orb $X86_CR0_PE, %al
56 movl %eax, %cr0 53 movl %eax, %cr0
57 jmp 1f 54 ljmpw $8, $2f
581: ljmpw $8, $2f
592: 552:
60 movw %cx, %ds 56 movw %cx, %ds
61 movw %cx, %es 57 movw %cx, %es
@@ -65,16 +61,18 @@ wakeup_code:
65 61
66 andb $~X86_CR0_PE, %al 62 andb $~X86_CR0_PE, %al
67 movl %eax, %cr0 63 movl %eax, %cr0
68 jmp wakeup_jmp 64 LJMPW_RM(3f)
693: 653:
70 /* Set up segments */ 66 /* Set up segments */
71 movw %cs, %ax 67 movw %cs, %ax
68 movw %ax, %ss
69 movl $rm_stack_end, %esp
72 movw %ax, %ds 70 movw %ax, %ds
73 movw %ax, %es 71 movw %ax, %es
74 movw %ax, %ss 72 movw %ax, %fs
75 lidtl wakeup_idt 73 movw %ax, %gs
76 74
77 movl $wakeup_stack_end, %esp 75 lidtl wakeup_idt
78 76
79 /* Clear the EFLAGS */ 77 /* Clear the EFLAGS */
80 pushl $0 78 pushl $0
@@ -87,7 +85,7 @@ wakeup_code:
87 85
88 /* Check we really have everything... */ 86 /* Check we really have everything... */
89 movl end_signature, %eax 87 movl end_signature, %eax
90 cmpl $WAKEUP_END_SIGNATURE, %eax 88 cmpl $REALMODE_END_SIGNATURE, %eax
91 jne bogus_real_magic 89 jne bogus_real_magic
92 90
93 /* Call the C code */ 91 /* Call the C code */
@@ -128,14 +126,13 @@ wakeup_code:
128 lgdtl pmode_gdt 126 lgdtl pmode_gdt
129 127
130 /* This really couldn't... */ 128 /* This really couldn't... */
131 movl pmode_cr0, %eax 129 movl pmode_entry, %eax
132 movl %eax, %cr0 130 movl pmode_cr0, %ecx
133 jmp pmode_return 131 movl %ecx, %cr0
132 ljmpl $__KERNEL_CS, $pa_startup_32
133 /* -> jmp *%eax in trampoline_32.S */
134#else 134#else
135 pushw $0 135 jmp trampoline_start
136 pushw trampoline_segment
137 pushw $0
138 lret
139#endif 136#endif
140 137
141bogus_real_magic: 138bogus_real_magic:
@@ -143,28 +140,38 @@ bogus_real_magic:
143 hlt 140 hlt
144 jmp 1b 141 jmp 1b
145 142
146 .data 143 .section ".rodata","a"
144
145 /*
146 * Set up the wakeup GDT. We set these up as Big Real Mode,
147 * that is, with limits set to 4 GB. At least the Lenovo
148 * Thinkpad X61 is known to need this for the video BIOS
149 * initialization quirk to work; this is likely to also
150 * be the case for other laptops or integrated video devices.
151 */
152
153 .balign 16
154GLOBAL(wakeup_gdt)
155 .word 3*8-1 /* Self-descriptor */
156 .long pa_wakeup_gdt
157 .word 0
158
159 .word 0xffff /* 16-bit code segment @ real_mode_base */
160 .long 0x9b000000 + pa_real_mode_base
161 .word 0x008f /* big real mode */
162
163 .word 0xffff /* 16-bit data segment @ real_mode_base */
164 .long 0x93000000 + pa_real_mode_base
165 .word 0x008f /* big real mode */
166END(wakeup_gdt)
167
168 .section ".rodata","a"
147 .balign 8 169 .balign 8
148 170
149 /* This is the standard real-mode IDT */ 171 /* This is the standard real-mode IDT */
150wakeup_idt: 172 .balign 16
173GLOBAL(wakeup_idt)
151 .word 0xffff /* limit */ 174 .word 0xffff /* limit */
152 .long 0 /* address */ 175 .long 0 /* address */
153 .word 0 176 .word 0
154 177END(wakeup_idt)
155 .globl HEAP, heap_end
156HEAP:
157 .long wakeup_heap
158heap_end:
159 .long wakeup_stack
160
161 .bss
162wakeup_heap:
163 .space 2048
164wakeup_stack:
165 .space 2048
166wakeup_stack_end:
167
168 .section ".signature","a"
169end_signature:
170 .long WAKEUP_END_SIGNATURE
diff --git a/arch/x86/realmode/rmpiggy.S b/arch/x86/realmode/rmpiggy.S
new file mode 100644
index 000000000000..204c6ece0e97
--- /dev/null
+++ b/arch/x86/realmode/rmpiggy.S
@@ -0,0 +1,20 @@
1/*
2 * Wrapper script for the realmode binary as a transport object
3 * before copying to low memory.
4 */
5#include <linux/linkage.h>
6#include <asm/page_types.h>
7
8 .section ".init.data","aw"
9
10 .balign PAGE_SIZE
11
12GLOBAL(real_mode_blob)
13 .incbin "arch/x86/realmode/rm/realmode.bin"
14END(real_mode_blob)
15
16GLOBAL(real_mode_blob_end);
17
18GLOBAL(real_mode_relocs)
19 .incbin "arch/x86/realmode/rm/realmode.relocs"
20END(real_mode_relocs)
diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
index b685296d4464..5a1847d61930 100644
--- a/arch/x86/tools/relocs.c
+++ b/arch/x86/tools/relocs.c
@@ -78,6 +78,13 @@ static const char * const sym_regex_kernel[S_NSYMTYPES] = {
78 78
79static const char * const sym_regex_realmode[S_NSYMTYPES] = { 79static const char * const sym_regex_realmode[S_NSYMTYPES] = {
80/* 80/*
81 * These symbols are known to be relative, even if the linker marks them
82 * as absolute (typically defined outside any section in the linker script.)
83 */
84 [S_REL] =
85 "^pa_",
86
87/*
81 * These are 16-bit segment symbols when compiling 16-bit code. 88 * These are 16-bit segment symbols when compiling 16-bit code.
82 */ 89 */
83 [S_SEG] = 90 [S_SEG] =
diff --git a/arch/x86/xen/debugfs.c b/arch/x86/xen/debugfs.c
index ef1db1900d86..c8377fb26cdf 100644
--- a/arch/x86/xen/debugfs.c
+++ b/arch/x86/xen/debugfs.c
@@ -19,107 +19,3 @@ struct dentry * __init xen_init_debugfs(void)
19 return d_xen_debug; 19 return d_xen_debug;
20} 20}
21 21
22struct array_data
23{
24 void *array;
25 unsigned elements;
26};
27
28static int u32_array_open(struct inode *inode, struct file *file)
29{
30 file->private_data = NULL;
31 return nonseekable_open(inode, file);
32}
33
34static size_t format_array(char *buf, size_t bufsize, const char *fmt,
35 u32 *array, unsigned array_size)
36{
37 size_t ret = 0;
38 unsigned i;
39
40 for(i = 0; i < array_size; i++) {
41 size_t len;
42
43 len = snprintf(buf, bufsize, fmt, array[i]);
44 len++; /* ' ' or '\n' */
45 ret += len;
46
47 if (buf) {
48 buf += len;
49 bufsize -= len;
50 buf[-1] = (i == array_size-1) ? '\n' : ' ';
51 }
52 }
53
54 ret++; /* \0 */
55 if (buf)
56 *buf = '\0';
57
58 return ret;
59}
60
61static char *format_array_alloc(const char *fmt, u32 *array, unsigned array_size)
62{
63 size_t len = format_array(NULL, 0, fmt, array, array_size);
64 char *ret;
65
66 ret = kmalloc(len, GFP_KERNEL);
67 if (ret == NULL)
68 return NULL;
69
70 format_array(ret, len, fmt, array, array_size);
71 return ret;
72}
73
74static ssize_t u32_array_read(struct file *file, char __user *buf, size_t len,
75 loff_t *ppos)
76{
77 struct inode *inode = file->f_path.dentry->d_inode;
78 struct array_data *data = inode->i_private;
79 size_t size;
80
81 if (*ppos == 0) {
82 if (file->private_data) {
83 kfree(file->private_data);
84 file->private_data = NULL;
85 }
86
87 file->private_data = format_array_alloc("%u", data->array, data->elements);
88 }
89
90 size = 0;
91 if (file->private_data)
92 size = strlen(file->private_data);
93
94 return simple_read_from_buffer(buf, len, ppos, file->private_data, size);
95}
96
97static int xen_array_release(struct inode *inode, struct file *file)
98{
99 kfree(file->private_data);
100
101 return 0;
102}
103
104static const struct file_operations u32_array_fops = {
105 .owner = THIS_MODULE,
106 .open = u32_array_open,
107 .release= xen_array_release,
108 .read = u32_array_read,
109 .llseek = no_llseek,
110};
111
112struct dentry *xen_debugfs_create_u32_array(const char *name, umode_t mode,
113 struct dentry *parent,
114 u32 *array, unsigned elements)
115{
116 struct array_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
117
118 if (data == NULL)
119 return NULL;
120
121 data->array = array;
122 data->elements = elements;
123
124 return debugfs_create_file(name, mode, parent, data, &u32_array_fops);
125}
diff --git a/arch/x86/xen/debugfs.h b/arch/x86/xen/debugfs.h
index 78d25499be5b..12ebf3325c7b 100644
--- a/arch/x86/xen/debugfs.h
+++ b/arch/x86/xen/debugfs.h
@@ -3,8 +3,4 @@
3 3
4struct dentry * __init xen_init_debugfs(void); 4struct dentry * __init xen_init_debugfs(void);
5 5
6struct dentry *xen_debugfs_create_u32_array(const char *name, umode_t mode,
7 struct dentry *parent,
8 u32 *array, unsigned elements);
9
10#endif /* _XEN_DEBUGFS_H */ 6#endif /* _XEN_DEBUGFS_H */
diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
index c0f5facdb10c..75f33b2a5933 100644
--- a/arch/x86/xen/enlighten.c
+++ b/arch/x86/xen/enlighten.c
@@ -42,6 +42,7 @@
42#include <xen/page.h> 42#include <xen/page.h>
43#include <xen/hvm.h> 43#include <xen/hvm.h>
44#include <xen/hvc-console.h> 44#include <xen/hvc-console.h>
45#include <xen/acpi.h>
45 46
46#include <asm/paravirt.h> 47#include <asm/paravirt.h>
47#include <asm/apic.h> 48#include <asm/apic.h>
@@ -75,6 +76,7 @@
75 76
76#include "xen-ops.h" 77#include "xen-ops.h"
77#include "mmu.h" 78#include "mmu.h"
79#include "smp.h"
78#include "multicalls.h" 80#include "multicalls.h"
79 81
80EXPORT_SYMBOL_GPL(hypercall_page); 82EXPORT_SYMBOL_GPL(hypercall_page);
@@ -883,6 +885,14 @@ static void set_xen_basic_apic_ops(void)
883 apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle; 885 apic->safe_wait_icr_idle = xen_safe_apic_wait_icr_idle;
884 apic->set_apic_id = xen_set_apic_id; 886 apic->set_apic_id = xen_set_apic_id;
885 apic->get_apic_id = xen_get_apic_id; 887 apic->get_apic_id = xen_get_apic_id;
888
889#ifdef CONFIG_SMP
890 apic->send_IPI_allbutself = xen_send_IPI_allbutself;
891 apic->send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself;
892 apic->send_IPI_mask = xen_send_IPI_mask;
893 apic->send_IPI_all = xen_send_IPI_all;
894 apic->send_IPI_self = xen_send_IPI_self;
895#endif
886} 896}
887 897
888#endif 898#endif
@@ -1340,7 +1350,6 @@ asmlinkage void __init xen_start_kernel(void)
1340 1350
1341 xen_raw_console_write("mapping kernel into physical memory\n"); 1351 xen_raw_console_write("mapping kernel into physical memory\n");
1342 pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages); 1352 pgd = xen_setup_kernel_pagetable(pgd, xen_start_info->nr_pages);
1343 xen_ident_map_ISA();
1344 1353
1345 /* Allocate and initialize top and mid mfn levels for p2m structure */ 1354 /* Allocate and initialize top and mid mfn levels for p2m structure */
1346 xen_build_mfn_list_list(); 1355 xen_build_mfn_list_list();
@@ -1400,6 +1409,8 @@ asmlinkage void __init xen_start_kernel(void)
1400 1409
1401 /* Make sure ACS will be enabled */ 1410 /* Make sure ACS will be enabled */
1402 pci_request_acs(); 1411 pci_request_acs();
1412
1413 xen_acpi_sleep_register();
1403 } 1414 }
1404#ifdef CONFIG_PCI 1415#ifdef CONFIG_PCI
1405 /* PCI BIOS service won't work from a PV guest. */ 1416 /* PCI BIOS service won't work from a PV guest. */
diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
index 3506cd4f9a43..3a73785631ce 100644
--- a/arch/x86/xen/mmu.c
+++ b/arch/x86/xen/mmu.c
@@ -1933,29 +1933,6 @@ static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot)
1933#endif 1933#endif
1934} 1934}
1935 1935
1936void __init xen_ident_map_ISA(void)
1937{
1938 unsigned long pa;
1939
1940 /*
1941 * If we're dom0, then linear map the ISA machine addresses into
1942 * the kernel's address space.
1943 */
1944 if (!xen_initial_domain())
1945 return;
1946
1947 xen_raw_printk("Xen: setup ISA identity maps\n");
1948
1949 for (pa = ISA_START_ADDRESS; pa < ISA_END_ADDRESS; pa += PAGE_SIZE) {
1950 pte_t pte = mfn_pte(PFN_DOWN(pa), PAGE_KERNEL_IO);
1951
1952 if (HYPERVISOR_update_va_mapping(PAGE_OFFSET + pa, pte, 0))
1953 BUG();
1954 }
1955
1956 xen_flush_tlb();
1957}
1958
1959static void __init xen_post_allocator_init(void) 1936static void __init xen_post_allocator_init(void)
1960{ 1937{
1961 pv_mmu_ops.set_pte = xen_set_pte; 1938 pv_mmu_ops.set_pte = xen_set_pte;
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 1b267e75158d..ffd08c414e91 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -499,16 +499,18 @@ static bool alloc_p2m(unsigned long pfn)
499 return true; 499 return true;
500} 500}
501 501
502static bool __init __early_alloc_p2m(unsigned long pfn) 502static bool __init early_alloc_p2m_middle(unsigned long pfn, bool check_boundary)
503{ 503{
504 unsigned topidx, mididx, idx; 504 unsigned topidx, mididx, idx;
505 unsigned long *p2m;
506 unsigned long *mid_mfn_p;
505 507
506 topidx = p2m_top_index(pfn); 508 topidx = p2m_top_index(pfn);
507 mididx = p2m_mid_index(pfn); 509 mididx = p2m_mid_index(pfn);
508 idx = p2m_index(pfn); 510 idx = p2m_index(pfn);
509 511
510 /* Pfff.. No boundary cross-over, lets get out. */ 512 /* Pfff.. No boundary cross-over, lets get out. */
511 if (!idx) 513 if (!idx && check_boundary)
512 return false; 514 return false;
513 515
514 WARN(p2m_top[topidx][mididx] == p2m_identity, 516 WARN(p2m_top[topidx][mididx] == p2m_identity,
@@ -522,24 +524,66 @@ static bool __init __early_alloc_p2m(unsigned long pfn)
522 return false; 524 return false;
523 525
524 /* Boundary cross-over for the edges: */ 526 /* Boundary cross-over for the edges: */
525 if (idx) { 527 p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
526 unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
527 unsigned long *mid_mfn_p;
528 528
529 p2m_init(p2m); 529 p2m_init(p2m);
530 530
531 p2m_top[topidx][mididx] = p2m; 531 p2m_top[topidx][mididx] = p2m;
532 532
533 /* For save/restore we need to MFN of the P2M saved */ 533 /* For save/restore we need to MFN of the P2M saved */
534 534
535 mid_mfn_p = p2m_top_mfn_p[topidx]; 535 mid_mfn_p = p2m_top_mfn_p[topidx];
536 WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing), 536 WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
537 "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n", 537 "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
538 topidx, mididx); 538 topidx, mididx);
539 mid_mfn_p[mididx] = virt_to_mfn(p2m); 539 mid_mfn_p[mididx] = virt_to_mfn(p2m);
540
541 return true;
542}
543
544static bool __init early_alloc_p2m(unsigned long pfn)
545{
546 unsigned topidx = p2m_top_index(pfn);
547 unsigned long *mid_mfn_p;
548 unsigned long **mid;
549
550 mid = p2m_top[topidx];
551 mid_mfn_p = p2m_top_mfn_p[topidx];
552 if (mid == p2m_mid_missing) {
553 mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
554
555 p2m_mid_init(mid);
556
557 p2m_top[topidx] = mid;
540 558
559 BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
541 } 560 }
542 return idx != 0; 561 /* And the save/restore P2M tables.. */
562 if (mid_mfn_p == p2m_mid_missing_mfn) {
563 mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
564 p2m_mid_mfn_init(mid_mfn_p);
565
566 p2m_top_mfn_p[topidx] = mid_mfn_p;
567 p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
568 /* Note: we don't set mid_mfn_p[midix] here,
569 * look in early_alloc_p2m_middle */
570 }
571 return true;
572}
573bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
574{
575 if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
576 if (!early_alloc_p2m(pfn))
577 return false;
578
579 if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/))
580 return false;
581
582 if (!__set_phys_to_machine(pfn, mfn))
583 return false;
584 }
585
586 return true;
543} 587}
544unsigned long __init set_phys_range_identity(unsigned long pfn_s, 588unsigned long __init set_phys_range_identity(unsigned long pfn_s,
545 unsigned long pfn_e) 589 unsigned long pfn_e)
@@ -559,35 +603,11 @@ unsigned long __init set_phys_range_identity(unsigned long pfn_s,
559 pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE)); 603 pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE));
560 pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE) 604 pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
561 { 605 {
562 unsigned topidx = p2m_top_index(pfn); 606 WARN_ON(!early_alloc_p2m(pfn));
563 unsigned long *mid_mfn_p;
564 unsigned long **mid;
565
566 mid = p2m_top[topidx];
567 mid_mfn_p = p2m_top_mfn_p[topidx];
568 if (mid == p2m_mid_missing) {
569 mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
570
571 p2m_mid_init(mid);
572
573 p2m_top[topidx] = mid;
574
575 BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
576 }
577 /* And the save/restore P2M tables.. */
578 if (mid_mfn_p == p2m_mid_missing_mfn) {
579 mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
580 p2m_mid_mfn_init(mid_mfn_p);
581
582 p2m_top_mfn_p[topidx] = mid_mfn_p;
583 p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
584 /* Note: we don't set mid_mfn_p[midix] here,
585 * look in __early_alloc_p2m */
586 }
587 } 607 }
588 608
589 __early_alloc_p2m(pfn_s); 609 early_alloc_p2m_middle(pfn_s, true);
590 __early_alloc_p2m(pfn_e); 610 early_alloc_p2m_middle(pfn_e, true);
591 611
592 for (pfn = pfn_s; pfn < pfn_e; pfn++) 612 for (pfn = pfn_s; pfn < pfn_e; pfn++)
593 if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn))) 613 if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c
index 1ba8dff26753..3ebba0753d38 100644
--- a/arch/x86/xen/setup.c
+++ b/arch/x86/xen/setup.c
@@ -26,7 +26,6 @@
26#include <xen/interface/memory.h> 26#include <xen/interface/memory.h>
27#include <xen/interface/physdev.h> 27#include <xen/interface/physdev.h>
28#include <xen/features.h> 28#include <xen/features.h>
29
30#include "xen-ops.h" 29#include "xen-ops.h"
31#include "vdso.h" 30#include "vdso.h"
32 31
@@ -84,8 +83,8 @@ static void __init xen_add_extra_mem(u64 start, u64 size)
84 __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); 83 __set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
85} 84}
86 85
87static unsigned long __init xen_release_chunk(unsigned long start, 86static unsigned long __init xen_do_chunk(unsigned long start,
88 unsigned long end) 87 unsigned long end, bool release)
89{ 88{
90 struct xen_memory_reservation reservation = { 89 struct xen_memory_reservation reservation = {
91 .address_bits = 0, 90 .address_bits = 0,
@@ -96,30 +95,138 @@ static unsigned long __init xen_release_chunk(unsigned long start,
96 unsigned long pfn; 95 unsigned long pfn;
97 int ret; 96 int ret;
98 97
99 for(pfn = start; pfn < end; pfn++) { 98 for (pfn = start; pfn < end; pfn++) {
99 unsigned long frame;
100 unsigned long mfn = pfn_to_mfn(pfn); 100 unsigned long mfn = pfn_to_mfn(pfn);
101 101
102 /* Make sure pfn exists to start with */ 102 if (release) {
103 if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn) 103 /* Make sure pfn exists to start with */
104 continue; 104 if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn)
105 105 continue;
106 set_xen_guest_handle(reservation.extent_start, &mfn); 106 frame = mfn;
107 } else {
108 if (mfn != INVALID_P2M_ENTRY)
109 continue;
110 frame = pfn;
111 }
112 set_xen_guest_handle(reservation.extent_start, &frame);
107 reservation.nr_extents = 1; 113 reservation.nr_extents = 1;
108 114
109 ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, 115 ret = HYPERVISOR_memory_op(release ? XENMEM_decrease_reservation : XENMEM_populate_physmap,
110 &reservation); 116 &reservation);
111 WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret); 117 WARN(ret != 1, "Failed to %s pfn %lx err=%d\n",
118 release ? "release" : "populate", pfn, ret);
119
112 if (ret == 1) { 120 if (ret == 1) {
113 __set_phys_to_machine(pfn, INVALID_P2M_ENTRY); 121 if (!early_set_phys_to_machine(pfn, release ? INVALID_P2M_ENTRY : frame)) {
122 if (release)
123 break;
124 set_xen_guest_handle(reservation.extent_start, &frame);
125 reservation.nr_extents = 1;
126 ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
127 &reservation);
128 break;
129 }
114 len++; 130 len++;
115 } 131 } else
132 break;
116 } 133 }
117 printk(KERN_INFO "Freeing %lx-%lx pfn range: %lu pages freed\n", 134 if (len)
118 start, end, len); 135 printk(KERN_INFO "%s %lx-%lx pfn range: %lu pages %s\n",
136 release ? "Freeing" : "Populating",
137 start, end, len,
138 release ? "freed" : "added");
119 139
120 return len; 140 return len;
121} 141}
122 142
143static unsigned long __init xen_release_chunk(unsigned long start,
144 unsigned long end)
145{
146 return xen_do_chunk(start, end, true);
147}
148
149static unsigned long __init xen_populate_chunk(
150 const struct e820entry *list, size_t map_size,
151 unsigned long max_pfn, unsigned long *last_pfn,
152 unsigned long credits_left)
153{
154 const struct e820entry *entry;
155 unsigned int i;
156 unsigned long done = 0;
157 unsigned long dest_pfn;
158
159 for (i = 0, entry = list; i < map_size; i++, entry++) {
160 unsigned long credits = credits_left;
161 unsigned long s_pfn;
162 unsigned long e_pfn;
163 unsigned long pfns;
164 long capacity;
165
166 if (credits <= 0)
167 break;
168
169 if (entry->type != E820_RAM)
170 continue;
171
172 e_pfn = PFN_UP(entry->addr + entry->size);
173
174 /* We only care about E820 after the xen_start_info->nr_pages */
175 if (e_pfn <= max_pfn)
176 continue;
177
178 s_pfn = PFN_DOWN(entry->addr);
179 /* If the E820 falls within the nr_pages, we want to start
180 * at the nr_pages PFN.
181 * If that would mean going past the E820 entry, skip it
182 */
183 if (s_pfn <= max_pfn) {
184 capacity = e_pfn - max_pfn;
185 dest_pfn = max_pfn;
186 } else {
187 /* last_pfn MUST be within E820_RAM regions */
188 if (*last_pfn && e_pfn >= *last_pfn)
189 s_pfn = *last_pfn;
190 capacity = e_pfn - s_pfn;
191 dest_pfn = s_pfn;
192 }
193 /* If we had filled this E820_RAM entry, go to the next one. */
194 if (capacity <= 0)
195 continue;
196
197 if (credits > capacity)
198 credits = capacity;
199
200 pfns = xen_do_chunk(dest_pfn, dest_pfn + credits, false);
201 done += pfns;
202 credits_left -= pfns;
203 *last_pfn = (dest_pfn + pfns);
204 }
205 return done;
206}
207
208static void __init xen_set_identity_and_release_chunk(
209 unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages,
210 unsigned long *released, unsigned long *identity)
211{
212 unsigned long pfn;
213
214 /*
215 * If the PFNs are currently mapped, the VA mapping also needs
216 * to be updated to be 1:1.
217 */
218 for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++)
219 (void)HYPERVISOR_update_va_mapping(
220 (unsigned long)__va(pfn << PAGE_SHIFT),
221 mfn_pte(pfn, PAGE_KERNEL_IO), 0);
222
223 if (start_pfn < nr_pages)
224 *released += xen_release_chunk(
225 start_pfn, min(end_pfn, nr_pages));
226
227 *identity += set_phys_range_identity(start_pfn, end_pfn);
228}
229
123static unsigned long __init xen_set_identity_and_release( 230static unsigned long __init xen_set_identity_and_release(
124 const struct e820entry *list, size_t map_size, unsigned long nr_pages) 231 const struct e820entry *list, size_t map_size, unsigned long nr_pages)
125{ 232{
@@ -142,7 +249,6 @@ static unsigned long __init xen_set_identity_and_release(
142 */ 249 */
143 for (i = 0, entry = list; i < map_size; i++, entry++) { 250 for (i = 0, entry = list; i < map_size; i++, entry++) {
144 phys_addr_t end = entry->addr + entry->size; 251 phys_addr_t end = entry->addr + entry->size;
145
146 if (entry->type == E820_RAM || i == map_size - 1) { 252 if (entry->type == E820_RAM || i == map_size - 1) {
147 unsigned long start_pfn = PFN_DOWN(start); 253 unsigned long start_pfn = PFN_DOWN(start);
148 unsigned long end_pfn = PFN_UP(end); 254 unsigned long end_pfn = PFN_UP(end);
@@ -150,20 +256,19 @@ static unsigned long __init xen_set_identity_and_release(
150 if (entry->type == E820_RAM) 256 if (entry->type == E820_RAM)
151 end_pfn = PFN_UP(entry->addr); 257 end_pfn = PFN_UP(entry->addr);
152 258
153 if (start_pfn < end_pfn) { 259 if (start_pfn < end_pfn)
154 if (start_pfn < nr_pages) 260 xen_set_identity_and_release_chunk(
155 released += xen_release_chunk( 261 start_pfn, end_pfn, nr_pages,
156 start_pfn, min(end_pfn, nr_pages)); 262 &released, &identity);
157 263
158 identity += set_phys_range_identity(
159 start_pfn, end_pfn);
160 }
161 start = end; 264 start = end;
162 } 265 }
163 } 266 }
164 267
165 printk(KERN_INFO "Released %lu pages of unused memory\n", released); 268 if (released)
166 printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity); 269 printk(KERN_INFO "Released %lu pages of unused memory\n", released);
270 if (identity)
271 printk(KERN_INFO "Set %ld page(s) to 1-1 mapping\n", identity);
167 272
168 return released; 273 return released;
169} 274}
@@ -217,7 +322,9 @@ char * __init xen_memory_setup(void)
217 int rc; 322 int rc;
218 struct xen_memory_map memmap; 323 struct xen_memory_map memmap;
219 unsigned long max_pages; 324 unsigned long max_pages;
325 unsigned long last_pfn = 0;
220 unsigned long extra_pages = 0; 326 unsigned long extra_pages = 0;
327 unsigned long populated;
221 int i; 328 int i;
222 int op; 329 int op;
223 330
@@ -257,9 +364,20 @@ char * __init xen_memory_setup(void)
257 */ 364 */
258 xen_released_pages = xen_set_identity_and_release( 365 xen_released_pages = xen_set_identity_and_release(
259 map, memmap.nr_entries, max_pfn); 366 map, memmap.nr_entries, max_pfn);
260 extra_pages += xen_released_pages;
261 367
262 /* 368 /*
369 * Populate back the non-RAM pages and E820 gaps that had been
370 * released. */
371 populated = xen_populate_chunk(map, memmap.nr_entries,
372 max_pfn, &last_pfn, xen_released_pages);
373
374 extra_pages += (xen_released_pages - populated);
375
376 if (last_pfn > max_pfn) {
377 max_pfn = min(MAX_DOMAIN_PAGES, last_pfn);
378 mem_end = PFN_PHYS(max_pfn);
379 }
380 /*
263 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO 381 * Clamp the amount of extra memory to a EXTRA_MEM_RATIO
264 * factor the base size. On non-highmem systems, the base 382 * factor the base size. On non-highmem systems, the base
265 * size is the full initial memory allocation; on highmem it 383 * size is the full initial memory allocation; on highmem it
@@ -272,7 +390,6 @@ char * __init xen_memory_setup(void)
272 */ 390 */
273 extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), 391 extra_pages = min(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)),
274 extra_pages); 392 extra_pages);
275
276 i = 0; 393 i = 0;
277 while (i < memmap.nr_entries) { 394 while (i < memmap.nr_entries) {
278 u64 addr = map[i].addr; 395 u64 addr = map[i].addr;
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 3700945ed0d5..afb250d22a6b 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -16,6 +16,7 @@
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/smp.h> 18#include <linux/smp.h>
19#include <linux/irq_work.h>
19 20
20#include <asm/paravirt.h> 21#include <asm/paravirt.h>
21#include <asm/desc.h> 22#include <asm/desc.h>
@@ -41,10 +42,12 @@ cpumask_var_t xen_cpu_initialized_map;
41static DEFINE_PER_CPU(int, xen_resched_irq); 42static DEFINE_PER_CPU(int, xen_resched_irq);
42static DEFINE_PER_CPU(int, xen_callfunc_irq); 43static DEFINE_PER_CPU(int, xen_callfunc_irq);
43static DEFINE_PER_CPU(int, xen_callfuncsingle_irq); 44static DEFINE_PER_CPU(int, xen_callfuncsingle_irq);
45static DEFINE_PER_CPU(int, xen_irq_work);
44static DEFINE_PER_CPU(int, xen_debug_irq) = -1; 46static DEFINE_PER_CPU(int, xen_debug_irq) = -1;
45 47
46static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id); 48static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id);
47static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id); 49static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id);
50static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id);
48 51
49/* 52/*
50 * Reschedule call back. 53 * Reschedule call back.
@@ -143,6 +146,17 @@ static int xen_smp_intr_init(unsigned int cpu)
143 goto fail; 146 goto fail;
144 per_cpu(xen_callfuncsingle_irq, cpu) = rc; 147 per_cpu(xen_callfuncsingle_irq, cpu) = rc;
145 148
149 callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu);
150 rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR,
151 cpu,
152 xen_irq_work_interrupt,
153 IRQF_DISABLED|IRQF_PERCPU|IRQF_NOBALANCING,
154 callfunc_name,
155 NULL);
156 if (rc < 0)
157 goto fail;
158 per_cpu(xen_irq_work, cpu) = rc;
159
146 return 0; 160 return 0;
147 161
148 fail: 162 fail:
@@ -155,6 +169,8 @@ static int xen_smp_intr_init(unsigned int cpu)
155 if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0) 169 if (per_cpu(xen_callfuncsingle_irq, cpu) >= 0)
156 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), 170 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu),
157 NULL); 171 NULL);
172 if (per_cpu(xen_irq_work, cpu) >= 0)
173 unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
158 174
159 return rc; 175 return rc;
160} 176}
@@ -407,6 +423,7 @@ static void xen_cpu_die(unsigned int cpu)
407 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL); 423 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
408 unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL); 424 unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
409 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL); 425 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
426 unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
410 xen_uninit_lock_cpu(cpu); 427 xen_uninit_lock_cpu(cpu);
411 xen_teardown_timer(cpu); 428 xen_teardown_timer(cpu);
412 429
@@ -469,8 +486,8 @@ static void xen_smp_send_reschedule(int cpu)
469 xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); 486 xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR);
470} 487}
471 488
472static void xen_send_IPI_mask(const struct cpumask *mask, 489static void __xen_send_IPI_mask(const struct cpumask *mask,
473 enum ipi_vector vector) 490 int vector)
474{ 491{
475 unsigned cpu; 492 unsigned cpu;
476 493
@@ -482,7 +499,7 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
482{ 499{
483 int cpu; 500 int cpu;
484 501
485 xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); 502 __xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR);
486 503
487 /* Make sure other vcpus get a chance to run if they need to. */ 504 /* Make sure other vcpus get a chance to run if they need to. */
488 for_each_cpu(cpu, mask) { 505 for_each_cpu(cpu, mask) {
@@ -495,10 +512,86 @@ static void xen_smp_send_call_function_ipi(const struct cpumask *mask)
495 512
496static void xen_smp_send_call_function_single_ipi(int cpu) 513static void xen_smp_send_call_function_single_ipi(int cpu)
497{ 514{
498 xen_send_IPI_mask(cpumask_of(cpu), 515 __xen_send_IPI_mask(cpumask_of(cpu),
499 XEN_CALL_FUNCTION_SINGLE_VECTOR); 516 XEN_CALL_FUNCTION_SINGLE_VECTOR);
500} 517}
501 518
519static inline int xen_map_vector(int vector)
520{
521 int xen_vector;
522
523 switch (vector) {
524 case RESCHEDULE_VECTOR:
525 xen_vector = XEN_RESCHEDULE_VECTOR;
526 break;
527 case CALL_FUNCTION_VECTOR:
528 xen_vector = XEN_CALL_FUNCTION_VECTOR;
529 break;
530 case CALL_FUNCTION_SINGLE_VECTOR:
531 xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR;
532 break;
533 case IRQ_WORK_VECTOR:
534 xen_vector = XEN_IRQ_WORK_VECTOR;
535 break;
536 default:
537 xen_vector = -1;
538 printk(KERN_ERR "xen: vector 0x%x is not implemented\n",
539 vector);
540 }
541
542 return xen_vector;
543}
544
545void xen_send_IPI_mask(const struct cpumask *mask,
546 int vector)
547{
548 int xen_vector = xen_map_vector(vector);
549
550 if (xen_vector >= 0)
551 __xen_send_IPI_mask(mask, xen_vector);
552}
553
554void xen_send_IPI_all(int vector)
555{
556 int xen_vector = xen_map_vector(vector);
557
558 if (xen_vector >= 0)
559 __xen_send_IPI_mask(cpu_online_mask, xen_vector);
560}
561
562void xen_send_IPI_self(int vector)
563{
564 int xen_vector = xen_map_vector(vector);
565
566 if (xen_vector >= 0)
567 xen_send_IPI_one(smp_processor_id(), xen_vector);
568}
569
570void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
571 int vector)
572{
573 unsigned cpu;
574 unsigned int this_cpu = smp_processor_id();
575
576 if (!(num_online_cpus() > 1))
577 return;
578
579 for_each_cpu_and(cpu, mask, cpu_online_mask) {
580 if (this_cpu == cpu)
581 continue;
582
583 xen_smp_send_call_function_single_ipi(cpu);
584 }
585}
586
587void xen_send_IPI_allbutself(int vector)
588{
589 int xen_vector = xen_map_vector(vector);
590
591 if (xen_vector >= 0)
592 xen_send_IPI_mask_allbutself(cpu_online_mask, xen_vector);
593}
594
502static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) 595static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id)
503{ 596{
504 irq_enter(); 597 irq_enter();
@@ -519,6 +612,16 @@ static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id)
519 return IRQ_HANDLED; 612 return IRQ_HANDLED;
520} 613}
521 614
615static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id)
616{
617 irq_enter();
618 irq_work_run();
619 inc_irq_stat(apic_irq_work_irqs);
620 irq_exit();
621
622 return IRQ_HANDLED;
623}
624
522static const struct smp_ops xen_smp_ops __initconst = { 625static const struct smp_ops xen_smp_ops __initconst = {
523 .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu, 626 .smp_prepare_boot_cpu = xen_smp_prepare_boot_cpu,
524 .smp_prepare_cpus = xen_smp_prepare_cpus, 627 .smp_prepare_cpus = xen_smp_prepare_cpus,
@@ -565,6 +668,7 @@ static void xen_hvm_cpu_die(unsigned int cpu)
565 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL); 668 unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
566 unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL); 669 unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
567 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL); 670 unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
671 unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
568 native_cpu_die(cpu); 672 native_cpu_die(cpu);
569} 673}
570 674
diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h
new file mode 100644
index 000000000000..8981a76d081a
--- /dev/null
+++ b/arch/x86/xen/smp.h
@@ -0,0 +1,12 @@
1#ifndef _XEN_SMP_H
2
3extern void xen_send_IPI_mask(const struct cpumask *mask,
4 int vector);
5extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask,
6 int vector);
7extern void xen_send_IPI_allbutself(int vector);
8extern void physflat_send_IPI_allbutself(int vector);
9extern void xen_send_IPI_all(int vector);
10extern void xen_send_IPI_self(int vector);
11
12#endif
diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c
index d69cc6c3f808..83e866d714ce 100644
--- a/arch/x86/xen/spinlock.c
+++ b/arch/x86/xen/spinlock.c
@@ -440,12 +440,12 @@ static int __init xen_spinlock_debugfs(void)
440 debugfs_create_u64("time_total", 0444, d_spin_debug, 440 debugfs_create_u64("time_total", 0444, d_spin_debug,
441 &spinlock_stats.time_total); 441 &spinlock_stats.time_total);
442 442
443 xen_debugfs_create_u32_array("histo_total", 0444, d_spin_debug, 443 debugfs_create_u32_array("histo_total", 0444, d_spin_debug,
444 spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1); 444 spinlock_stats.histo_spin_total, HISTO_BUCKETS + 1);
445 xen_debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug, 445 debugfs_create_u32_array("histo_spinning", 0444, d_spin_debug,
446 spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1); 446 spinlock_stats.histo_spin_spinning, HISTO_BUCKETS + 1);
447 xen_debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug, 447 debugfs_create_u32_array("histo_blocked", 0444, d_spin_debug,
448 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1); 448 spinlock_stats.histo_spin_blocked, HISTO_BUCKETS + 1);
449 449
450 return 0; 450 return 0;
451} 451}
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 45c0c0667bd9..202d4c150154 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -28,7 +28,6 @@ void xen_setup_shared_info(void);
28void xen_build_mfn_list_list(void); 28void xen_build_mfn_list_list(void);
29void xen_setup_machphys_mapping(void); 29void xen_setup_machphys_mapping(void);
30pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn); 30pgd_t *xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn);
31void xen_ident_map_ISA(void);
32void xen_reserve_top(void); 31void xen_reserve_top(void);
33extern unsigned long xen_max_p2m_pfn; 32extern unsigned long xen_max_p2m_pfn;
34 33
diff --git a/arch/xtensa/include/asm/kvm_para.h b/arch/xtensa/include/asm/kvm_para.h
new file mode 100644
index 000000000000..14fab8f0b957
--- /dev/null
+++ b/arch/xtensa/include/asm/kvm_para.h
@@ -0,0 +1 @@
#include <asm-generic/kvm_para.h>
diff --git a/drivers/Makefile b/drivers/Makefile
index 0ee98d50f975..2ba29ffef2cb 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -18,7 +18,7 @@ obj-$(CONFIG_SFI) += sfi/
18# PnP must come after ACPI since it will eventually need to check if acpi 18# PnP must come after ACPI since it will eventually need to check if acpi
19# was used and do nothing if so 19# was used and do nothing if so
20obj-$(CONFIG_PNP) += pnp/ 20obj-$(CONFIG_PNP) += pnp/
21obj-$(CONFIG_ARM_AMBA) += amba/ 21obj-y += amba/
22# Many drivers will want to use DMA so this has to be made available 22# Many drivers will want to use DMA so this has to be made available
23# really early. 23# really early.
24obj-$(CONFIG_DMA_ENGINE) += dma/ 24obj-$(CONFIG_DMA_ENGINE) += dma/
diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
index 8cf6c46e99fb..6680df36b963 100644
--- a/drivers/acpi/bgrt.c
+++ b/drivers/acpi/bgrt.c
@@ -11,6 +11,7 @@
11#include <linux/init.h> 11#include <linux/init.h>
12#include <linux/device.h> 12#include <linux/device.h>
13#include <linux/sysfs.h> 13#include <linux/sysfs.h>
14#include <linux/io.h>
14#include <acpi/acpi.h> 15#include <acpi/acpi.h>
15#include <acpi/acpi_bus.h> 16#include <acpi/acpi_bus.h>
16 17
diff --git a/drivers/acpi/sleep.c b/drivers/acpi/sleep.c
index 06527c526618..74ee4ab577b6 100644
--- a/drivers/acpi/sleep.c
+++ b/drivers/acpi/sleep.c
@@ -93,11 +93,9 @@ static int acpi_sleep_prepare(u32 acpi_state)
93#ifdef CONFIG_ACPI_SLEEP 93#ifdef CONFIG_ACPI_SLEEP
94 /* do we have a wakeup address for S2 and S3? */ 94 /* do we have a wakeup address for S2 and S3? */
95 if (acpi_state == ACPI_STATE_S3) { 95 if (acpi_state == ACPI_STATE_S3) {
96 if (!acpi_wakeup_address) { 96 if (!acpi_wakeup_address)
97 return -EFAULT; 97 return -EFAULT;
98 } 98 acpi_set_firmware_waking_vector(acpi_wakeup_address);
99 acpi_set_firmware_waking_vector(
100 (acpi_physical_address)acpi_wakeup_address);
101 99
102 } 100 }
103 ACPI_FLUSH_CPU_CACHE(); 101 ACPI_FLUSH_CPU_CACHE();
diff --git a/drivers/amba/Makefile b/drivers/amba/Makefile
index 40fe74097be2..66e81c2f1e3c 100644
--- a/drivers/amba/Makefile
+++ b/drivers/amba/Makefile
@@ -1,2 +1,2 @@
1obj-y += bus.o 1obj-$(CONFIG_ARM_AMBA) += bus.o
2 2obj-$(CONFIG_TEGRA_AHB) += tegra-ahb.o
diff --git a/drivers/amba/tegra-ahb.c b/drivers/amba/tegra-ahb.c
new file mode 100644
index 000000000000..aa0b1f160528
--- /dev/null
+++ b/drivers/amba/tegra-ahb.c
@@ -0,0 +1,293 @@
1/*
2 * Copyright (c) 2012, NVIDIA CORPORATION. All rights reserved.
3 * Copyright (C) 2011 Google, Inc.
4 *
5 * Author:
6 * Jay Cheng <jacheng@nvidia.com>
7 * James Wylder <james.wylder@motorola.com>
8 * Benoit Goby <benoit@android.com>
9 * Colin Cross <ccross@android.com>
10 * Hiroshi DOYU <hdoyu@nvidia.com>
11 *
12 * This software is licensed under the terms of the GNU General Public
13 * License version 2, as published by the Free Software Foundation, and
14 * may be copied, distributed, and modified under those terms.
15 *
16 * This program is distributed in the hope that it will be useful,
17 * but WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
19 * GNU General Public License for more details.
20 *
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/platform_device.h>
26#include <linux/io.h>
27
28#define DRV_NAME "tegra-ahb"
29
30#define AHB_ARBITRATION_DISABLE 0x00
31#define AHB_ARBITRATION_PRIORITY_CTRL 0x04
32#define AHB_PRIORITY_WEIGHT(x) (((x) & 0x7) << 29)
33#define PRIORITY_SELECT_USB BIT(6)
34#define PRIORITY_SELECT_USB2 BIT(18)
35#define PRIORITY_SELECT_USB3 BIT(17)
36
37#define AHB_GIZMO_AHB_MEM 0x0c
38#define ENB_FAST_REARBITRATE BIT(2)
39#define DONT_SPLIT_AHB_WR BIT(7)
40
41#define AHB_GIZMO_APB_DMA 0x10
42#define AHB_GIZMO_IDE 0x18
43#define AHB_GIZMO_USB 0x1c
44#define AHB_GIZMO_AHB_XBAR_BRIDGE 0x20
45#define AHB_GIZMO_CPU_AHB_BRIDGE 0x24
46#define AHB_GIZMO_COP_AHB_BRIDGE 0x28
47#define AHB_GIZMO_XBAR_APB_CTLR 0x2c
48#define AHB_GIZMO_VCP_AHB_BRIDGE 0x30
49#define AHB_GIZMO_NAND 0x3c
50#define AHB_GIZMO_SDMMC4 0x44
51#define AHB_GIZMO_XIO 0x48
52#define AHB_GIZMO_BSEV 0x60
53#define AHB_GIZMO_BSEA 0x70
54#define AHB_GIZMO_NOR 0x74
55#define AHB_GIZMO_USB2 0x78
56#define AHB_GIZMO_USB3 0x7c
57#define IMMEDIATE BIT(18)
58
59#define AHB_GIZMO_SDMMC1 0x80
60#define AHB_GIZMO_SDMMC2 0x84
61#define AHB_GIZMO_SDMMC3 0x88
62#define AHB_MEM_PREFETCH_CFG_X 0xd8
63#define AHB_ARBITRATION_XBAR_CTRL 0xdc
64#define AHB_MEM_PREFETCH_CFG3 0xe0
65#define AHB_MEM_PREFETCH_CFG4 0xe4
66#define AHB_MEM_PREFETCH_CFG1 0xec
67#define AHB_MEM_PREFETCH_CFG2 0xf0
68#define PREFETCH_ENB BIT(31)
69#define MST_ID(x) (((x) & 0x1f) << 26)
70#define AHBDMA_MST_ID MST_ID(5)
71#define USB_MST_ID MST_ID(6)
72#define USB2_MST_ID MST_ID(18)
73#define USB3_MST_ID MST_ID(17)
74#define ADDR_BNDRY(x) (((x) & 0xf) << 21)
75#define INACTIVITY_TIMEOUT(x) (((x) & 0xffff) << 0)
76
77#define AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID 0xf8
78
79#define AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE BIT(17)
80
81static struct platform_driver tegra_ahb_driver;
82
83static const u32 tegra_ahb_gizmo[] = {
84 AHB_ARBITRATION_DISABLE,
85 AHB_ARBITRATION_PRIORITY_CTRL,
86 AHB_GIZMO_AHB_MEM,
87 AHB_GIZMO_APB_DMA,
88 AHB_GIZMO_IDE,
89 AHB_GIZMO_USB,
90 AHB_GIZMO_AHB_XBAR_BRIDGE,
91 AHB_GIZMO_CPU_AHB_BRIDGE,
92 AHB_GIZMO_COP_AHB_BRIDGE,
93 AHB_GIZMO_XBAR_APB_CTLR,
94 AHB_GIZMO_VCP_AHB_BRIDGE,
95 AHB_GIZMO_NAND,
96 AHB_GIZMO_SDMMC4,
97 AHB_GIZMO_XIO,
98 AHB_GIZMO_BSEV,
99 AHB_GIZMO_BSEA,
100 AHB_GIZMO_NOR,
101 AHB_GIZMO_USB2,
102 AHB_GIZMO_USB3,
103 AHB_GIZMO_SDMMC1,
104 AHB_GIZMO_SDMMC2,
105 AHB_GIZMO_SDMMC3,
106 AHB_MEM_PREFETCH_CFG_X,
107 AHB_ARBITRATION_XBAR_CTRL,
108 AHB_MEM_PREFETCH_CFG3,
109 AHB_MEM_PREFETCH_CFG4,
110 AHB_MEM_PREFETCH_CFG1,
111 AHB_MEM_PREFETCH_CFG2,
112 AHB_ARBITRATION_AHB_MEM_WRQUE_MST_ID,
113};
114
115struct tegra_ahb {
116 void __iomem *regs;
117 struct device *dev;
118 u32 ctx[0];
119};
120
121static inline u32 gizmo_readl(struct tegra_ahb *ahb, u32 offset)
122{
123 return readl(ahb->regs + offset);
124}
125
126static inline void gizmo_writel(struct tegra_ahb *ahb, u32 value, u32 offset)
127{
128 writel(value, ahb->regs + offset);
129}
130
131#ifdef CONFIG_ARCH_TEGRA_3x_SOC
132static int tegra_ahb_match_by_smmu(struct device *dev, void *data)
133{
134 struct tegra_ahb *ahb = dev_get_drvdata(dev);
135 struct device_node *dn = data;
136
137 return (ahb->dev->of_node == dn) ? 1 : 0;
138}
139
140int tegra_ahb_enable_smmu(struct device_node *dn)
141{
142 struct device *dev;
143 u32 val;
144 struct tegra_ahb *ahb;
145
146 dev = driver_find_device(&tegra_ahb_driver.driver, NULL, dn,
147 tegra_ahb_match_by_smmu);
148 if (!dev)
149 return -EPROBE_DEFER;
150 ahb = dev_get_drvdata(dev);
151 val = gizmo_readl(ahb, AHB_ARBITRATION_XBAR_CTRL);
152 val |= AHB_ARBITRATION_XBAR_CTRL_SMMU_INIT_DONE;
153 gizmo_writel(ahb, val, AHB_ARBITRATION_XBAR_CTRL);
154 return 0;
155}
156EXPORT_SYMBOL(tegra_ahb_enable_smmu);
157#endif
158
159static int tegra_ahb_suspend(struct device *dev)
160{
161 int i;
162 struct tegra_ahb *ahb = dev_get_drvdata(dev);
163
164 for (i = 0; i < ARRAY_SIZE(tegra_ahb_gizmo); i++)
165 ahb->ctx[i] = gizmo_readl(ahb, tegra_ahb_gizmo[i]);
166 return 0;
167}
168
169static int tegra_ahb_resume(struct device *dev)
170{
171 int i;
172 struct tegra_ahb *ahb = dev_get_drvdata(dev);
173
174 for (i = 0; i < ARRAY_SIZE(tegra_ahb_gizmo); i++)
175 gizmo_writel(ahb, ahb->ctx[i], tegra_ahb_gizmo[i]);
176 return 0;
177}
178
179static UNIVERSAL_DEV_PM_OPS(tegra_ahb_pm,
180 tegra_ahb_suspend,
181 tegra_ahb_resume, NULL);
182
183static void tegra_ahb_gizmo_init(struct tegra_ahb *ahb)
184{
185 u32 val;
186
187 val = gizmo_readl(ahb, AHB_GIZMO_AHB_MEM);
188 val |= ENB_FAST_REARBITRATE | IMMEDIATE | DONT_SPLIT_AHB_WR;
189 gizmo_writel(ahb, val, AHB_GIZMO_AHB_MEM);
190
191 val = gizmo_readl(ahb, AHB_GIZMO_USB);
192 val |= IMMEDIATE;
193 gizmo_writel(ahb, val, AHB_GIZMO_USB);
194
195 val = gizmo_readl(ahb, AHB_GIZMO_USB2);
196 val |= IMMEDIATE;
197 gizmo_writel(ahb, val, AHB_GIZMO_USB2);
198
199 val = gizmo_readl(ahb, AHB_GIZMO_USB3);
200 val |= IMMEDIATE;
201 gizmo_writel(ahb, val, AHB_GIZMO_USB3);
202
203 val = gizmo_readl(ahb, AHB_ARBITRATION_PRIORITY_CTRL);
204 val |= PRIORITY_SELECT_USB |
205 PRIORITY_SELECT_USB2 |
206 PRIORITY_SELECT_USB3 |
207 AHB_PRIORITY_WEIGHT(7);
208 gizmo_writel(ahb, val, AHB_ARBITRATION_PRIORITY_CTRL);
209
210 val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG1);
211 val &= ~MST_ID(~0);
212 val |= PREFETCH_ENB |
213 AHBDMA_MST_ID |
214 ADDR_BNDRY(0xc) |
215 INACTIVITY_TIMEOUT(0x1000);
216 gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG1);
217
218 val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG2);
219 val &= ~MST_ID(~0);
220 val |= PREFETCH_ENB |
221 USB_MST_ID |
222 ADDR_BNDRY(0xc) |
223 INACTIVITY_TIMEOUT(0x1000);
224 gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG2);
225
226 val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG3);
227 val &= ~MST_ID(~0);
228 val |= PREFETCH_ENB |
229 USB3_MST_ID |
230 ADDR_BNDRY(0xc) |
231 INACTIVITY_TIMEOUT(0x1000);
232 gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG3);
233
234 val = gizmo_readl(ahb, AHB_MEM_PREFETCH_CFG4);
235 val &= ~MST_ID(~0);
236 val |= PREFETCH_ENB |
237 USB2_MST_ID |
238 ADDR_BNDRY(0xc) |
239 INACTIVITY_TIMEOUT(0x1000);
240 gizmo_writel(ahb, val, AHB_MEM_PREFETCH_CFG4);
241}
242
243static int __devinit tegra_ahb_probe(struct platform_device *pdev)
244{
245 struct resource *res;
246 struct tegra_ahb *ahb;
247 size_t bytes;
248
249 bytes = sizeof(*ahb) + sizeof(u32) * ARRAY_SIZE(tegra_ahb_gizmo);
250 ahb = devm_kzalloc(&pdev->dev, bytes, GFP_KERNEL);
251 if (!ahb)
252 return -ENOMEM;
253
254 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
255 if (!res)
256 return -ENODEV;
257 ahb->regs = devm_request_and_ioremap(&pdev->dev, res);
258 if (!ahb->regs)
259 return -EBUSY;
260
261 ahb->dev = &pdev->dev;
262 platform_set_drvdata(pdev, ahb);
263 tegra_ahb_gizmo_init(ahb);
264 return 0;
265}
266
267static int __devexit tegra_ahb_remove(struct platform_device *pdev)
268{
269 return 0;
270}
271
272static const struct of_device_id tegra_ahb_of_match[] __devinitconst = {
273 { .compatible = "nvidia,tegra30-ahb", },
274 { .compatible = "nvidia,tegra20-ahb", },
275 {},
276};
277
278static struct platform_driver tegra_ahb_driver = {
279 .probe = tegra_ahb_probe,
280 .remove = __devexit_p(tegra_ahb_remove),
281 .driver = {
282 .name = DRV_NAME,
283 .owner = THIS_MODULE,
284 .of_match_table = tegra_ahb_of_match,
285 .pm = &tegra_ahb_pm,
286 },
287};
288module_platform_driver(tegra_ahb_driver);
289
290MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
291MODULE_DESCRIPTION("Tegra AHB driver");
292MODULE_LICENSE("GPL v2");
293MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/drivers/ata/sata_mv.c b/drivers/ata/sata_mv.c
index 7336d4a7ab31..24712adf69df 100644
--- a/drivers/ata/sata_mv.c
+++ b/drivers/ata/sata_mv.c
@@ -553,6 +553,7 @@ struct mv_host_priv {
553 553
554#if defined(CONFIG_HAVE_CLK) 554#if defined(CONFIG_HAVE_CLK)
555 struct clk *clk; 555 struct clk *clk;
556 struct clk **port_clks;
556#endif 557#endif
557 /* 558 /*
558 * These consistent DMA memory pools give us guaranteed 559 * These consistent DMA memory pools give us guaranteed
@@ -4027,6 +4028,9 @@ static int mv_platform_probe(struct platform_device *pdev)
4027 struct resource *res; 4028 struct resource *res;
4028 int n_ports = 0; 4029 int n_ports = 0;
4029 int rc; 4030 int rc;
4031#if defined(CONFIG_HAVE_CLK)
4032 int port;
4033#endif
4030 4034
4031 ata_print_version_once(&pdev->dev, DRV_VERSION); 4035 ata_print_version_once(&pdev->dev, DRV_VERSION);
4032 4036
@@ -4054,6 +4058,13 @@ static int mv_platform_probe(struct platform_device *pdev)
4054 4058
4055 if (!host || !hpriv) 4059 if (!host || !hpriv)
4056 return -ENOMEM; 4060 return -ENOMEM;
4061#if defined(CONFIG_HAVE_CLK)
4062 hpriv->port_clks = devm_kzalloc(&pdev->dev,
4063 sizeof(struct clk *) * n_ports,
4064 GFP_KERNEL);
4065 if (!hpriv->port_clks)
4066 return -ENOMEM;
4067#endif
4057 host->private_data = hpriv; 4068 host->private_data = hpriv;
4058 hpriv->n_ports = n_ports; 4069 hpriv->n_ports = n_ports;
4059 hpriv->board_idx = chip_soc; 4070 hpriv->board_idx = chip_soc;
@@ -4066,9 +4077,17 @@ static int mv_platform_probe(struct platform_device *pdev)
4066#if defined(CONFIG_HAVE_CLK) 4077#if defined(CONFIG_HAVE_CLK)
4067 hpriv->clk = clk_get(&pdev->dev, NULL); 4078 hpriv->clk = clk_get(&pdev->dev, NULL);
4068 if (IS_ERR(hpriv->clk)) 4079 if (IS_ERR(hpriv->clk))
4069 dev_notice(&pdev->dev, "cannot get clkdev\n"); 4080 dev_notice(&pdev->dev, "cannot get optional clkdev\n");
4070 else 4081 else
4071 clk_enable(hpriv->clk); 4082 clk_prepare_enable(hpriv->clk);
4083
4084 for (port = 0; port < n_ports; port++) {
4085 char port_number[16];
4086 sprintf(port_number, "%d", port);
4087 hpriv->port_clks[port] = clk_get(&pdev->dev, port_number);
4088 if (!IS_ERR(hpriv->port_clks[port]))
4089 clk_prepare_enable(hpriv->port_clks[port]);
4090 }
4072#endif 4091#endif
4073 4092
4074 /* 4093 /*
@@ -4098,9 +4117,15 @@ static int mv_platform_probe(struct platform_device *pdev)
4098err: 4117err:
4099#if defined(CONFIG_HAVE_CLK) 4118#if defined(CONFIG_HAVE_CLK)
4100 if (!IS_ERR(hpriv->clk)) { 4119 if (!IS_ERR(hpriv->clk)) {
4101 clk_disable(hpriv->clk); 4120 clk_disable_unprepare(hpriv->clk);
4102 clk_put(hpriv->clk); 4121 clk_put(hpriv->clk);
4103 } 4122 }
4123 for (port = 0; port < n_ports; port++) {
4124 if (!IS_ERR(hpriv->port_clks[port])) {
4125 clk_disable_unprepare(hpriv->port_clks[port]);
4126 clk_put(hpriv->port_clks[port]);
4127 }
4128 }
4104#endif 4129#endif
4105 4130
4106 return rc; 4131 return rc;
@@ -4119,14 +4144,21 @@ static int __devexit mv_platform_remove(struct platform_device *pdev)
4119 struct ata_host *host = platform_get_drvdata(pdev); 4144 struct ata_host *host = platform_get_drvdata(pdev);
4120#if defined(CONFIG_HAVE_CLK) 4145#if defined(CONFIG_HAVE_CLK)
4121 struct mv_host_priv *hpriv = host->private_data; 4146 struct mv_host_priv *hpriv = host->private_data;
4147 int port;
4122#endif 4148#endif
4123 ata_host_detach(host); 4149 ata_host_detach(host);
4124 4150
4125#if defined(CONFIG_HAVE_CLK) 4151#if defined(CONFIG_HAVE_CLK)
4126 if (!IS_ERR(hpriv->clk)) { 4152 if (!IS_ERR(hpriv->clk)) {
4127 clk_disable(hpriv->clk); 4153 clk_disable_unprepare(hpriv->clk);
4128 clk_put(hpriv->clk); 4154 clk_put(hpriv->clk);
4129 } 4155 }
4156 for (port = 0; port < host->n_ports; port++) {
4157 if (!IS_ERR(hpriv->port_clks[port])) {
4158 clk_disable_unprepare(hpriv->port_clks[port]);
4159 clk_put(hpriv->port_clks[port]);
4160 }
4161 }
4130#endif 4162#endif
4131 return 0; 4163 return 0;
4132} 4164}
diff --git a/drivers/base/Kconfig b/drivers/base/Kconfig
index 9aa618acfe97..9b21469482ae 100644
--- a/drivers/base/Kconfig
+++ b/drivers/base/Kconfig
@@ -192,4 +192,93 @@ config DMA_SHARED_BUFFER
192 APIs extension; the file's descriptor can then be passed on to other 192 APIs extension; the file's descriptor can then be passed on to other
193 driver. 193 driver.
194 194
195config CMA
196 bool "Contiguous Memory Allocator (EXPERIMENTAL)"
197 depends on HAVE_DMA_CONTIGUOUS && HAVE_MEMBLOCK && EXPERIMENTAL
198 select MIGRATION
199 help
200 This enables the Contiguous Memory Allocator which allows drivers
201 to allocate big physically-contiguous blocks of memory for use with
202 hardware components that do not support I/O map nor scatter-gather.
203
204 For more information see <include/linux/dma-contiguous.h>.
205 If unsure, say "n".
206
207if CMA
208
209config CMA_DEBUG
210 bool "CMA debug messages (DEVELOPMENT)"
211 depends on DEBUG_KERNEL
212 help
213 Turns on debug messages in CMA. This produces KERN_DEBUG
214 messages for every CMA call as well as various messages while
215 processing calls such as dma_alloc_from_contiguous().
216 This option does not affect warning and error messages.
217
218comment "Default contiguous memory area size:"
219
220config CMA_SIZE_MBYTES
221 int "Size in Mega Bytes"
222 depends on !CMA_SIZE_SEL_PERCENTAGE
223 default 16
224 help
225 Defines the size (in MiB) of the default memory area for Contiguous
226 Memory Allocator.
227
228config CMA_SIZE_PERCENTAGE
229 int "Percentage of total memory"
230 depends on !CMA_SIZE_SEL_MBYTES
231 default 10
232 help
233 Defines the size of the default memory area for Contiguous Memory
234 Allocator as a percentage of the total memory in the system.
235
236choice
237 prompt "Selected region size"
238 default CMA_SIZE_SEL_ABSOLUTE
239
240config CMA_SIZE_SEL_MBYTES
241 bool "Use mega bytes value only"
242
243config CMA_SIZE_SEL_PERCENTAGE
244 bool "Use percentage value only"
245
246config CMA_SIZE_SEL_MIN
247 bool "Use lower value (minimum)"
248
249config CMA_SIZE_SEL_MAX
250 bool "Use higher value (maximum)"
251
252endchoice
253
254config CMA_ALIGNMENT
255 int "Maximum PAGE_SIZE order of alignment for contiguous buffers"
256 range 4 9
257 default 8
258 help
259 DMA mapping framework by default aligns all buffers to the smallest
260 PAGE_SIZE order which is greater than or equal to the requested buffer
261 size. This works well for buffers up to a few hundreds kilobytes, but
262 for larger buffers it just a memory waste. With this parameter you can
263 specify the maximum PAGE_SIZE order for contiguous buffers. Larger
264 buffers will be aligned only to this specified order. The order is
265 expressed as a power of two multiplied by the PAGE_SIZE.
266
267 For example, if your system defaults to 4KiB pages, the order value
268 of 8 means that the buffers will be aligned up to 1MiB only.
269
270 If unsure, leave the default value "8".
271
272config CMA_AREAS
273 int "Maximum count of the CMA device-private areas"
274 default 7
275 help
276 CMA allows to create CMA areas for particular devices. This parameter
277 sets the maximum number of such device private CMA areas in the
278 system.
279
280 If unsure, leave the default value "7".
281
282endif
283
195endmenu 284endmenu
diff --git a/drivers/base/Makefile b/drivers/base/Makefile
index b6d1b9c4200c..5aa2d703d19f 100644
--- a/drivers/base/Makefile
+++ b/drivers/base/Makefile
@@ -6,6 +6,7 @@ obj-y := core.o bus.o dd.o syscore.o \
6 attribute_container.o transport_class.o \ 6 attribute_container.o transport_class.o \
7 topology.o 7 topology.o
8obj-$(CONFIG_DEVTMPFS) += devtmpfs.o 8obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
9obj-$(CONFIG_CMA) += dma-contiguous.o
9obj-y += power/ 10obj-y += power/
10obj-$(CONFIG_HAS_DMA) += dma-mapping.o 11obj-$(CONFIG_HAS_DMA) += dma-mapping.o
11obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o 12obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
diff --git a/drivers/base/dma-buf.c b/drivers/base/dma-buf.c
index 05c64c11bad2..24e88fe29ec1 100644
--- a/drivers/base/dma-buf.c
+++ b/drivers/base/dma-buf.c
@@ -44,8 +44,26 @@ static int dma_buf_release(struct inode *inode, struct file *file)
44 return 0; 44 return 0;
45} 45}
46 46
47static int dma_buf_mmap_internal(struct file *file, struct vm_area_struct *vma)
48{
49 struct dma_buf *dmabuf;
50
51 if (!is_dma_buf_file(file))
52 return -EINVAL;
53
54 dmabuf = file->private_data;
55
56 /* check for overflowing the buffer's size */
57 if (vma->vm_pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
58 dmabuf->size >> PAGE_SHIFT)
59 return -EINVAL;
60
61 return dmabuf->ops->mmap(dmabuf, vma);
62}
63
47static const struct file_operations dma_buf_fops = { 64static const struct file_operations dma_buf_fops = {
48 .release = dma_buf_release, 65 .release = dma_buf_release,
66 .mmap = dma_buf_mmap_internal,
49}; 67};
50 68
51/* 69/*
@@ -82,7 +100,8 @@ struct dma_buf *dma_buf_export(void *priv, const struct dma_buf_ops *ops,
82 || !ops->unmap_dma_buf 100 || !ops->unmap_dma_buf
83 || !ops->release 101 || !ops->release
84 || !ops->kmap_atomic 102 || !ops->kmap_atomic
85 || !ops->kmap)) { 103 || !ops->kmap
104 || !ops->mmap)) {
86 return ERR_PTR(-EINVAL); 105 return ERR_PTR(-EINVAL);
87 } 106 }
88 107
@@ -406,3 +425,81 @@ void dma_buf_kunmap(struct dma_buf *dmabuf, unsigned long page_num,
406 dmabuf->ops->kunmap(dmabuf, page_num, vaddr); 425 dmabuf->ops->kunmap(dmabuf, page_num, vaddr);
407} 426}
408EXPORT_SYMBOL_GPL(dma_buf_kunmap); 427EXPORT_SYMBOL_GPL(dma_buf_kunmap);
428
429
430/**
431 * dma_buf_mmap - Setup up a userspace mmap with the given vma
432 * @dmabuf: [in] buffer that should back the vma
433 * @vma: [in] vma for the mmap
434 * @pgoff: [in] offset in pages where this mmap should start within the
435 * dma-buf buffer.
436 *
437 * This function adjusts the passed in vma so that it points at the file of the
438 * dma_buf operation. It alsog adjusts the starting pgoff and does bounds
439 * checking on the size of the vma. Then it calls the exporters mmap function to
440 * set up the mapping.
441 *
442 * Can return negative error values, returns 0 on success.
443 */
444int dma_buf_mmap(struct dma_buf *dmabuf, struct vm_area_struct *vma,
445 unsigned long pgoff)
446{
447 if (WARN_ON(!dmabuf || !vma))
448 return -EINVAL;
449
450 /* check for offset overflow */
451 if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) < pgoff)
452 return -EOVERFLOW;
453
454 /* check for overflowing the buffer's size */
455 if (pgoff + ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) >
456 dmabuf->size >> PAGE_SHIFT)
457 return -EINVAL;
458
459 /* readjust the vma */
460 if (vma->vm_file)
461 fput(vma->vm_file);
462
463 vma->vm_file = dmabuf->file;
464 get_file(vma->vm_file);
465
466 vma->vm_pgoff = pgoff;
467
468 return dmabuf->ops->mmap(dmabuf, vma);
469}
470EXPORT_SYMBOL_GPL(dma_buf_mmap);
471
472/**
473 * dma_buf_vmap - Create virtual mapping for the buffer object into kernel
474 * address space. Same restrictions as for vmap and friends apply.
475 * @dmabuf: [in] buffer to vmap
476 *
477 * This call may fail due to lack of virtual mapping address space.
478 * These calls are optional in drivers. The intended use for them
479 * is for mapping objects linear in kernel space for high use objects.
480 * Please attempt to use kmap/kunmap before thinking about these interfaces.
481 */
482void *dma_buf_vmap(struct dma_buf *dmabuf)
483{
484 if (WARN_ON(!dmabuf))
485 return NULL;
486
487 if (dmabuf->ops->vmap)
488 return dmabuf->ops->vmap(dmabuf);
489 return NULL;
490}
491EXPORT_SYMBOL_GPL(dma_buf_vmap);
492
493/**
494 * dma_buf_vunmap - Unmap a vmap obtained by dma_buf_vmap.
495 * @dmabuf: [in] buffer to vunmap
496 */
497void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
498{
499 if (WARN_ON(!dmabuf))
500 return;
501
502 if (dmabuf->ops->vunmap)
503 dmabuf->ops->vunmap(dmabuf, vaddr);
504}
505EXPORT_SYMBOL_GPL(dma_buf_vunmap);
diff --git a/drivers/base/dma-coherent.c b/drivers/base/dma-coherent.c
index bb0025c510b3..1b85949e3d2f 100644
--- a/drivers/base/dma-coherent.c
+++ b/drivers/base/dma-coherent.c
@@ -10,6 +10,7 @@
10struct dma_coherent_mem { 10struct dma_coherent_mem {
11 void *virt_base; 11 void *virt_base;
12 dma_addr_t device_base; 12 dma_addr_t device_base;
13 phys_addr_t pfn_base;
13 int size; 14 int size;
14 int flags; 15 int flags;
15 unsigned long *bitmap; 16 unsigned long *bitmap;
@@ -44,6 +45,7 @@ int dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
44 45
45 dev->dma_mem->virt_base = mem_base; 46 dev->dma_mem->virt_base = mem_base;
46 dev->dma_mem->device_base = device_addr; 47 dev->dma_mem->device_base = device_addr;
48 dev->dma_mem->pfn_base = PFN_DOWN(bus_addr);
47 dev->dma_mem->size = pages; 49 dev->dma_mem->size = pages;
48 dev->dma_mem->flags = flags; 50 dev->dma_mem->flags = flags;
49 51
@@ -176,3 +178,43 @@ int dma_release_from_coherent(struct device *dev, int order, void *vaddr)
176 return 0; 178 return 0;
177} 179}
178EXPORT_SYMBOL(dma_release_from_coherent); 180EXPORT_SYMBOL(dma_release_from_coherent);
181
182/**
183 * dma_mmap_from_coherent() - try to mmap the memory allocated from
184 * per-device coherent memory pool to userspace
185 * @dev: device from which the memory was allocated
186 * @vma: vm_area for the userspace memory
187 * @vaddr: cpu address returned by dma_alloc_from_coherent
188 * @size: size of the memory buffer allocated by dma_alloc_from_coherent
189 *
190 * This checks whether the memory was allocated from the per-device
191 * coherent memory pool and if so, maps that memory to the provided vma.
192 *
193 * Returns 1 if we correctly mapped the memory, or 0 if
194 * dma_release_coherent() should proceed with mapping memory from
195 * generic pools.
196 */
197int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
198 void *vaddr, size_t size, int *ret)
199{
200 struct dma_coherent_mem *mem = dev ? dev->dma_mem : NULL;
201
202 if (mem && vaddr >= mem->virt_base && vaddr + size <=
203 (mem->virt_base + (mem->size << PAGE_SHIFT))) {
204 unsigned long off = vma->vm_pgoff;
205 int start = (vaddr - mem->virt_base) >> PAGE_SHIFT;
206 int user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
207 int count = size >> PAGE_SHIFT;
208
209 *ret = -ENXIO;
210 if (off < count && user_count <= count - off) {
211 unsigned pfn = mem->pfn_base + start + off;
212 *ret = remap_pfn_range(vma, vma->vm_start, pfn,
213 user_count << PAGE_SHIFT,
214 vma->vm_page_prot);
215 }
216 return 1;
217 }
218 return 0;
219}
220EXPORT_SYMBOL(dma_mmap_from_coherent);
diff --git a/drivers/base/dma-contiguous.c b/drivers/base/dma-contiguous.c
new file mode 100644
index 000000000000..78efb0306a44
--- /dev/null
+++ b/drivers/base/dma-contiguous.c
@@ -0,0 +1,401 @@
1/*
2 * Contiguous Memory Allocator for DMA mapping framework
3 * Copyright (c) 2010-2011 by Samsung Electronics.
4 * Written by:
5 * Marek Szyprowski <m.szyprowski@samsung.com>
6 * Michal Nazarewicz <mina86@mina86.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License as
10 * published by the Free Software Foundation; either version 2 of the
11 * License or (at your optional) any later version of the license.
12 */
13
14#define pr_fmt(fmt) "cma: " fmt
15
16#ifdef CONFIG_CMA_DEBUG
17#ifndef DEBUG
18# define DEBUG
19#endif
20#endif
21
22#include <asm/page.h>
23#include <asm/dma-contiguous.h>
24
25#include <linux/memblock.h>
26#include <linux/err.h>
27#include <linux/mm.h>
28#include <linux/mutex.h>
29#include <linux/page-isolation.h>
30#include <linux/slab.h>
31#include <linux/swap.h>
32#include <linux/mm_types.h>
33#include <linux/dma-contiguous.h>
34
35#ifndef SZ_1M
36#define SZ_1M (1 << 20)
37#endif
38
39struct cma {
40 unsigned long base_pfn;
41 unsigned long count;
42 unsigned long *bitmap;
43};
44
45struct cma *dma_contiguous_default_area;
46
47#ifdef CONFIG_CMA_SIZE_MBYTES
48#define CMA_SIZE_MBYTES CONFIG_CMA_SIZE_MBYTES
49#else
50#define CMA_SIZE_MBYTES 0
51#endif
52
53/*
54 * Default global CMA area size can be defined in kernel's .config.
55 * This is usefull mainly for distro maintainers to create a kernel
56 * that works correctly for most supported systems.
57 * The size can be set in bytes or as a percentage of the total memory
58 * in the system.
59 *
60 * Users, who want to set the size of global CMA area for their system
61 * should use cma= kernel parameter.
62 */
63static const unsigned long size_bytes = CMA_SIZE_MBYTES * SZ_1M;
64static long size_cmdline = -1;
65
66static int __init early_cma(char *p)
67{
68 pr_debug("%s(%s)\n", __func__, p);
69 size_cmdline = memparse(p, &p);
70 return 0;
71}
72early_param("cma", early_cma);
73
74#ifdef CONFIG_CMA_SIZE_PERCENTAGE
75
76static unsigned long __init __maybe_unused cma_early_percent_memory(void)
77{
78 struct memblock_region *reg;
79 unsigned long total_pages = 0;
80
81 /*
82 * We cannot use memblock_phys_mem_size() here, because
83 * memblock_analyze() has not been called yet.
84 */
85 for_each_memblock(memory, reg)
86 total_pages += memblock_region_memory_end_pfn(reg) -
87 memblock_region_memory_base_pfn(reg);
88
89 return (total_pages * CONFIG_CMA_SIZE_PERCENTAGE / 100) << PAGE_SHIFT;
90}
91
92#else
93
94static inline __maybe_unused unsigned long cma_early_percent_memory(void)
95{
96 return 0;
97}
98
99#endif
100
101/**
102 * dma_contiguous_reserve() - reserve area for contiguous memory handling
103 * @limit: End address of the reserved memory (optional, 0 for any).
104 *
105 * This function reserves memory from early allocator. It should be
106 * called by arch specific code once the early allocator (memblock or bootmem)
107 * has been activated and all other subsystems have already allocated/reserved
108 * memory.
109 */
110void __init dma_contiguous_reserve(phys_addr_t limit)
111{
112 unsigned long selected_size = 0;
113
114 pr_debug("%s(limit %08lx)\n", __func__, (unsigned long)limit);
115
116 if (size_cmdline != -1) {
117 selected_size = size_cmdline;
118 } else {
119#ifdef CONFIG_CMA_SIZE_SEL_MBYTES
120 selected_size = size_bytes;
121#elif defined(CONFIG_CMA_SIZE_SEL_PERCENTAGE)
122 selected_size = cma_early_percent_memory();
123#elif defined(CONFIG_CMA_SIZE_SEL_MIN)
124 selected_size = min(size_bytes, cma_early_percent_memory());
125#elif defined(CONFIG_CMA_SIZE_SEL_MAX)
126 selected_size = max(size_bytes, cma_early_percent_memory());
127#endif
128 }
129
130 if (selected_size) {
131 pr_debug("%s: reserving %ld MiB for global area\n", __func__,
132 selected_size / SZ_1M);
133
134 dma_declare_contiguous(NULL, selected_size, 0, limit);
135 }
136};
137
138static DEFINE_MUTEX(cma_mutex);
139
140static __init int cma_activate_area(unsigned long base_pfn, unsigned long count)
141{
142 unsigned long pfn = base_pfn;
143 unsigned i = count >> pageblock_order;
144 struct zone *zone;
145
146 WARN_ON_ONCE(!pfn_valid(pfn));
147 zone = page_zone(pfn_to_page(pfn));
148
149 do {
150 unsigned j;
151 base_pfn = pfn;
152 for (j = pageblock_nr_pages; j; --j, pfn++) {
153 WARN_ON_ONCE(!pfn_valid(pfn));
154 if (page_zone(pfn_to_page(pfn)) != zone)
155 return -EINVAL;
156 }
157 init_cma_reserved_pageblock(pfn_to_page(base_pfn));
158 } while (--i);
159 return 0;
160}
161
162static __init struct cma *cma_create_area(unsigned long base_pfn,
163 unsigned long count)
164{
165 int bitmap_size = BITS_TO_LONGS(count) * sizeof(long);
166 struct cma *cma;
167 int ret = -ENOMEM;
168
169 pr_debug("%s(base %08lx, count %lx)\n", __func__, base_pfn, count);
170
171 cma = kmalloc(sizeof *cma, GFP_KERNEL);
172 if (!cma)
173 return ERR_PTR(-ENOMEM);
174
175 cma->base_pfn = base_pfn;
176 cma->count = count;
177 cma->bitmap = kzalloc(bitmap_size, GFP_KERNEL);
178
179 if (!cma->bitmap)
180 goto no_mem;
181
182 ret = cma_activate_area(base_pfn, count);
183 if (ret)
184 goto error;
185
186 pr_debug("%s: returned %p\n", __func__, (void *)cma);
187 return cma;
188
189error:
190 kfree(cma->bitmap);
191no_mem:
192 kfree(cma);
193 return ERR_PTR(ret);
194}
195
196static struct cma_reserved {
197 phys_addr_t start;
198 unsigned long size;
199 struct device *dev;
200} cma_reserved[MAX_CMA_AREAS] __initdata;
201static unsigned cma_reserved_count __initdata;
202
203static int __init cma_init_reserved_areas(void)
204{
205 struct cma_reserved *r = cma_reserved;
206 unsigned i = cma_reserved_count;
207
208 pr_debug("%s()\n", __func__);
209
210 for (; i; --i, ++r) {
211 struct cma *cma;
212 cma = cma_create_area(PFN_DOWN(r->start),
213 r->size >> PAGE_SHIFT);
214 if (!IS_ERR(cma))
215 dev_set_cma_area(r->dev, cma);
216 }
217 return 0;
218}
219core_initcall(cma_init_reserved_areas);
220
221/**
222 * dma_declare_contiguous() - reserve area for contiguous memory handling
223 * for particular device
224 * @dev: Pointer to device structure.
225 * @size: Size of the reserved memory.
226 * @base: Start address of the reserved memory (optional, 0 for any).
227 * @limit: End address of the reserved memory (optional, 0 for any).
228 *
229 * This function reserves memory for specified device. It should be
230 * called by board specific code when early allocator (memblock or bootmem)
231 * is still activate.
232 */
233int __init dma_declare_contiguous(struct device *dev, unsigned long size,
234 phys_addr_t base, phys_addr_t limit)
235{
236 struct cma_reserved *r = &cma_reserved[cma_reserved_count];
237 unsigned long alignment;
238
239 pr_debug("%s(size %lx, base %08lx, limit %08lx)\n", __func__,
240 (unsigned long)size, (unsigned long)base,
241 (unsigned long)limit);
242
243 /* Sanity checks */
244 if (cma_reserved_count == ARRAY_SIZE(cma_reserved)) {
245 pr_err("Not enough slots for CMA reserved regions!\n");
246 return -ENOSPC;
247 }
248
249 if (!size)
250 return -EINVAL;
251
252 /* Sanitise input arguments */
253 alignment = PAGE_SIZE << max(MAX_ORDER, pageblock_order);
254 base = ALIGN(base, alignment);
255 size = ALIGN(size, alignment);
256 limit &= ~(alignment - 1);
257
258 /* Reserve memory */
259 if (base) {
260 if (memblock_is_region_reserved(base, size) ||
261 memblock_reserve(base, size) < 0) {
262 base = -EBUSY;
263 goto err;
264 }
265 } else {
266 /*
267 * Use __memblock_alloc_base() since
268 * memblock_alloc_base() panic()s.
269 */
270 phys_addr_t addr = __memblock_alloc_base(size, alignment, limit);
271 if (!addr) {
272 base = -ENOMEM;
273 goto err;
274 } else if (addr + size > ~(unsigned long)0) {
275 memblock_free(addr, size);
276 base = -EINVAL;
277 goto err;
278 } else {
279 base = addr;
280 }
281 }
282
283 /*
284 * Each reserved area must be initialised later, when more kernel
285 * subsystems (like slab allocator) are available.
286 */
287 r->start = base;
288 r->size = size;
289 r->dev = dev;
290 cma_reserved_count++;
291 pr_info("CMA: reserved %ld MiB at %08lx\n", size / SZ_1M,
292 (unsigned long)base);
293
294 /* Architecture specific contiguous memory fixup. */
295 dma_contiguous_early_fixup(base, size);
296 return 0;
297err:
298 pr_err("CMA: failed to reserve %ld MiB\n", size / SZ_1M);
299 return base;
300}
301
302/**
303 * dma_alloc_from_contiguous() - allocate pages from contiguous area
304 * @dev: Pointer to device for which the allocation is performed.
305 * @count: Requested number of pages.
306 * @align: Requested alignment of pages (in PAGE_SIZE order).
307 *
308 * This function allocates memory buffer for specified device. It uses
309 * device specific contiguous memory area if available or the default
310 * global one. Requires architecture specific get_dev_cma_area() helper
311 * function.
312 */
313struct page *dma_alloc_from_contiguous(struct device *dev, int count,
314 unsigned int align)
315{
316 unsigned long mask, pfn, pageno, start = 0;
317 struct cma *cma = dev_get_cma_area(dev);
318 int ret;
319
320 if (!cma || !cma->count)
321 return NULL;
322
323 if (align > CONFIG_CMA_ALIGNMENT)
324 align = CONFIG_CMA_ALIGNMENT;
325
326 pr_debug("%s(cma %p, count %d, align %d)\n", __func__, (void *)cma,
327 count, align);
328
329 if (!count)
330 return NULL;
331
332 mask = (1 << align) - 1;
333
334 mutex_lock(&cma_mutex);
335
336 for (;;) {
337 pageno = bitmap_find_next_zero_area(cma->bitmap, cma->count,
338 start, count, mask);
339 if (pageno >= cma->count) {
340 ret = -ENOMEM;
341 goto error;
342 }
343
344 pfn = cma->base_pfn + pageno;
345 ret = alloc_contig_range(pfn, pfn + count, MIGRATE_CMA);
346 if (ret == 0) {
347 bitmap_set(cma->bitmap, pageno, count);
348 break;
349 } else if (ret != -EBUSY) {
350 goto error;
351 }
352 pr_debug("%s(): memory range at %p is busy, retrying\n",
353 __func__, pfn_to_page(pfn));
354 /* try again with a bit different memory target */
355 start = pageno + mask + 1;
356 }
357
358 mutex_unlock(&cma_mutex);
359
360 pr_debug("%s(): returned %p\n", __func__, pfn_to_page(pfn));
361 return pfn_to_page(pfn);
362error:
363 mutex_unlock(&cma_mutex);
364 return NULL;
365}
366
367/**
368 * dma_release_from_contiguous() - release allocated pages
369 * @dev: Pointer to device for which the pages were allocated.
370 * @pages: Allocated pages.
371 * @count: Number of allocated pages.
372 *
373 * This function releases memory allocated by dma_alloc_from_contiguous().
374 * It returns false when provided pages do not belong to contiguous area and
375 * true otherwise.
376 */
377bool dma_release_from_contiguous(struct device *dev, struct page *pages,
378 int count)
379{
380 struct cma *cma = dev_get_cma_area(dev);
381 unsigned long pfn;
382
383 if (!cma || !pages)
384 return false;
385
386 pr_debug("%s(page %p)\n", __func__, (void *)pages);
387
388 pfn = page_to_pfn(pages);
389
390 if (pfn < cma->base_pfn || pfn >= cma->base_pfn + cma->count)
391 return false;
392
393 VM_BUG_ON(pfn + count > cma->base_pfn + cma->count);
394
395 mutex_lock(&cma_mutex);
396 bitmap_clear(cma->bitmap, pfn - cma->base_pfn, count);
397 free_contig_range(pfn, count);
398 mutex_unlock(&cma_mutex);
399
400 return true;
401}
diff --git a/drivers/base/node.c b/drivers/base/node.c
index 90aa2a11a933..af1a177216f1 100644
--- a/drivers/base/node.c
+++ b/drivers/base/node.c
@@ -592,11 +592,9 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
592{ 592{
593 int n; 593 int n;
594 594
595 n = nodelist_scnprintf(buf, PAGE_SIZE, node_states[state]); 595 n = nodelist_scnprintf(buf, PAGE_SIZE-2, node_states[state]);
596 if (n > 0 && PAGE_SIZE > n + 1) { 596 buf[n++] = '\n';
597 *(buf + n++) = '\n'; 597 buf[n] = '\0';
598 *(buf + n++) = '\0';
599 }
600 return n; 598 return n;
601} 599}
602 600
diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig
index 165e1febae53..4864407e3fc4 100644
--- a/drivers/clk/Kconfig
+++ b/drivers/clk/Kconfig
@@ -12,6 +12,7 @@ config HAVE_MACH_CLKDEV
12config COMMON_CLK 12config COMMON_CLK
13 bool 13 bool
14 select HAVE_CLK_PREPARE 14 select HAVE_CLK_PREPARE
15 select CLKDEV_LOOKUP
15 ---help--- 16 ---help---
16 The common clock framework is a single definition of struct 17 The common clock framework is a single definition of struct
17 clk, useful across many platforms, as well as an 18 clk, useful across many platforms, as well as an
@@ -22,17 +23,6 @@ config COMMON_CLK
22menu "Common Clock Framework" 23menu "Common Clock Framework"
23 depends on COMMON_CLK 24 depends on COMMON_CLK
24 25
25config COMMON_CLK_DISABLE_UNUSED
26 bool "Disabled unused clocks at boot"
27 depends on COMMON_CLK
28 ---help---
29 Traverses the entire clock tree and disables any clocks that are
30 enabled in hardware but have not been enabled by any device drivers.
31 This saves power and keeps the software model of the clock in line
32 with reality.
33
34 If in doubt, say "N".
35
36config COMMON_CLK_DEBUG 26config COMMON_CLK_DEBUG
37 bool "DebugFS representation of clock tree" 27 bool "DebugFS representation of clock tree"
38 depends on COMMON_CLK 28 depends on COMMON_CLK
diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile
index 1f736bc11c4b..b9a5158a30b1 100644
--- a/drivers/clk/Makefile
+++ b/drivers/clk/Makefile
@@ -1,4 +1,7 @@
1 1
2obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o 2obj-$(CONFIG_CLKDEV_LOOKUP) += clkdev.o
3obj-$(CONFIG_COMMON_CLK) += clk.o clk-fixed-rate.o clk-gate.o \ 3obj-$(CONFIG_COMMON_CLK) += clk.o clk-fixed-rate.o clk-gate.o \
4 clk-mux.o clk-divider.o 4 clk-mux.o clk-divider.o clk-fixed-factor.o
5# SoCs specific
6obj-$(CONFIG_ARCH_MXS) += mxs/
7obj-$(CONFIG_PLAT_SPEAR) += spear/
diff --git a/drivers/clk/clk-divider.c b/drivers/clk/clk-divider.c
index d5ac6a75ea57..8ea11b444528 100644
--- a/drivers/clk/clk-divider.c
+++ b/drivers/clk/clk-divider.c
@@ -45,7 +45,6 @@ static unsigned long clk_divider_recalc_rate(struct clk_hw *hw,
45 45
46 return parent_rate / div; 46 return parent_rate / div;
47} 47}
48EXPORT_SYMBOL_GPL(clk_divider_recalc_rate);
49 48
50/* 49/*
51 * The reverse of DIV_ROUND_UP: The maximum number which 50 * The reverse of DIV_ROUND_UP: The maximum number which
@@ -68,8 +67,8 @@ static int clk_divider_bestdiv(struct clk_hw *hw, unsigned long rate,
68 if (divider->flags & CLK_DIVIDER_ONE_BASED) 67 if (divider->flags & CLK_DIVIDER_ONE_BASED)
69 maxdiv--; 68 maxdiv--;
70 69
71 if (!best_parent_rate) { 70 if (!(__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT)) {
72 parent_rate = __clk_get_rate(__clk_get_parent(hw->clk)); 71 parent_rate = *best_parent_rate;
73 bestdiv = DIV_ROUND_UP(parent_rate, rate); 72 bestdiv = DIV_ROUND_UP(parent_rate, rate);
74 bestdiv = bestdiv == 0 ? 1 : bestdiv; 73 bestdiv = bestdiv == 0 ? 1 : bestdiv;
75 bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv; 74 bestdiv = bestdiv > maxdiv ? maxdiv : bestdiv;
@@ -109,24 +108,18 @@ static long clk_divider_round_rate(struct clk_hw *hw, unsigned long rate,
109 int div; 108 int div;
110 div = clk_divider_bestdiv(hw, rate, prate); 109 div = clk_divider_bestdiv(hw, rate, prate);
111 110
112 if (prate) 111 return *prate / div;
113 return *prate / div;
114 else {
115 unsigned long r;
116 r = __clk_get_rate(__clk_get_parent(hw->clk));
117 return r / div;
118 }
119} 112}
120EXPORT_SYMBOL_GPL(clk_divider_round_rate);
121 113
122static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate) 114static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate,
115 unsigned long parent_rate)
123{ 116{
124 struct clk_divider *divider = to_clk_divider(hw); 117 struct clk_divider *divider = to_clk_divider(hw);
125 unsigned int div; 118 unsigned int div;
126 unsigned long flags = 0; 119 unsigned long flags = 0;
127 u32 val; 120 u32 val;
128 121
129 div = __clk_get_rate(__clk_get_parent(hw->clk)) / rate; 122 div = parent_rate / rate;
130 123
131 if (!(divider->flags & CLK_DIVIDER_ONE_BASED)) 124 if (!(divider->flags & CLK_DIVIDER_ONE_BASED))
132 div--; 125 div--;
@@ -147,15 +140,26 @@ static int clk_divider_set_rate(struct clk_hw *hw, unsigned long rate)
147 140
148 return 0; 141 return 0;
149} 142}
150EXPORT_SYMBOL_GPL(clk_divider_set_rate);
151 143
152struct clk_ops clk_divider_ops = { 144const struct clk_ops clk_divider_ops = {
153 .recalc_rate = clk_divider_recalc_rate, 145 .recalc_rate = clk_divider_recalc_rate,
154 .round_rate = clk_divider_round_rate, 146 .round_rate = clk_divider_round_rate,
155 .set_rate = clk_divider_set_rate, 147 .set_rate = clk_divider_set_rate,
156}; 148};
157EXPORT_SYMBOL_GPL(clk_divider_ops); 149EXPORT_SYMBOL_GPL(clk_divider_ops);
158 150
151/**
152 * clk_register_divider - register a divider clock with the clock framework
153 * @dev: device registering this clock
154 * @name: name of this clock
155 * @parent_name: name of clock's parent
156 * @flags: framework-specific flags
157 * @reg: register address to adjust divider
158 * @shift: number of bits to shift the bitfield
159 * @width: width of the bitfield
160 * @clk_divider_flags: divider-specific flags for this clock
161 * @lock: shared register lock for this clock
162 */
159struct clk *clk_register_divider(struct device *dev, const char *name, 163struct clk *clk_register_divider(struct device *dev, const char *name,
160 const char *parent_name, unsigned long flags, 164 const char *parent_name, unsigned long flags,
161 void __iomem *reg, u8 shift, u8 width, 165 void __iomem *reg, u8 shift, u8 width,
@@ -163,38 +167,34 @@ struct clk *clk_register_divider(struct device *dev, const char *name,
163{ 167{
164 struct clk_divider *div; 168 struct clk_divider *div;
165 struct clk *clk; 169 struct clk *clk;
170 struct clk_init_data init;
166 171
172 /* allocate the divider */
167 div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL); 173 div = kzalloc(sizeof(struct clk_divider), GFP_KERNEL);
168
169 if (!div) { 174 if (!div) {
170 pr_err("%s: could not allocate divider clk\n", __func__); 175 pr_err("%s: could not allocate divider clk\n", __func__);
171 return NULL; 176 return ERR_PTR(-ENOMEM);
172 } 177 }
173 178
179 init.name = name;
180 init.ops = &clk_divider_ops;
181 init.flags = flags;
182 init.parent_names = (parent_name ? &parent_name: NULL);
183 init.num_parents = (parent_name ? 1 : 0);
184
174 /* struct clk_divider assignments */ 185 /* struct clk_divider assignments */
175 div->reg = reg; 186 div->reg = reg;
176 div->shift = shift; 187 div->shift = shift;
177 div->width = width; 188 div->width = width;
178 div->flags = clk_divider_flags; 189 div->flags = clk_divider_flags;
179 div->lock = lock; 190 div->lock = lock;
191 div->hw.init = &init;
180 192
181 if (parent_name) { 193 /* register the clock */
182 div->parent[0] = kstrdup(parent_name, GFP_KERNEL); 194 clk = clk_register(dev, &div->hw);
183 if (!div->parent[0])
184 goto out;
185 }
186
187 clk = clk_register(dev, name,
188 &clk_divider_ops, &div->hw,
189 div->parent,
190 (parent_name ? 1 : 0),
191 flags);
192 if (clk)
193 return clk;
194 195
195out: 196 if (IS_ERR(clk))
196 kfree(div->parent[0]); 197 kfree(div);
197 kfree(div);
198 198
199 return NULL; 199 return clk;
200} 200}
diff --git a/drivers/clk/clk-fixed-factor.c b/drivers/clk/clk-fixed-factor.c
new file mode 100644
index 000000000000..c8c003e217ad
--- /dev/null
+++ b/drivers/clk/clk-fixed-factor.c
@@ -0,0 +1,95 @@
1/*
2 * Copyright (C) 2011 Sascha Hauer, Pengutronix <s.hauer@pengutronix.de>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * Standard functionality for the common clock API.
9 */
10#include <linux/module.h>
11#include <linux/clk-provider.h>
12#include <linux/slab.h>
13#include <linux/err.h>
14
15/*
16 * DOC: basic fixed multiplier and divider clock that cannot gate
17 *
18 * Traits of this clock:
19 * prepare - clk_prepare only ensures that parents are prepared
20 * enable - clk_enable only ensures that parents are enabled
21 * rate - rate is fixed. clk->rate = parent->rate / div * mult
22 * parent - fixed parent. No clk_set_parent support
23 */
24
25#define to_clk_fixed_factor(_hw) container_of(_hw, struct clk_fixed_factor, hw)
26
27static unsigned long clk_factor_recalc_rate(struct clk_hw *hw,
28 unsigned long parent_rate)
29{
30 struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
31
32 return parent_rate * fix->mult / fix->div;
33}
34
35static long clk_factor_round_rate(struct clk_hw *hw, unsigned long rate,
36 unsigned long *prate)
37{
38 struct clk_fixed_factor *fix = to_clk_fixed_factor(hw);
39
40 if (__clk_get_flags(hw->clk) & CLK_SET_RATE_PARENT) {
41 unsigned long best_parent;
42
43 best_parent = (rate / fix->mult) * fix->div;
44 *prate = __clk_round_rate(__clk_get_parent(hw->clk),
45 best_parent);
46 }
47
48 return (*prate / fix->div) * fix->mult;
49}
50
51static int clk_factor_set_rate(struct clk_hw *hw, unsigned long rate,
52 unsigned long parent_rate)
53{
54 return 0;
55}
56
57struct clk_ops clk_fixed_factor_ops = {
58 .round_rate = clk_factor_round_rate,
59 .set_rate = clk_factor_set_rate,
60 .recalc_rate = clk_factor_recalc_rate,
61};
62EXPORT_SYMBOL_GPL(clk_fixed_factor_ops);
63
64struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
65 const char *parent_name, unsigned long flags,
66 unsigned int mult, unsigned int div)
67{
68 struct clk_fixed_factor *fix;
69 struct clk_init_data init;
70 struct clk *clk;
71
72 fix = kmalloc(sizeof(*fix), GFP_KERNEL);
73 if (!fix) {
74 pr_err("%s: could not allocate fixed factor clk\n", __func__);
75 return ERR_PTR(-ENOMEM);
76 }
77
78 /* struct clk_fixed_factor assignments */
79 fix->mult = mult;
80 fix->div = div;
81 fix->hw.init = &init;
82
83 init.name = name;
84 init.ops = &clk_fixed_factor_ops;
85 init.flags = flags;
86 init.parent_names = &parent_name;
87 init.num_parents = 1;
88
89 clk = clk_register(dev, &fix->hw);
90
91 if (IS_ERR(clk))
92 kfree(fix);
93
94 return clk;
95}
diff --git a/drivers/clk/clk-fixed-rate.c b/drivers/clk/clk-fixed-rate.c
index 90c79fb5d1bd..cbd246229786 100644
--- a/drivers/clk/clk-fixed-rate.c
+++ b/drivers/clk/clk-fixed-rate.c
@@ -32,51 +32,50 @@ static unsigned long clk_fixed_rate_recalc_rate(struct clk_hw *hw,
32{ 32{
33 return to_clk_fixed_rate(hw)->fixed_rate; 33 return to_clk_fixed_rate(hw)->fixed_rate;
34} 34}
35EXPORT_SYMBOL_GPL(clk_fixed_rate_recalc_rate);
36 35
37struct clk_ops clk_fixed_rate_ops = { 36const struct clk_ops clk_fixed_rate_ops = {
38 .recalc_rate = clk_fixed_rate_recalc_rate, 37 .recalc_rate = clk_fixed_rate_recalc_rate,
39}; 38};
40EXPORT_SYMBOL_GPL(clk_fixed_rate_ops); 39EXPORT_SYMBOL_GPL(clk_fixed_rate_ops);
41 40
41/**
42 * clk_register_fixed_rate - register fixed-rate clock with the clock framework
43 * @dev: device that is registering this clock
44 * @name: name of this clock
45 * @parent_name: name of clock's parent
46 * @flags: framework-specific flags
47 * @fixed_rate: non-adjustable clock rate
48 */
42struct clk *clk_register_fixed_rate(struct device *dev, const char *name, 49struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
43 const char *parent_name, unsigned long flags, 50 const char *parent_name, unsigned long flags,
44 unsigned long fixed_rate) 51 unsigned long fixed_rate)
45{ 52{
46 struct clk_fixed_rate *fixed; 53 struct clk_fixed_rate *fixed;
47 char **parent_names = NULL; 54 struct clk *clk;
48 u8 len; 55 struct clk_init_data init;
49 56
57 /* allocate fixed-rate clock */
50 fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL); 58 fixed = kzalloc(sizeof(struct clk_fixed_rate), GFP_KERNEL);
51
52 if (!fixed) { 59 if (!fixed) {
53 pr_err("%s: could not allocate fixed clk\n", __func__); 60 pr_err("%s: could not allocate fixed clk\n", __func__);
54 return ERR_PTR(-ENOMEM); 61 return ERR_PTR(-ENOMEM);
55 } 62 }
56 63
64 init.name = name;
65 init.ops = &clk_fixed_rate_ops;
66 init.flags = flags;
67 init.parent_names = (parent_name ? &parent_name: NULL);
68 init.num_parents = (parent_name ? 1 : 0);
69
57 /* struct clk_fixed_rate assignments */ 70 /* struct clk_fixed_rate assignments */
58 fixed->fixed_rate = fixed_rate; 71 fixed->fixed_rate = fixed_rate;
72 fixed->hw.init = &init;
59 73
60 if (parent_name) { 74 /* register the clock */
61 parent_names = kmalloc(sizeof(char *), GFP_KERNEL); 75 clk = clk_register(dev, &fixed->hw);
62
63 if (! parent_names)
64 goto out;
65 76
66 len = sizeof(char) * strlen(parent_name); 77 if (IS_ERR(clk))
67 78 kfree(fixed);
68 parent_names[0] = kmalloc(len, GFP_KERNEL);
69
70 if (!parent_names[0])
71 goto out;
72
73 strncpy(parent_names[0], parent_name, len);
74 }
75 79
76out: 80 return clk;
77 return clk_register(dev, name,
78 &clk_fixed_rate_ops, &fixed->hw,
79 parent_names,
80 (parent_name ? 1 : 0),
81 flags);
82} 81}
diff --git a/drivers/clk/clk-gate.c b/drivers/clk/clk-gate.c
index b5902e2ef2fd..578465e04be6 100644
--- a/drivers/clk/clk-gate.c
+++ b/drivers/clk/clk-gate.c
@@ -28,32 +28,38 @@
28 28
29#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw) 29#define to_clk_gate(_hw) container_of(_hw, struct clk_gate, hw)
30 30
31static void clk_gate_set_bit(struct clk_gate *gate) 31/*
32 * It works on following logic:
33 *
34 * For enabling clock, enable = 1
35 * set2dis = 1 -> clear bit -> set = 0
36 * set2dis = 0 -> set bit -> set = 1
37 *
38 * For disabling clock, enable = 0
39 * set2dis = 1 -> set bit -> set = 1
40 * set2dis = 0 -> clear bit -> set = 0
41 *
42 * So, result is always: enable xor set2dis.
43 */
44static void clk_gate_endisable(struct clk_hw *hw, int enable)
32{ 45{
33 u32 reg; 46 struct clk_gate *gate = to_clk_gate(hw);
47 int set = gate->flags & CLK_GATE_SET_TO_DISABLE ? 1 : 0;
34 unsigned long flags = 0; 48 unsigned long flags = 0;
49 u32 reg;
50
51 set ^= enable;
35 52
36 if (gate->lock) 53 if (gate->lock)
37 spin_lock_irqsave(gate->lock, flags); 54 spin_lock_irqsave(gate->lock, flags);
38 55
39 reg = readl(gate->reg); 56 reg = readl(gate->reg);
40 reg |= BIT(gate->bit_idx);
41 writel(reg, gate->reg);
42
43 if (gate->lock)
44 spin_unlock_irqrestore(gate->lock, flags);
45}
46
47static void clk_gate_clear_bit(struct clk_gate *gate)
48{
49 u32 reg;
50 unsigned long flags = 0;
51 57
52 if (gate->lock) 58 if (set)
53 spin_lock_irqsave(gate->lock, flags); 59 reg |= BIT(gate->bit_idx);
60 else
61 reg &= ~BIT(gate->bit_idx);
54 62
55 reg = readl(gate->reg);
56 reg &= ~BIT(gate->bit_idx);
57 writel(reg, gate->reg); 63 writel(reg, gate->reg);
58 64
59 if (gate->lock) 65 if (gate->lock)
@@ -62,27 +68,15 @@ static void clk_gate_clear_bit(struct clk_gate *gate)
62 68
63static int clk_gate_enable(struct clk_hw *hw) 69static int clk_gate_enable(struct clk_hw *hw)
64{ 70{
65 struct clk_gate *gate = to_clk_gate(hw); 71 clk_gate_endisable(hw, 1);
66
67 if (gate->flags & CLK_GATE_SET_TO_DISABLE)
68 clk_gate_clear_bit(gate);
69 else
70 clk_gate_set_bit(gate);
71 72
72 return 0; 73 return 0;
73} 74}
74EXPORT_SYMBOL_GPL(clk_gate_enable);
75 75
76static void clk_gate_disable(struct clk_hw *hw) 76static void clk_gate_disable(struct clk_hw *hw)
77{ 77{
78 struct clk_gate *gate = to_clk_gate(hw); 78 clk_gate_endisable(hw, 0);
79
80 if (gate->flags & CLK_GATE_SET_TO_DISABLE)
81 clk_gate_set_bit(gate);
82 else
83 clk_gate_clear_bit(gate);
84} 79}
85EXPORT_SYMBOL_GPL(clk_gate_disable);
86 80
87static int clk_gate_is_enabled(struct clk_hw *hw) 81static int clk_gate_is_enabled(struct clk_hw *hw)
88{ 82{
@@ -99,15 +93,25 @@ static int clk_gate_is_enabled(struct clk_hw *hw)
99 93
100 return reg ? 1 : 0; 94 return reg ? 1 : 0;
101} 95}
102EXPORT_SYMBOL_GPL(clk_gate_is_enabled);
103 96
104struct clk_ops clk_gate_ops = { 97const struct clk_ops clk_gate_ops = {
105 .enable = clk_gate_enable, 98 .enable = clk_gate_enable,
106 .disable = clk_gate_disable, 99 .disable = clk_gate_disable,
107 .is_enabled = clk_gate_is_enabled, 100 .is_enabled = clk_gate_is_enabled,
108}; 101};
109EXPORT_SYMBOL_GPL(clk_gate_ops); 102EXPORT_SYMBOL_GPL(clk_gate_ops);
110 103
104/**
105 * clk_register_gate - register a gate clock with the clock framework
106 * @dev: device that is registering this clock
107 * @name: name of this clock
108 * @parent_name: name of this clock's parent
109 * @flags: framework-specific flags for this clock
110 * @reg: register address to control gating of this clock
111 * @bit_idx: which bit in the register controls gating of this clock
112 * @clk_gate_flags: gate-specific flags for this clock
113 * @lock: shared register lock for this clock
114 */
111struct clk *clk_register_gate(struct device *dev, const char *name, 115struct clk *clk_register_gate(struct device *dev, const char *name,
112 const char *parent_name, unsigned long flags, 116 const char *parent_name, unsigned long flags,
113 void __iomem *reg, u8 bit_idx, 117 void __iomem *reg, u8 bit_idx,
@@ -115,36 +119,32 @@ struct clk *clk_register_gate(struct device *dev, const char *name,
115{ 119{
116 struct clk_gate *gate; 120 struct clk_gate *gate;
117 struct clk *clk; 121 struct clk *clk;
122 struct clk_init_data init;
118 123
124 /* allocate the gate */
119 gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL); 125 gate = kzalloc(sizeof(struct clk_gate), GFP_KERNEL);
120
121 if (!gate) { 126 if (!gate) {
122 pr_err("%s: could not allocate gated clk\n", __func__); 127 pr_err("%s: could not allocate gated clk\n", __func__);
123 return NULL; 128 return ERR_PTR(-ENOMEM);
124 } 129 }
125 130
131 init.name = name;
132 init.ops = &clk_gate_ops;
133 init.flags = flags;
134 init.parent_names = (parent_name ? &parent_name: NULL);
135 init.num_parents = (parent_name ? 1 : 0);
136
126 /* struct clk_gate assignments */ 137 /* struct clk_gate assignments */
127 gate->reg = reg; 138 gate->reg = reg;
128 gate->bit_idx = bit_idx; 139 gate->bit_idx = bit_idx;
129 gate->flags = clk_gate_flags; 140 gate->flags = clk_gate_flags;
130 gate->lock = lock; 141 gate->lock = lock;
142 gate->hw.init = &init;
131 143
132 if (parent_name) { 144 clk = clk_register(dev, &gate->hw);
133 gate->parent[0] = kstrdup(parent_name, GFP_KERNEL); 145
134 if (!gate->parent[0]) 146 if (IS_ERR(clk))
135 goto out; 147 kfree(gate);
136 }
137 148
138 clk = clk_register(dev, name, 149 return clk;
139 &clk_gate_ops, &gate->hw,
140 gate->parent,
141 (parent_name ? 1 : 0),
142 flags);
143 if (clk)
144 return clk;
145out:
146 kfree(gate->parent[0]);
147 kfree(gate);
148
149 return NULL;
150} 150}
diff --git a/drivers/clk/clk-mux.c b/drivers/clk/clk-mux.c
index c71ad1f41a97..fd36a8ea73d9 100644
--- a/drivers/clk/clk-mux.c
+++ b/drivers/clk/clk-mux.c
@@ -55,7 +55,6 @@ static u8 clk_mux_get_parent(struct clk_hw *hw)
55 55
56 return val; 56 return val;
57} 57}
58EXPORT_SYMBOL_GPL(clk_mux_get_parent);
59 58
60static int clk_mux_set_parent(struct clk_hw *hw, u8 index) 59static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
61{ 60{
@@ -82,35 +81,47 @@ static int clk_mux_set_parent(struct clk_hw *hw, u8 index)
82 81
83 return 0; 82 return 0;
84} 83}
85EXPORT_SYMBOL_GPL(clk_mux_set_parent);
86 84
87struct clk_ops clk_mux_ops = { 85const struct clk_ops clk_mux_ops = {
88 .get_parent = clk_mux_get_parent, 86 .get_parent = clk_mux_get_parent,
89 .set_parent = clk_mux_set_parent, 87 .set_parent = clk_mux_set_parent,
90}; 88};
91EXPORT_SYMBOL_GPL(clk_mux_ops); 89EXPORT_SYMBOL_GPL(clk_mux_ops);
92 90
93struct clk *clk_register_mux(struct device *dev, const char *name, 91struct clk *clk_register_mux(struct device *dev, const char *name,
94 char **parent_names, u8 num_parents, unsigned long flags, 92 const char **parent_names, u8 num_parents, unsigned long flags,
95 void __iomem *reg, u8 shift, u8 width, 93 void __iomem *reg, u8 shift, u8 width,
96 u8 clk_mux_flags, spinlock_t *lock) 94 u8 clk_mux_flags, spinlock_t *lock)
97{ 95{
98 struct clk_mux *mux; 96 struct clk_mux *mux;
97 struct clk *clk;
98 struct clk_init_data init;
99 99
100 mux = kmalloc(sizeof(struct clk_mux), GFP_KERNEL); 100 /* allocate the mux */
101 101 mux = kzalloc(sizeof(struct clk_mux), GFP_KERNEL);
102 if (!mux) { 102 if (!mux) {
103 pr_err("%s: could not allocate mux clk\n", __func__); 103 pr_err("%s: could not allocate mux clk\n", __func__);
104 return ERR_PTR(-ENOMEM); 104 return ERR_PTR(-ENOMEM);
105 } 105 }
106 106
107 init.name = name;
108 init.ops = &clk_mux_ops;
109 init.flags = flags;
110 init.parent_names = parent_names;
111 init.num_parents = num_parents;
112
107 /* struct clk_mux assignments */ 113 /* struct clk_mux assignments */
108 mux->reg = reg; 114 mux->reg = reg;
109 mux->shift = shift; 115 mux->shift = shift;
110 mux->width = width; 116 mux->width = width;
111 mux->flags = clk_mux_flags; 117 mux->flags = clk_mux_flags;
112 mux->lock = lock; 118 mux->lock = lock;
119 mux->hw.init = &init;
120
121 clk = clk_register(dev, &mux->hw);
122
123 if (IS_ERR(clk))
124 kfree(mux);
113 125
114 return clk_register(dev, name, &clk_mux_ops, &mux->hw, 126 return clk;
115 parent_names, num_parents, flags);
116} 127}
diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index 9cf6f59e3e19..687b00d67c8a 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -194,9 +194,8 @@ static int __init clk_debug_init(void)
194late_initcall(clk_debug_init); 194late_initcall(clk_debug_init);
195#else 195#else
196static inline int clk_debug_register(struct clk *clk) { return 0; } 196static inline int clk_debug_register(struct clk *clk) { return 0; }
197#endif /* CONFIG_COMMON_CLK_DEBUG */ 197#endif
198 198
199#ifdef CONFIG_COMMON_CLK_DISABLE_UNUSED
200/* caller must hold prepare_lock */ 199/* caller must hold prepare_lock */
201static void clk_disable_unused_subtree(struct clk *clk) 200static void clk_disable_unused_subtree(struct clk *clk)
202{ 201{
@@ -246,9 +245,6 @@ static int clk_disable_unused(void)
246 return 0; 245 return 0;
247} 246}
248late_initcall(clk_disable_unused); 247late_initcall(clk_disable_unused);
249#else
250static inline int clk_disable_unused(struct clk *clk) { return 0; }
251#endif /* CONFIG_COMMON_CLK_DISABLE_UNUSED */
252 248
253/*** helper functions ***/ 249/*** helper functions ***/
254 250
@@ -287,7 +283,7 @@ unsigned long __clk_get_rate(struct clk *clk)
287 unsigned long ret; 283 unsigned long ret;
288 284
289 if (!clk) { 285 if (!clk) {
290 ret = -EINVAL; 286 ret = 0;
291 goto out; 287 goto out;
292 } 288 }
293 289
@@ -297,7 +293,7 @@ unsigned long __clk_get_rate(struct clk *clk)
297 goto out; 293 goto out;
298 294
299 if (!clk->parent) 295 if (!clk->parent)
300 ret = -ENODEV; 296 ret = 0;
301 297
302out: 298out:
303 return ret; 299 return ret;
@@ -562,7 +558,7 @@ EXPORT_SYMBOL_GPL(clk_enable);
562 * @clk: the clk whose rate is being returned 558 * @clk: the clk whose rate is being returned
563 * 559 *
564 * Simply returns the cached rate of the clk. Does not query the hardware. If 560 * Simply returns the cached rate of the clk. Does not query the hardware. If
565 * clk is NULL then returns -EINVAL. 561 * clk is NULL then returns 0.
566 */ 562 */
567unsigned long clk_get_rate(struct clk *clk) 563unsigned long clk_get_rate(struct clk *clk)
568{ 564{
@@ -584,18 +580,22 @@ EXPORT_SYMBOL_GPL(clk_get_rate);
584 */ 580 */
585unsigned long __clk_round_rate(struct clk *clk, unsigned long rate) 581unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
586{ 582{
587 unsigned long unused; 583 unsigned long parent_rate = 0;
588 584
589 if (!clk) 585 if (!clk)
590 return -EINVAL; 586 return -EINVAL;
591 587
592 if (!clk->ops->round_rate) 588 if (!clk->ops->round_rate) {
593 return clk->rate; 589 if (clk->flags & CLK_SET_RATE_PARENT)
590 return __clk_round_rate(clk->parent, rate);
591 else
592 return clk->rate;
593 }
594 594
595 if (clk->flags & CLK_SET_RATE_PARENT) 595 if (clk->parent)
596 return clk->ops->round_rate(clk->hw, rate, &unused); 596 parent_rate = clk->parent->rate;
597 else 597
598 return clk->ops->round_rate(clk->hw, rate, NULL); 598 return clk->ops->round_rate(clk->hw, rate, &parent_rate);
599} 599}
600 600
601/** 601/**
@@ -765,25 +765,41 @@ static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
765static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate) 765static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
766{ 766{
767 struct clk *top = clk; 767 struct clk *top = clk;
768 unsigned long best_parent_rate = clk->parent->rate; 768 unsigned long best_parent_rate = 0;
769 unsigned long new_rate; 769 unsigned long new_rate;
770 770
771 if (!clk->ops->round_rate && !(clk->flags & CLK_SET_RATE_PARENT)) { 771 /* sanity */
772 clk->new_rate = clk->rate; 772 if (IS_ERR_OR_NULL(clk))
773 return NULL;
774
775 /* save parent rate, if it exists */
776 if (clk->parent)
777 best_parent_rate = clk->parent->rate;
778
779 /* never propagate up to the parent */
780 if (!(clk->flags & CLK_SET_RATE_PARENT)) {
781 if (!clk->ops->round_rate) {
782 clk->new_rate = clk->rate;
783 return NULL;
784 }
785 new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
786 goto out;
787 }
788
789 /* need clk->parent from here on out */
790 if (!clk->parent) {
791 pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
773 return NULL; 792 return NULL;
774 } 793 }
775 794
776 if (!clk->ops->round_rate && (clk->flags & CLK_SET_RATE_PARENT)) { 795 if (!clk->ops->round_rate) {
777 top = clk_calc_new_rates(clk->parent, rate); 796 top = clk_calc_new_rates(clk->parent, rate);
778 new_rate = clk->new_rate = clk->parent->new_rate; 797 new_rate = clk->parent->new_rate;
779 798
780 goto out; 799 goto out;
781 } 800 }
782 801
783 if (clk->flags & CLK_SET_RATE_PARENT) 802 new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
784 new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
785 else
786 new_rate = clk->ops->round_rate(clk->hw, rate, NULL);
787 803
788 if (best_parent_rate != clk->parent->rate) { 804 if (best_parent_rate != clk->parent->rate) {
789 top = clk_calc_new_rates(clk->parent, best_parent_rate); 805 top = clk_calc_new_rates(clk->parent, best_parent_rate);
@@ -839,7 +855,7 @@ static void clk_change_rate(struct clk *clk)
839 old_rate = clk->rate; 855 old_rate = clk->rate;
840 856
841 if (clk->ops->set_rate) 857 if (clk->ops->set_rate)
842 clk->ops->set_rate(clk->hw, clk->new_rate); 858 clk->ops->set_rate(clk->hw, clk->new_rate, clk->parent->rate);
843 859
844 if (clk->ops->recalc_rate) 860 if (clk->ops->recalc_rate)
845 clk->rate = clk->ops->recalc_rate(clk->hw, 861 clk->rate = clk->ops->recalc_rate(clk->hw,
@@ -859,38 +875,19 @@ static void clk_change_rate(struct clk *clk)
859 * @clk: the clk whose rate is being changed 875 * @clk: the clk whose rate is being changed
860 * @rate: the new rate for clk 876 * @rate: the new rate for clk
861 * 877 *
862 * In the simplest case clk_set_rate will only change the rate of clk. 878 * In the simplest case clk_set_rate will only adjust the rate of clk.
863 *
864 * If clk has the CLK_SET_RATE_GATE flag set and it is enabled this call
865 * will fail; only when the clk is disabled will it be able to change
866 * its rate.
867 * 879 *
868 * Setting the CLK_SET_RATE_PARENT flag allows clk_set_rate to 880 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
869 * recursively propagate up to clk's parent; whether or not this happens 881 * propagate up to clk's parent; whether or not this happens depends on the
870 * depends on the outcome of clk's .round_rate implementation. If 882 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
871 * *parent_rate is 0 after calling .round_rate then upstream parent 883 * after calling .round_rate then upstream parent propagation is ignored. If
872 * propagation is ignored. If *parent_rate comes back with a new rate 884 * *parent_rate comes back with a new rate for clk's parent then we propagate
873 * for clk's parent then we propagate up to clk's parent and set it's 885 * up to clk's parent and set it's rate. Upward propagation will continue
874 * rate. Upward propagation will continue until either a clk does not 886 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
875 * support the CLK_SET_RATE_PARENT flag or .round_rate stops requesting 887 * .round_rate stops requesting changes to clk's parent_rate.
876 * changes to clk's parent_rate. If there is a failure during upstream
877 * propagation then clk_set_rate will unwind and restore each clk's rate
878 * that had been successfully changed. Afterwards a rate change abort
879 * notification will be propagated downstream, starting from the clk
880 * that failed.
881 * 888 *
882 * At the end of all of the rate setting, clk_set_rate internally calls 889 * Rate changes are accomplished via tree traversal that also recalculates the
883 * __clk_recalc_rates and propagates the rate changes downstream, 890 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
884 * starting from the highest clk whose rate was changed. This has the
885 * added benefit of propagating post-rate change notifiers.
886 *
887 * Note that while post-rate change and rate change abort notifications
888 * are guaranteed to be sent to a clk only once per call to
889 * clk_set_rate, pre-change notifications will be sent for every clk
890 * whose rate is changed. Stacking pre-change notifications is noisy
891 * for the drivers subscribed to them, but this allows drivers to react
892 * to intermediate clk rate changes up until the point where the final
893 * rate is achieved at the end of upstream propagation.
894 * 891 *
895 * Returns 0 on success, -EERROR otherwise. 892 * Returns 0 on success, -EERROR otherwise.
896 */ 893 */
@@ -906,6 +903,11 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
906 if (rate == clk->rate) 903 if (rate == clk->rate)
907 goto out; 904 goto out;
908 905
906 if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
907 ret = -EBUSY;
908 goto out;
909 }
910
909 /* calculate new rates and get the topmost changed clock */ 911 /* calculate new rates and get the topmost changed clock */
910 top = clk_calc_new_rates(clk, rate); 912 top = clk_calc_new_rates(clk, rate);
911 if (!top) { 913 if (!top) {
@@ -1175,40 +1177,41 @@ EXPORT_SYMBOL_GPL(clk_set_parent);
1175 * 1177 *
1176 * Initializes the lists in struct clk, queries the hardware for the 1178 * Initializes the lists in struct clk, queries the hardware for the
1177 * parent and rate and sets them both. 1179 * parent and rate and sets them both.
1178 *
1179 * Any struct clk passed into __clk_init must have the following members
1180 * populated:
1181 * .name
1182 * .ops
1183 * .hw
1184 * .parent_names
1185 * .num_parents
1186 * .flags
1187 *
1188 * Essentially, everything that would normally be passed into clk_register is
1189 * assumed to be initialized already in __clk_init. The other members may be
1190 * populated, but are optional.
1191 *
1192 * __clk_init is only exposed via clk-private.h and is intended for use with
1193 * very large numbers of clocks that need to be statically initialized. It is
1194 * a layering violation to include clk-private.h from any code which implements
1195 * a clock's .ops; as such any statically initialized clock data MUST be in a
1196 * separate C file from the logic that implements it's operations.
1197 */ 1180 */
1198void __clk_init(struct device *dev, struct clk *clk) 1181int __clk_init(struct device *dev, struct clk *clk)
1199{ 1182{
1200 int i; 1183 int i, ret = 0;
1201 struct clk *orphan; 1184 struct clk *orphan;
1202 struct hlist_node *tmp, *tmp2; 1185 struct hlist_node *tmp, *tmp2;
1203 1186
1204 if (!clk) 1187 if (!clk)
1205 return; 1188 return -EINVAL;
1206 1189
1207 mutex_lock(&prepare_lock); 1190 mutex_lock(&prepare_lock);
1208 1191
1209 /* check to see if a clock with this name is already registered */ 1192 /* check to see if a clock with this name is already registered */
1210 if (__clk_lookup(clk->name)) 1193 if (__clk_lookup(clk->name)) {
1194 pr_debug("%s: clk %s already initialized\n",
1195 __func__, clk->name);
1196 ret = -EEXIST;
1197 goto out;
1198 }
1199
1200 /* check that clk_ops are sane. See Documentation/clk.txt */
1201 if (clk->ops->set_rate &&
1202 !(clk->ops->round_rate && clk->ops->recalc_rate)) {
1203 pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
1204 __func__, clk->name);
1205 ret = -EINVAL;
1206 goto out;
1207 }
1208
1209 if (clk->ops->set_parent && !clk->ops->get_parent) {
1210 pr_warning("%s: %s must implement .get_parent & .set_parent\n",
1211 __func__, clk->name);
1212 ret = -EINVAL;
1211 goto out; 1213 goto out;
1214 }
1212 1215
1213 /* throw a WARN if any entries in parent_names are NULL */ 1216 /* throw a WARN if any entries in parent_names are NULL */
1214 for (i = 0; i < clk->num_parents; i++) 1217 for (i = 0; i < clk->num_parents; i++)
@@ -1302,48 +1305,130 @@ void __clk_init(struct device *dev, struct clk *clk)
1302out: 1305out:
1303 mutex_unlock(&prepare_lock); 1306 mutex_unlock(&prepare_lock);
1304 1307
1305 return; 1308 return ret;
1306} 1309}
1307 1310
1308/** 1311/**
1312 * __clk_register - register a clock and return a cookie.
1313 *
1314 * Same as clk_register, except that the .clk field inside hw shall point to a
1315 * preallocated (generally statically allocated) struct clk. None of the fields
1316 * of the struct clk need to be initialized.
1317 *
1318 * The data pointed to by .init and .clk field shall NOT be marked as init
1319 * data.
1320 *
1321 * __clk_register is only exposed via clk-private.h and is intended for use with
1322 * very large numbers of clocks that need to be statically initialized. It is
1323 * a layering violation to include clk-private.h from any code which implements
1324 * a clock's .ops; as such any statically initialized clock data MUST be in a
1325 * separate C file from the logic that implements it's operations. Returns 0
1326 * on success, otherwise an error code.
1327 */
1328struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
1329{
1330 int ret;
1331 struct clk *clk;
1332
1333 clk = hw->clk;
1334 clk->name = hw->init->name;
1335 clk->ops = hw->init->ops;
1336 clk->hw = hw;
1337 clk->flags = hw->init->flags;
1338 clk->parent_names = hw->init->parent_names;
1339 clk->num_parents = hw->init->num_parents;
1340
1341 ret = __clk_init(dev, clk);
1342 if (ret)
1343 return ERR_PTR(ret);
1344
1345 return clk;
1346}
1347EXPORT_SYMBOL_GPL(__clk_register);
1348
1349/**
1309 * clk_register - allocate a new clock, register it and return an opaque cookie 1350 * clk_register - allocate a new clock, register it and return an opaque cookie
1310 * @dev: device that is registering this clock 1351 * @dev: device that is registering this clock
1311 * @name: clock name
1312 * @ops: operations this clock supports
1313 * @hw: link to hardware-specific clock data 1352 * @hw: link to hardware-specific clock data
1314 * @parent_names: array of string names for all possible parents
1315 * @num_parents: number of possible parents
1316 * @flags: framework-level hints and quirks
1317 * 1353 *
1318 * clk_register is the primary interface for populating the clock tree with new 1354 * clk_register is the primary interface for populating the clock tree with new
1319 * clock nodes. It returns a pointer to the newly allocated struct clk which 1355 * clock nodes. It returns a pointer to the newly allocated struct clk which
1320 * cannot be dereferenced by driver code but may be used in conjuction with the 1356 * cannot be dereferenced by driver code but may be used in conjuction with the
1321 * rest of the clock API. 1357 * rest of the clock API. In the event of an error clk_register will return an
1358 * error code; drivers must test for an error code after calling clk_register.
1322 */ 1359 */
1323struct clk *clk_register(struct device *dev, const char *name, 1360struct clk *clk_register(struct device *dev, struct clk_hw *hw)
1324 const struct clk_ops *ops, struct clk_hw *hw,
1325 char **parent_names, u8 num_parents, unsigned long flags)
1326{ 1361{
1362 int i, ret;
1327 struct clk *clk; 1363 struct clk *clk;
1328 1364
1329 clk = kzalloc(sizeof(*clk), GFP_KERNEL); 1365 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
1330 if (!clk) 1366 if (!clk) {
1331 return NULL; 1367 pr_err("%s: could not allocate clk\n", __func__);
1368 ret = -ENOMEM;
1369 goto fail_out;
1370 }
1332 1371
1333 clk->name = name; 1372 clk->name = kstrdup(hw->init->name, GFP_KERNEL);
1334 clk->ops = ops; 1373 if (!clk->name) {
1374 pr_err("%s: could not allocate clk->name\n", __func__);
1375 ret = -ENOMEM;
1376 goto fail_name;
1377 }
1378 clk->ops = hw->init->ops;
1335 clk->hw = hw; 1379 clk->hw = hw;
1336 clk->flags = flags; 1380 clk->flags = hw->init->flags;
1337 clk->parent_names = parent_names; 1381 clk->num_parents = hw->init->num_parents;
1338 clk->num_parents = num_parents;
1339 hw->clk = clk; 1382 hw->clk = clk;
1340 1383
1341 __clk_init(dev, clk); 1384 /* allocate local copy in case parent_names is __initdata */
1385 clk->parent_names = kzalloc((sizeof(char*) * clk->num_parents),
1386 GFP_KERNEL);
1342 1387
1343 return clk; 1388 if (!clk->parent_names) {
1389 pr_err("%s: could not allocate clk->parent_names\n", __func__);
1390 ret = -ENOMEM;
1391 goto fail_parent_names;
1392 }
1393
1394
1395 /* copy each string name in case parent_names is __initdata */
1396 for (i = 0; i < clk->num_parents; i++) {
1397 clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
1398 GFP_KERNEL);
1399 if (!clk->parent_names[i]) {
1400 pr_err("%s: could not copy parent_names\n", __func__);
1401 ret = -ENOMEM;
1402 goto fail_parent_names_copy;
1403 }
1404 }
1405
1406 ret = __clk_init(dev, clk);
1407 if (!ret)
1408 return clk;
1409
1410fail_parent_names_copy:
1411 while (--i >= 0)
1412 kfree(clk->parent_names[i]);
1413 kfree(clk->parent_names);
1414fail_parent_names:
1415 kfree(clk->name);
1416fail_name:
1417 kfree(clk);
1418fail_out:
1419 return ERR_PTR(ret);
1344} 1420}
1345EXPORT_SYMBOL_GPL(clk_register); 1421EXPORT_SYMBOL_GPL(clk_register);
1346 1422
1423/**
1424 * clk_unregister - unregister a currently registered clock
1425 * @clk: clock to unregister
1426 *
1427 * Currently unimplemented.
1428 */
1429void clk_unregister(struct clk *clk) {}
1430EXPORT_SYMBOL_GPL(clk_unregister);
1431
1347/*** clk rate change notifiers ***/ 1432/*** clk rate change notifiers ***/
1348 1433
1349/** 1434/**
diff --git a/drivers/clk/mxs/Makefile b/drivers/clk/mxs/Makefile
new file mode 100644
index 000000000000..7bedeec08524
--- /dev/null
+++ b/drivers/clk/mxs/Makefile
@@ -0,0 +1,8 @@
1#
2# Makefile for mxs specific clk
3#
4
5obj-y += clk.o clk-pll.o clk-ref.o clk-div.o clk-frac.o
6
7obj-$(CONFIG_SOC_IMX23) += clk-imx23.o
8obj-$(CONFIG_SOC_IMX28) += clk-imx28.o
diff --git a/drivers/clk/mxs/clk-div.c b/drivers/clk/mxs/clk-div.c
new file mode 100644
index 000000000000..90e1da93877e
--- /dev/null
+++ b/drivers/clk/mxs/clk-div.c
@@ -0,0 +1,110 @@
1/*
2 * Copyright 2012 Freescale Semiconductor, Inc.
3 *
4 * The code contained herein is licensed under the GNU General Public
5 * License. You may obtain a copy of the GNU General Public License
6 * Version 2 or later at the following locations:
7 *
8 * http://www.opensource.org/licenses/gpl-license.html
9 * http://www.gnu.org/copyleft/gpl.html
10 */
11
12#include <linux/clk.h>
13#include <linux/clk-provider.h>
14#include <linux/err.h>
15#include <linux/slab.h>
16#include "clk.h"
17
/**
 * struct clk_div - mxs integer divider clock
 * @divider: the parent class
 * @ops: pointer to clk_ops of parent class
 * @reg: register address
 * @busy: busy bit shift
 *
 * The mxs divider clock is a subclass of basic clk_divider with an
 * additional busy bit: after the divider is reprogrammed, hardware keeps
 * the busy bit set until the new divider has taken effect.
 */
struct clk_div {
        struct clk_divider divider;
        const struct clk_ops *ops;
        void __iomem *reg;
        u8 busy;
};

/*
 * The clk core hands us the clk_hw embedded in the inner clk_divider,
 * so recover the clk_divider first and then the enclosing clk_div.
 */
static inline struct clk_div *to_clk_div(struct clk_hw *hw)
{
        struct clk_divider *divider = container_of(hw, struct clk_divider, hw);

        return container_of(divider, struct clk_div, divider);
}
41
42static unsigned long clk_div_recalc_rate(struct clk_hw *hw,
43 unsigned long parent_rate)
44{
45 struct clk_div *div = to_clk_div(hw);
46
47 return div->ops->recalc_rate(&div->divider.hw, parent_rate);
48}
49
50static long clk_div_round_rate(struct clk_hw *hw, unsigned long rate,
51 unsigned long *prate)
52{
53 struct clk_div *div = to_clk_div(hw);
54
55 return div->ops->round_rate(&div->divider.hw, rate, prate);
56}
57
58static int clk_div_set_rate(struct clk_hw *hw, unsigned long rate,
59 unsigned long parent_rate)
60{
61 struct clk_div *div = to_clk_div(hw);
62 int ret;
63
64 ret = div->ops->set_rate(&div->divider.hw, rate, parent_rate);
65 if (!ret)
66 ret = mxs_clk_wait(div->reg, div->busy);
67
68 return ret;
69}
70
71static struct clk_ops clk_div_ops = {
72 .recalc_rate = clk_div_recalc_rate,
73 .round_rate = clk_div_round_rate,
74 .set_rate = clk_div_set_rate,
75};
76
77struct clk *mxs_clk_div(const char *name, const char *parent_name,
78 void __iomem *reg, u8 shift, u8 width, u8 busy)
79{
80 struct clk_div *div;
81 struct clk *clk;
82 struct clk_init_data init;
83
84 div = kzalloc(sizeof(*div), GFP_KERNEL);
85 if (!div)
86 return ERR_PTR(-ENOMEM);
87
88 init.name = name;
89 init.ops = &clk_div_ops;
90 init.flags = CLK_SET_RATE_PARENT;
91 init.parent_names = (parent_name ? &parent_name: NULL);
92 init.num_parents = (parent_name ? 1 : 0);
93
94 div->reg = reg;
95 div->busy = busy;
96
97 div->divider.reg = reg;
98 div->divider.shift = shift;
99 div->divider.width = width;
100 div->divider.flags = CLK_DIVIDER_ONE_BASED;
101 div->divider.lock = &mxs_lock;
102 div->divider.hw.init = &init;
103 div->ops = &clk_divider_ops;
104
105 clk = clk_register(NULL, &div->divider.hw);
106 if (IS_ERR(clk))
107 kfree(div);
108
109 return clk;
110}
diff --git a/drivers/clk/mxs/clk-frac.c b/drivers/clk/mxs/clk-frac.c
new file mode 100644
index 000000000000..e6aa6b567d68
--- /dev/null
+++ b/drivers/clk/mxs/clk-frac.c
@@ -0,0 +1,139 @@
1/*
2 * Copyright 2012 Freescale Semiconductor, Inc.
3 *
4 * The code contained herein is licensed under the GNU General Public
5 * License. You may obtain a copy of the GNU General Public License
6 * Version 2 or later at the following locations:
7 *
8 * http://www.opensource.org/licenses/gpl-license.html
9 * http://www.gnu.org/copyleft/gpl.html
10 */
11
12#include <linux/clk.h>
13#include <linux/clk-provider.h>
14#include <linux/err.h>
15#include <linux/io.h>
16#include <linux/slab.h>
17#include "clk.h"
18
/**
 * struct clk_frac - mxs fractional divider clock
 * @hw: clk_hw for the fractional divider clock
 * @reg: register address
 * @shift: the divider bit shift
 * @width: the divider bit width
 * @busy: busy bit shift
 *
 * The clock is an adjustable fractional divider with a busy bit to wait
 * when the divider is adjusted.  Output rate is
 * parent_rate * div / 2^width, where div is the @width-bit field at
 * @shift in @reg.
 */
struct clk_frac {
        struct clk_hw hw;
        void __iomem *reg;
        u8 shift;
        u8 width;
        u8 busy;
};

/* Recover the enclosing clk_frac from the clk_hw handed in by the core. */
#define to_clk_frac(_hw) container_of(_hw, struct clk_frac, hw)
39
40static unsigned long clk_frac_recalc_rate(struct clk_hw *hw,
41 unsigned long parent_rate)
42{
43 struct clk_frac *frac = to_clk_frac(hw);
44 u32 div;
45
46 div = readl_relaxed(frac->reg) >> frac->shift;
47 div &= (1 << frac->width) - 1;
48
49 return (parent_rate >> frac->width) * div;
50}
51
52static long clk_frac_round_rate(struct clk_hw *hw, unsigned long rate,
53 unsigned long *prate)
54{
55 struct clk_frac *frac = to_clk_frac(hw);
56 unsigned long parent_rate = *prate;
57 u32 div;
58 u64 tmp;
59
60 if (rate > parent_rate)
61 return -EINVAL;
62
63 tmp = rate;
64 tmp <<= frac->width;
65 do_div(tmp, parent_rate);
66 div = tmp;
67
68 if (!div)
69 return -EINVAL;
70
71 return (parent_rate >> frac->width) * div;
72}
73
/*
 * Program the fractional divider for @rate, then wait for the busy bit
 * to clear.  Uses the same div = rate * 2^width / parent computation as
 * clk_frac_round_rate(), so a rate returned by round_rate is accepted
 * here unchanged.
 */
static int clk_frac_set_rate(struct clk_hw *hw, unsigned long rate,
                unsigned long parent_rate)
{
        struct clk_frac *frac = to_clk_frac(hw);
        unsigned long flags;
        u32 div, val;
        u64 tmp;

        /* The divider can only reduce the parent rate. */
        if (rate > parent_rate)
                return -EINVAL;

        tmp = rate;
        tmp <<= frac->width;
        do_div(tmp, parent_rate);
        div = tmp;

        /* Reject a rate so low it rounds to a zero divider field. */
        if (!div)
                return -EINVAL;

        /* Read-modify-write of a shared register: hold the global mxs
         * clk lock so concurrent updates of neighbouring fields in the
         * same register do not race. */
        spin_lock_irqsave(&mxs_lock, flags);

        val = readl_relaxed(frac->reg);
        val &= ~(((1 << frac->width) - 1) << frac->shift);
        val |= div << frac->shift;
        writel_relaxed(val, frac->reg);

        spin_unlock_irqrestore(&mxs_lock, flags);

        /* Wait for hardware to latch the new divider before returning. */
        return mxs_clk_wait(frac->reg, frac->busy);
}
104
105static struct clk_ops clk_frac_ops = {
106 .recalc_rate = clk_frac_recalc_rate,
107 .round_rate = clk_frac_round_rate,
108 .set_rate = clk_frac_set_rate,
109};
110
111struct clk *mxs_clk_frac(const char *name, const char *parent_name,
112 void __iomem *reg, u8 shift, u8 width, u8 busy)
113{
114 struct clk_frac *frac;
115 struct clk *clk;
116 struct clk_init_data init;
117
118 frac = kzalloc(sizeof(*frac), GFP_KERNEL);
119 if (!frac)
120 return ERR_PTR(-ENOMEM);
121
122 init.name = name;
123 init.ops = &clk_frac_ops;
124 init.flags = CLK_SET_RATE_PARENT;
125 init.parent_names = (parent_name ? &parent_name: NULL);
126 init.num_parents = (parent_name ? 1 : 0);
127
128 frac->reg = reg;
129 frac->shift = shift;
130 frac->width = width;
131 frac->busy = busy;
132 frac->hw.init = &init;
133
134 clk = clk_register(NULL, &frac->hw);
135 if (IS_ERR(clk))
136 kfree(frac);
137
138 return clk;
139}
diff --git a/drivers/clk/mxs/clk-imx23.c b/drivers/clk/mxs/clk-imx23.c
new file mode 100644
index 000000000000..f7be225f544c
--- /dev/null
+++ b/drivers/clk/mxs/clk-imx23.c
@@ -0,0 +1,205 @@
1/*
2 * Copyright 2012 Freescale Semiconductor, Inc.
3 *
4 * The code contained herein is licensed under the GNU General Public
5 * License. You may obtain a copy of the GNU General Public License
6 * Version 2 or later at the following locations:
7 *
8 * http://www.opensource.org/licenses/gpl-license.html
9 * http://www.gnu.org/copyleft/gpl.html
10 */
11
12#include <linux/clk.h>
13#include <linux/clkdev.h>
14#include <linux/err.h>
15#include <linux/init.h>
16#include <linux/io.h>
17#include <mach/common.h>
18#include <mach/mx23.h>
19#include "clk.h"
20
21#define DIGCTRL MX23_IO_ADDRESS(MX23_DIGCTL_BASE_ADDR)
22#define CLKCTRL MX23_IO_ADDRESS(MX23_CLKCTRL_BASE_ADDR)
23#define PLLCTRL0 (CLKCTRL + 0x0000)
24#define CPU (CLKCTRL + 0x0020)
25#define HBUS (CLKCTRL + 0x0030)
26#define XBUS (CLKCTRL + 0x0040)
27#define XTAL (CLKCTRL + 0x0050)
28#define PIX (CLKCTRL + 0x0060)
29#define SSP (CLKCTRL + 0x0070)
30#define GPMI (CLKCTRL + 0x0080)
31#define SPDIF (CLKCTRL + 0x0090)
32#define EMI (CLKCTRL + 0x00a0)
33#define SAIF (CLKCTRL + 0x00c0)
34#define TV (CLKCTRL + 0x00d0)
35#define ETM (CLKCTRL + 0x00e0)
36#define FRAC (CLKCTRL + 0x00f0)
37#define CLKSEQ (CLKCTRL + 0x0110)
38
39#define BP_CPU_INTERRUPT_WAIT 12
40#define BP_CLKSEQ_BYPASS_SAIF 0
41#define BP_CLKSEQ_BYPASS_SSP 5
42#define BP_SAIF_DIV_FRAC_EN 16
43#define BP_FRAC_IOFRAC 24
44
/* One-time i.MX23 clock-tree fixups, run before any clock is registered. */
static void __init clk_misc_init(void)
{
        u32 val;

        /* Gate off cpu clock in WFI for power saving */
        __mxs_setl(1 << BP_CPU_INTERRUPT_WAIT, CPU);

        /* Clear BYPASS for SAIF */
        __mxs_clrl(1 << BP_CLKSEQ_BYPASS_SAIF, CLKSEQ);

        /* SAIF has to use frac div for functional operation */
        /* (plain read-modify-write: SAIF is not driven via set/clr here) */
        val = readl_relaxed(SAIF);
        val |= 1 << BP_SAIF_DIV_FRAC_EN;
        writel_relaxed(val, SAIF);

        /*
         * Source ssp clock from ref_io than ref_xtal,
         * as ref_xtal only provides 24 MHz as maximum.
         */
        __mxs_clrl(1 << BP_CLKSEQ_BYPASS_SSP, CLKSEQ);

        /*
         * 480 MHz seems too high to be ssp clock source directly,
         * so set frac to get a 288 MHz ref_io.
         * (480 MHz * 18 / 30 = 288 MHz, per the ref clock formula.)
         */
        __mxs_clrl(0x3f << BP_FRAC_IOFRAC, FRAC);
        __mxs_setl(30 << BP_FRAC_IOFRAC, FRAC);
}
73
/*
 * clkdev lookup tables: each consumer is matched both by its platform
 * device name (e.g. "mxs-auart.0") and by its DT-generated device name
 * (e.g. "8006c000.serial"), so the same clocks serve both boot paths.
 */
static struct clk_lookup uart_lookups[] __initdata = {
        { .dev_id = "duart", },
        { .dev_id = "mxs-auart.0", },
        { .dev_id = "mxs-auart.1", },
        { .dev_id = "8006c000.serial", },
        { .dev_id = "8006e000.serial", },
        { .dev_id = "80070000.serial", },
};

static struct clk_lookup hbus_lookups[] __initdata = {
        { .dev_id = "imx23-dma-apbh", },
        { .dev_id = "80004000.dma-apbh", },
};

static struct clk_lookup xbus_lookups[] __initdata = {
        { .dev_id = "duart", .con_id = "apb_pclk"},
        { .dev_id = "80070000.serial", .con_id = "apb_pclk"},
        { .dev_id = "imx23-dma-apbx", },
        { .dev_id = "80024000.dma-apbx", },
};

static struct clk_lookup ssp_lookups[] __initdata = {
        { .dev_id = "imx23-mmc.0", },
        { .dev_id = "imx23-mmc.1", },
        { .dev_id = "80010000.ssp", },
        { .dev_id = "80034000.ssp", },
};

static struct clk_lookup lcdif_lookups[] __initdata = {
        { .dev_id = "imx23-fb", },
        { .dev_id = "80030000.lcdif", },
};

static struct clk_lookup gpmi_lookups[] __initdata = {
        { .dev_id = "imx23-gpmi-nand", },
        { .dev_id = "8000c000.gpmi", },
};

/* Parent tables for the CLKSEQ muxes; index 1 is the ref_xtal bypass. */
static const char *sel_pll[] __initconst = { "pll", "ref_xtal", };
static const char *sel_cpu[] __initconst = { "ref_cpu", "ref_xtal", };
static const char *sel_pix[] __initconst = { "ref_pix", "ref_xtal", };
static const char *sel_io[] __initconst = { "ref_io", "ref_xtal", };
static const char *cpu_sels[] __initconst = { "cpu_pll", "cpu_xtal", };
static const char *emi_sels[] __initconst = { "emi_pll", "emi_xtal", };

/* Index of every i.MX23 clock; clk_max sizes the clks[] array below. */
enum imx23_clk {
        ref_xtal, pll, ref_cpu, ref_emi, ref_pix, ref_io, saif_sel,
        lcdif_sel, gpmi_sel, ssp_sel, emi_sel, cpu, etm_sel, cpu_pll,
        cpu_xtal, hbus, xbus, lcdif_div, ssp_div, gpmi_div, emi_pll,
        emi_xtal, etm_div, saif_div, clk32k_div, rtc, adc, spdif_div,
        clk32k, dri, pwm, filt, uart, ssp, gpmi, spdif, emi, saif,
        lcdif, etm, usb, usb_pwr,
        clk_max
};

static struct clk *clks[clk_max];

/* Clocks kept permanently enabled: CPU, buses, memory, and console UART. */
static enum imx23_clk clks_init_on[] __initdata = {
        cpu, hbus, xbus, emi, uart,
};
134
/*
 * mx23_clocks_init - register the i.MX23 clock tree and start the timer
 *
 * Registration follows the hardware tree: fixed osc, PLL, ref clocks,
 * muxes, dividers, fixed factors, gates.  Note "usb" is registered
 * before its parent "usb_pwr"; presumably the clk core resolves this
 * once the parent appears — TODO confirm orphan handling is relied on.
 */
int __init mx23_clocks_init(void)
{
        int i;

        clk_misc_init();

        clks[ref_xtal] = mxs_clk_fixed("ref_xtal", 24000000);
        clks[pll] = mxs_clk_pll("pll", "ref_xtal", PLLCTRL0, 16, 480000000);
        clks[ref_cpu] = mxs_clk_ref("ref_cpu", "pll", FRAC, 0);
        clks[ref_emi] = mxs_clk_ref("ref_emi", "pll", FRAC, 1);
        clks[ref_pix] = mxs_clk_ref("ref_pix", "pll", FRAC, 2);
        clks[ref_io] = mxs_clk_ref("ref_io", "pll", FRAC, 3);
        clks[saif_sel] = mxs_clk_mux("saif_sel", CLKSEQ, 0, 1, sel_pll, ARRAY_SIZE(sel_pll));
        clks[lcdif_sel] = mxs_clk_mux("lcdif_sel", CLKSEQ, 1, 1, sel_pix, ARRAY_SIZE(sel_pix));
        clks[gpmi_sel] = mxs_clk_mux("gpmi_sel", CLKSEQ, 4, 1, sel_io, ARRAY_SIZE(sel_io));
        clks[ssp_sel] = mxs_clk_mux("ssp_sel", CLKSEQ, 5, 1, sel_io, ARRAY_SIZE(sel_io));
        clks[emi_sel] = mxs_clk_mux("emi_sel", CLKSEQ, 6, 1, emi_sels, ARRAY_SIZE(emi_sels));
        clks[cpu] = mxs_clk_mux("cpu", CLKSEQ, 7, 1, cpu_sels, ARRAY_SIZE(cpu_sels));
        clks[etm_sel] = mxs_clk_mux("etm_sel", CLKSEQ, 8, 1, sel_cpu, ARRAY_SIZE(sel_cpu));
        clks[cpu_pll] = mxs_clk_div("cpu_pll", "ref_cpu", CPU, 0, 6, 28);
        clks[cpu_xtal] = mxs_clk_div("cpu_xtal", "ref_xtal", CPU, 16, 10, 29);
        clks[hbus] = mxs_clk_div("hbus", "cpu", HBUS, 0, 5, 29);
        clks[xbus] = mxs_clk_div("xbus", "ref_xtal", XBUS, 0, 10, 31);
        clks[lcdif_div] = mxs_clk_div("lcdif_div", "lcdif_sel", PIX, 0, 12, 29);
        clks[ssp_div] = mxs_clk_div("ssp_div", "ssp_sel", SSP, 0, 9, 29);
        clks[gpmi_div] = mxs_clk_div("gpmi_div", "gpmi_sel", GPMI, 0, 10, 29);
        clks[emi_pll] = mxs_clk_div("emi_pll", "ref_emi", EMI, 0, 6, 28);
        clks[emi_xtal] = mxs_clk_div("emi_xtal", "ref_xtal", EMI, 8, 4, 29);
        clks[etm_div] = mxs_clk_div("etm_div", "etm_sel", ETM, 0, 6, 29);
        clks[saif_div] = mxs_clk_frac("saif_div", "saif_sel", SAIF, 0, 16, 29);
        clks[clk32k_div] = mxs_clk_fixed_factor("clk32k_div", "ref_xtal", 1, 750);
        clks[rtc] = mxs_clk_fixed_factor("rtc", "ref_xtal", 1, 768);
        clks[adc] = mxs_clk_fixed_factor("adc", "clk32k", 1, 16);
        clks[spdif_div] = mxs_clk_fixed_factor("spdif_div", "pll", 1, 4);
        clks[clk32k] = mxs_clk_gate("clk32k", "clk32k_div", XTAL, 26);
        clks[dri] = mxs_clk_gate("dri", "ref_xtal", XTAL, 28);
        clks[pwm] = mxs_clk_gate("pwm", "ref_xtal", XTAL, 29);
        clks[filt] = mxs_clk_gate("filt", "ref_xtal", XTAL, 30);
        clks[uart] = mxs_clk_gate("uart", "ref_xtal", XTAL, 31);
        clks[ssp] = mxs_clk_gate("ssp", "ssp_div", SSP, 31);
        clks[gpmi] = mxs_clk_gate("gpmi", "gpmi_div", GPMI, 31);
        clks[spdif] = mxs_clk_gate("spdif", "spdif_div", SPDIF, 31);
        clks[emi] = mxs_clk_gate("emi", "emi_sel", EMI, 31);
        clks[saif] = mxs_clk_gate("saif", "saif_div", SAIF, 31);
        clks[lcdif] = mxs_clk_gate("lcdif", "lcdif_div", PIX, 31);
        clks[etm] = mxs_clk_gate("etm", "etm_div", ETM, 31);
        clks[usb] = mxs_clk_gate("usb", "usb_pwr", DIGCTRL, 2);
        clks[usb_pwr] = clk_register_gate(NULL, "usb_pwr", "pll", 0, PLLCTRL0, 18, 0, &mxs_lock);

        /* Bail out on the first registration failure. */
        for (i = 0; i < ARRAY_SIZE(clks); i++)
                if (IS_ERR(clks[i])) {
                        pr_err("i.MX23 clk %d: register failed with %ld\n",
                                i, PTR_ERR(clks[i]));
                        return PTR_ERR(clks[i]);
                }

        /* Wire the clocks up to their consumers via clkdev. */
        clk_register_clkdev(clks[clk32k], NULL, "timrot");
        clk_register_clkdevs(clks[hbus], hbus_lookups, ARRAY_SIZE(hbus_lookups));
        clk_register_clkdevs(clks[xbus], xbus_lookups, ARRAY_SIZE(xbus_lookups));
        clk_register_clkdevs(clks[uart], uart_lookups, ARRAY_SIZE(uart_lookups));
        clk_register_clkdevs(clks[ssp], ssp_lookups, ARRAY_SIZE(ssp_lookups));
        clk_register_clkdevs(clks[gpmi], gpmi_lookups, ARRAY_SIZE(gpmi_lookups));
        clk_register_clkdevs(clks[lcdif], lcdif_lookups, ARRAY_SIZE(lcdif_lookups));

        /* Keep the always-needed clocks enabled from here on. */
        for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
                clk_prepare_enable(clks[clks_init_on[i]]);

        mxs_timer_init(MX23_INT_TIMER0);

        return 0;
}
diff --git a/drivers/clk/mxs/clk-imx28.c b/drivers/clk/mxs/clk-imx28.c
new file mode 100644
index 000000000000..2826a2606a29
--- /dev/null
+++ b/drivers/clk/mxs/clk-imx28.c
@@ -0,0 +1,338 @@
1/*
2 * Copyright 2012 Freescale Semiconductor, Inc.
3 *
4 * The code contained herein is licensed under the GNU General Public
5 * License. You may obtain a copy of the GNU General Public License
6 * Version 2 or later at the following locations:
7 *
8 * http://www.opensource.org/licenses/gpl-license.html
9 * http://www.gnu.org/copyleft/gpl.html
10 */
11
12#include <linux/clk.h>
13#include <linux/clkdev.h>
14#include <linux/err.h>
15#include <linux/init.h>
16#include <linux/io.h>
17#include <mach/common.h>
18#include <mach/mx28.h>
19#include "clk.h"
20
21#define CLKCTRL MX28_IO_ADDRESS(MX28_CLKCTRL_BASE_ADDR)
22#define PLL0CTRL0 (CLKCTRL + 0x0000)
23#define PLL1CTRL0 (CLKCTRL + 0x0020)
24#define PLL2CTRL0 (CLKCTRL + 0x0040)
25#define CPU (CLKCTRL + 0x0050)
26#define HBUS (CLKCTRL + 0x0060)
27#define XBUS (CLKCTRL + 0x0070)
28#define XTAL (CLKCTRL + 0x0080)
29#define SSP0 (CLKCTRL + 0x0090)
30#define SSP1 (CLKCTRL + 0x00a0)
31#define SSP2 (CLKCTRL + 0x00b0)
32#define SSP3 (CLKCTRL + 0x00c0)
33#define GPMI (CLKCTRL + 0x00d0)
34#define SPDIF (CLKCTRL + 0x00e0)
35#define EMI (CLKCTRL + 0x00f0)
36#define SAIF0 (CLKCTRL + 0x0100)
37#define SAIF1 (CLKCTRL + 0x0110)
38#define LCDIF (CLKCTRL + 0x0120)
39#define ETM (CLKCTRL + 0x0130)
40#define ENET (CLKCTRL + 0x0140)
41#define FLEXCAN (CLKCTRL + 0x0160)
42#define FRAC0 (CLKCTRL + 0x01b0)
43#define FRAC1 (CLKCTRL + 0x01c0)
44#define CLKSEQ (CLKCTRL + 0x01d0)
45
46#define BP_CPU_INTERRUPT_WAIT 12
47#define BP_SAIF_DIV_FRAC_EN 16
48#define BP_ENET_DIV_TIME 21
49#define BP_ENET_SLEEP 31
50#define BP_CLKSEQ_BYPASS_SAIF0 0
51#define BP_CLKSEQ_BYPASS_SSP0 3
52#define BP_FRAC0_IO1FRAC 16
53#define BP_FRAC0_IO0FRAC 24
54
55#define DIGCTRL MX28_IO_ADDRESS(MX28_DIGCTL_BASE_ADDR)
56#define BP_SAIF_CLKMUX 10
57
/*
 * HW_SAIF_CLKMUX_SEL:
 *  DIRECT(0x0): SAIF0 clock pins selected for SAIF0 input clocks, and SAIF1
 *               clock pins selected for SAIF1 input clocks.
 *  CROSSINPUT(0x1): SAIF1 clock inputs selected for SAIF0 input clocks, and
 *                   SAIF0 clock inputs selected for SAIF1 input clocks.
 *  EXTMSTR0(0x2): SAIF0 clock pin selected for both SAIF0 and SAIF1 input
 *                 clocks.
 *  EXTMSTR1(0x3): SAIF1 clock pin selected for both SAIF0 and SAIF1 input
 *                 clocks.
 */
/*
 * mxs_saif_clkmux_select - route the SAIF input clock pins
 * @clkmux: one of the four HW_SAIF_CLKMUX_SEL values above (0..3)
 *
 * Returns 0 on success, -EINVAL for an out-of-range @clkmux.  The field
 * is updated with separate clear and set writes, so concurrent callers
 * would race — presumably callers serialize; TODO confirm.
 */
int mxs_saif_clkmux_select(unsigned int clkmux)
{
        if (clkmux > 0x3)
                return -EINVAL;

        __mxs_clrl(0x3 << BP_SAIF_CLKMUX, DIGCTRL);
        __mxs_setl(clkmux << BP_SAIF_CLKMUX, DIGCTRL);

        return 0;
}
79
/* One-time i.MX28 clock-tree fixups, run before any clock is registered. */
static void __init clk_misc_init(void)
{
        u32 val;

        /* Gate off cpu clock in WFI for power saving */
        __mxs_setl(1 << BP_CPU_INTERRUPT_WAIT, CPU);

        /* 0 is a bad default value for a divider */
        __mxs_setl(1 << BP_ENET_DIV_TIME, ENET);

        /* Clear BYPASS for SAIF */
        __mxs_clrl(0x3 << BP_CLKSEQ_BYPASS_SAIF0, CLKSEQ);

        /* SAIF has to use frac div for functional operation */
        /* (plain read-modify-write: SAIF regs are not driven via set/clr) */
        val = readl_relaxed(SAIF0);
        val |= 1 << BP_SAIF_DIV_FRAC_EN;
        writel_relaxed(val, SAIF0);

        val = readl_relaxed(SAIF1);
        val |= 1 << BP_SAIF_DIV_FRAC_EN;
        writel_relaxed(val, SAIF1);

        /* Extra fec clock setting */
        val = readl_relaxed(ENET);
        val &= ~(1 << BP_ENET_SLEEP);
        writel_relaxed(val, ENET);

        /*
         * Source ssp clock from ref_io than ref_xtal,
         * as ref_xtal only provides 24 MHz as maximum.
         */
        __mxs_clrl(0xf << BP_CLKSEQ_BYPASS_SSP0, CLKSEQ);

        /*
         * 480 MHz seems too high to be ssp clock source directly,
         * so set frac0 to get a 288 MHz ref_io0.
         * (480 MHz * 18 / 30 = 288 MHz, per the ref clock formula.)
         */
        val = readl_relaxed(FRAC0);
        val &= ~(0x3f << BP_FRAC0_IO0FRAC);
        val |= 30 << BP_FRAC0_IO0FRAC;
        writel_relaxed(val, FRAC0);
}
122
/*
 * clkdev lookup tables: each consumer is matched both by its platform
 * device name (e.g. "mxs-auart.0") and by its DT-generated device name
 * (e.g. "8006a000.serial"), so the same clocks serve both boot paths.
 */
static struct clk_lookup uart_lookups[] __initdata = {
        { .dev_id = "duart", },
        { .dev_id = "mxs-auart.0", },
        { .dev_id = "mxs-auart.1", },
        { .dev_id = "mxs-auart.2", },
        { .dev_id = "mxs-auart.3", },
        { .dev_id = "mxs-auart.4", },
        { .dev_id = "8006a000.serial", },
        { .dev_id = "8006c000.serial", },
        { .dev_id = "8006e000.serial", },
        { .dev_id = "80070000.serial", },
        { .dev_id = "80072000.serial", },
        { .dev_id = "80074000.serial", },
};

static struct clk_lookup hbus_lookups[] __initdata = {
        { .dev_id = "imx28-dma-apbh", },
        { .dev_id = "80004000.dma-apbh", },
};

static struct clk_lookup xbus_lookups[] __initdata = {
        { .dev_id = "duart", .con_id = "apb_pclk"},
        { .dev_id = "80074000.serial", .con_id = "apb_pclk"},
        { .dev_id = "imx28-dma-apbx", },
        { .dev_id = "80024000.dma-apbx", },
};

static struct clk_lookup ssp0_lookups[] __initdata = {
        { .dev_id = "imx28-mmc.0", },
        { .dev_id = "80010000.ssp", },
};

static struct clk_lookup ssp1_lookups[] __initdata = {
        { .dev_id = "imx28-mmc.1", },
        { .dev_id = "80012000.ssp", },
};

static struct clk_lookup ssp2_lookups[] __initdata = {
        { .dev_id = "imx28-mmc.2", },
        { .dev_id = "80014000.ssp", },
};

static struct clk_lookup ssp3_lookups[] __initdata = {
        { .dev_id = "imx28-mmc.3", },
        { .dev_id = "80016000.ssp", },
};

static struct clk_lookup lcdif_lookups[] __initdata = {
        { .dev_id = "imx28-fb", },
        { .dev_id = "80030000.lcdif", },
};

static struct clk_lookup gpmi_lookups[] __initdata = {
        { .dev_id = "imx28-gpmi-nand", },
        { .dev_id = "8000c000.gpmi", },
};

static struct clk_lookup fec_lookups[] __initdata = {
        { .dev_id = "imx28-fec.0", },
        { .dev_id = "imx28-fec.1", },
        { .dev_id = "800f0000.ethernet", },
        { .dev_id = "800f4000.ethernet", },
};

static struct clk_lookup can0_lookups[] __initdata = {
        { .dev_id = "flexcan.0", },
        { .dev_id = "80032000.can", },
};

static struct clk_lookup can1_lookups[] __initdata = {
        { .dev_id = "flexcan.1", },
        { .dev_id = "80034000.can", },
};

static struct clk_lookup saif0_lookups[] __initdata = {
        { .dev_id = "mxs-saif.0", },
        { .dev_id = "80042000.saif", },
};

static struct clk_lookup saif1_lookups[] __initdata = {
        { .dev_id = "mxs-saif.1", },
        { .dev_id = "80046000.saif", },
};

/* Parent tables for the muxes; index 1 is the ref_xtal bypass
 * (except ptp_sels, whose second input is pll0). */
static const char *sel_cpu[] __initconst = { "ref_cpu", "ref_xtal", };
static const char *sel_io0[] __initconst = { "ref_io0", "ref_xtal", };
static const char *sel_io1[] __initconst = { "ref_io1", "ref_xtal", };
static const char *sel_pix[] __initconst = { "ref_pix", "ref_xtal", };
static const char *sel_gpmi[] __initconst = { "ref_gpmi", "ref_xtal", };
static const char *sel_pll0[] __initconst = { "pll0", "ref_xtal", };
static const char *cpu_sels[] __initconst = { "cpu_pll", "cpu_xtal", };
static const char *emi_sels[] __initconst = { "emi_pll", "emi_xtal", };
static const char *ptp_sels[] __initconst = { "ref_xtal", "pll0", };

/* Index of every i.MX28 clock; clk_max sizes the clks[] array below. */
enum imx28_clk {
        ref_xtal, pll0, pll1, pll2, ref_cpu, ref_emi, ref_io0, ref_io1,
        ref_pix, ref_hsadc, ref_gpmi, saif0_sel, saif1_sel, gpmi_sel,
        ssp0_sel, ssp1_sel, ssp2_sel, ssp3_sel, emi_sel, etm_sel,
        lcdif_sel, cpu, ptp_sel, cpu_pll, cpu_xtal, hbus, xbus,
        ssp0_div, ssp1_div, ssp2_div, ssp3_div, gpmi_div, emi_pll,
        emi_xtal, lcdif_div, etm_div, ptp, saif0_div, saif1_div,
        clk32k_div, rtc, lradc, spdif_div, clk32k, pwm, uart, ssp0,
        ssp1, ssp2, ssp3, gpmi, spdif, emi, saif0, saif1, lcdif, etm,
        fec, can0, can1, usb0, usb1, usb0_pwr, usb1_pwr, enet_out,
        clk_max
};

static struct clk *clks[clk_max];

/* Clocks kept permanently enabled: CPU, buses, memory, and console UART. */
static enum imx28_clk clks_init_on[] __initdata = {
        cpu, hbus, xbus, emi, uart,
};
235
/*
 * mx28_clocks_init - register the i.MX28 clock tree and start the timer
 *
 * Registration follows the hardware tree: fixed osc, PLLs, ref clocks,
 * muxes, dividers, fixed factors, gates.  Note "usb0"/"usb1" are
 * registered before their "usb*_pwr" parents; presumably the clk core
 * resolves this once the parents appear — TODO confirm orphan handling.
 */
int __init mx28_clocks_init(void)
{
        int i;

        clk_misc_init();

        clks[ref_xtal] = mxs_clk_fixed("ref_xtal", 24000000);
        clks[pll0] = mxs_clk_pll("pll0", "ref_xtal", PLL0CTRL0, 17, 480000000);
        clks[pll1] = mxs_clk_pll("pll1", "ref_xtal", PLL1CTRL0, 17, 480000000);
        clks[pll2] = mxs_clk_pll("pll2", "ref_xtal", PLL2CTRL0, 23, 50000000);
        clks[ref_cpu] = mxs_clk_ref("ref_cpu", "pll0", FRAC0, 0);
        clks[ref_emi] = mxs_clk_ref("ref_emi", "pll0", FRAC0, 1);
        clks[ref_io0] = mxs_clk_ref("ref_io0", "pll0", FRAC0, 2);
        clks[ref_io1] = mxs_clk_ref("ref_io1", "pll0", FRAC0, 3);
        clks[ref_pix] = mxs_clk_ref("ref_pix", "pll0", FRAC1, 0);
        clks[ref_hsadc] = mxs_clk_ref("ref_hsadc", "pll0", FRAC1, 1);
        clks[ref_gpmi] = mxs_clk_ref("ref_gpmi", "pll0", FRAC1, 2);
        clks[saif0_sel] = mxs_clk_mux("saif0_sel", CLKSEQ, 0, 1, sel_pll0, ARRAY_SIZE(sel_pll0));
        clks[saif1_sel] = mxs_clk_mux("saif1_sel", CLKSEQ, 1, 1, sel_pll0, ARRAY_SIZE(sel_pll0));
        clks[gpmi_sel] = mxs_clk_mux("gpmi_sel", CLKSEQ, 2, 1, sel_gpmi, ARRAY_SIZE(sel_gpmi));
        clks[ssp0_sel] = mxs_clk_mux("ssp0_sel", CLKSEQ, 3, 1, sel_io0, ARRAY_SIZE(sel_io0));
        clks[ssp1_sel] = mxs_clk_mux("ssp1_sel", CLKSEQ, 4, 1, sel_io0, ARRAY_SIZE(sel_io0));
        clks[ssp2_sel] = mxs_clk_mux("ssp2_sel", CLKSEQ, 5, 1, sel_io1, ARRAY_SIZE(sel_io1));
        clks[ssp3_sel] = mxs_clk_mux("ssp3_sel", CLKSEQ, 6, 1, sel_io1, ARRAY_SIZE(sel_io1));
        clks[emi_sel] = mxs_clk_mux("emi_sel", CLKSEQ, 7, 1, emi_sels, ARRAY_SIZE(emi_sels));
        clks[etm_sel] = mxs_clk_mux("etm_sel", CLKSEQ, 8, 1, sel_cpu, ARRAY_SIZE(sel_cpu));
        clks[lcdif_sel] = mxs_clk_mux("lcdif_sel", CLKSEQ, 14, 1, sel_pix, ARRAY_SIZE(sel_pix));
        clks[cpu] = mxs_clk_mux("cpu", CLKSEQ, 18, 1, cpu_sels, ARRAY_SIZE(cpu_sels));
        clks[ptp_sel] = mxs_clk_mux("ptp_sel", ENET, 19, 1, ptp_sels, ARRAY_SIZE(ptp_sels));
        clks[cpu_pll] = mxs_clk_div("cpu_pll", "ref_cpu", CPU, 0, 6, 28);
        clks[cpu_xtal] = mxs_clk_div("cpu_xtal", "ref_xtal", CPU, 16, 10, 29);
        clks[hbus] = mxs_clk_div("hbus", "cpu", HBUS, 0, 5, 31);
        clks[xbus] = mxs_clk_div("xbus", "ref_xtal", XBUS, 0, 10, 31);
        clks[ssp0_div] = mxs_clk_div("ssp0_div", "ssp0_sel", SSP0, 0, 9, 29);
        clks[ssp1_div] = mxs_clk_div("ssp1_div", "ssp1_sel", SSP1, 0, 9, 29);
        clks[ssp2_div] = mxs_clk_div("ssp2_div", "ssp2_sel", SSP2, 0, 9, 29);
        clks[ssp3_div] = mxs_clk_div("ssp3_div", "ssp3_sel", SSP3, 0, 9, 29);
        clks[gpmi_div] = mxs_clk_div("gpmi_div", "gpmi_sel", GPMI, 0, 10, 29);
        clks[emi_pll] = mxs_clk_div("emi_pll", "ref_emi", EMI, 0, 6, 28);
        clks[emi_xtal] = mxs_clk_div("emi_xtal", "ref_xtal", EMI, 8, 4, 29);
        clks[lcdif_div] = mxs_clk_div("lcdif_div", "lcdif_sel", LCDIF, 0, 13, 29);
        clks[etm_div] = mxs_clk_div("etm_div", "etm_sel", ETM, 0, 7, 29);
        clks[ptp] = mxs_clk_div("ptp", "ptp_sel", ENET, 21, 6, 27);
        clks[saif0_div] = mxs_clk_frac("saif0_div", "saif0_sel", SAIF0, 0, 16, 29);
        clks[saif1_div] = mxs_clk_frac("saif1_div", "saif1_sel", SAIF1, 0, 16, 29);
        clks[clk32k_div] = mxs_clk_fixed_factor("clk32k_div", "ref_xtal", 1, 750);
        clks[rtc] = mxs_clk_fixed_factor("rtc", "ref_xtal", 1, 768);
        clks[lradc] = mxs_clk_fixed_factor("lradc", "clk32k", 1, 16);
        clks[spdif_div] = mxs_clk_fixed_factor("spdif_div", "pll0", 1, 4);
        clks[clk32k] = mxs_clk_gate("clk32k", "clk32k_div", XTAL, 26);
        clks[pwm] = mxs_clk_gate("pwm", "ref_xtal", XTAL, 29);
        clks[uart] = mxs_clk_gate("uart", "ref_xtal", XTAL, 31);
        clks[ssp0] = mxs_clk_gate("ssp0", "ssp0_div", SSP0, 31);
        clks[ssp1] = mxs_clk_gate("ssp1", "ssp1_div", SSP1, 31);
        clks[ssp2] = mxs_clk_gate("ssp2", "ssp2_div", SSP2, 31);
        clks[ssp3] = mxs_clk_gate("ssp3", "ssp3_div", SSP3, 31);
        clks[gpmi] = mxs_clk_gate("gpmi", "gpmi_div", GPMI, 31);
        clks[spdif] = mxs_clk_gate("spdif", "spdif_div", SPDIF, 31);
        clks[emi] = mxs_clk_gate("emi", "emi_sel", EMI, 31);
        clks[saif0] = mxs_clk_gate("saif0", "saif0_div", SAIF0, 31);
        clks[saif1] = mxs_clk_gate("saif1", "saif1_div", SAIF1, 31);
        clks[lcdif] = mxs_clk_gate("lcdif", "lcdif_div", LCDIF, 31);
        clks[etm] = mxs_clk_gate("etm", "etm_div", ETM, 31);
        clks[fec] = mxs_clk_gate("fec", "hbus", ENET, 30);
        clks[can0] = mxs_clk_gate("can0", "ref_xtal", FLEXCAN, 30);
        clks[can1] = mxs_clk_gate("can1", "ref_xtal", FLEXCAN, 28);
        clks[usb0] = mxs_clk_gate("usb0", "usb0_pwr", DIGCTRL, 2);
        clks[usb1] = mxs_clk_gate("usb1", "usb1_pwr", DIGCTRL, 16);
        clks[usb0_pwr] = clk_register_gate(NULL, "usb0_pwr", "pll0", 0, PLL0CTRL0, 18, 0, &mxs_lock);
        clks[usb1_pwr] = clk_register_gate(NULL, "usb1_pwr", "pll1", 0, PLL1CTRL0, 18, 0, &mxs_lock);
        clks[enet_out] = clk_register_gate(NULL, "enet_out", "pll2", 0, ENET, 18, 0, &mxs_lock);

        /* Bail out on the first registration failure. */
        for (i = 0; i < ARRAY_SIZE(clks); i++)
                if (IS_ERR(clks[i])) {
                        pr_err("i.MX28 clk %d: register failed with %ld\n",
                                i, PTR_ERR(clks[i]));
                        return PTR_ERR(clks[i]);
                }

        /* Wire the clocks up to their consumers via clkdev. */
        clk_register_clkdev(clks[clk32k], NULL, "timrot");
        clk_register_clkdev(clks[enet_out], NULL, "enet_out");
        clk_register_clkdevs(clks[hbus], hbus_lookups, ARRAY_SIZE(hbus_lookups));
        clk_register_clkdevs(clks[xbus], xbus_lookups, ARRAY_SIZE(xbus_lookups));
        clk_register_clkdevs(clks[uart], uart_lookups, ARRAY_SIZE(uart_lookups));
        clk_register_clkdevs(clks[ssp0], ssp0_lookups, ARRAY_SIZE(ssp0_lookups));
        clk_register_clkdevs(clks[ssp1], ssp1_lookups, ARRAY_SIZE(ssp1_lookups));
        clk_register_clkdevs(clks[ssp2], ssp2_lookups, ARRAY_SIZE(ssp2_lookups));
        clk_register_clkdevs(clks[ssp3], ssp3_lookups, ARRAY_SIZE(ssp3_lookups));
        clk_register_clkdevs(clks[gpmi], gpmi_lookups, ARRAY_SIZE(gpmi_lookups));
        clk_register_clkdevs(clks[saif0], saif0_lookups, ARRAY_SIZE(saif0_lookups));
        clk_register_clkdevs(clks[saif1], saif1_lookups, ARRAY_SIZE(saif1_lookups));
        clk_register_clkdevs(clks[lcdif], lcdif_lookups, ARRAY_SIZE(lcdif_lookups));
        clk_register_clkdevs(clks[fec], fec_lookups, ARRAY_SIZE(fec_lookups));
        clk_register_clkdevs(clks[can0], can0_lookups, ARRAY_SIZE(can0_lookups));
        clk_register_clkdevs(clks[can1], can1_lookups, ARRAY_SIZE(can1_lookups));

        /* Keep the always-needed clocks enabled from here on. */
        for (i = 0; i < ARRAY_SIZE(clks_init_on); i++)
                clk_prepare_enable(clks[clks_init_on[i]]);

        mxs_timer_init(MX28_INT_TIMER0);

        return 0;
}
diff --git a/drivers/clk/mxs/clk-pll.c b/drivers/clk/mxs/clk-pll.c
new file mode 100644
index 000000000000..fadae41833ec
--- /dev/null
+++ b/drivers/clk/mxs/clk-pll.c
@@ -0,0 +1,116 @@
1/*
2 * Copyright 2012 Freescale Semiconductor, Inc.
3 *
4 * The code contained herein is licensed under the GNU General Public
5 * License. You may obtain a copy of the GNU General Public License
6 * Version 2 or later at the following locations:
7 *
8 * http://www.opensource.org/licenses/gpl-license.html
9 * http://www.gnu.org/copyleft/gpl.html
10 */
11
12#include <linux/clk.h>
13#include <linux/clk-provider.h>
14#include <linux/delay.h>
15#include <linux/err.h>
16#include <linux/io.h>
17#include <linux/slab.h>
18#include "clk.h"
19
/**
 * struct clk_pll - mxs pll clock
 * @hw: clk_hw for the pll
 * @base: base address of the pll
 * @power: the shift of power bit
 * @rate: the clock rate of the pll
 *
 * The mxs pll is a fixed rate clock with power and gate control,
 * and the shift of gate bit is always 31.  Power is handled in
 * prepare/unprepare (it needs a settling delay), gating in
 * enable/disable.
 */
struct clk_pll {
        struct clk_hw hw;
        void __iomem *base;
        u8 power;
        unsigned long rate;
};

/* Recover the enclosing clk_pll from the clk_hw handed in by the core. */
#define to_clk_pll(_hw) container_of(_hw, struct clk_pll, hw)
38
/*
 * Power the PLL up via the SET shadow register, then delay 10us for it
 * to settle — no lock bit is polled here; presumably 10us covers the
 * lock time (TODO confirm against the reference manual).
 */
static int clk_pll_prepare(struct clk_hw *hw)
{
        struct clk_pll *pll = to_clk_pll(hw);

        writel_relaxed(1 << pll->power, pll->base + SET);

        udelay(10);

        return 0;
}
49
/* Power the PLL down via the CLR shadow register. */
static void clk_pll_unprepare(struct clk_hw *hw)
{
        struct clk_pll *pll = to_clk_pll(hw);

        writel_relaxed(1 << pll->power, pll->base + CLR);
}
56
57static int clk_pll_enable(struct clk_hw *hw)
58{
59 struct clk_pll *pll = to_clk_pll(hw);
60
61 writel_relaxed(1 << 31, pll->base + CLR);
62
63 return 0;
64}
65
66static void clk_pll_disable(struct clk_hw *hw)
67{
68 struct clk_pll *pll = to_clk_pll(hw);
69
70 writel_relaxed(1 << 31, pll->base + SET);
71}
72
73static unsigned long clk_pll_recalc_rate(struct clk_hw *hw,
74 unsigned long parent_rate)
75{
76 struct clk_pll *pll = to_clk_pll(hw);
77
78 return pll->rate;
79}
80
/* Fixed-rate PLL: power in prepare/unprepare, gate in enable/disable. */
static const struct clk_ops clk_pll_ops = {
        .prepare = clk_pll_prepare,
        .unprepare = clk_pll_unprepare,
        .enable = clk_pll_enable,
        .disable = clk_pll_disable,
        .recalc_rate = clk_pll_recalc_rate,
};
88
89struct clk *mxs_clk_pll(const char *name, const char *parent_name,
90 void __iomem *base, u8 power, unsigned long rate)
91{
92 struct clk_pll *pll;
93 struct clk *clk;
94 struct clk_init_data init;
95
96 pll = kzalloc(sizeof(*pll), GFP_KERNEL);
97 if (!pll)
98 return ERR_PTR(-ENOMEM);
99
100 init.name = name;
101 init.ops = &clk_pll_ops;
102 init.flags = 0;
103 init.parent_names = (parent_name ? &parent_name: NULL);
104 init.num_parents = (parent_name ? 1 : 0);
105
106 pll->base = base;
107 pll->rate = rate;
108 pll->power = power;
109 pll->hw.init = &init;
110
111 clk = clk_register(NULL, &pll->hw);
112 if (IS_ERR(clk))
113 kfree(pll);
114
115 return clk;
116}
diff --git a/drivers/clk/mxs/clk-ref.c b/drivers/clk/mxs/clk-ref.c
new file mode 100644
index 000000000000..4adeed6c2f94
--- /dev/null
+++ b/drivers/clk/mxs/clk-ref.c
@@ -0,0 +1,154 @@
1/*
2 * Copyright 2012 Freescale Semiconductor, Inc.
3 *
4 * The code contained herein is licensed under the GNU General Public
5 * License. You may obtain a copy of the GNU General Public License
6 * Version 2 or later at the following locations:
7 *
8 * http://www.opensource.org/licenses/gpl-license.html
9 * http://www.gnu.org/copyleft/gpl.html
10 */
11
12#include <linux/clk.h>
13#include <linux/clk-provider.h>
14#include <linux/err.h>
15#include <linux/io.h>
16#include <linux/slab.h>
17#include "clk.h"
18
/**
 * struct clk_ref - mxs reference clock
 * @hw: clk_hw for the reference clock
 * @reg: register address
 * @idx: the index of the reference clock within the same register
 *
 * The mxs reference clock sources from pll.  Every 4 reference clocks share
 * one register space, and @idx is used to identify them.  Each reference
 * clock has a gate control and a fractional divider.  The rate is calculated
 * as pll rate * (18 / FRAC), where FRAC = 18 ~ 35.
 */
struct clk_ref {
	struct clk_hw hw;
	void __iomem *reg;
	u8 idx;
};
35
36#define to_clk_ref(_hw) container_of(_hw, struct clk_ref, hw)
37
38static int clk_ref_enable(struct clk_hw *hw)
39{
40 struct clk_ref *ref = to_clk_ref(hw);
41
42 writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + CLR);
43
44 return 0;
45}
46
47static void clk_ref_disable(struct clk_hw *hw)
48{
49 struct clk_ref *ref = to_clk_ref(hw);
50
51 writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + SET);
52}
53
/*
 * rate = parent * 18 / FRAC, where FRAC is the 6-bit divider field in
 * this ref clock's byte lane.
 *
 * NOTE(review): if the register ever holds FRAC == 0 this divides by
 * zero; hardware reset values presumably keep FRAC in 18~35 — confirm.
 */
static unsigned long clk_ref_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct clk_ref *ref = to_clk_ref(hw);
	u64 tmp = parent_rate;
	u8 frac = (readl_relaxed(ref->reg) >> (ref->idx * 8)) & 0x3f;

	tmp *= 18;
	do_div(tmp, frac);

	return tmp;
}
66
67static long clk_ref_round_rate(struct clk_hw *hw, unsigned long rate,
68 unsigned long *prate)
69{
70 unsigned long parent_rate = *prate;
71 u64 tmp = parent_rate;
72 u8 frac;
73
74 tmp = tmp * 18 + rate / 2;
75 do_div(tmp, rate);
76 frac = tmp;
77
78 if (frac < 18)
79 frac = 18;
80 else if (frac > 35)
81 frac = 35;
82
83 tmp = parent_rate;
84 tmp *= 18;
85 do_div(tmp, frac);
86
87 return tmp;
88}
89
90static int clk_ref_set_rate(struct clk_hw *hw, unsigned long rate,
91 unsigned long parent_rate)
92{
93 struct clk_ref *ref = to_clk_ref(hw);
94 unsigned long flags;
95 u64 tmp = parent_rate;
96 u32 val;
97 u8 frac, shift = ref->idx * 8;
98
99 tmp = tmp * 18 + rate / 2;
100 do_div(tmp, rate);
101 frac = tmp;
102
103 if (frac < 18)
104 frac = 18;
105 else if (frac > 35)
106 frac = 35;
107
108 spin_lock_irqsave(&mxs_lock, flags);
109
110 val = readl_relaxed(ref->reg);
111 val &= ~(0x3f << shift);
112 val |= frac << shift;
113 writel_relaxed(val, ref->reg);
114
115 spin_unlock_irqrestore(&mxs_lock, flags);
116
117 return 0;
118}
119
/* clk_ops for the mxs reference clock: gated, with a settable 18/FRAC rate. */
static const struct clk_ops clk_ref_ops = {
	.enable = clk_ref_enable,
	.disable = clk_ref_disable,
	.recalc_rate = clk_ref_recalc_rate,
	.round_rate = clk_ref_round_rate,
	.set_rate = clk_ref_set_rate,
};
127
128struct clk *mxs_clk_ref(const char *name, const char *parent_name,
129 void __iomem *reg, u8 idx)
130{
131 struct clk_ref *ref;
132 struct clk *clk;
133 struct clk_init_data init;
134
135 ref = kzalloc(sizeof(*ref), GFP_KERNEL);
136 if (!ref)
137 return ERR_PTR(-ENOMEM);
138
139 init.name = name;
140 init.ops = &clk_ref_ops;
141 init.flags = 0;
142 init.parent_names = (parent_name ? &parent_name: NULL);
143 init.num_parents = (parent_name ? 1 : 0);
144
145 ref->reg = reg;
146 ref->idx = idx;
147 ref->hw.init = &init;
148
149 clk = clk_register(NULL, &ref->hw);
150 if (IS_ERR(clk))
151 kfree(ref);
152
153 return clk;
154}
diff --git a/drivers/clk/mxs/clk.c b/drivers/clk/mxs/clk.c
new file mode 100644
index 000000000000..b24d56067c80
--- /dev/null
+++ b/drivers/clk/mxs/clk.c
@@ -0,0 +1,28 @@
1/*
2 * Copyright 2012 Freescale Semiconductor, Inc.
3 *
4 * The code contained herein is licensed under the GNU General Public
5 * License. You may obtain a copy of the GNU General Public License
6 * Version 2 or later at the following locations:
7 *
8 * http://www.opensource.org/licenses/gpl-license.html
9 * http://www.gnu.org/copyleft/gpl.html
10 */
11
12#include <linux/err.h>
13#include <linux/io.h>
14#include <linux/jiffies.h>
15#include <linux/spinlock.h>
16
17DEFINE_SPINLOCK(mxs_lock);
18
19int mxs_clk_wait(void __iomem *reg, u8 shift)
20{
21 unsigned long timeout = jiffies + msecs_to_jiffies(10);
22
23 while (readl_relaxed(reg) & (1 << shift))
24 if (time_after(jiffies, timeout))
25 return -ETIMEDOUT;
26
27 return 0;
28}
diff --git a/drivers/clk/mxs/clk.h b/drivers/clk/mxs/clk.h
new file mode 100644
index 000000000000..81421e28e69c
--- /dev/null
+++ b/drivers/clk/mxs/clk.h
@@ -0,0 +1,66 @@
1/*
2 * Copyright 2012 Freescale Semiconductor, Inc.
3 *
4 * The code contained herein is licensed under the GNU General Public
5 * License. You may obtain a copy of the GNU General Public License
6 * Version 2 or later at the following locations:
7 *
8 * http://www.opensource.org/licenses/gpl-license.html
9 * http://www.gnu.org/copyleft/gpl.html
10 */
11
12#ifndef __MXS_CLK_H
13#define __MXS_CLK_H
14
15#include <linux/clk.h>
16#include <linux/clk-provider.h>
17#include <linux/spinlock.h>
18
19#define SET 0x4
20#define CLR 0x8
21
22extern spinlock_t mxs_lock;
23
24int mxs_clk_wait(void __iomem *reg, u8 shift);
25
26struct clk *mxs_clk_pll(const char *name, const char *parent_name,
27 void __iomem *base, u8 power, unsigned long rate);
28
29struct clk *mxs_clk_ref(const char *name, const char *parent_name,
30 void __iomem *reg, u8 idx);
31
32struct clk *mxs_clk_div(const char *name, const char *parent_name,
33 void __iomem *reg, u8 shift, u8 width, u8 busy);
34
35struct clk *mxs_clk_frac(const char *name, const char *parent_name,
36 void __iomem *reg, u8 shift, u8 width, u8 busy);
37
/* Register a fixed-rate root clock (no parent) running at @rate Hz. */
static inline struct clk *mxs_clk_fixed(const char *name, int rate)
{
	return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate);
}
42
/*
 * Register a gate clock.  mxs gates are "set to disable": writing 1 at
 * @shift stops the clock, hence CLK_GATE_SET_TO_DISABLE.  Rate requests
 * propagate to the parent (CLK_SET_RATE_PARENT).
 */
static inline struct clk *mxs_clk_gate(const char *name,
		const char *parent_name, void __iomem *reg, u8 shift)
{
	return clk_register_gate(NULL, name, parent_name, CLK_SET_RATE_PARENT,
			reg, shift, CLK_GATE_SET_TO_DISABLE,
			&mxs_lock);
}
50
/*
 * Register a mux selecting among @parent_names via a @width-bit field at
 * @shift in @reg, protected by the shared mxs_lock.
 */
static inline struct clk *mxs_clk_mux(const char *name, void __iomem *reg,
		u8 shift, u8 width, const char **parent_names, int num_parents)
{
	return clk_register_mux(NULL, name, parent_names, num_parents,
			CLK_SET_RATE_PARENT, reg, shift, width,
			0, &mxs_lock);
}
58
/* Register a fixed-factor clock: rate = parent * @mult / @div. */
static inline struct clk *mxs_clk_fixed_factor(const char *name,
		const char *parent_name, unsigned int mult, unsigned int div)
{
	return clk_register_fixed_factor(NULL, name, parent_name,
			CLK_SET_RATE_PARENT, mult, div);
}
65
66#endif /* __MXS_CLK_H */
diff --git a/drivers/clk/spear/Makefile b/drivers/clk/spear/Makefile
new file mode 100644
index 000000000000..cdb425d3b8ee
--- /dev/null
+++ b/drivers/clk/spear/Makefile
@@ -0,0 +1,10 @@
1#
2# SPEAr Clock specific Makefile
3#
4
5obj-y += clk.o clk-aux-synth.o clk-frac-synth.o clk-gpt-synth.o clk-vco-pll.o
6
7obj-$(CONFIG_ARCH_SPEAR3XX) += spear3xx_clock.o
8obj-$(CONFIG_ARCH_SPEAR6XX) += spear6xx_clock.o
9obj-$(CONFIG_MACH_SPEAR1310) += spear1310_clock.o
10obj-$(CONFIG_MACH_SPEAR1340) += spear1340_clock.o
diff --git a/drivers/clk/spear/clk-aux-synth.c b/drivers/clk/spear/clk-aux-synth.c
new file mode 100644
index 000000000000..af34074e702b
--- /dev/null
+++ b/drivers/clk/spear/clk-aux-synth.c
@@ -0,0 +1,198 @@
1/*
2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.kumar@st.com>
4 *
5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any
7 * warranty of any kind, whether express or implied.
8 *
9 * Auxiliary Synthesizer clock implementation
10 */
11
12#define pr_fmt(fmt) "clk-aux-synth: " fmt
13
14#include <linux/clk-provider.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/err.h>
18#include "clk.h"
19
20/*
21 * DOC: Auxiliary Synthesizer clock
22 *
23 * Aux synth gives rate for different values of eq, x and y
24 *
25 * Fout from synthesizer can be given from two equations:
26 * Fout1 = (Fin * X/Y)/2 EQ1
27 * Fout2 = Fin * X/Y EQ2
28 */
29
30#define to_clk_aux(_hw) container_of(_hw, struct clk_aux, hw)
31
/* Register layout used when the caller does not supply its own masks. */
static struct aux_clk_masks default_aux_masks = {
	.eq_sel_mask = AUX_EQ_SEL_MASK,
	.eq_sel_shift = AUX_EQ_SEL_SHIFT,
	.eq1_mask = AUX_EQ1_SEL,
	.eq2_mask = AUX_EQ2_SEL,
	.xscale_sel_mask = AUX_XSCALE_MASK,
	.xscale_sel_shift = AUX_XSCALE_SHIFT,
	.yscale_sel_mask = AUX_YSCALE_MASK,
	.yscale_sel_shift = AUX_YSCALE_SHIFT,
	.enable_bit = AUX_SYNT_ENB,
};
43
44static unsigned long aux_calc_rate(struct clk_hw *hw, unsigned long prate,
45 int index)
46{
47 struct clk_aux *aux = to_clk_aux(hw);
48 struct aux_rate_tbl *rtbl = aux->rtbl;
49 u8 eq = rtbl[index].eq ? 1 : 2;
50
51 return (((prate / 10000) * rtbl[index].xscale) /
52 (rtbl[index].yscale * eq)) * 10000;
53}
54
55static long clk_aux_round_rate(struct clk_hw *hw, unsigned long drate,
56 unsigned long *prate)
57{
58 struct clk_aux *aux = to_clk_aux(hw);
59 int unused;
60
61 return clk_round_rate_index(hw, drate, *prate, aux_calc_rate,
62 aux->rtbl_cnt, &unused);
63}
64
/*
 * Read back the programmed eq/X/Y fields and compute the current output:
 * Fout = Fin * X / (Y * eq), eq being 2 for EQ1 and 1 for EQ2.
 */
static unsigned long clk_aux_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct clk_aux *aux = to_clk_aux(hw);
	unsigned int num = 1, den = 1, val, eqn;
	unsigned long flags = 0;

	/* The lock is optional; only the register read is protected. */
	if (aux->lock)
		spin_lock_irqsave(aux->lock, flags);

	val = readl_relaxed(aux->reg);

	if (aux->lock)
		spin_unlock_irqrestore(aux->lock, flags);

	/* EQ1 halves the output (extra factor of 2 in the denominator). */
	eqn = (val >> aux->masks->eq_sel_shift) & aux->masks->eq_sel_mask;
	if (eqn == aux->masks->eq1_mask)
		den = 2;

	/* calculate numerator */
	num = (val >> aux->masks->xscale_sel_shift) &
		aux->masks->xscale_sel_mask;

	/* calculate denominator */
	den *= (val >> aux->masks->yscale_sel_shift) &
		aux->masks->yscale_sel_mask;

	/* Guard against an unprogrammed (Y == 0) synthesizer. */
	if (!den)
		return 0;

	/* Scale by 10000 to avoid overflow in the multiply. */
	return (((parent_rate / 10000) * num) / den) * 10000;
}
97
/* Configures new clock rate of aux */
static int clk_aux_set_rate(struct clk_hw *hw, unsigned long drate,
		unsigned long prate)
{
	struct clk_aux *aux = to_clk_aux(hw);
	struct aux_rate_tbl *rtbl = aux->rtbl;
	unsigned long val, flags = 0;
	int i;

	/*
	 * Pick the best table entry for @drate; i receives its index.
	 * NOTE(review): if @drate exceeds every table rate, the index
	 * returned by clk_round_rate_index() can equal rtbl_cnt, making
	 * rtbl[i] below an out-of-bounds read — verify.
	 */
	clk_round_rate_index(hw, drate, prate, aux_calc_rate, aux->rtbl_cnt,
			&i);

	if (aux->lock)
		spin_lock_irqsave(aux->lock, flags);

	/* Read-modify-write the eq, xscale and yscale fields. */
	val = readl_relaxed(aux->reg) &
		~(aux->masks->eq_sel_mask << aux->masks->eq_sel_shift);
	val |= (rtbl[i].eq & aux->masks->eq_sel_mask) <<
		aux->masks->eq_sel_shift;
	val &= ~(aux->masks->xscale_sel_mask << aux->masks->xscale_sel_shift);
	val |= (rtbl[i].xscale & aux->masks->xscale_sel_mask) <<
		aux->masks->xscale_sel_shift;
	val &= ~(aux->masks->yscale_sel_mask << aux->masks->yscale_sel_shift);
	val |= (rtbl[i].yscale & aux->masks->yscale_sel_mask) <<
		aux->masks->yscale_sel_shift;
	writel_relaxed(val, aux->reg);

	if (aux->lock)
		spin_unlock_irqrestore(aux->lock, flags);

	return 0;
}
130
131static struct clk_ops clk_aux_ops = {
132 .recalc_rate = clk_aux_recalc_rate,
133 .round_rate = clk_aux_round_rate,
134 .set_rate = clk_aux_set_rate,
135};
136
137struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
138 const char *parent_name, unsigned long flags, void __iomem *reg,
139 struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
140 u8 rtbl_cnt, spinlock_t *lock, struct clk **gate_clk)
141{
142 struct clk_aux *aux;
143 struct clk_init_data init;
144 struct clk *clk;
145
146 if (!aux_name || !parent_name || !reg || !rtbl || !rtbl_cnt) {
147 pr_err("Invalid arguments passed");
148 return ERR_PTR(-EINVAL);
149 }
150
151 aux = kzalloc(sizeof(*aux), GFP_KERNEL);
152 if (!aux) {
153 pr_err("could not allocate aux clk\n");
154 return ERR_PTR(-ENOMEM);
155 }
156
157 /* struct clk_aux assignments */
158 if (!masks)
159 aux->masks = &default_aux_masks;
160 else
161 aux->masks = masks;
162
163 aux->reg = reg;
164 aux->rtbl = rtbl;
165 aux->rtbl_cnt = rtbl_cnt;
166 aux->lock = lock;
167 aux->hw.init = &init;
168
169 init.name = aux_name;
170 init.ops = &clk_aux_ops;
171 init.flags = flags;
172 init.parent_names = &parent_name;
173 init.num_parents = 1;
174
175 clk = clk_register(NULL, &aux->hw);
176 if (IS_ERR_OR_NULL(clk))
177 goto free_aux;
178
179 if (gate_name) {
180 struct clk *tgate_clk;
181
182 tgate_clk = clk_register_gate(NULL, gate_name, aux_name, 0, reg,
183 aux->masks->enable_bit, 0, lock);
184 if (IS_ERR_OR_NULL(tgate_clk))
185 goto free_aux;
186
187 if (gate_clk)
188 *gate_clk = tgate_clk;
189 }
190
191 return clk;
192
193free_aux:
194 kfree(aux);
195 pr_err("clk register failed\n");
196
197 return NULL;
198}
diff --git a/drivers/clk/spear/clk-frac-synth.c b/drivers/clk/spear/clk-frac-synth.c
new file mode 100644
index 000000000000..4dbdb3fe18e0
--- /dev/null
+++ b/drivers/clk/spear/clk-frac-synth.c
@@ -0,0 +1,165 @@
1/*
2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.kumar@st.com>
4 *
5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any
7 * warranty of any kind, whether express or implied.
8 *
9 * Fractional Synthesizer clock implementation
10 */
11
12#define pr_fmt(fmt) "clk-frac-synth: " fmt
13
14#include <linux/clk-provider.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/err.h>
18#include "clk.h"
19
20#define DIV_FACTOR_MASK 0x1FFFF
21
22/*
23 * DOC: Fractional Synthesizer clock
24 *
25 * Fout from synthesizer can be given from below equation:
26 *
 * Fout = Fin / (2 * div)	(div is the division factor)
28 * div is 17 bits:-
29 * 0-13 (fractional part)
30 * 14-16 (integer part)
31 * div is (16-14 bits).(13-0 bits) (in binary)
32 *
33 * Fout = Fin/(2 * div)
34 * Fout = ((Fin / 10000)/(2 * div)) * 10000
35 * Fout = (2^14 * (Fin / 10000)/(2^14 * (2 * div))) * 10000
36 * Fout = (((Fin / 10000) << 14)/(2 * (div << 14))) * 10000
37 *
38 * div << 14 simply 17 bit value written at register.
39 * Max error due to scaling down by 10000 is 10 KHz
40 */
41
42#define to_clk_frac(_hw) container_of(_hw, struct clk_frac, hw)
43
44static unsigned long frac_calc_rate(struct clk_hw *hw, unsigned long prate,
45 int index)
46{
47 struct clk_frac *frac = to_clk_frac(hw);
48 struct frac_rate_tbl *rtbl = frac->rtbl;
49
50 prate /= 10000;
51 prate <<= 14;
52 prate /= (2 * rtbl[index].div);
53 prate *= 10000;
54
55 return prate;
56}
57
58static long clk_frac_round_rate(struct clk_hw *hw, unsigned long drate,
59 unsigned long *prate)
60{
61 struct clk_frac *frac = to_clk_frac(hw);
62 int unused;
63
64 return clk_round_rate_index(hw, drate, *prate, frac_calc_rate,
65 frac->rtbl_cnt, &unused);
66}
67
/*
 * Read the 17-bit fixed-point divider back from hardware and compute
 * Fout = Fin / (2 * div) using the same 10000/2^14 scaling as the table.
 */
static unsigned long clk_frac_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct clk_frac *frac = to_clk_frac(hw);
	unsigned long flags = 0;
	unsigned int div = 1, val;

	/* The lock is optional; only the register read is protected. */
	if (frac->lock)
		spin_lock_irqsave(frac->lock, flags);

	val = readl_relaxed(frac->reg);

	if (frac->lock)
		spin_unlock_irqrestore(frac->lock, flags);

	div = val & DIV_FACTOR_MASK;

	/* Guard against an unprogrammed (zero) divider. */
	if (!div)
		return 0;

	parent_rate = parent_rate / 10000;

	parent_rate = (parent_rate << 14) / (2 * div);
	return parent_rate * 10000;
}
93
/* Configures new clock rate of frac */
static int clk_frac_set_rate(struct clk_hw *hw, unsigned long drate,
		unsigned long prate)
{
	struct clk_frac *frac = to_clk_frac(hw);
	struct frac_rate_tbl *rtbl = frac->rtbl;
	unsigned long flags = 0, val;
	int i;

	/*
	 * Pick the best table entry for @drate; i receives its index.
	 * NOTE(review): if @drate exceeds every table rate the returned
	 * index can equal rtbl_cnt — rtbl[i] would then read out of
	 * bounds; verify against clk_round_rate_index().
	 */
	clk_round_rate_index(hw, drate, prate, frac_calc_rate, frac->rtbl_cnt,
			&i);

	if (frac->lock)
		spin_lock_irqsave(frac->lock, flags);

	/* Replace only the 17-bit divider field. */
	val = readl_relaxed(frac->reg) & ~DIV_FACTOR_MASK;
	val |= rtbl[i].div & DIV_FACTOR_MASK;
	writel_relaxed(val, frac->reg);

	if (frac->lock)
		spin_unlock_irqrestore(frac->lock, flags);

	return 0;
}
118
/*
 * clk_ops for the fractional synthesizer.
 * NOTE(review): unlike the other ops tables in this series this one is
 * neither static nor const — confirm no other translation unit takes an
 * extern reference before narrowing the linkage.
 */
struct clk_ops clk_frac_ops = {
	.recalc_rate = clk_frac_recalc_rate,
	.round_rate = clk_frac_round_rate,
	.set_rate = clk_frac_set_rate,
};
124
125struct clk *clk_register_frac(const char *name, const char *parent_name,
126 unsigned long flags, void __iomem *reg,
127 struct frac_rate_tbl *rtbl, u8 rtbl_cnt, spinlock_t *lock)
128{
129 struct clk_init_data init;
130 struct clk_frac *frac;
131 struct clk *clk;
132
133 if (!name || !parent_name || !reg || !rtbl || !rtbl_cnt) {
134 pr_err("Invalid arguments passed");
135 return ERR_PTR(-EINVAL);
136 }
137
138 frac = kzalloc(sizeof(*frac), GFP_KERNEL);
139 if (!frac) {
140 pr_err("could not allocate frac clk\n");
141 return ERR_PTR(-ENOMEM);
142 }
143
144 /* struct clk_frac assignments */
145 frac->reg = reg;
146 frac->rtbl = rtbl;
147 frac->rtbl_cnt = rtbl_cnt;
148 frac->lock = lock;
149 frac->hw.init = &init;
150
151 init.name = name;
152 init.ops = &clk_frac_ops;
153 init.flags = flags;
154 init.parent_names = &parent_name;
155 init.num_parents = 1;
156
157 clk = clk_register(NULL, &frac->hw);
158 if (!IS_ERR_OR_NULL(clk))
159 return clk;
160
161 pr_err("clk register failed\n");
162 kfree(frac);
163
164 return NULL;
165}
diff --git a/drivers/clk/spear/clk-gpt-synth.c b/drivers/clk/spear/clk-gpt-synth.c
new file mode 100644
index 000000000000..b471c9762a97
--- /dev/null
+++ b/drivers/clk/spear/clk-gpt-synth.c
@@ -0,0 +1,154 @@
1/*
2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.kumar@st.com>
4 *
5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any
7 * warranty of any kind, whether express or implied.
8 *
9 * General Purpose Timer Synthesizer clock implementation
10 */
11
12#define pr_fmt(fmt) "clk-gpt-synth: " fmt
13
14#include <linux/clk-provider.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/err.h>
18#include "clk.h"
19
20#define GPT_MSCALE_MASK 0xFFF
21#define GPT_NSCALE_SHIFT 12
22#define GPT_NSCALE_MASK 0xF
23
24/*
25 * DOC: General Purpose Timer Synthesizer clock
26 *
27 * Calculates gpt synth clk rate for different values of mscale and nscale
28 *
29 * Fout= Fin/((2 ^ (N+1)) * (M+1))
30 */
31
32#define to_clk_gpt(_hw) container_of(_hw, struct clk_gpt, hw)
33
34static unsigned long gpt_calc_rate(struct clk_hw *hw, unsigned long prate,
35 int index)
36{
37 struct clk_gpt *gpt = to_clk_gpt(hw);
38 struct gpt_rate_tbl *rtbl = gpt->rtbl;
39
40 prate /= ((1 << (rtbl[index].nscale + 1)) * (rtbl[index].mscale + 1));
41
42 return prate;
43}
44
45static long clk_gpt_round_rate(struct clk_hw *hw, unsigned long drate,
46 unsigned long *prate)
47{
48 struct clk_gpt *gpt = to_clk_gpt(hw);
49 int unused;
50
51 return clk_round_rate_index(hw, drate, *prate, gpt_calc_rate,
52 gpt->rtbl_cnt, &unused);
53}
54
/*
 * Read M and N back from the register and compute
 * Fout = Fin / ((2^(N+1)) * (M+1)).
 */
static unsigned long clk_gpt_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct clk_gpt *gpt = to_clk_gpt(hw);
	unsigned long flags = 0;
	unsigned int div = 1, val;

	/* The lock is optional; only the register read is protected. */
	if (gpt->lock)
		spin_lock_irqsave(gpt->lock, flags);

	val = readl_relaxed(gpt->reg);

	if (gpt->lock)
		spin_unlock_irqrestore(gpt->lock, flags);

	/* div = (M+1) * 2^(N+1); starts at 1 so the M field adds, not sets. */
	div += val & GPT_MSCALE_MASK;
	div *= 1 << (((val >> GPT_NSCALE_SHIFT) & GPT_NSCALE_MASK) + 1);

	/* Defensive: div can only be 0 here on arithmetic overflow. */
	if (!div)
		return 0;

	return parent_rate / div;
}
78
79/* Configures new clock rate of gpt */
80static int clk_gpt_set_rate(struct clk_hw *hw, unsigned long drate,
81 unsigned long prate)
82{
83 struct clk_gpt *gpt = to_clk_gpt(hw);
84 struct gpt_rate_tbl *rtbl = gpt->rtbl;
85 unsigned long flags = 0, val;
86 int i;
87
88 clk_round_rate_index(hw, drate, prate, gpt_calc_rate, gpt->rtbl_cnt,
89 &i);
90
91 if (gpt->lock)
92 spin_lock_irqsave(gpt->lock, flags);
93
94 val = readl(gpt->reg) & ~GPT_MSCALE_MASK;
95 val &= ~(GPT_NSCALE_MASK << GPT_NSCALE_SHIFT);
96
97 val |= rtbl[i].mscale & GPT_MSCALE_MASK;
98 val |= (rtbl[i].nscale & GPT_NSCALE_MASK) << GPT_NSCALE_SHIFT;
99
100 writel_relaxed(val, gpt->reg);
101
102 if (gpt->lock)
103 spin_unlock_irqrestore(gpt->lock, flags);
104
105 return 0;
106}
107
108static struct clk_ops clk_gpt_ops = {
109 .recalc_rate = clk_gpt_recalc_rate,
110 .round_rate = clk_gpt_round_rate,
111 .set_rate = clk_gpt_set_rate,
112};
113
114struct clk *clk_register_gpt(const char *name, const char *parent_name, unsigned
115 long flags, void __iomem *reg, struct gpt_rate_tbl *rtbl, u8
116 rtbl_cnt, spinlock_t *lock)
117{
118 struct clk_init_data init;
119 struct clk_gpt *gpt;
120 struct clk *clk;
121
122 if (!name || !parent_name || !reg || !rtbl || !rtbl_cnt) {
123 pr_err("Invalid arguments passed");
124 return ERR_PTR(-EINVAL);
125 }
126
127 gpt = kzalloc(sizeof(*gpt), GFP_KERNEL);
128 if (!gpt) {
129 pr_err("could not allocate gpt clk\n");
130 return ERR_PTR(-ENOMEM);
131 }
132
133 /* struct clk_gpt assignments */
134 gpt->reg = reg;
135 gpt->rtbl = rtbl;
136 gpt->rtbl_cnt = rtbl_cnt;
137 gpt->lock = lock;
138 gpt->hw.init = &init;
139
140 init.name = name;
141 init.ops = &clk_gpt_ops;
142 init.flags = flags;
143 init.parent_names = &parent_name;
144 init.num_parents = 1;
145
146 clk = clk_register(NULL, &gpt->hw);
147 if (!IS_ERR_OR_NULL(clk))
148 return clk;
149
150 pr_err("clk register failed\n");
151 kfree(gpt);
152
153 return NULL;
154}
diff --git a/drivers/clk/spear/clk-vco-pll.c b/drivers/clk/spear/clk-vco-pll.c
new file mode 100644
index 000000000000..dcd4bdf4b0d9
--- /dev/null
+++ b/drivers/clk/spear/clk-vco-pll.c
@@ -0,0 +1,363 @@
1/*
2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.kumar@st.com>
4 *
5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any
7 * warranty of any kind, whether express or implied.
8 *
9 * VCO-PLL clock implementation
10 */
11
12#define pr_fmt(fmt) "clk-vco-pll: " fmt
13
14#include <linux/clk-provider.h>
15#include <linux/slab.h>
16#include <linux/io.h>
17#include <linux/err.h>
18#include "clk.h"
19
20/*
21 * DOC: VCO-PLL clock
22 *
23 * VCO and PLL rate are derived from following equations:
24 *
25 * In normal mode
26 * vco = (2 * M[15:8] * Fin)/N
27 *
28 * In Dithered mode
29 * vco = (2 * M[15:0] * Fin)/(256 * N)
30 *
31 * pll_rate = pll/2^p
32 *
33 * vco and pll are very closely bound to each other, "vco needs to program:
34 * mode, m & n" and "pll needs to program p", both share common enable/disable
35 * logic.
36 *
37 * clk_register_vco_pll() registers instances of both vco & pll.
38 * CLK_SET_RATE_PARENT flag is forced for pll, as it will always pass its
39 * set_rate to vco. A single rate table exists for both the clocks, which
40 * configures m, n and p.
41 */
42
43/* PLL_CTR register masks */
44#define PLL_MODE_NORMAL 0
45#define PLL_MODE_FRACTION 1
46#define PLL_MODE_DITH_DSM 2
47#define PLL_MODE_DITH_SSM 3
48#define PLL_MODE_MASK 3
49#define PLL_MODE_SHIFT 3
50#define PLL_ENABLE 2
51
52#define PLL_LOCK_SHIFT 0
53#define PLL_LOCK_MASK 1
54
55/* PLL FRQ register masks */
56#define PLL_NORM_FDBK_M_MASK 0xFF
57#define PLL_NORM_FDBK_M_SHIFT 24
58#define PLL_DITH_FDBK_M_MASK 0xFFFF
59#define PLL_DITH_FDBK_M_SHIFT 16
60#define PLL_DIV_P_MASK 0x7
61#define PLL_DIV_P_SHIFT 8
62#define PLL_DIV_N_MASK 0xFF
63#define PLL_DIV_N_SHIFT 0
64
65#define to_clk_vco(_hw) container_of(_hw, struct clk_vco, hw)
66#define to_clk_pll(_hw) container_of(_hw, struct clk_pll, hw)
67
68/* Calculates pll clk rate for specific value of mode, m, n and p */
69static unsigned long pll_calc_rate(struct pll_rate_tbl *rtbl,
70 unsigned long prate, int index, unsigned long *pll_rate)
71{
72 unsigned long rate = prate;
73 unsigned int mode;
74
75 mode = rtbl[index].mode ? 256 : 1;
76 rate = (((2 * rate / 10000) * rtbl[index].m) / (mode * rtbl[index].n));
77
78 if (pll_rate)
79 *pll_rate = (rate / (1 << rtbl[index].p)) * 10000;
80
81 return rate * 10000;
82}
83
/*
 * Scan the shared vco/pll rate table for the best pll rate not exceeding
 * @drate.  On return *prate holds the matching VCO (parent) rate and
 * *index the chosen table entry; the pll rate itself is returned.
 *
 * The VCO's own parent rate is fetched two levels up because each table
 * entry programs both the VCO (m, n, mode) and the pll post-divider (p).
 */
static long clk_pll_round_rate_index(struct clk_hw *hw, unsigned long drate,
		unsigned long *prate, int *index)
{
	struct clk_pll *pll = to_clk_pll(hw);
	unsigned long prev_rate, vco_prev_rate, rate = 0;
	unsigned long vco_parent_rate =
		__clk_get_rate(__clk_get_parent(__clk_get_parent(hw->clk)));

	if (!prate) {
		pr_err("%s: prate is must for pll clk\n", __func__);
		return -EINVAL;
	}

	/* Table is assumed sorted ascending; stop at the first overshoot. */
	for (*index = 0; *index < pll->vco->rtbl_cnt; (*index)++) {
		prev_rate = rate;
		vco_prev_rate = *prate;
		*prate = pll_calc_rate(pll->vco->rtbl, vco_parent_rate, *index,
				&rate);
		if (drate < rate) {
			/* previous clock was best */
			if (*index) {
				rate = prev_rate;
				*prate = vco_prev_rate;
				(*index)--;
			}
			break;
		}
	}

	return rate;
}
115
/* clk_ops round_rate hook: delegate to the index-returning helper. */
static long clk_pll_round_rate(struct clk_hw *hw, unsigned long drate,
		unsigned long *prate)
{
	int ignored;

	return clk_pll_round_rate_index(hw, drate, prate, &ignored);
}
123
/*
 * The pll output is the VCO rate divided by 2^p; read p back from the
 * shared config register.
 */
static unsigned long clk_pll_recalc_rate(struct clk_hw *hw, unsigned long
		parent_rate)
{
	struct clk_pll *pll = to_clk_pll(hw);
	unsigned long flags = 0;
	unsigned int p;

	/* The lock is optional; only the register read is protected. */
	if (pll->vco->lock)
		spin_lock_irqsave(pll->vco->lock, flags);

	p = readl_relaxed(pll->vco->cfg_reg);

	if (pll->vco->lock)
		spin_unlock_irqrestore(pll->vco->lock, flags);

	p = (p >> PLL_DIV_P_SHIFT) & PLL_DIV_P_MASK;

	return parent_rate / (1 << p);
}
143
/*
 * Program only the p post-divider for the table entry closest to @drate;
 * the VCO part (m, n, mode) is set by the parent via CLK_SET_RATE_PARENT.
 */
static int clk_pll_set_rate(struct clk_hw *hw, unsigned long drate,
		unsigned long prate)
{
	struct clk_pll *pll = to_clk_pll(hw);
	struct pll_rate_tbl *rtbl = pll->vco->rtbl;
	unsigned long flags = 0, val;
	int i;

	/* prate is NULL: only the index is needed here. */
	clk_pll_round_rate_index(hw, drate, NULL, &i);

	if (pll->vco->lock)
		spin_lock_irqsave(pll->vco->lock, flags);

	val = readl_relaxed(pll->vco->cfg_reg);
	val &= ~(PLL_DIV_P_MASK << PLL_DIV_P_SHIFT);
	val |= (rtbl[i].p & PLL_DIV_P_MASK) << PLL_DIV_P_SHIFT;
	writel_relaxed(val, pll->vco->cfg_reg);

	if (pll->vco->lock)
		spin_unlock_irqrestore(pll->vco->lock, flags);

	return 0;
}
167
168static struct clk_ops clk_pll_ops = {
169 .recalc_rate = clk_pll_recalc_rate,
170 .round_rate = clk_pll_round_rate,
171 .set_rate = clk_pll_set_rate,
172};
173
174static inline unsigned long vco_calc_rate(struct clk_hw *hw,
175 unsigned long prate, int index)
176{
177 struct clk_vco *vco = to_clk_vco(hw);
178
179 return pll_calc_rate(vco->rtbl, prate, index, NULL);
180}
181
182static long clk_vco_round_rate(struct clk_hw *hw, unsigned long drate,
183 unsigned long *prate)
184{
185 struct clk_vco *vco = to_clk_vco(hw);
186 int unused;
187
188 return clk_round_rate_index(hw, drate, *prate, vco_calc_rate,
189 vco->rtbl_cnt, &unused);
190}
191
/*
 * Read mode, m and n back from hardware and compute the VCO rate:
 * normal mode   vco = 2 * M[15:8] * Fin / N
 * dithered mode vco = 2 * M[15:0] * Fin / (256 * N)
 */
static unsigned long clk_vco_recalc_rate(struct clk_hw *hw,
		unsigned long parent_rate)
{
	struct clk_vco *vco = to_clk_vco(hw);
	unsigned long flags = 0;
	unsigned int num = 2, den = 0, val, mode = 0;

	/* The lock is optional; only the register reads are protected. */
	if (vco->lock)
		spin_lock_irqsave(vco->lock, flags);

	mode = (readl_relaxed(vco->mode_reg) >> PLL_MODE_SHIFT) & PLL_MODE_MASK;

	val = readl_relaxed(vco->cfg_reg);

	if (vco->lock)
		spin_unlock_irqrestore(vco->lock, flags);

	den = (val >> PLL_DIV_N_SHIFT) & PLL_DIV_N_MASK;

	/* calculate numerator & denominator */
	if (!mode) {
		/* Normal mode */
		num *= (val >> PLL_NORM_FDBK_M_SHIFT) & PLL_NORM_FDBK_M_MASK;
	} else {
		/* Dithered mode */
		num *= (val >> PLL_DITH_FDBK_M_SHIFT) & PLL_DITH_FDBK_M_MASK;
		den *= 256;
	}

	/* N == 0 would mean an unprogrammed VCO; warn rather than divide. */
	if (!den) {
		WARN(1, "%s: denominator can't be zero\n", __func__);
		return 0;
	}

	/* Scale by 10000 to avoid overflow in the multiply. */
	return (((parent_rate / 10000) * num) / den) * 10000;
}
228
/* Configures new clock rate of vco */
static int clk_vco_set_rate(struct clk_hw *hw, unsigned long drate,
		unsigned long prate)
{
	struct clk_vco *vco = to_clk_vco(hw);
	struct pll_rate_tbl *rtbl = vco->rtbl;
	unsigned long flags = 0, val;
	int i;

	/*
	 * Pick the best table entry for @drate; i receives its index.
	 * NOTE(review): if @drate exceeds every table rate the returned
	 * index can equal rtbl_cnt — rtbl[i] would then read out of
	 * bounds; verify against clk_round_rate_index().
	 */
	clk_round_rate_index(hw, drate, prate, vco_calc_rate, vco->rtbl_cnt,
			&i);

	if (vco->lock)
		spin_lock_irqsave(vco->lock, flags);

	/* Select normal vs dithered mode first. */
	val = readl_relaxed(vco->mode_reg);
	val &= ~(PLL_MODE_MASK << PLL_MODE_SHIFT);
	val |= (rtbl[i].mode & PLL_MODE_MASK) << PLL_MODE_SHIFT;
	writel_relaxed(val, vco->mode_reg);

	/* Then program N and the mode-dependent M feedback field. */
	val = readl_relaxed(vco->cfg_reg);
	val &= ~(PLL_DIV_N_MASK << PLL_DIV_N_SHIFT);
	val |= (rtbl[i].n & PLL_DIV_N_MASK) << PLL_DIV_N_SHIFT;

	val &= ~(PLL_DITH_FDBK_M_MASK << PLL_DITH_FDBK_M_SHIFT);
	if (rtbl[i].mode)
		val |= (rtbl[i].m & PLL_DITH_FDBK_M_MASK) <<
			PLL_DITH_FDBK_M_SHIFT;
	else
		val |= (rtbl[i].m & PLL_NORM_FDBK_M_MASK) <<
			PLL_NORM_FDBK_M_SHIFT;

	writel_relaxed(val, vco->cfg_reg);

	if (vco->lock)
		spin_unlock_irqrestore(vco->lock, flags);

	return 0;
}
268
269static struct clk_ops clk_vco_ops = {
270 .recalc_rate = clk_vco_recalc_rate,
271 .round_rate = clk_vco_round_rate,
272 .set_rate = clk_vco_set_rate,
273};
274
275struct clk *clk_register_vco_pll(const char *vco_name, const char *pll_name,
276 const char *vco_gate_name, const char *parent_name,
277 unsigned long flags, void __iomem *mode_reg, void __iomem
278 *cfg_reg, struct pll_rate_tbl *rtbl, u8 rtbl_cnt,
279 spinlock_t *lock, struct clk **pll_clk,
280 struct clk **vco_gate_clk)
281{
282 struct clk_vco *vco;
283 struct clk_pll *pll;
284 struct clk *vco_clk, *tpll_clk, *tvco_gate_clk;
285 struct clk_init_data vco_init, pll_init;
286 const char **vco_parent_name;
287
288 if (!vco_name || !pll_name || !parent_name || !mode_reg || !cfg_reg ||
289 !rtbl || !rtbl_cnt) {
290 pr_err("Invalid arguments passed");
291 return ERR_PTR(-EINVAL);
292 }
293
294 vco = kzalloc(sizeof(*vco), GFP_KERNEL);
295 if (!vco) {
296 pr_err("could not allocate vco clk\n");
297 return ERR_PTR(-ENOMEM);
298 }
299
300 pll = kzalloc(sizeof(*pll), GFP_KERNEL);
301 if (!pll) {
302 pr_err("could not allocate pll clk\n");
303 goto free_vco;
304 }
305
306 /* struct clk_vco assignments */
307 vco->mode_reg = mode_reg;
308 vco->cfg_reg = cfg_reg;
309 vco->rtbl = rtbl;
310 vco->rtbl_cnt = rtbl_cnt;
311 vco->lock = lock;
312 vco->hw.init = &vco_init;
313
314 pll->vco = vco;
315 pll->hw.init = &pll_init;
316
317 if (vco_gate_name) {
318 tvco_gate_clk = clk_register_gate(NULL, vco_gate_name,
319 parent_name, 0, mode_reg, PLL_ENABLE, 0, lock);
320 if (IS_ERR_OR_NULL(tvco_gate_clk))
321 goto free_pll;
322
323 if (vco_gate_clk)
324 *vco_gate_clk = tvco_gate_clk;
325 vco_parent_name = &vco_gate_name;
326 } else {
327 vco_parent_name = &parent_name;
328 }
329
330 vco_init.name = vco_name;
331 vco_init.ops = &clk_vco_ops;
332 vco_init.flags = flags;
333 vco_init.parent_names = vco_parent_name;
334 vco_init.num_parents = 1;
335
336 pll_init.name = pll_name;
337 pll_init.ops = &clk_pll_ops;
338 pll_init.flags = CLK_SET_RATE_PARENT;
339 pll_init.parent_names = &vco_name;
340 pll_init.num_parents = 1;
341
342 vco_clk = clk_register(NULL, &vco->hw);
343 if (IS_ERR_OR_NULL(vco_clk))
344 goto free_pll;
345
346 tpll_clk = clk_register(NULL, &pll->hw);
347 if (IS_ERR_OR_NULL(tpll_clk))
348 goto free_pll;
349
350 if (pll_clk)
351 *pll_clk = tpll_clk;
352
353 return vco_clk;
354
355free_pll:
356 kfree(pll);
357free_vco:
358 kfree(vco);
359
360 pr_err("Failed to register vco pll clock\n");
361
362 return ERR_PTR(-ENOMEM);
363}
diff --git a/drivers/clk/spear/clk.c b/drivers/clk/spear/clk.c
new file mode 100644
index 000000000000..376d4e5ff326
--- /dev/null
+++ b/drivers/clk/spear/clk.c
@@ -0,0 +1,36 @@
1/*
2 * Copyright (C) 2012 ST Microelectronics
3 * Viresh Kumar <viresh.kumar@st.com>
4 *
5 * This file is licensed under the terms of the GNU General Public
6 * License version 2. This program is licensed "as is" without any
7 * warranty of any kind, whether express or implied.
8 *
9 * SPEAr clk - Common routines
10 */
11
12#include <linux/clk-provider.h>
13#include <linux/types.h>
14#include "clk.h"
15
16long clk_round_rate_index(struct clk_hw *hw, unsigned long drate,
17 unsigned long parent_rate, clk_calc_rate calc_rate, u8 rtbl_cnt,
18 int *index)
19{
20 unsigned long prev_rate, rate = 0;
21
22 for (*index = 0; *index < rtbl_cnt; (*index)++) {
23 prev_rate = rate;
24 rate = calc_rate(hw, parent_rate, *index);
25 if (drate < rate) {
26 /* previous clock was best */
27 if (*index) {
28 rate = prev_rate;
29 (*index)--;
30 }
31 break;
32 }
33 }
34
35 return rate;
36}
diff --git a/drivers/clk/spear/clk.h b/drivers/clk/spear/clk.h
new file mode 100644
index 000000000000..3321c46a071c
--- /dev/null
+++ b/drivers/clk/spear/clk.h
@@ -0,0 +1,134 @@
1/*
2 * Clock framework definitions for SPEAr platform
3 *
4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#ifndef __SPEAR_CLK_H
13#define __SPEAR_CLK_H
14
15#include <linux/clk-provider.h>
16#include <linux/spinlock_types.h>
17#include <linux/types.h>
18
/* Auxiliary Synth clk */
/* Default masks */
#define AUX_EQ_SEL_SHIFT	30
#define AUX_EQ_SEL_MASK		1
#define AUX_EQ1_SEL		0
#define AUX_EQ2_SEL		1
#define AUX_XSCALE_SHIFT	16
#define AUX_XSCALE_MASK		0xFFF
#define AUX_YSCALE_SHIFT	0
#define AUX_YSCALE_MASK		0xFFF
#define AUX_SYNT_ENB		31

/*
 * Bit-field layout of an auxiliary synthesizer register: where the
 * equation-select, X/Y scaling factors and enable bit live inside the
 * register a clk_aux operates on.  The defaults above describe the
 * common layout; SoC files may provide their own instance.
 */
struct aux_clk_masks {
	u32 eq_sel_mask;	/* width mask of the equation-select field */
	u32 eq_sel_shift;	/* bit position of the equation-select field */
	u32 eq1_mask;		/* register value selecting equation 1 */
	u32 eq2_mask;		/* register value selecting equation 2 */
	u32 xscale_sel_mask;	/* width mask of the X scaling factor */
	u32 xscale_sel_shift;	/* bit position of the X scaling factor */
	u32 yscale_sel_mask;	/* width mask of the Y scaling factor */
	u32 yscale_sel_shift;	/* bit position of the Y scaling factor */
	u32 enable_bit;		/* bit number of the synth enable bit */
};

/* One entry of an auxiliary synthesizer rate table */
struct aux_rate_tbl {
	u16 xscale;	/* X scaling factor */
	u16 yscale;	/* Y scaling factor */
	u8 eq;		/* equation select (AUX_EQ1_SEL/AUX_EQ2_SEL) */
};

/* Auxiliary synthesizer clock */
struct clk_aux {
	struct clk_hw hw;		/* common clk framework handle */
	void __iomem *reg;		/* synth configuration register */
	struct aux_clk_masks *masks;	/* field layout of @reg */
	struct aux_rate_tbl *rtbl;	/* rate table, ascending order of rates */
	u8 rtbl_cnt;			/* number of entries in @rtbl */
	spinlock_t *lock;		/* protects register access */
};
57
/* Fractional Synth clk */
/* One entry of a fractional synthesizer rate table */
struct frac_rate_tbl {
	u32 div;	/* fractional divider value programmed into the synth */
};

/* Fractional synthesizer clock */
struct clk_frac {
	struct clk_hw hw;		/* common clk framework handle */
	void __iomem *reg;		/* synth configuration register */
	struct frac_rate_tbl *rtbl;	/* rate table, ascending order of rates */
	u8 rtbl_cnt;			/* number of entries in @rtbl */
	spinlock_t *lock;		/* protects register access */
};

/* GPT clk */
/* One entry of a general-purpose-timer clock rate table */
struct gpt_rate_tbl {
	u16 mscale;	/* M scaling factor */
	u16 nscale;	/* N scaling factor */
};

/* General purpose timer clock */
struct clk_gpt {
	struct clk_hw hw;		/* common clk framework handle */
	void __iomem *reg;		/* GPT clock configuration register */
	struct gpt_rate_tbl *rtbl;	/* rate table, ascending order of rates */
	u8 rtbl_cnt;			/* number of entries in @rtbl */
	spinlock_t *lock;		/* protects register access */
};
84
/* VCO-PLL clk */
/*
 * One entry of a VCO-PLL rate table: operating mode plus the M/N/P
 * values programmed into the PLL.  Exact field semantics are defined
 * by the vco-pll driver — see the rate comments in the SoC clock files
 * for the resulting VCO/PLL frequencies.
 */
struct pll_rate_tbl {
	u8 mode;
	u16 m;
	u8 n;
	u8 p;
};

/* VCO clock; the derived PLL output is modelled by struct clk_pll */
struct clk_vco {
	struct clk_hw hw;		/* common clk framework handle */
	void __iomem *mode_reg;		/* mode/enable register (holds PLL_ENABLE) */
	void __iomem *cfg_reg;		/* frequency configuration register */
	struct pll_rate_tbl *rtbl;	/* rate table, ascending order of rates */
	u8 rtbl_cnt;			/* number of entries in @rtbl */
	spinlock_t *lock;		/* protects register access */
};

/* PLL output clock, always paired with a struct clk_vco */
struct clk_pll {
	struct clk_hw hw;		/* common clk framework handle */
	struct clk_vco *vco;		/* associated VCO */
	const char *parent[1];		/* storage for the single parent name */
	spinlock_t *lock;		/* protects register access */
};

/*
 * Callback computing the rate produced by rate-table entry @index for a
 * given parent rate @prate; consumed by clk_round_rate_index().
 */
typedef unsigned long (*clk_calc_rate)(struct clk_hw *hw, unsigned long prate,
		int index);
111
/* clk register routines */

/*
 * Register an auxiliary synthesizer clock.  The optionally created gate
 * clock (named @gate_name) is returned through @gate_clk when non-NULL.
 * @masks may be NULL to use the default register layout.
 */
struct clk *clk_register_aux(const char *aux_name, const char *gate_name,
		const char *parent_name, unsigned long flags, void __iomem *reg,
		struct aux_clk_masks *masks, struct aux_rate_tbl *rtbl,
		u8 rtbl_cnt, spinlock_t *lock, struct clk **gate_clk);

/* Register a fractional synthesizer clock. */
struct clk *clk_register_frac(const char *name, const char *parent_name,
		unsigned long flags, void __iomem *reg,
		struct frac_rate_tbl *rtbl, u8 rtbl_cnt, spinlock_t *lock);

/* Register a general-purpose-timer clock. */
struct clk *clk_register_gpt(const char *name, const char *parent_name, unsigned
		long flags, void __iomem *reg, struct gpt_rate_tbl *rtbl, u8
		rtbl_cnt, spinlock_t *lock);

/*
 * Register a VCO-PLL pair.  Returns the VCO clock; the PLL clock is
 * returned through @pll_clk (may be NULL).  When @vco_gate_name is
 * non-NULL a gate is inserted between @parent_name and the VCO and
 * returned through @vco_gate_clk (may be NULL).
 */
struct clk *clk_register_vco_pll(const char *vco_name, const char *pll_name,
		const char *vco_gate_name, const char *parent_name,
		unsigned long flags, void __iomem *mode_reg, void __iomem
		*cfg_reg, struct pll_rate_tbl *rtbl, u8 rtbl_cnt,
		spinlock_t *lock, struct clk **pll_clk,
		struct clk **vco_gate_clk);

/* Round @drate to the closest not-greater rate-table entry; see clk.c. */
long clk_round_rate_index(struct clk_hw *hw, unsigned long drate,
		unsigned long parent_rate, clk_calc_rate calc_rate, u8 rtbl_cnt,
		int *index);
133
134#endif /* __SPEAR_CLK_H */
diff --git a/drivers/clk/spear/spear1310_clock.c b/drivers/clk/spear/spear1310_clock.c
new file mode 100644
index 000000000000..42b68df9aeef
--- /dev/null
+++ b/drivers/clk/spear/spear1310_clock.c
@@ -0,0 +1,1106 @@
1/*
2 * arch/arm/mach-spear13xx/spear1310_clock.c
3 *
4 * SPEAr1310 machine clock framework source file
5 *
6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#include <linux/clk.h>
15#include <linux/clkdev.h>
16#include <linux/err.h>
17#include <linux/io.h>
18#include <linux/of_platform.h>
19#include <linux/spinlock_types.h>
20#include <mach/spear.h>
21#include "clk.h"
22
23/* PLL related registers and bit values */
24#define SPEAR1310_PLL_CFG (VA_MISC_BASE + 0x210)
25 /* PLL_CFG bit values */
26 #define SPEAR1310_CLCD_SYNT_CLK_MASK 1
27 #define SPEAR1310_CLCD_SYNT_CLK_SHIFT 31
28 #define SPEAR1310_RAS_SYNT2_3_CLK_MASK 2
29 #define SPEAR1310_RAS_SYNT2_3_CLK_SHIFT 29
30 #define SPEAR1310_RAS_SYNT_CLK_MASK 2
31 #define SPEAR1310_RAS_SYNT0_1_CLK_SHIFT 27
32 #define SPEAR1310_PLL_CLK_MASK 2
33 #define SPEAR1310_PLL3_CLK_SHIFT 24
34 #define SPEAR1310_PLL2_CLK_SHIFT 22
35 #define SPEAR1310_PLL1_CLK_SHIFT 20
36
37#define SPEAR1310_PLL1_CTR (VA_MISC_BASE + 0x214)
38#define SPEAR1310_PLL1_FRQ (VA_MISC_BASE + 0x218)
39#define SPEAR1310_PLL2_CTR (VA_MISC_BASE + 0x220)
40#define SPEAR1310_PLL2_FRQ (VA_MISC_BASE + 0x224)
41#define SPEAR1310_PLL3_CTR (VA_MISC_BASE + 0x22C)
42#define SPEAR1310_PLL3_FRQ (VA_MISC_BASE + 0x230)
43#define SPEAR1310_PLL4_CTR (VA_MISC_BASE + 0x238)
44#define SPEAR1310_PLL4_FRQ (VA_MISC_BASE + 0x23C)
45#define SPEAR1310_PERIP_CLK_CFG (VA_MISC_BASE + 0x244)
46 /* PERIP_CLK_CFG bit values */
47 #define SPEAR1310_GPT_OSC24_VAL 0
48 #define SPEAR1310_GPT_APB_VAL 1
49 #define SPEAR1310_GPT_CLK_MASK 1
50 #define SPEAR1310_GPT3_CLK_SHIFT 11
51 #define SPEAR1310_GPT2_CLK_SHIFT 10
52 #define SPEAR1310_GPT1_CLK_SHIFT 9
53 #define SPEAR1310_GPT0_CLK_SHIFT 8
54 #define SPEAR1310_UART_CLK_PLL5_VAL 0
55 #define SPEAR1310_UART_CLK_OSC24_VAL 1
56 #define SPEAR1310_UART_CLK_SYNT_VAL 2
57 #define SPEAR1310_UART_CLK_MASK 2
58 #define SPEAR1310_UART_CLK_SHIFT 4
59
60 #define SPEAR1310_AUX_CLK_PLL5_VAL 0
61 #define SPEAR1310_AUX_CLK_SYNT_VAL 1
62 #define SPEAR1310_CLCD_CLK_MASK 2
63 #define SPEAR1310_CLCD_CLK_SHIFT 2
64 #define SPEAR1310_C3_CLK_MASK 1
65 #define SPEAR1310_C3_CLK_SHIFT 1
66
67#define SPEAR1310_GMAC_CLK_CFG (VA_MISC_BASE + 0x248)
68 #define SPEAR1310_GMAC_PHY_IF_SEL_MASK 3
69 #define SPEAR1310_GMAC_PHY_IF_SEL_SHIFT 4
70 #define SPEAR1310_GMAC_PHY_CLK_MASK 1
71 #define SPEAR1310_GMAC_PHY_CLK_SHIFT 3
72 #define SPEAR1310_GMAC_PHY_INPUT_CLK_MASK 2
73 #define SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT 1
74
75#define SPEAR1310_I2S_CLK_CFG (VA_MISC_BASE + 0x24C)
76 /* I2S_CLK_CFG register mask */
77 #define SPEAR1310_I2S_SCLK_X_MASK 0x1F
78 #define SPEAR1310_I2S_SCLK_X_SHIFT 27
79 #define SPEAR1310_I2S_SCLK_Y_MASK 0x1F
80 #define SPEAR1310_I2S_SCLK_Y_SHIFT 22
81 #define SPEAR1310_I2S_SCLK_EQ_SEL_SHIFT 21
82 #define SPEAR1310_I2S_SCLK_SYNTH_ENB 20
83 #define SPEAR1310_I2S_PRS1_CLK_X_MASK 0xFF
84 #define SPEAR1310_I2S_PRS1_CLK_X_SHIFT 12
85 #define SPEAR1310_I2S_PRS1_CLK_Y_MASK 0xFF
86 #define SPEAR1310_I2S_PRS1_CLK_Y_SHIFT 4
87 #define SPEAR1310_I2S_PRS1_EQ_SEL_SHIFT 3
88 #define SPEAR1310_I2S_REF_SEL_MASK 1
89 #define SPEAR1310_I2S_REF_SHIFT 2
90 #define SPEAR1310_I2S_SRC_CLK_MASK 2
91 #define SPEAR1310_I2S_SRC_CLK_SHIFT 0
92
93#define SPEAR1310_C3_CLK_SYNT (VA_MISC_BASE + 0x250)
94#define SPEAR1310_UART_CLK_SYNT (VA_MISC_BASE + 0x254)
95#define SPEAR1310_GMAC_CLK_SYNT (VA_MISC_BASE + 0x258)
96#define SPEAR1310_SDHCI_CLK_SYNT (VA_MISC_BASE + 0x25C)
97#define SPEAR1310_CFXD_CLK_SYNT (VA_MISC_BASE + 0x260)
98#define SPEAR1310_ADC_CLK_SYNT (VA_MISC_BASE + 0x264)
99#define SPEAR1310_AMBA_CLK_SYNT (VA_MISC_BASE + 0x268)
100#define SPEAR1310_CLCD_CLK_SYNT (VA_MISC_BASE + 0x270)
101#define SPEAR1310_RAS_CLK_SYNT0 (VA_MISC_BASE + 0x280)
102#define SPEAR1310_RAS_CLK_SYNT1 (VA_MISC_BASE + 0x288)
103#define SPEAR1310_RAS_CLK_SYNT2 (VA_MISC_BASE + 0x290)
104#define SPEAR1310_RAS_CLK_SYNT3 (VA_MISC_BASE + 0x298)
105 /* Check Fractional synthesizer reg masks */
106
107#define SPEAR1310_PERIP1_CLK_ENB (VA_MISC_BASE + 0x300)
108 /* PERIP1_CLK_ENB register masks */
109 #define SPEAR1310_RTC_CLK_ENB 31
110 #define SPEAR1310_ADC_CLK_ENB 30
111 #define SPEAR1310_C3_CLK_ENB 29
112 #define SPEAR1310_JPEG_CLK_ENB 28
113 #define SPEAR1310_CLCD_CLK_ENB 27
114 #define SPEAR1310_DMA_CLK_ENB 25
115 #define SPEAR1310_GPIO1_CLK_ENB 24
116 #define SPEAR1310_GPIO0_CLK_ENB 23
117 #define SPEAR1310_GPT1_CLK_ENB 22
118 #define SPEAR1310_GPT0_CLK_ENB 21
119 #define SPEAR1310_I2S0_CLK_ENB 20
120 #define SPEAR1310_I2S1_CLK_ENB 19
121 #define SPEAR1310_I2C0_CLK_ENB 18
122 #define SPEAR1310_SSP_CLK_ENB 17
123 #define SPEAR1310_UART_CLK_ENB 15
124 #define SPEAR1310_PCIE_SATA_2_CLK_ENB 14
125 #define SPEAR1310_PCIE_SATA_1_CLK_ENB 13
126 #define SPEAR1310_PCIE_SATA_0_CLK_ENB 12
127 #define SPEAR1310_UOC_CLK_ENB 11
128 #define SPEAR1310_UHC1_CLK_ENB 10
129 #define SPEAR1310_UHC0_CLK_ENB 9
130 #define SPEAR1310_GMAC_CLK_ENB 8
131 #define SPEAR1310_CFXD_CLK_ENB 7
132 #define SPEAR1310_SDHCI_CLK_ENB 6
133 #define SPEAR1310_SMI_CLK_ENB 5
134 #define SPEAR1310_FSMC_CLK_ENB 4
135 #define SPEAR1310_SYSRAM0_CLK_ENB 3
136 #define SPEAR1310_SYSRAM1_CLK_ENB 2
137 #define SPEAR1310_SYSROM_CLK_ENB 1
138 #define SPEAR1310_BUS_CLK_ENB 0
139
140#define SPEAR1310_PERIP2_CLK_ENB (VA_MISC_BASE + 0x304)
141 /* PERIP2_CLK_ENB register masks */
142 #define SPEAR1310_THSENS_CLK_ENB 8
143 #define SPEAR1310_I2S_REF_PAD_CLK_ENB 7
144 #define SPEAR1310_ACP_CLK_ENB 6
145 #define SPEAR1310_GPT3_CLK_ENB 5
146 #define SPEAR1310_GPT2_CLK_ENB 4
147 #define SPEAR1310_KBD_CLK_ENB 3
148 #define SPEAR1310_CPU_DBG_CLK_ENB 2
149 #define SPEAR1310_DDR_CORE_CLK_ENB 1
150 #define SPEAR1310_DDR_CTRL_CLK_ENB 0
151
152#define SPEAR1310_RAS_CLK_ENB (VA_MISC_BASE + 0x310)
153 /* RAS_CLK_ENB register masks */
154 #define SPEAR1310_SYNT3_CLK_ENB 17
155 #define SPEAR1310_SYNT2_CLK_ENB 16
156 #define SPEAR1310_SYNT1_CLK_ENB 15
157 #define SPEAR1310_SYNT0_CLK_ENB 14
158 #define SPEAR1310_PCLK3_CLK_ENB 13
159 #define SPEAR1310_PCLK2_CLK_ENB 12
160 #define SPEAR1310_PCLK1_CLK_ENB 11
161 #define SPEAR1310_PCLK0_CLK_ENB 10
162 #define SPEAR1310_PLL3_CLK_ENB 9
163 #define SPEAR1310_PLL2_CLK_ENB 8
164 #define SPEAR1310_C125M_PAD_CLK_ENB 7
165 #define SPEAR1310_C30M_CLK_ENB 6
166 #define SPEAR1310_C48M_CLK_ENB 5
167 #define SPEAR1310_OSC_25M_CLK_ENB 4
168 #define SPEAR1310_OSC_32K_CLK_ENB 3
169 #define SPEAR1310_OSC_24M_CLK_ENB 2
170 #define SPEAR1310_PCLK_CLK_ENB 1
171 #define SPEAR1310_ACLK_CLK_ENB 0
172
173/* RAS Area Control Register */
174#define SPEAR1310_RAS_CTRL_REG0 (VA_SPEAR1310_RAS_BASE + 0x000)
175 #define SPEAR1310_SSP1_CLK_MASK 3
176 #define SPEAR1310_SSP1_CLK_SHIFT 26
177 #define SPEAR1310_TDM_CLK_MASK 1
178 #define SPEAR1310_TDM2_CLK_SHIFT 24
179 #define SPEAR1310_TDM1_CLK_SHIFT 23
180 #define SPEAR1310_I2C_CLK_MASK 1
181 #define SPEAR1310_I2C7_CLK_SHIFT 22
182 #define SPEAR1310_I2C6_CLK_SHIFT 21
183 #define SPEAR1310_I2C5_CLK_SHIFT 20
184 #define SPEAR1310_I2C4_CLK_SHIFT 19
185 #define SPEAR1310_I2C3_CLK_SHIFT 18
186 #define SPEAR1310_I2C2_CLK_SHIFT 17
187 #define SPEAR1310_I2C1_CLK_SHIFT 16
188 #define SPEAR1310_GPT64_CLK_MASK 1
189 #define SPEAR1310_GPT64_CLK_SHIFT 15
190 #define SPEAR1310_RAS_UART_CLK_MASK 1
191 #define SPEAR1310_UART5_CLK_SHIFT 14
192 #define SPEAR1310_UART4_CLK_SHIFT 13
193 #define SPEAR1310_UART3_CLK_SHIFT 12
194 #define SPEAR1310_UART2_CLK_SHIFT 11
195 #define SPEAR1310_UART1_CLK_SHIFT 10
196 #define SPEAR1310_PCI_CLK_MASK 1
197 #define SPEAR1310_PCI_CLK_SHIFT 0
198
199#define SPEAR1310_RAS_CTRL_REG1 (VA_SPEAR1310_RAS_BASE + 0x004)
200 #define SPEAR1310_PHY_CLK_MASK 0x3
201 #define SPEAR1310_RMII_PHY_CLK_SHIFT 0
202 #define SPEAR1310_SMII_RGMII_PHY_CLK_SHIFT 2
203
204#define SPEAR1310_RAS_SW_CLK_CTRL (VA_SPEAR1310_RAS_BASE + 0x0148)
205 #define SPEAR1310_CAN1_CLK_ENB 25
206 #define SPEAR1310_CAN0_CLK_ENB 24
207 #define SPEAR1310_GPT64_CLK_ENB 23
208 #define SPEAR1310_SSP1_CLK_ENB 22
209 #define SPEAR1310_I2C7_CLK_ENB 21
210 #define SPEAR1310_I2C6_CLK_ENB 20
211 #define SPEAR1310_I2C5_CLK_ENB 19
212 #define SPEAR1310_I2C4_CLK_ENB 18
213 #define SPEAR1310_I2C3_CLK_ENB 17
214 #define SPEAR1310_I2C2_CLK_ENB 16
215 #define SPEAR1310_I2C1_CLK_ENB 15
216 #define SPEAR1310_UART5_CLK_ENB 14
217 #define SPEAR1310_UART4_CLK_ENB 13
218 #define SPEAR1310_UART3_CLK_ENB 12
219 #define SPEAR1310_UART2_CLK_ENB 11
220 #define SPEAR1310_UART1_CLK_ENB 10
221 #define SPEAR1310_RS485_1_CLK_ENB 9
222 #define SPEAR1310_RS485_0_CLK_ENB 8
223 #define SPEAR1310_TDM2_CLK_ENB 7
224 #define SPEAR1310_TDM1_CLK_ENB 6
225 #define SPEAR1310_PCI_CLK_ENB 5
226 #define SPEAR1310_GMII_CLK_ENB 4
227 #define SPEAR1310_MII2_CLK_ENB 3
228 #define SPEAR1310_MII1_CLK_ENB 2
229 #define SPEAR1310_MII0_CLK_ENB 1
230 #define SPEAR1310_ESRAM_CLK_ENB 0
231
/* Single lock protecting all clock register read-modify-write sequences */
static DEFINE_SPINLOCK(_lock);
233
/* pll rate configuration table, in ascending order of rates */
/* NOTE(review): pll rate = vco rate / 2^p across all entries — confirm
 * against the vco-pll driver. */
static struct pll_rate_tbl pll_rtbl[] = {
	/* PCLK 24MHz */
	{.mode = 0, .m = 0x83, .n = 0x04, .p = 0x5}, /* vco 1572, pll 49.125 MHz */
	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x3}, /* vco 1000, pll 125 MHz */
	{.mode = 0, .m = 0x64, .n = 0x06, .p = 0x1}, /* vco 800, pll 400 MHz */
	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x1}, /* vco 1000, pll 500 MHz */
	{.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x1}, /* vco 1328, pll 664 MHz */
	{.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x1}, /* vco 1600, pll 800 MHz */
	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1000 MHz, pll 1 GHz */
};
245
/* vco-pll4 rate configuration table, in ascending order of rates */
static struct pll_rate_tbl pll4_rtbl[] = {
	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x2}, /* vco 1000, pll 250 MHz */
	{.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x2}, /* vco 1328, pll 332 MHz */
	{.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x2}, /* vco 1600, pll 400 MHz */
	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1000 MHz, pll 1 GHz */
};
253
/* aux rate configuration table, in ascending order of rates */
/* Output rates below are computed for the stated parent rate; entries
 * are selected by clk_round_rate_index(), so keep the table sorted. */
static struct aux_rate_tbl aux_rtbl[] = {
	/* For VCO1div2 = 500 MHz */
	{.xscale = 10, .yscale = 204, .eq = 0}, /* 12.29 MHz */
	{.xscale = 4, .yscale = 21, .eq = 0}, /* 48 MHz */
	{.xscale = 2, .yscale = 6, .eq = 0}, /* 83 MHz */
	{.xscale = 2, .yscale = 4, .eq = 0}, /* 125 MHz */
	{.xscale = 1, .yscale = 3, .eq = 1}, /* 166 MHz */
	{.xscale = 1, .yscale = 2, .eq = 1}, /* 250 MHz */
};
264
/* gmac rate configuration table, in ascending order of rates */
static struct aux_rate_tbl gmac_rtbl[] = {
	/* For gmac phy input clk */
	{.xscale = 2, .yscale = 6, .eq = 0}, /* divided by 6 */
	{.xscale = 2, .yscale = 4, .eq = 0}, /* divided by 4 */
	{.xscale = 1, .yscale = 3, .eq = 1}, /* divided by 3 */
	{.xscale = 1, .yscale = 2, .eq = 1}, /* divided by 2 */
};
273
/* clcd rate configuration table, in ascending order of rates */
/* NOTE(review): the numbers suggest rate = parent * 0x4000 / (2 * div)
 * (matches every entry below) — confirm against the frac-synth driver. */
static struct frac_rate_tbl clcd_rtbl[] = {
	{.div = 0x14000}, /* 25 MHz, for vco1div4 = 250 MHz */
	{.div = 0x1284B}, /* 27 MHz, for vco1div4 = 250 MHz */
	{.div = 0x0D8D3}, /* 58 MHz, for vco1div4 = 393 MHz */
	{.div = 0x0B72C}, /* 58 MHz, for vco1div4 = 332 MHz */
	{.div = 0x089EE}, /* 58 MHz, for vco1div4 = 250 MHz */
	{.div = 0x06f1C}, /* 72 MHz, for vco1div4 = 250 MHz */
	{.div = 0x06E58}, /* 58 MHz, for vco1div4 = 200 MHz */
	{.div = 0x06c1B}, /* 74 MHz, for vco1div4 = 250 MHz */
	{.div = 0x04A12}, /* 108 MHz, for vco1div4 = 250 MHz */
	{.div = 0x0378E}, /* 144 MHz, for vco1div4 = 250 MHz */
};
287
/* i2s prescaler1 masks: field layout inside SPEAR1310_I2S_CLK_CFG */
static struct aux_clk_masks i2s_prs1_masks = {
	.eq_sel_mask = AUX_EQ_SEL_MASK,
	.eq_sel_shift = SPEAR1310_I2S_PRS1_EQ_SEL_SHIFT,
	.eq1_mask = AUX_EQ1_SEL,
	.eq2_mask = AUX_EQ2_SEL,
	.xscale_sel_mask = SPEAR1310_I2S_PRS1_CLK_X_MASK,
	.xscale_sel_shift = SPEAR1310_I2S_PRS1_CLK_X_SHIFT,
	.yscale_sel_mask = SPEAR1310_I2S_PRS1_CLK_Y_MASK,
	.yscale_sel_shift = SPEAR1310_I2S_PRS1_CLK_Y_SHIFT,
	/* .enable_bit left zero-initialized (not set for the prescaler) */
};
299
/* i2s sclk (bit clock) synthesizer masks: layout in SPEAR1310_I2S_CLK_CFG */
static struct aux_clk_masks i2s_sclk_masks = {
	.eq_sel_mask = AUX_EQ_SEL_MASK,
	.eq_sel_shift = SPEAR1310_I2S_SCLK_EQ_SEL_SHIFT,
	.eq1_mask = AUX_EQ1_SEL,
	.eq2_mask = AUX_EQ2_SEL,
	.xscale_sel_mask = SPEAR1310_I2S_SCLK_X_MASK,
	.xscale_sel_shift = SPEAR1310_I2S_SCLK_X_SHIFT,
	.yscale_sel_mask = SPEAR1310_I2S_SCLK_Y_MASK,
	.yscale_sel_shift = SPEAR1310_I2S_SCLK_Y_SHIFT,
	.enable_bit = SPEAR1310_I2S_SCLK_SYNTH_ENB,
};
312
/* i2s prs1 aux rate configuration table, in ascending order of rates */
static struct aux_rate_tbl i2s_prs1_rtbl[] = {
	/* For parent clk = 49.152 MHz */
	{.xscale = 1, .yscale = 2, .eq = 0}, /* 12.288 MHz */
};
318
/* i2s sclk aux rate configuration table, in ascending order of rates */
static struct aux_rate_tbl i2s_sclk_rtbl[] = {
	/* For i2s_ref_clk = 12.288MHz */
	{.xscale = 1, .yscale = 4, .eq = 0}, /* 1.53 MHz */
	{.xscale = 1, .yscale = 2, .eq = 0}, /* 3.07 MHz */
};
325
/* adc rate configuration table, in ascending order of rates */
/* possible adc range is 2.5 MHz to 20 MHz. */
static struct aux_rate_tbl adc_rtbl[] = {
	/* For ahb = 166.67 MHz */
	{.xscale = 1, .yscale = 31, .eq = 0}, /* 2.68 MHz */
	{.xscale = 2, .yscale = 21, .eq = 0}, /* 7.94 MHz */
	{.xscale = 4, .yscale = 21, .eq = 0}, /* 15.87 MHz */
	{.xscale = 10, .yscale = 42, .eq = 0}, /* 19.84 MHz */
};
335
/* General synth rate configuration table, in ascending order of rates */
static struct frac_rate_tbl gen_rtbl[] = {
	/* For vco1div4 = 250 MHz */
	{.div = 0x14000}, /* 25 MHz */
	{.div = 0x0A000}, /* 50 MHz */
	{.div = 0x05000}, /* 100 MHz */
	{.div = 0x02000}, /* 250 MHz */
};
344
/*
 * clock parents: mux input names, listed in selector-bit order.
 * "none" marks a reserved/unused selector value so the valid inputs
 * keep their hardware index.
 */
static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
static const char *uart0_parents[] = { "pll5_clk", "uart_synth_gate_clk", };
static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", };
static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk",
	"osc_25m_clk", };
static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk",
	"gmac_phy_synth_gate_clk", };
static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", };
static const char *i2s_src_parents[] = { "vco1div2_clk", "none", "pll3_clk",
	"i2s_src_pad_clk", };
static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", };
static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
	"pll3_clk", };
static const char *gen_synth2_3_parents[] = { "vco1div4_clk", "vco3div2_clk",
	"pll2_clk", };
static const char *rmii_phy_parents[] = { "ras_tx50_clk", "none",
	"ras_pll2_clk", "ras_synth0_clk", };
static const char *smii_rgmii_phy_parents[] = { "none", "ras_tx125_clk",
	"ras_pll2_clk", "ras_synth0_clk", };
static const char *uart_parents[] = { "ras_apb_clk", "gen_synth3_clk", };
static const char *i2c_parents[] = { "ras_apb_clk", "gen_synth1_clk", };
static const char *ssp1_parents[] = { "ras_apb_clk", "gen_synth1_clk",
	"ras_plclk0_clk", };
static const char *pci_parents[] = { "ras_pll3_clk", "gen_synth2_clk", };
static const char *tdm_parents[] = { "ras_pll3_clk", "gen_synth1_clk", };
373
374void __init spear1310_clk_init(void)
375{
376 struct clk *clk, *clk1;
377
378 clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
379 clk_register_clkdev(clk, "apb_pclk", NULL);
380
381 clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
382 32000);
383 clk_register_clkdev(clk, "osc_32k_clk", NULL);
384
385 clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
386 24000000);
387 clk_register_clkdev(clk, "osc_24m_clk", NULL);
388
389 clk = clk_register_fixed_rate(NULL, "osc_25m_clk", NULL, CLK_IS_ROOT,
390 25000000);
391 clk_register_clkdev(clk, "osc_25m_clk", NULL);
392
393 clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL,
394 CLK_IS_ROOT, 125000000);
395 clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL);
396
397 clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
398 CLK_IS_ROOT, 12288000);
399 clk_register_clkdev(clk, "i2s_src_pad_clk", NULL);
400
401 /* clock derived from 32 KHz osc clk */
402 clk = clk_register_gate(NULL, "rtc-spear", "osc_32k_clk", 0,
403 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_RTC_CLK_ENB, 0,
404 &_lock);
405 clk_register_clkdev(clk, NULL, "fc900000.rtc");
406
407 /* clock derived from 24 or 25 MHz osc clk */
408 /* vco-pll */
409 clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents,
410 ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
411 SPEAR1310_PLL1_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
412 &_lock);
413 clk_register_clkdev(clk, "vco1_mux_clk", NULL);
414 clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk",
415 0, SPEAR1310_PLL1_CTR, SPEAR1310_PLL1_FRQ, pll_rtbl,
416 ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
417 clk_register_clkdev(clk, "vco1_clk", NULL);
418 clk_register_clkdev(clk1, "pll1_clk", NULL);
419
420 clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents,
421 ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
422 SPEAR1310_PLL2_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
423 &_lock);
424 clk_register_clkdev(clk, "vco2_mux_clk", NULL);
425 clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk",
426 0, SPEAR1310_PLL2_CTR, SPEAR1310_PLL2_FRQ, pll_rtbl,
427 ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
428 clk_register_clkdev(clk, "vco2_clk", NULL);
429 clk_register_clkdev(clk1, "pll2_clk", NULL);
430
431 clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents,
432 ARRAY_SIZE(vco_parents), 0, SPEAR1310_PLL_CFG,
433 SPEAR1310_PLL3_CLK_SHIFT, SPEAR1310_PLL_CLK_MASK, 0,
434 &_lock);
435 clk_register_clkdev(clk, "vco3_mux_clk", NULL);
436 clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk",
437 0, SPEAR1310_PLL3_CTR, SPEAR1310_PLL3_FRQ, pll_rtbl,
438 ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
439 clk_register_clkdev(clk, "vco3_clk", NULL);
440 clk_register_clkdev(clk1, "pll3_clk", NULL);
441
442 clk = clk_register_vco_pll("vco4_clk", "pll4_clk", NULL, "osc_24m_clk",
443 0, SPEAR1310_PLL4_CTR, SPEAR1310_PLL4_FRQ, pll4_rtbl,
444 ARRAY_SIZE(pll4_rtbl), &_lock, &clk1, NULL);
445 clk_register_clkdev(clk, "vco4_clk", NULL);
446 clk_register_clkdev(clk1, "pll4_clk", NULL);
447
448 clk = clk_register_fixed_rate(NULL, "pll5_clk", "osc_24m_clk", 0,
449 48000000);
450 clk_register_clkdev(clk, "pll5_clk", NULL);
451
452 clk = clk_register_fixed_rate(NULL, "pll6_clk", "osc_25m_clk", 0,
453 25000000);
454 clk_register_clkdev(clk, "pll6_clk", NULL);
455
456 /* vco div n clocks */
457 clk = clk_register_fixed_factor(NULL, "vco1div2_clk", "vco1_clk", 0, 1,
458 2);
459 clk_register_clkdev(clk, "vco1div2_clk", NULL);
460
461 clk = clk_register_fixed_factor(NULL, "vco1div4_clk", "vco1_clk", 0, 1,
462 4);
463 clk_register_clkdev(clk, "vco1div4_clk", NULL);
464
465 clk = clk_register_fixed_factor(NULL, "vco2div2_clk", "vco2_clk", 0, 1,
466 2);
467 clk_register_clkdev(clk, "vco2div2_clk", NULL);
468
469 clk = clk_register_fixed_factor(NULL, "vco3div2_clk", "vco3_clk", 0, 1,
470 2);
471 clk_register_clkdev(clk, "vco3div2_clk", NULL);
472
473 /* peripherals */
474 clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
475 128);
476 clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0,
477 SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_THSENS_CLK_ENB, 0,
478 &_lock);
479 clk_register_clkdev(clk, NULL, "spear_thermal");
480
481 /* clock derived from pll4 clk */
482 clk = clk_register_fixed_factor(NULL, "ddr_clk", "pll4_clk", 0, 1,
483 1);
484 clk_register_clkdev(clk, "ddr_clk", NULL);
485
486 /* clock derived from pll1 clk */
487 clk = clk_register_fixed_factor(NULL, "cpu_clk", "pll1_clk", 0, 1, 2);
488 clk_register_clkdev(clk, "cpu_clk", NULL);
489
490 clk = clk_register_fixed_factor(NULL, "wdt_clk", "cpu_clk", 0, 1,
491 2);
492 clk_register_clkdev(clk, NULL, "ec800620.wdt");
493
494 clk = clk_register_fixed_factor(NULL, "ahb_clk", "pll1_clk", 0, 1,
495 6);
496 clk_register_clkdev(clk, "ahb_clk", NULL);
497
498 clk = clk_register_fixed_factor(NULL, "apb_clk", "pll1_clk", 0, 1,
499 12);
500 clk_register_clkdev(clk, "apb_clk", NULL);
501
502 /* gpt clocks */
503 clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents,
504 ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
505 SPEAR1310_GPT0_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
506 &_lock);
507 clk_register_clkdev(clk, "gpt0_mux_clk", NULL);
508 clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0,
509 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT0_CLK_ENB, 0,
510 &_lock);
511 clk_register_clkdev(clk, NULL, "gpt0");
512
513 clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents,
514 ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
515 SPEAR1310_GPT1_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
516 &_lock);
517 clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
518 clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
519 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPT1_CLK_ENB, 0,
520 &_lock);
521 clk_register_clkdev(clk, NULL, "gpt1");
522
523 clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents,
524 ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
525 SPEAR1310_GPT2_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
526 &_lock);
527 clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
528 clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
529 SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT2_CLK_ENB, 0,
530 &_lock);
531 clk_register_clkdev(clk, NULL, "gpt2");
532
533 clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents,
534 ARRAY_SIZE(gpt_parents), 0, SPEAR1310_PERIP_CLK_CFG,
535 SPEAR1310_GPT3_CLK_SHIFT, SPEAR1310_GPT_CLK_MASK, 0,
536 &_lock);
537 clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
538 clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
539 SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_GPT3_CLK_ENB, 0,
540 &_lock);
541 clk_register_clkdev(clk, NULL, "gpt3");
542
543 /* others */
544 clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
545 "vco1div2_clk", 0, SPEAR1310_UART_CLK_SYNT, NULL,
546 aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
547 clk_register_clkdev(clk, "uart_synth_clk", NULL);
548 clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
549
550 clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
551 ARRAY_SIZE(uart0_parents), 0, SPEAR1310_PERIP_CLK_CFG,
552 SPEAR1310_UART_CLK_SHIFT, SPEAR1310_UART_CLK_MASK, 0,
553 &_lock);
554 clk_register_clkdev(clk, "uart0_mux_clk", NULL);
555
556 clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0,
557 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UART_CLK_ENB, 0,
558 &_lock);
559 clk_register_clkdev(clk, NULL, "e0000000.serial");
560
561 clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk",
562 "vco1div2_clk", 0, SPEAR1310_SDHCI_CLK_SYNT, NULL,
563 aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
564 clk_register_clkdev(clk, "sdhci_synth_clk", NULL);
565 clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL);
566
567 clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0,
568 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SDHCI_CLK_ENB, 0,
569 &_lock);
570 clk_register_clkdev(clk, NULL, "b3000000.sdhci");
571
572 clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk",
573 "vco1div2_clk", 0, SPEAR1310_CFXD_CLK_SYNT, NULL,
574 aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
575 clk_register_clkdev(clk, "cfxd_synth_clk", NULL);
576 clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL);
577
578 clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0,
579 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CFXD_CLK_ENB, 0,
580 &_lock);
581 clk_register_clkdev(clk, NULL, "b2800000.cf");
582 clk_register_clkdev(clk, NULL, "arasan_xd");
583
584 clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk",
585 "vco1div2_clk", 0, SPEAR1310_C3_CLK_SYNT, NULL,
586 aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
587 clk_register_clkdev(clk, "c3_synth_clk", NULL);
588 clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL);
589
590 clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents,
591 ARRAY_SIZE(c3_parents), 0, SPEAR1310_PERIP_CLK_CFG,
592 SPEAR1310_C3_CLK_SHIFT, SPEAR1310_C3_CLK_MASK, 0,
593 &_lock);
594 clk_register_clkdev(clk, "c3_mux_clk", NULL);
595
596 clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0,
597 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_C3_CLK_ENB, 0,
598 &_lock);
599 clk_register_clkdev(clk, NULL, "c3");
600
601 /* gmac */
602 clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk",
603 gmac_phy_input_parents,
604 ARRAY_SIZE(gmac_phy_input_parents), 0,
605 SPEAR1310_GMAC_CLK_CFG,
606 SPEAR1310_GMAC_PHY_INPUT_CLK_SHIFT,
607 SPEAR1310_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
608 clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL);
609
610 clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk",
611 "gmac_phy_input_mux_clk", 0, SPEAR1310_GMAC_CLK_SYNT,
612 NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
613 clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL);
614 clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL);
615
616 clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents,
617 ARRAY_SIZE(gmac_phy_parents), 0,
618 SPEAR1310_PERIP_CLK_CFG, SPEAR1310_GMAC_PHY_CLK_SHIFT,
619 SPEAR1310_GMAC_PHY_CLK_MASK, 0, &_lock);
620 clk_register_clkdev(clk, NULL, "stmmacphy.0");
621
622 /* clcd */
623 clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents,
624 ARRAY_SIZE(clcd_synth_parents), 0,
625 SPEAR1310_CLCD_CLK_SYNT, SPEAR1310_CLCD_SYNT_CLK_SHIFT,
626 SPEAR1310_CLCD_SYNT_CLK_MASK, 0, &_lock);
627 clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL);
628
629 clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0,
630 SPEAR1310_CLCD_CLK_SYNT, clcd_rtbl,
631 ARRAY_SIZE(clcd_rtbl), &_lock);
632 clk_register_clkdev(clk, "clcd_synth_clk", NULL);
633
634 clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents,
635 ARRAY_SIZE(clcd_pixel_parents), 0,
636 SPEAR1310_PERIP_CLK_CFG, SPEAR1310_CLCD_CLK_SHIFT,
637 SPEAR1310_CLCD_CLK_MASK, 0, &_lock);
638 clk_register_clkdev(clk, "clcd_pixel_clk", NULL);
639
640 clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0,
641 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_CLCD_CLK_ENB, 0,
642 &_lock);
643 clk_register_clkdev(clk, "clcd_clk", NULL);
644
645 /* i2s */
646 clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents,
647 ARRAY_SIZE(i2s_src_parents), 0, SPEAR1310_I2S_CLK_CFG,
648 SPEAR1310_I2S_SRC_CLK_SHIFT, SPEAR1310_I2S_SRC_CLK_MASK,
649 0, &_lock);
650 clk_register_clkdev(clk, "i2s_src_clk", NULL);
651
652 clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0,
653 SPEAR1310_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
654 ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
655 clk_register_clkdev(clk, "i2s_prs1_clk", NULL);
656
657 clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents,
658 ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1310_I2S_CLK_CFG,
659 SPEAR1310_I2S_REF_SHIFT, SPEAR1310_I2S_REF_SEL_MASK, 0,
660 &_lock);
661 clk_register_clkdev(clk, "i2s_ref_clk", NULL);
662
663 clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0,
664 SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_I2S_REF_PAD_CLK_ENB,
665 0, &_lock);
666 clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);
667
668 clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk",
669 "i2s_ref_pad_clk", 0, SPEAR1310_I2S_CLK_CFG,
670 &i2s_sclk_masks, i2s_sclk_rtbl,
671 ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
672 clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
673 clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL);
674
675 /* clock derived from ahb clk */
676 clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
677 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_I2C0_CLK_ENB, 0,
678 &_lock);
679 clk_register_clkdev(clk, NULL, "e0280000.i2c");
680
681 clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0,
682 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_DMA_CLK_ENB, 0,
683 &_lock);
684 clk_register_clkdev(clk, NULL, "ea800000.dma");
685 clk_register_clkdev(clk, NULL, "eb000000.dma");
686
687 clk = clk_register_gate(NULL, "jpeg_clk", "ahb_clk", 0,
688 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_JPEG_CLK_ENB, 0,
689 &_lock);
690 clk_register_clkdev(clk, NULL, "b2000000.jpeg");
691
692 clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0,
693 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GMAC_CLK_ENB, 0,
694 &_lock);
695 clk_register_clkdev(clk, NULL, "e2000000.eth");
696
697 clk = clk_register_gate(NULL, "fsmc_clk", "ahb_clk", 0,
698 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_FSMC_CLK_ENB, 0,
699 &_lock);
700 clk_register_clkdev(clk, NULL, "b0000000.flash");
701
702 clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0,
703 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SMI_CLK_ENB, 0,
704 &_lock);
705 clk_register_clkdev(clk, NULL, "ea000000.flash");
706
707 clk = clk_register_gate(NULL, "usbh0_clk", "ahb_clk", 0,
708 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UHC0_CLK_ENB, 0,
709 &_lock);
710 clk_register_clkdev(clk, "usbh.0_clk", NULL);
711
712 clk = clk_register_gate(NULL, "usbh1_clk", "ahb_clk", 0,
713 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UHC1_CLK_ENB, 0,
714 &_lock);
715 clk_register_clkdev(clk, "usbh.1_clk", NULL);
716
717 clk = clk_register_gate(NULL, "uoc_clk", "ahb_clk", 0,
718 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_UOC_CLK_ENB, 0,
719 &_lock);
720 clk_register_clkdev(clk, NULL, "uoc");
721
722 clk = clk_register_gate(NULL, "pcie_sata_0_clk", "ahb_clk", 0,
723 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_0_CLK_ENB,
724 0, &_lock);
725 clk_register_clkdev(clk, NULL, "dw_pcie.0");
726 clk_register_clkdev(clk, NULL, "ahci.0");
727
728 clk = clk_register_gate(NULL, "pcie_sata_1_clk", "ahb_clk", 0,
729 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_1_CLK_ENB,
730 0, &_lock);
731 clk_register_clkdev(clk, NULL, "dw_pcie.1");
732 clk_register_clkdev(clk, NULL, "ahci.1");
733
734 clk = clk_register_gate(NULL, "pcie_sata_2_clk", "ahb_clk", 0,
735 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_PCIE_SATA_2_CLK_ENB,
736 0, &_lock);
737 clk_register_clkdev(clk, NULL, "dw_pcie.2");
738 clk_register_clkdev(clk, NULL, "ahci.2");
739
740 clk = clk_register_gate(NULL, "sysram0_clk", "ahb_clk", 0,
741 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SYSRAM0_CLK_ENB, 0,
742 &_lock);
743 clk_register_clkdev(clk, "sysram0_clk", NULL);
744
745 clk = clk_register_gate(NULL, "sysram1_clk", "ahb_clk", 0,
746 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SYSRAM1_CLK_ENB, 0,
747 &_lock);
748 clk_register_clkdev(clk, "sysram1_clk", NULL);
749
750 clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk",
751 0, SPEAR1310_ADC_CLK_SYNT, NULL, adc_rtbl,
752 ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
753 clk_register_clkdev(clk, "adc_synth_clk", NULL);
754 clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL);
755
756 clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0,
757 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_ADC_CLK_ENB, 0,
758 &_lock);
759 clk_register_clkdev(clk, NULL, "adc_clk");
760
761 /* clock derived from apb clk */
762 clk = clk_register_gate(NULL, "ssp0_clk", "apb_clk", 0,
763 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_SSP_CLK_ENB, 0,
764 &_lock);
765 clk_register_clkdev(clk, NULL, "e0100000.spi");
766
767 clk = clk_register_gate(NULL, "gpio0_clk", "apb_clk", 0,
768 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPIO0_CLK_ENB, 0,
769 &_lock);
770 clk_register_clkdev(clk, NULL, "e0600000.gpio");
771
772 clk = clk_register_gate(NULL, "gpio1_clk", "apb_clk", 0,
773 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_GPIO1_CLK_ENB, 0,
774 &_lock);
775 clk_register_clkdev(clk, NULL, "e0680000.gpio");
776
777 clk = clk_register_gate(NULL, "i2s0_clk", "apb_clk", 0,
778 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_I2S0_CLK_ENB, 0,
779 &_lock);
780 clk_register_clkdev(clk, NULL, "e0180000.i2s");
781
782 clk = clk_register_gate(NULL, "i2s1_clk", "apb_clk", 0,
783 SPEAR1310_PERIP1_CLK_ENB, SPEAR1310_I2S1_CLK_ENB, 0,
784 &_lock);
785 clk_register_clkdev(clk, NULL, "e0200000.i2s");
786
787 clk = clk_register_gate(NULL, "kbd_clk", "apb_clk", 0,
788 SPEAR1310_PERIP2_CLK_ENB, SPEAR1310_KBD_CLK_ENB, 0,
789 &_lock);
790 clk_register_clkdev(clk, NULL, "e0300000.kbd");
791
792 /* RAS clks */
793 clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk",
794 gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents),
795 0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT0_1_CLK_SHIFT,
796 SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
797 clk_register_clkdev(clk, "gen_synth0_1_clk", NULL);
798
799 clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk",
800 gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents),
801 0, SPEAR1310_PLL_CFG, SPEAR1310_RAS_SYNT2_3_CLK_SHIFT,
802 SPEAR1310_RAS_SYNT_CLK_MASK, 0, &_lock);
803 clk_register_clkdev(clk, "gen_synth2_3_clk", NULL);
804
805 clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0,
806 SPEAR1310_RAS_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
807 &_lock);
808 clk_register_clkdev(clk, "gen_synth0_clk", NULL);
809
810 clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0,
811 SPEAR1310_RAS_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
812 &_lock);
813 clk_register_clkdev(clk, "gen_synth1_clk", NULL);
814
815 clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0,
816 SPEAR1310_RAS_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
817 &_lock);
818 clk_register_clkdev(clk, "gen_synth2_clk", NULL);
819
820 clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0,
821 SPEAR1310_RAS_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
822 &_lock);
823 clk_register_clkdev(clk, "gen_synth3_clk", NULL);
824
825 clk = clk_register_gate(NULL, "ras_osc_24m_clk", "osc_24m_clk", 0,
826 SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_24M_CLK_ENB, 0,
827 &_lock);
828 clk_register_clkdev(clk, "ras_osc_24m_clk", NULL);
829
830 clk = clk_register_gate(NULL, "ras_osc_25m_clk", "osc_25m_clk", 0,
831 SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_25M_CLK_ENB, 0,
832 &_lock);
833 clk_register_clkdev(clk, "ras_osc_25m_clk", NULL);
834
835 clk = clk_register_gate(NULL, "ras_osc_32k_clk", "osc_32k_clk", 0,
836 SPEAR1310_RAS_CLK_ENB, SPEAR1310_OSC_32K_CLK_ENB, 0,
837 &_lock);
838 clk_register_clkdev(clk, "ras_osc_32k_clk", NULL);
839
840 clk = clk_register_gate(NULL, "ras_pll2_clk", "pll2_clk", 0,
841 SPEAR1310_RAS_CLK_ENB, SPEAR1310_PLL2_CLK_ENB, 0,
842 &_lock);
843 clk_register_clkdev(clk, "ras_pll2_clk", NULL);
844
845 clk = clk_register_gate(NULL, "ras_pll3_clk", "pll3_clk", 0,
846 SPEAR1310_RAS_CLK_ENB, SPEAR1310_PLL3_CLK_ENB, 0,
847 &_lock);
848 clk_register_clkdev(clk, "ras_pll3_clk", NULL);
849
850 clk = clk_register_gate(NULL, "ras_tx125_clk", "gmii_125m_pad_clk", 0,
851 SPEAR1310_RAS_CLK_ENB, SPEAR1310_C125M_PAD_CLK_ENB, 0,
852 &_lock);
853 clk_register_clkdev(clk, "ras_tx125_clk", NULL);
854
855 clk = clk_register_fixed_rate(NULL, "ras_30m_fixed_clk", "pll5_clk", 0,
856 30000000);
857 clk = clk_register_gate(NULL, "ras_30m_clk", "ras_30m_fixed_clk", 0,
858 SPEAR1310_RAS_CLK_ENB, SPEAR1310_C30M_CLK_ENB, 0,
859 &_lock);
860 clk_register_clkdev(clk, "ras_30m_clk", NULL);
861
862 clk = clk_register_fixed_rate(NULL, "ras_48m_fixed_clk", "pll5_clk", 0,
863 48000000);
864 clk = clk_register_gate(NULL, "ras_48m_clk", "ras_48m_fixed_clk", 0,
865 SPEAR1310_RAS_CLK_ENB, SPEAR1310_C48M_CLK_ENB, 0,
866 &_lock);
867 clk_register_clkdev(clk, "ras_48m_clk", NULL);
868
869 clk = clk_register_gate(NULL, "ras_ahb_clk", "ahb_clk", 0,
870 SPEAR1310_RAS_CLK_ENB, SPEAR1310_ACLK_CLK_ENB, 0,
871 &_lock);
872 clk_register_clkdev(clk, "ras_ahb_clk", NULL);
873
874 clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0,
875 SPEAR1310_RAS_CLK_ENB, SPEAR1310_PCLK_CLK_ENB, 0,
876 &_lock);
877 clk_register_clkdev(clk, "ras_apb_clk", NULL);
878
879 clk = clk_register_fixed_rate(NULL, "ras_plclk0_clk", NULL, CLK_IS_ROOT,
880 50000000);
881
882 clk = clk_register_fixed_rate(NULL, "ras_tx50_clk", NULL, CLK_IS_ROOT,
883 50000000);
884
885 clk = clk_register_gate(NULL, "can0_clk", "apb_clk", 0,
886 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_CAN0_CLK_ENB, 0,
887 &_lock);
888 clk_register_clkdev(clk, NULL, "c_can_platform.0");
889
890 clk = clk_register_gate(NULL, "can1_clk", "apb_clk", 0,
891 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_CAN1_CLK_ENB, 0,
892 &_lock);
893 clk_register_clkdev(clk, NULL, "c_can_platform.1");
894
895 clk = clk_register_gate(NULL, "ras_smii0_clk", "ras_ahb_clk", 0,
896 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_MII0_CLK_ENB, 0,
897 &_lock);
898 clk_register_clkdev(clk, NULL, "5c400000.eth");
899
900 clk = clk_register_gate(NULL, "ras_smii1_clk", "ras_ahb_clk", 0,
901 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_MII1_CLK_ENB, 0,
902 &_lock);
903 clk_register_clkdev(clk, NULL, "5c500000.eth");
904
905 clk = clk_register_gate(NULL, "ras_smii2_clk", "ras_ahb_clk", 0,
906 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_MII2_CLK_ENB, 0,
907 &_lock);
908 clk_register_clkdev(clk, NULL, "5c600000.eth");
909
910 clk = clk_register_gate(NULL, "ras_rgmii_clk", "ras_ahb_clk", 0,
911 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_GMII_CLK_ENB, 0,
912 &_lock);
913 clk_register_clkdev(clk, NULL, "5c700000.eth");
914
915 clk = clk_register_mux(NULL, "smii_rgmii_phy_mux_clk",
916 smii_rgmii_phy_parents,
917 ARRAY_SIZE(smii_rgmii_phy_parents), 0,
918 SPEAR1310_RAS_CTRL_REG1,
919 SPEAR1310_SMII_RGMII_PHY_CLK_SHIFT,
920 SPEAR1310_PHY_CLK_MASK, 0, &_lock);
921 clk_register_clkdev(clk, NULL, "stmmacphy.1");
922 clk_register_clkdev(clk, NULL, "stmmacphy.2");
923 clk_register_clkdev(clk, NULL, "stmmacphy.4");
924
925 clk = clk_register_mux(NULL, "rmii_phy_mux_clk", rmii_phy_parents,
926 ARRAY_SIZE(rmii_phy_parents), 0,
927 SPEAR1310_RAS_CTRL_REG1, SPEAR1310_RMII_PHY_CLK_SHIFT,
928 SPEAR1310_PHY_CLK_MASK, 0, &_lock);
929 clk_register_clkdev(clk, NULL, "stmmacphy.3");
930
931 clk = clk_register_mux(NULL, "uart1_mux_clk", uart_parents,
932 ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
933 SPEAR1310_UART1_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
934 0, &_lock);
935 clk_register_clkdev(clk, "uart1_mux_clk", NULL);
936
937 clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0,
938 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART1_CLK_ENB, 0,
939 &_lock);
940 clk_register_clkdev(clk, NULL, "5c800000.serial");
941
942 clk = clk_register_mux(NULL, "uart2_mux_clk", uart_parents,
943 ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
944 SPEAR1310_UART2_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
945 0, &_lock);
946 clk_register_clkdev(clk, "uart2_mux_clk", NULL);
947
948 clk = clk_register_gate(NULL, "uart2_clk", "uart2_mux_clk", 0,
949 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART2_CLK_ENB, 0,
950 &_lock);
951 clk_register_clkdev(clk, NULL, "5c900000.serial");
952
953 clk = clk_register_mux(NULL, "uart3_mux_clk", uart_parents,
954 ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
955 SPEAR1310_UART3_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
956 0, &_lock);
957 clk_register_clkdev(clk, "uart3_mux_clk", NULL);
958
959 clk = clk_register_gate(NULL, "uart3_clk", "uart3_mux_clk", 0,
960 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART3_CLK_ENB, 0,
961 &_lock);
962 clk_register_clkdev(clk, NULL, "5ca00000.serial");
963
964 clk = clk_register_mux(NULL, "uart4_mux_clk", uart_parents,
965 ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
966 SPEAR1310_UART4_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
967 0, &_lock);
968 clk_register_clkdev(clk, "uart4_mux_clk", NULL);
969
970 clk = clk_register_gate(NULL, "uart4_clk", "uart4_mux_clk", 0,
971 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART4_CLK_ENB, 0,
972 &_lock);
973 clk_register_clkdev(clk, NULL, "5cb00000.serial");
974
975 clk = clk_register_mux(NULL, "uart5_mux_clk", uart_parents,
976 ARRAY_SIZE(uart_parents), 0, SPEAR1310_RAS_CTRL_REG0,
977 SPEAR1310_UART5_CLK_SHIFT, SPEAR1310_RAS_UART_CLK_MASK,
978 0, &_lock);
979 clk_register_clkdev(clk, "uart5_mux_clk", NULL);
980
981 clk = clk_register_gate(NULL, "uart5_clk", "uart5_mux_clk", 0,
982 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_UART5_CLK_ENB, 0,
983 &_lock);
984 clk_register_clkdev(clk, NULL, "5cc00000.serial");
985
986 clk = clk_register_mux(NULL, "i2c1_mux_clk", i2c_parents,
987 ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
988 SPEAR1310_I2C1_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
989 &_lock);
990 clk_register_clkdev(clk, "i2c1_mux_clk", NULL);
991
992 clk = clk_register_gate(NULL, "i2c1_clk", "i2c1_mux_clk", 0,
993 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C1_CLK_ENB, 0,
994 &_lock);
995 clk_register_clkdev(clk, NULL, "5cd00000.i2c");
996
997 clk = clk_register_mux(NULL, "i2c2_mux_clk", i2c_parents,
998 ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
999 SPEAR1310_I2C2_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
1000 &_lock);
1001 clk_register_clkdev(clk, "i2c2_mux_clk", NULL);
1002
1003 clk = clk_register_gate(NULL, "i2c2_clk", "i2c2_mux_clk", 0,
1004 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C2_CLK_ENB, 0,
1005 &_lock);
1006 clk_register_clkdev(clk, NULL, "5ce00000.i2c");
1007
1008 clk = clk_register_mux(NULL, "i2c3_mux_clk", i2c_parents,
1009 ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
1010 SPEAR1310_I2C3_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
1011 &_lock);
1012 clk_register_clkdev(clk, "i2c3_mux_clk", NULL);
1013
1014 clk = clk_register_gate(NULL, "i2c3_clk", "i2c3_mux_clk", 0,
1015 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C3_CLK_ENB, 0,
1016 &_lock);
1017 clk_register_clkdev(clk, NULL, "5cf00000.i2c");
1018
1019 clk = clk_register_mux(NULL, "i2c4_mux_clk", i2c_parents,
1020 ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
1021 SPEAR1310_I2C4_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
1022 &_lock);
1023 clk_register_clkdev(clk, "i2c4_mux_clk", NULL);
1024
1025 clk = clk_register_gate(NULL, "i2c4_clk", "i2c4_mux_clk", 0,
1026 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C4_CLK_ENB, 0,
1027 &_lock);
1028 clk_register_clkdev(clk, NULL, "5d000000.i2c");
1029
1030 clk = clk_register_mux(NULL, "i2c5_mux_clk", i2c_parents,
1031 ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
1032 SPEAR1310_I2C5_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
1033 &_lock);
1034 clk_register_clkdev(clk, "i2c5_mux_clk", NULL);
1035
1036 clk = clk_register_gate(NULL, "i2c5_clk", "i2c5_mux_clk", 0,
1037 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C5_CLK_ENB, 0,
1038 &_lock);
1039 clk_register_clkdev(clk, NULL, "5d100000.i2c");
1040
1041 clk = clk_register_mux(NULL, "i2c6_mux_clk", i2c_parents,
1042 ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
1043 SPEAR1310_I2C6_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
1044 &_lock);
1045 clk_register_clkdev(clk, "i2c6_mux_clk", NULL);
1046
1047 clk = clk_register_gate(NULL, "i2c6_clk", "i2c6_mux_clk", 0,
1048 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C6_CLK_ENB, 0,
1049 &_lock);
1050 clk_register_clkdev(clk, NULL, "5d200000.i2c");
1051
1052 clk = clk_register_mux(NULL, "i2c7_mux_clk", i2c_parents,
1053 ARRAY_SIZE(i2c_parents), 0, SPEAR1310_RAS_CTRL_REG0,
1054 SPEAR1310_I2C7_CLK_SHIFT, SPEAR1310_I2C_CLK_MASK, 0,
1055 &_lock);
1056 clk_register_clkdev(clk, "i2c7_mux_clk", NULL);
1057
1058 clk = clk_register_gate(NULL, "i2c7_clk", "i2c7_mux_clk", 0,
1059 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_I2C7_CLK_ENB, 0,
1060 &_lock);
1061 clk_register_clkdev(clk, NULL, "5d300000.i2c");
1062
1063 clk = clk_register_mux(NULL, "ssp1_mux_clk", ssp1_parents,
1064 ARRAY_SIZE(ssp1_parents), 0, SPEAR1310_RAS_CTRL_REG0,
1065 SPEAR1310_SSP1_CLK_SHIFT, SPEAR1310_SSP1_CLK_MASK, 0,
1066 &_lock);
1067 clk_register_clkdev(clk, "ssp1_mux_clk", NULL);
1068
1069 clk = clk_register_gate(NULL, "ssp1_clk", "ssp1_mux_clk", 0,
1070 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_SSP1_CLK_ENB, 0,
1071 &_lock);
1072 clk_register_clkdev(clk, NULL, "5d400000.spi");
1073
1074 clk = clk_register_mux(NULL, "pci_mux_clk", pci_parents,
1075 ARRAY_SIZE(pci_parents), 0, SPEAR1310_RAS_CTRL_REG0,
1076 SPEAR1310_PCI_CLK_SHIFT, SPEAR1310_PCI_CLK_MASK, 0,
1077 &_lock);
1078 clk_register_clkdev(clk, "pci_mux_clk", NULL);
1079
1080 clk = clk_register_gate(NULL, "pci_clk", "pci_mux_clk", 0,
1081 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_PCI_CLK_ENB, 0,
1082 &_lock);
1083 clk_register_clkdev(clk, NULL, "pci");
1084
1085 clk = clk_register_mux(NULL, "tdm1_mux_clk", tdm_parents,
1086 ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
1087 SPEAR1310_TDM1_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
1088 &_lock);
1089 clk_register_clkdev(clk, "tdm1_mux_clk", NULL);
1090
1091 clk = clk_register_gate(NULL, "tdm1_clk", "tdm1_mux_clk", 0,
1092 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM1_CLK_ENB, 0,
1093 &_lock);
1094 clk_register_clkdev(clk, NULL, "tdm_hdlc.0");
1095
1096 clk = clk_register_mux(NULL, "tdm2_mux_clk", tdm_parents,
1097 ARRAY_SIZE(tdm_parents), 0, SPEAR1310_RAS_CTRL_REG0,
1098 SPEAR1310_TDM2_CLK_SHIFT, SPEAR1310_TDM_CLK_MASK, 0,
1099 &_lock);
1100 clk_register_clkdev(clk, "tdm2_mux_clk", NULL);
1101
1102 clk = clk_register_gate(NULL, "tdm2_clk", "tdm2_mux_clk", 0,
1103 SPEAR1310_RAS_SW_CLK_CTRL, SPEAR1310_TDM2_CLK_ENB, 0,
1104 &_lock);
1105 clk_register_clkdev(clk, NULL, "tdm_hdlc.1");
1106}
diff --git a/drivers/clk/spear/spear1340_clock.c b/drivers/clk/spear/spear1340_clock.c
new file mode 100644
index 000000000000..f130919d5bf8
--- /dev/null
+++ b/drivers/clk/spear/spear1340_clock.c
@@ -0,0 +1,964 @@
1/*
2 * arch/arm/mach-spear13xx/spear1340_clock.c
3 *
4 * SPEAr1340 machine clock framework source file
5 *
6 * Copyright (C) 2012 ST Microelectronics
7 * Viresh Kumar <viresh.kumar@st.com>
8 *
9 * This file is licensed under the terms of the GNU General Public
10 * License version 2. This program is licensed "as is" without any
11 * warranty of any kind, whether express or implied.
12 */
13
14#include <linux/clk.h>
15#include <linux/clkdev.h>
16#include <linux/err.h>
17#include <linux/io.h>
18#include <linux/of_platform.h>
19#include <linux/spinlock_types.h>
20#include <mach/spear.h>
21#include "clk.h"
22
/* Clock Configuration Registers */
/*
 * All registers below live in the SoC "misc" register space; VA_MISC_BASE is
 * its virtual base address (provided by <mach/spear.h>).
 *
 * The indented *_SHIFT/*_MASK pairs describe mux select fields: SHIFT is the
 * bit position and MASK is the field WIDTH in bits, not a bitmask (e.g.
 * SCLK_SRC_SEL_MASK is 3 for the 8-entry sys_parents table below, and
 * HCLK_SRC_SEL_MASK is 1 for the 2-entry ahb_parents table).
 */
#define SPEAR1340_SYS_CLK_CTRL		(VA_MISC_BASE + 0x200)
	#define SPEAR1340_HCLK_SRC_SEL_SHIFT	27
	#define SPEAR1340_HCLK_SRC_SEL_MASK	1
	#define SPEAR1340_SCLK_SRC_SEL_SHIFT	23
	#define SPEAR1340_SCLK_SRC_SEL_MASK	3

/* PLL related registers and bit values */
#define SPEAR1340_PLL_CFG		(VA_MISC_BASE + 0x210)
	/* PLL_CFG bit values */
	#define SPEAR1340_CLCD_SYNT_CLK_MASK	1
	#define SPEAR1340_CLCD_SYNT_CLK_SHIFT	31
	#define SPEAR1340_GEN_SYNT2_3_CLK_SHIFT	29
	#define SPEAR1340_GEN_SYNT_CLK_MASK	2
	#define SPEAR1340_GEN_SYNT0_1_CLK_SHIFT	27
	#define SPEAR1340_PLL_CLK_MASK		2
	#define SPEAR1340_PLL3_CLK_SHIFT	24
	#define SPEAR1340_PLL2_CLK_SHIFT	22
	#define SPEAR1340_PLL1_CLK_SHIFT	20

/* Per-PLL control and frequency programming registers */
#define SPEAR1340_PLL1_CTR		(VA_MISC_BASE + 0x214)
#define SPEAR1340_PLL1_FRQ		(VA_MISC_BASE + 0x218)
#define SPEAR1340_PLL2_CTR		(VA_MISC_BASE + 0x220)
#define SPEAR1340_PLL2_FRQ		(VA_MISC_BASE + 0x224)
#define SPEAR1340_PLL3_CTR		(VA_MISC_BASE + 0x22C)
#define SPEAR1340_PLL3_FRQ		(VA_MISC_BASE + 0x230)
#define SPEAR1340_PLL4_CTR		(VA_MISC_BASE + 0x238)
#define SPEAR1340_PLL4_FRQ		(VA_MISC_BASE + 0x23C)
#define SPEAR1340_PERIP_CLK_CFG		(VA_MISC_BASE + 0x244)
	/* PERIP_CLK_CFG bit values */
	#define SPEAR1340_SPDIF_CLK_MASK	1
	#define SPEAR1340_SPDIF_OUT_CLK_SHIFT	15
	#define SPEAR1340_SPDIF_IN_CLK_SHIFT	14
	#define SPEAR1340_GPT3_CLK_SHIFT	13
	#define SPEAR1340_GPT2_CLK_SHIFT	12
	#define SPEAR1340_GPT_CLK_MASK		1
	#define SPEAR1340_GPT1_CLK_SHIFT	9
	#define SPEAR1340_GPT0_CLK_SHIFT	8
	#define SPEAR1340_UART_CLK_MASK		2
	#define SPEAR1340_UART1_CLK_SHIFT	6
	#define SPEAR1340_UART0_CLK_SHIFT	4
	#define SPEAR1340_CLCD_CLK_MASK		2
	#define SPEAR1340_CLCD_CLK_SHIFT	2
	#define SPEAR1340_C3_CLK_MASK		1
	#define SPEAR1340_C3_CLK_SHIFT		1

#define SPEAR1340_GMAC_CLK_CFG		(VA_MISC_BASE + 0x248)
	#define SPEAR1340_GMAC_PHY_CLK_MASK		1
	#define SPEAR1340_GMAC_PHY_CLK_SHIFT		2
	#define SPEAR1340_GMAC_PHY_INPUT_CLK_MASK	2
	#define SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT	0

#define SPEAR1340_I2S_CLK_CFG		(VA_MISC_BASE + 0x24C)
	/* I2S_CLK_CFG register mask */
	/*
	 * Unlike the mux fields above, the X/Y masks here are real bitmasks
	 * for the fractional synthesizer scale fields consumed via the
	 * aux_clk_masks structures defined further down in this file.
	 */
	#define SPEAR1340_I2S_SCLK_X_MASK	0x1F
	#define SPEAR1340_I2S_SCLK_X_SHIFT	27
	#define SPEAR1340_I2S_SCLK_Y_MASK	0x1F
	#define SPEAR1340_I2S_SCLK_Y_SHIFT	22
	#define SPEAR1340_I2S_SCLK_EQ_SEL_SHIFT	21
	#define SPEAR1340_I2S_SCLK_SYNTH_ENB	20
	#define SPEAR1340_I2S_PRS1_CLK_X_MASK	0xFF
	#define SPEAR1340_I2S_PRS1_CLK_X_SHIFT	12
	#define SPEAR1340_I2S_PRS1_CLK_Y_MASK	0xFF
	#define SPEAR1340_I2S_PRS1_CLK_Y_SHIFT	4
	#define SPEAR1340_I2S_PRS1_EQ_SEL_SHIFT	3
	#define SPEAR1340_I2S_REF_SEL_MASK	1
	#define SPEAR1340_I2S_REF_SHIFT		2
	#define SPEAR1340_I2S_SRC_CLK_MASK	2
	#define SPEAR1340_I2S_SRC_CLK_SHIFT	0

/* Fractional/aux synthesizer programming registers */
#define SPEAR1340_C3_CLK_SYNT		(VA_MISC_BASE + 0x250)
#define SPEAR1340_UART0_CLK_SYNT	(VA_MISC_BASE + 0x254)
#define SPEAR1340_UART1_CLK_SYNT	(VA_MISC_BASE + 0x258)
#define SPEAR1340_GMAC_CLK_SYNT		(VA_MISC_BASE + 0x25C)
#define SPEAR1340_SDHCI_CLK_SYNT	(VA_MISC_BASE + 0x260)
#define SPEAR1340_CFXD_CLK_SYNT		(VA_MISC_BASE + 0x264)
#define SPEAR1340_ADC_CLK_SYNT		(VA_MISC_BASE + 0x270)
#define SPEAR1340_AMBA_CLK_SYNT		(VA_MISC_BASE + 0x274)
#define SPEAR1340_CLCD_CLK_SYNT		(VA_MISC_BASE + 0x27C)
#define SPEAR1340_SYS_CLK_SYNT		(VA_MISC_BASE + 0x284)
#define SPEAR1340_GEN_CLK_SYNT0		(VA_MISC_BASE + 0x28C)
#define SPEAR1340_GEN_CLK_SYNT1		(VA_MISC_BASE + 0x294)
#define SPEAR1340_GEN_CLK_SYNT2		(VA_MISC_BASE + 0x29C)
/* NOTE(review): 0x304 breaks the 8-byte stride of SYNT0..2 — verify vs TRM */
#define SPEAR1340_GEN_CLK_SYNT3		(VA_MISC_BASE + 0x304)
/* Peripheral clock enable registers; values below are gate bit numbers */
#define SPEAR1340_PERIP1_CLK_ENB	(VA_MISC_BASE + 0x30C)
	#define SPEAR1340_RTC_CLK_ENB		31
	#define SPEAR1340_ADC_CLK_ENB		30
	#define SPEAR1340_C3_CLK_ENB		29
	#define SPEAR1340_CLCD_CLK_ENB		27
	#define SPEAR1340_DMA_CLK_ENB		25
	#define SPEAR1340_GPIO1_CLK_ENB		24
	#define SPEAR1340_GPIO0_CLK_ENB		23
	#define SPEAR1340_GPT1_CLK_ENB		22
	#define SPEAR1340_GPT0_CLK_ENB		21
	#define SPEAR1340_I2S_PLAY_CLK_ENB	20
	#define SPEAR1340_I2S_REC_CLK_ENB	19
	#define SPEAR1340_I2C0_CLK_ENB		18
	#define SPEAR1340_SSP_CLK_ENB		17
	#define SPEAR1340_UART0_CLK_ENB		15
	#define SPEAR1340_PCIE_SATA_CLK_ENB	12
	#define SPEAR1340_UOC_CLK_ENB		11
	#define SPEAR1340_UHC1_CLK_ENB		10
	#define SPEAR1340_UHC0_CLK_ENB		9
	#define SPEAR1340_GMAC_CLK_ENB		8
	#define SPEAR1340_CFXD_CLK_ENB		7
	#define SPEAR1340_SDHCI_CLK_ENB		6
	#define SPEAR1340_SMI_CLK_ENB		5
	#define SPEAR1340_FSMC_CLK_ENB		4
	#define SPEAR1340_SYSRAM0_CLK_ENB	3
	#define SPEAR1340_SYSRAM1_CLK_ENB	2
	#define SPEAR1340_SYSROM_CLK_ENB	1
	#define SPEAR1340_BUS_CLK_ENB		0

#define SPEAR1340_PERIP2_CLK_ENB	(VA_MISC_BASE + 0x310)
	#define SPEAR1340_THSENS_CLK_ENB	8
	#define SPEAR1340_I2S_REF_PAD_CLK_ENB	7
	#define SPEAR1340_ACP_CLK_ENB		6
	#define SPEAR1340_GPT3_CLK_ENB		5
	#define SPEAR1340_GPT2_CLK_ENB		4
	#define SPEAR1340_KBD_CLK_ENB		3
	#define SPEAR1340_CPU_DBG_CLK_ENB	2
	#define SPEAR1340_DDR_CORE_CLK_ENB	1
	#define SPEAR1340_DDR_CTRL_CLK_ENB	0

#define SPEAR1340_PERIP3_CLK_ENB	(VA_MISC_BASE + 0x314)
	#define SPEAR1340_PLGPIO_CLK_ENB	18
	#define SPEAR1340_VIDEO_DEC_CLK_ENB	16
	#define SPEAR1340_VIDEO_ENC_CLK_ENB	15
	#define SPEAR1340_SPDIF_OUT_CLK_ENB	13
	#define SPEAR1340_SPDIF_IN_CLK_ENB	12
	#define SPEAR1340_VIDEO_IN_CLK_ENB	11
	#define SPEAR1340_CAM0_CLK_ENB		10
	#define SPEAR1340_CAM1_CLK_ENB		9
	#define SPEAR1340_CAM2_CLK_ENB		8
	#define SPEAR1340_CAM3_CLK_ENB		7
	#define SPEAR1340_MALI_CLK_ENB		6
	#define SPEAR1340_CEC0_CLK_ENB		5
	#define SPEAR1340_CEC1_CLK_ENB		4
	#define SPEAR1340_PWM_CLK_ENB		3
	#define SPEAR1340_I2C1_CLK_ENB		2
	#define SPEAR1340_UART1_CLK_ENB		1
/* Serializes read-modify-write access to the shared misc clock registers */
static DEFINE_SPINLOCK(_lock);
166
/*
 * pll rate configuration table, in ascending order of rates.
 * m/n are the PLL multiplier/divider, p the post-divider selector
 * (pll = vco >> p — presumably; confirm against pll_calc_rate in clk.h).
 */
static struct pll_rate_tbl pll_rtbl[] = {
	/* PCLK 24MHz */
	{.mode = 0, .m = 0x83, .n = 0x04, .p = 0x5}, /* vco 1572, pll 49.125 MHz */
	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x3}, /* vco 1000, pll 125 MHz */
	{.mode = 0, .m = 0x64, .n = 0x06, .p = 0x1}, /* vco 800, pll 400 MHz */
	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x1}, /* vco 1000, pll 500 MHz */
	{.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x1}, /* vco 1328, pll 664 MHz */
	{.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x1}, /* vco 1600, pll 800 MHz */
	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1000, pll 1 GHz */
	{.mode = 0, .m = 0x96, .n = 0x06, .p = 0x0}, /* vco 1200, pll 1200 MHz */
};
179
/* vco-pll4 rate configuration table, in ascending order of rates */
static struct pll_rate_tbl pll4_rtbl[] = {
	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x2}, /* vco 1000, pll 250 MHz */
	{.mode = 0, .m = 0xA6, .n = 0x06, .p = 0x2}, /* vco 1328, pll 332 MHz */
	{.mode = 0, .m = 0xC8, .n = 0x06, .p = 0x2}, /* vco 1600, pll 400 MHz */
	{.mode = 0, .m = 0x7D, .n = 0x06, .p = 0x0}, /* vco 1000, pll 1 GHz */
};
187
/*
 * AMBA synthesizer table: all entries below generate 166 MHz for
 * different values of the vco1div2 input clock, so the AMBA bus rate
 * stays fixed regardless of the chosen VCO rate.
 */
static struct frac_rate_tbl amba_synth_rtbl[] = {
	{.div = 0x06062}, /* for vco1div2 = 500 MHz */
	{.div = 0x04D1B}, /* for vco1div2 = 400 MHz */
	{.div = 0x04000}, /* for vco1div2 = 332 MHz */
	{.div = 0x03031}, /* for vco1div2 = 250 MHz */
	{.div = 0x0268D}, /* for vco1div2 = 200 MHz */
};
199
/*
 * Synthesizer clock derived from vco1div2. This clock is one of the
 * possible clocks to feed cpu directly.
 * We can program this synthesizer to make cpu run on different clock
 * frequencies.
 * Following table provides configuration values to let cpu run on 200,
 * 250, 332, 400 or 500 MHz considering different possibilities of input
 * (vco1div2) clock.
 *
 * --------------------------------------------------------------------
 * vco1div2(Mhz)	fout(Mhz)	cpuclk = fout/2		div
 * --------------------------------------------------------------------
 * 400			200		100			0x04000
 * 400			250		125			0x03333
 * 400			332		166			0x0268D
 * 400			400		200			0x02000
 * --------------------------------------------------------------------
 * 500			200		100			0x05000
 * 500			250		125			0x04000
 * 500			332		166			0x03031
 * 500			400		200			0x02800
 * 500			500		250			0x02000
 * --------------------------------------------------------------------
 * 664			200		100			0x06a38
 * 664			250		125			0x054FD
 * 664			332		166			0x04000
 * 664			400		200			0x0351E
 * 664			500		250			0x02A7E
 * --------------------------------------------------------------------
 * 800			200		100			0x08000
 * 800			250		125			0x06666
 * 800			332		166			0x04D18
 * 800			400		200			0x04000
 * 800			500		250			0x03333
 * --------------------------------------------------------------------
 * sys rate configuration table is in descending order of divisor.
 */
static struct frac_rate_tbl sys_synth_rtbl[] = {
	{.div = 0x08000},
	{.div = 0x06a38},
	{.div = 0x06666},
	{.div = 0x054FD},
	{.div = 0x05000},
	{.div = 0x04D18},
	{.div = 0x04000},
	{.div = 0x0351E},
	{.div = 0x03333},
	{.div = 0x03031},
	{.div = 0x02A7E},
	{.div = 0x02800},
	{.div = 0x0268D},
	{.div = 0x02000},
};
253
/*
 * aux rate configuration table, in ascending order of rates.
 * Output = parent * xscale / yscale; .eq selects between the two
 * equation modes of the aux synthesizer (see aux_clk_masks below).
 */
static struct aux_rate_tbl aux_rtbl[] = {
	/* For VCO1div2 = 500 MHz */
	{.xscale = 10, .yscale = 204, .eq = 0}, /* 12.29 MHz */
	{.xscale = 4, .yscale = 21, .eq = 0}, /* 48 MHz */
	{.xscale = 2, .yscale = 6, .eq = 0}, /* 83 MHz */
	{.xscale = 2, .yscale = 4, .eq = 0}, /* 125 MHz */
	{.xscale = 1, .yscale = 3, .eq = 1}, /* 166 MHz */
	{.xscale = 1, .yscale = 2, .eq = 1}, /* 250 MHz */
};
264
/* gmac rate configuration table, in ascending order of rates */
static struct aux_rate_tbl gmac_rtbl[] = {
	/* For gmac phy input clk */
	{.xscale = 2, .yscale = 6, .eq = 0}, /* divided by 6 */
	{.xscale = 2, .yscale = 4, .eq = 0}, /* divided by 4 */
	{.xscale = 1, .yscale = 3, .eq = 1}, /* divided by 3 */
	{.xscale = 1, .yscale = 2, .eq = 1}, /* divided by 2 */
};
273
/* clcd pixel-clock rate configuration table, in ascending order of rates */
static struct frac_rate_tbl clcd_rtbl[] = {
	{.div = 0x14000}, /* 25 MHz , for vco1div4 = 250 MHz */
	{.div = 0x1284B}, /* 27 MHz , for vco1div4 = 250 MHz */
	{.div = 0x0D8D3}, /* 58 MHz , for vco1div4 = 393 MHz */
	{.div = 0x0B72C}, /* 58 MHz , for vco1div4 = 332 MHz */
	{.div = 0x089EE}, /* 58 MHz , for vco1div4 = 250 MHz */
	{.div = 0x07BA0}, /* 65 MHz , for vco1div4 = 250 MHz */
	{.div = 0x06f1C}, /* 72 MHz , for vco1div4 = 250 MHz */
	{.div = 0x06E58}, /* 58 MHz , for vco1div4 = 200 MHz */
	{.div = 0x06c1B}, /* 74 MHz , for vco1div4 = 250 MHz */
	{.div = 0x04A12}, /* 108 MHz , for vco1div4 = 250 MHz */
	{.div = 0x0378E}, /* 144 MHz , for vco1div4 = 250 MHz */
	{.div = 0x0360D}, /* 148 MHz , for vco1div4 = 250 MHz */
	{.div = 0x035E0}, /* 148.5 MHz, for vco1div4 = 250 MHz */
};
290
/*
 * i2s prescaler1 masks: field layout of the I2S PRS1 fractional
 * synthesizer within the SPEAR1340_I2S_CLK_CFG register, consumed by
 * clk_register_aux().
 */
static struct aux_clk_masks i2s_prs1_masks = {
	.eq_sel_mask = AUX_EQ_SEL_MASK,
	.eq_sel_shift = SPEAR1340_I2S_PRS1_EQ_SEL_SHIFT,
	.eq1_mask = AUX_EQ1_SEL,
	.eq2_mask = AUX_EQ2_SEL,
	.xscale_sel_mask = SPEAR1340_I2S_PRS1_CLK_X_MASK,
	.xscale_sel_shift = SPEAR1340_I2S_PRS1_CLK_X_SHIFT,
	.yscale_sel_mask = SPEAR1340_I2S_PRS1_CLK_Y_MASK,
	.yscale_sel_shift = SPEAR1340_I2S_PRS1_CLK_Y_SHIFT,
};
302
/*
 * i2s sclk (bit clock) synthesizer masks. Unlike the PRS1 masks above,
 * this synthesizer also has an enable bit in the same register.
 */
static struct aux_clk_masks i2s_sclk_masks = {
	.eq_sel_mask = AUX_EQ_SEL_MASK,
	.eq_sel_shift = SPEAR1340_I2S_SCLK_EQ_SEL_SHIFT,
	.eq1_mask = AUX_EQ1_SEL,
	.eq2_mask = AUX_EQ2_SEL,
	.xscale_sel_mask = SPEAR1340_I2S_SCLK_X_MASK,
	.xscale_sel_shift = SPEAR1340_I2S_SCLK_X_SHIFT,
	.yscale_sel_mask = SPEAR1340_I2S_SCLK_Y_MASK,
	.yscale_sel_shift = SPEAR1340_I2S_SCLK_Y_SHIFT,
	.enable_bit = SPEAR1340_I2S_SCLK_SYNTH_ENB,
};
315
/* i2s prs1 aux rate configuration table, in ascending order of rates */
static struct aux_rate_tbl i2s_prs1_rtbl[] = {
	/* For parent clk = 49.152 MHz */
	{.xscale = 1, .yscale = 12, .eq = 0}, /* 2.048 MHz, smp freq = 8Khz */
	{.xscale = 11, .yscale = 96, .eq = 0}, /* 2.816 MHz, smp freq = 11Khz */
	{.xscale = 1, .yscale = 6, .eq = 0}, /* 4.096 MHz, smp freq = 16Khz */
	{.xscale = 11, .yscale = 48, .eq = 0}, /* 5.632 MHz, smp freq = 22Khz */

	/*
	 * with parent clk = 49.152, freq gen is 8.192 MHz, smp freq = 32Khz
	 * with parent clk = 12.288, freq gen is 2.048 MHz, smp freq = 8Khz
	 */
	{.xscale = 1, .yscale = 3, .eq = 0},

	/* For parent clk = 49.152 MHz */
	{.xscale = 17, .yscale = 37, .eq = 0}, /* 11.289 MHz, smp freq = 44Khz*/
	{.xscale = 1, .yscale = 2, .eq = 0}, /* 12.288 MHz, smp freq = 48Khz*/
};
334
/* i2s sclk aux rate configuration table, in ascending order of rates */
static struct aux_rate_tbl i2s_sclk_rtbl[] = {
	/* For sclk = ref_clk * x/2/y */
	{.xscale = 1, .yscale = 4, .eq = 0},
	{.xscale = 1, .yscale = 2, .eq = 0},
};
341
/*
 * adc rate configuration table, in ascending order of rates.
 * The possible adc range is 2.5 MHz to 20 MHz.
 */
static struct aux_rate_tbl adc_rtbl[] = {
	/* For ahb = 166.67 MHz */
	{.xscale = 1, .yscale = 31, .eq = 0}, /* 2.68 MHz */
	{.xscale = 2, .yscale = 21, .eq = 0}, /* 7.94 MHz */
	{.xscale = 4, .yscale = 21, .eq = 0}, /* 15.87 MHz */
	{.xscale = 10, .yscale = 42, .eq = 0}, /* 19.84 MHz */
};
351
352/* General synth rate configuration table, in ascending order of rates */
353static struct frac_rate_tbl gen_rtbl[] = {
354 /* For vco1div4 = 250 MHz */
355 {.div = 0x1624E}, /* 22.5792 MHz */
356 {.div = 0x14585}, /* 24.576 MHz */
357 {.div = 0x14000}, /* 25 MHz */
358 {.div = 0x0B127}, /* 45.1584 MHz */
359 {.div = 0x0A000}, /* 50 MHz */
360 {.div = 0x061A8}, /* 81.92 MHz */
361 {.div = 0x05000}, /* 100 MHz */
362 {.div = 0x02800}, /* 200 MHz */
363 {.div = 0x02620}, /* 210 MHz */
364 {.div = 0x02460}, /* 220 MHz */
365 {.div = 0x022C0}, /* 230 MHz */
366 {.div = 0x02160}, /* 240 MHz */
367 {.div = 0x02000}, /* 250 MHz */
368};
369
/*
 * clock parents
 *
 * Parent-name tables for the muxes registered in spear1340_clk_init();
 * array order matches the hardware mux selector value.  "none" marks
 * selector positions with no connected clock source.
 */
static const char *vco_parents[] = { "osc_24m_clk", "osc_25m_clk", };
static const char *sys_parents[] = { "none", "pll1_clk", "none", "none",
	"sys_synth_clk", "none", "pll2_clk", "pll3_clk", };
static const char *ahb_parents[] = { "cpu_div3_clk", "amba_synth_clk", };
static const char *gpt_parents[] = { "osc_24m_clk", "apb_clk", };
static const char *uart0_parents[] = { "pll5_clk", "osc_24m_clk",
	"uart0_synth_gate_clk", };
static const char *uart1_parents[] = { "pll5_clk", "osc_24m_clk",
	"uart1_synth_gate_clk", };
static const char *c3_parents[] = { "pll5_clk", "c3_synth_gate_clk", };
static const char *gmac_phy_input_parents[] = { "gmii_125m_pad_clk", "pll2_clk",
	"osc_25m_clk", };
static const char *gmac_phy_parents[] = { "gmac_phy_input_mux_clk",
	"gmac_phy_synth_gate_clk", };
static const char *clcd_synth_parents[] = { "vco1div4_clk", "pll2_clk", };
static const char *clcd_pixel_parents[] = { "pll5_clk", "clcd_synth_clk", };
static const char *i2s_src_parents[] = { "vco1div2_clk", "pll2_clk", "pll3_clk",
	"i2s_src_pad_clk", };
static const char *i2s_ref_parents[] = { "i2s_src_mux_clk", "i2s_prs1_clk", };
static const char *spdif_out_parents[] = { "i2s_src_pad_clk", "gen_synth2_clk",
};
static const char *spdif_in_parents[] = { "pll2_clk", "gen_synth3_clk", };

static const char *gen_synth0_1_parents[] = { "vco1div4_clk", "vco3div2_clk",
	"pll3_clk", };
static const char *gen_synth2_3_parents[] = { "vco1div4_clk", "vco3div2_clk",
	"pll2_clk", };
398
/*
 * spear1340_clk_init - register the SPEAr1340 clock tree
 *
 * Registers, in dependency order, the fixed-rate root oscillators, the
 * VCO/PLLs, the fractional and aux synthesizers, the parent muxes and
 * the peripheral gate clocks, and adds clkdev lookups so device drivers
 * can look their clocks up by con_id or dev_id.  Runs once at early
 * boot (__init).  All register-modifying clock ops share the file-wide
 * _lock spinlock.
 */
void __init spear1340_clk_init(void)
{
	struct clk *clk, *clk1;

	clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
	clk_register_clkdev(clk, "apb_pclk", NULL);

	clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
			32000);
	clk_register_clkdev(clk, "osc_32k_clk", NULL);

	clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
			24000000);
	clk_register_clkdev(clk, "osc_24m_clk", NULL);

	clk = clk_register_fixed_rate(NULL, "osc_25m_clk", NULL, CLK_IS_ROOT,
			25000000);
	clk_register_clkdev(clk, "osc_25m_clk", NULL);

	clk = clk_register_fixed_rate(NULL, "gmii_125m_pad_clk", NULL,
			CLK_IS_ROOT, 125000000);
	clk_register_clkdev(clk, "gmii_125m_pad_clk", NULL);

	clk = clk_register_fixed_rate(NULL, "i2s_src_pad_clk", NULL,
			CLK_IS_ROOT, 12288000);
	clk_register_clkdev(clk, "i2s_src_pad_clk", NULL);

	/* clock derived from 32 KHz osc clk */
	clk = clk_register_gate(NULL, "rtc-spear", "osc_32k_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_RTC_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "fc900000.rtc");

	/* clock derived from 24 or 25 MHz osc clk */
	/* vco-pll */
	clk = clk_register_mux(NULL, "vco1_mux_clk", vco_parents,
			ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
			SPEAR1340_PLL1_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
			&_lock);
	clk_register_clkdev(clk, "vco1_mux_clk", NULL);
	/* registers the VCO and its /p PLL output in one go; clk1 gets pll1 */
	clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "vco1_mux_clk",
			0, SPEAR1340_PLL1_CTR, SPEAR1340_PLL1_FRQ, pll_rtbl,
			ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
	clk_register_clkdev(clk, "vco1_clk", NULL);
	clk_register_clkdev(clk1, "pll1_clk", NULL);

	clk = clk_register_mux(NULL, "vco2_mux_clk", vco_parents,
			ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
			SPEAR1340_PLL2_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
			&_lock);
	clk_register_clkdev(clk, "vco2_mux_clk", NULL);
	clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "vco2_mux_clk",
			0, SPEAR1340_PLL2_CTR, SPEAR1340_PLL2_FRQ, pll_rtbl,
			ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
	clk_register_clkdev(clk, "vco2_clk", NULL);
	clk_register_clkdev(clk1, "pll2_clk", NULL);

	clk = clk_register_mux(NULL, "vco3_mux_clk", vco_parents,
			ARRAY_SIZE(vco_parents), 0, SPEAR1340_PLL_CFG,
			SPEAR1340_PLL3_CLK_SHIFT, SPEAR1340_PLL_CLK_MASK, 0,
			&_lock);
	clk_register_clkdev(clk, "vco3_mux_clk", NULL);
	clk = clk_register_vco_pll("vco3_clk", "pll3_clk", NULL, "vco3_mux_clk",
			0, SPEAR1340_PLL3_CTR, SPEAR1340_PLL3_FRQ, pll_rtbl,
			ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
	clk_register_clkdev(clk, "vco3_clk", NULL);
	clk_register_clkdev(clk1, "pll3_clk", NULL);

	/* vco4 has no source mux: it is fed directly by the 24 MHz osc */
	clk = clk_register_vco_pll("vco4_clk", "pll4_clk", NULL, "osc_24m_clk",
			0, SPEAR1340_PLL4_CTR, SPEAR1340_PLL4_FRQ, pll4_rtbl,
			ARRAY_SIZE(pll4_rtbl), &_lock, &clk1, NULL);
	clk_register_clkdev(clk, "vco4_clk", NULL);
	clk_register_clkdev(clk1, "pll4_clk", NULL);

	/* pll5/pll6 are modelled as fixed rates (48 MHz / 25 MHz) */
	clk = clk_register_fixed_rate(NULL, "pll5_clk", "osc_24m_clk", 0,
			48000000);
	clk_register_clkdev(clk, "pll5_clk", NULL);

	clk = clk_register_fixed_rate(NULL, "pll6_clk", "osc_25m_clk", 0,
			25000000);
	clk_register_clkdev(clk, "pll6_clk", NULL);

	/* vco div n clocks */
	clk = clk_register_fixed_factor(NULL, "vco1div2_clk", "vco1_clk", 0, 1,
			2);
	clk_register_clkdev(clk, "vco1div2_clk", NULL);

	clk = clk_register_fixed_factor(NULL, "vco1div4_clk", "vco1_clk", 0, 1,
			4);
	clk_register_clkdev(clk, "vco1div4_clk", NULL);

	clk = clk_register_fixed_factor(NULL, "vco2div2_clk", "vco2_clk", 0, 1,
			2);
	clk_register_clkdev(clk, "vco2div2_clk", NULL);

	clk = clk_register_fixed_factor(NULL, "vco3div2_clk", "vco3_clk", 0, 1,
			2);
	clk_register_clkdev(clk, "vco3div2_clk", NULL);

	/* peripherals */
	/* thermal_clk's struct clk is deliberately dropped: only the gate
	 * below is ever looked up (by the "spear_thermal" device) */
	clk_register_fixed_factor(NULL, "thermal_clk", "osc_24m_clk", 0, 1,
			128);
	clk = clk_register_gate(NULL, "thermal_gate_clk", "thermal_clk", 0,
			SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_THSENS_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "spear_thermal");

	/* clock derived from pll4 clk */
	clk = clk_register_fixed_factor(NULL, "ddr_clk", "pll4_clk", 0, 1,
			1);
	clk_register_clkdev(clk, "ddr_clk", NULL);

	/* clock derived from pll1 clk */
	clk = clk_register_frac("sys_synth_clk", "vco1div2_clk", 0,
			SPEAR1340_SYS_CLK_SYNT, sys_synth_rtbl,
			ARRAY_SIZE(sys_synth_rtbl), &_lock);
	clk_register_clkdev(clk, "sys_synth_clk", NULL);

	clk = clk_register_frac("amba_synth_clk", "vco1div2_clk", 0,
			SPEAR1340_AMBA_CLK_SYNT, amba_synth_rtbl,
			ARRAY_SIZE(amba_synth_rtbl), &_lock);
	clk_register_clkdev(clk, "amba_synth_clk", NULL);

	/* NOTE(review): lookup name "sys_clk" differs from the clock name
	 * "sys_mux_clk" — presumably intentional for consumers; confirm */
	clk = clk_register_mux(NULL, "sys_mux_clk", sys_parents,
			ARRAY_SIZE(sys_parents), 0, SPEAR1340_SYS_CLK_CTRL,
			SPEAR1340_SCLK_SRC_SEL_SHIFT,
			SPEAR1340_SCLK_SRC_SEL_MASK, 0, &_lock);
	clk_register_clkdev(clk, "sys_clk", NULL);

	clk = clk_register_fixed_factor(NULL, "cpu_clk", "sys_mux_clk", 0, 1,
			2);
	clk_register_clkdev(clk, "cpu_clk", NULL);

	clk = clk_register_fixed_factor(NULL, "cpu_div3_clk", "cpu_clk", 0, 1,
			3);
	clk_register_clkdev(clk, "cpu_div3_clk", NULL);

	clk = clk_register_fixed_factor(NULL, "wdt_clk", "cpu_clk", 0, 1,
			2);
	clk_register_clkdev(clk, NULL, "ec800620.wdt");

	clk = clk_register_mux(NULL, "ahb_clk", ahb_parents,
			ARRAY_SIZE(ahb_parents), 0, SPEAR1340_SYS_CLK_CTRL,
			SPEAR1340_HCLK_SRC_SEL_SHIFT,
			SPEAR1340_HCLK_SRC_SEL_MASK, 0, &_lock);
	clk_register_clkdev(clk, "ahb_clk", NULL);

	clk = clk_register_fixed_factor(NULL, "apb_clk", "ahb_clk", 0, 1,
			2);
	clk_register_clkdev(clk, "apb_clk", NULL);

	/* gpt clocks: per-timer source mux followed by an enable gate */
	clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt_parents,
			ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
			SPEAR1340_GPT0_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
			&_lock);
	clk_register_clkdev(clk, "gpt0_mux_clk", NULL);
	clk = clk_register_gate(NULL, "gpt0_clk", "gpt0_mux_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT0_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "gpt0");

	clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt_parents,
			ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
			SPEAR1340_GPT1_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
			&_lock);
	clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
	clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPT1_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "gpt1");

	/* gpt2/gpt3 enables live in PERIP2, not PERIP1 */
	clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt_parents,
			ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
			SPEAR1340_GPT2_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
			&_lock);
	clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
	clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
			SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT2_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "gpt2");

	clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt_parents,
			ARRAY_SIZE(gpt_parents), 0, SPEAR1340_PERIP_CLK_CFG,
			SPEAR1340_GPT3_CLK_SHIFT, SPEAR1340_GPT_CLK_MASK, 0,
			&_lock);
	clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
	clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
			SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_GPT3_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "gpt3");

	/* others */
	clk = clk_register_aux("uart0_synth_clk", "uart0_synth_gate_clk",
			"vco1div2_clk", 0, SPEAR1340_UART0_CLK_SYNT, NULL,
			aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
	clk_register_clkdev(clk, "uart0_synth_clk", NULL);
	clk_register_clkdev(clk1, "uart0_synth_gate_clk", NULL);

	clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
			ARRAY_SIZE(uart0_parents), 0, SPEAR1340_PERIP_CLK_CFG,
			SPEAR1340_UART0_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
			&_lock);
	clk_register_clkdev(clk, "uart0_mux_clk", NULL);

	clk = clk_register_gate(NULL, "uart0_clk", "uart0_mux_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART0_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "e0000000.serial");

	clk = clk_register_aux("uart1_synth_clk", "uart1_synth_gate_clk",
			"vco1div2_clk", 0, SPEAR1340_UART1_CLK_SYNT, NULL,
			aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
	clk_register_clkdev(clk, "uart1_synth_clk", NULL);
	clk_register_clkdev(clk1, "uart1_synth_gate_clk", NULL);

	clk = clk_register_mux(NULL, "uart1_mux_clk", uart1_parents,
			ARRAY_SIZE(uart1_parents), 0, SPEAR1340_PERIP_CLK_CFG,
			SPEAR1340_UART1_CLK_SHIFT, SPEAR1340_UART_CLK_MASK, 0,
			&_lock);
	clk_register_clkdev(clk, "uart1_mux_clk", NULL);

	clk = clk_register_gate(NULL, "uart1_clk", "uart1_mux_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UART1_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "b4100000.serial");

	clk = clk_register_aux("sdhci_synth_clk", "sdhci_synth_gate_clk",
			"vco1div2_clk", 0, SPEAR1340_SDHCI_CLK_SYNT, NULL,
			aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
	clk_register_clkdev(clk, "sdhci_synth_clk", NULL);
	clk_register_clkdev(clk1, "sdhci_synth_gate_clk", NULL);

	clk = clk_register_gate(NULL, "sdhci_clk", "sdhci_synth_gate_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SDHCI_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "b3000000.sdhci");

	clk = clk_register_aux("cfxd_synth_clk", "cfxd_synth_gate_clk",
			"vco1div2_clk", 0, SPEAR1340_CFXD_CLK_SYNT, NULL,
			aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
	clk_register_clkdev(clk, "cfxd_synth_clk", NULL);
	clk_register_clkdev(clk1, "cfxd_synth_gate_clk", NULL);

	/* one gate serves both the CF and XD interfaces: two lookups */
	clk = clk_register_gate(NULL, "cfxd_clk", "cfxd_synth_gate_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CFXD_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "b2800000.cf");
	clk_register_clkdev(clk, NULL, "arasan_xd");

	clk = clk_register_aux("c3_synth_clk", "c3_synth_gate_clk",
			"vco1div2_clk", 0, SPEAR1340_C3_CLK_SYNT, NULL,
			aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
	clk_register_clkdev(clk, "c3_synth_clk", NULL);
	clk_register_clkdev(clk1, "c3_synth_gate_clk", NULL);

	clk = clk_register_mux(NULL, "c3_mux_clk", c3_parents,
			ARRAY_SIZE(c3_parents), 0, SPEAR1340_PERIP_CLK_CFG,
			SPEAR1340_C3_CLK_SHIFT, SPEAR1340_C3_CLK_MASK, 0,
			&_lock);
	clk_register_clkdev(clk, "c3_mux_clk", NULL);

	clk = clk_register_gate(NULL, "c3_clk", "c3_mux_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_C3_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "c3");

	/* gmac */
	clk = clk_register_mux(NULL, "gmac_phy_input_mux_clk",
			gmac_phy_input_parents,
			ARRAY_SIZE(gmac_phy_input_parents), 0,
			SPEAR1340_GMAC_CLK_CFG,
			SPEAR1340_GMAC_PHY_INPUT_CLK_SHIFT,
			SPEAR1340_GMAC_PHY_INPUT_CLK_MASK, 0, &_lock);
	clk_register_clkdev(clk, "gmac_phy_input_mux_clk", NULL);

	clk = clk_register_aux("gmac_phy_synth_clk", "gmac_phy_synth_gate_clk",
			"gmac_phy_input_mux_clk", 0, SPEAR1340_GMAC_CLK_SYNT,
			NULL, gmac_rtbl, ARRAY_SIZE(gmac_rtbl), &_lock, &clk1);
	clk_register_clkdev(clk, "gmac_phy_synth_clk", NULL);
	clk_register_clkdev(clk1, "gmac_phy_synth_gate_clk", NULL);

	clk = clk_register_mux(NULL, "gmac_phy_mux_clk", gmac_phy_parents,
			ARRAY_SIZE(gmac_phy_parents), 0,
			SPEAR1340_PERIP_CLK_CFG, SPEAR1340_GMAC_PHY_CLK_SHIFT,
			SPEAR1340_GMAC_PHY_CLK_MASK, 0, &_lock);
	clk_register_clkdev(clk, NULL, "stmmacphy.0");

	/* clcd */
	clk = clk_register_mux(NULL, "clcd_synth_mux_clk", clcd_synth_parents,
			ARRAY_SIZE(clcd_synth_parents), 0,
			SPEAR1340_CLCD_CLK_SYNT, SPEAR1340_CLCD_SYNT_CLK_SHIFT,
			SPEAR1340_CLCD_SYNT_CLK_MASK, 0, &_lock);
	clk_register_clkdev(clk, "clcd_synth_mux_clk", NULL);

	clk = clk_register_frac("clcd_synth_clk", "clcd_synth_mux_clk", 0,
			SPEAR1340_CLCD_CLK_SYNT, clcd_rtbl,
			ARRAY_SIZE(clcd_rtbl), &_lock);
	clk_register_clkdev(clk, "clcd_synth_clk", NULL);

	/* NOTE(review): lookup "clcd_pixel_clk" vs clock name
	 * "clcd_pixel_mux_clk" — confirm consumers expect this con_id */
	clk = clk_register_mux(NULL, "clcd_pixel_mux_clk", clcd_pixel_parents,
			ARRAY_SIZE(clcd_pixel_parents), 0,
			SPEAR1340_PERIP_CLK_CFG, SPEAR1340_CLCD_CLK_SHIFT,
			SPEAR1340_CLCD_CLK_MASK, 0, &_lock);
	clk_register_clkdev(clk, "clcd_pixel_clk", NULL);

	clk = clk_register_gate(NULL, "clcd_clk", "clcd_pixel_mux_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_CLCD_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, "clcd_clk", NULL);

	/* i2s */
	/* NOTE(review): lookup "i2s_src_clk" vs clock name "i2s_src_mux_clk"
	 * — same name-mismatch pattern as above; confirm */
	clk = clk_register_mux(NULL, "i2s_src_mux_clk", i2s_src_parents,
			ARRAY_SIZE(i2s_src_parents), 0, SPEAR1340_I2S_CLK_CFG,
			SPEAR1340_I2S_SRC_CLK_SHIFT, SPEAR1340_I2S_SRC_CLK_MASK,
			0, &_lock);
	clk_register_clkdev(clk, "i2s_src_clk", NULL);

	/* prs1 synth has no gate output, hence NULL gate name/ptr */
	clk = clk_register_aux("i2s_prs1_clk", NULL, "i2s_src_mux_clk", 0,
			SPEAR1340_I2S_CLK_CFG, &i2s_prs1_masks, i2s_prs1_rtbl,
			ARRAY_SIZE(i2s_prs1_rtbl), &_lock, NULL);
	clk_register_clkdev(clk, "i2s_prs1_clk", NULL);

	clk = clk_register_mux(NULL, "i2s_ref_mux_clk", i2s_ref_parents,
			ARRAY_SIZE(i2s_ref_parents), 0, SPEAR1340_I2S_CLK_CFG,
			SPEAR1340_I2S_REF_SHIFT, SPEAR1340_I2S_REF_SEL_MASK, 0,
			&_lock);
	clk_register_clkdev(clk, "i2s_ref_clk", NULL);

	clk = clk_register_gate(NULL, "i2s_ref_pad_clk", "i2s_ref_mux_clk", 0,
			SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_I2S_REF_PAD_CLK_ENB,
			0, &_lock);
	clk_register_clkdev(clk, "i2s_ref_pad_clk", NULL);

	clk = clk_register_aux("i2s_sclk_clk", "i2s_sclk_gate_clk",
			"i2s_ref_mux_clk", 0, SPEAR1340_I2S_CLK_CFG,
			&i2s_sclk_masks, i2s_sclk_rtbl,
			ARRAY_SIZE(i2s_sclk_rtbl), &_lock, &clk1);
	clk_register_clkdev(clk, "i2s_sclk_clk", NULL);
	clk_register_clkdev(clk1, "i2s_sclk_gate_clk", NULL);

	/* clock derived from ahb clk */
	clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2C0_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "e0280000.i2c");

	clk = clk_register_gate(NULL, "i2c1_clk", "ahb_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2C1_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "b4000000.i2c");

	/* single gate feeds both DMA controllers: two lookups */
	clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_DMA_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "ea800000.dma");
	clk_register_clkdev(clk, NULL, "eb000000.dma");

	clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GMAC_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "e2000000.eth");

	clk = clk_register_gate(NULL, "fsmc_clk", "ahb_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_FSMC_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "b0000000.flash");

	clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SMI_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "ea000000.flash");

	clk = clk_register_gate(NULL, "usbh0_clk", "ahb_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UHC0_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, "usbh.0_clk", NULL);

	clk = clk_register_gate(NULL, "usbh1_clk", "ahb_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UHC1_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, "usbh.1_clk", NULL);

	clk = clk_register_gate(NULL, "uoc_clk", "ahb_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_UOC_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "uoc");

	/* one gate for both PCIe and SATA: two lookups */
	clk = clk_register_gate(NULL, "pcie_sata_clk", "ahb_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_PCIE_SATA_CLK_ENB,
			0, &_lock);
	clk_register_clkdev(clk, NULL, "dw_pcie");
	clk_register_clkdev(clk, NULL, "ahci");

	clk = clk_register_gate(NULL, "sysram0_clk", "ahb_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SYSRAM0_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, "sysram0_clk", NULL);

	clk = clk_register_gate(NULL, "sysram1_clk", "ahb_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SYSRAM1_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, "sysram1_clk", NULL);

	clk = clk_register_aux("adc_synth_clk", "adc_synth_gate_clk", "ahb_clk",
			0, SPEAR1340_ADC_CLK_SYNT, NULL, adc_rtbl,
			ARRAY_SIZE(adc_rtbl), &_lock, &clk1);
	clk_register_clkdev(clk, "adc_synth_clk", NULL);
	clk_register_clkdev(clk1, "adc_synth_gate_clk", NULL);

	clk = clk_register_gate(NULL, "adc_clk", "adc_synth_gate_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_ADC_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "adc_clk");

	/* clock derived from apb clk */
	clk = clk_register_gate(NULL, "ssp_clk", "apb_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_SSP_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "e0100000.spi");

	clk = clk_register_gate(NULL, "gpio0_clk", "apb_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPIO0_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "e0600000.gpio");

	clk = clk_register_gate(NULL, "gpio1_clk", "apb_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_GPIO1_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "e0680000.gpio");

	clk = clk_register_gate(NULL, "i2s_play_clk", "apb_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2S_PLAY_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "b2400000.i2s");

	clk = clk_register_gate(NULL, "i2s_rec_clk", "apb_clk", 0,
			SPEAR1340_PERIP1_CLK_ENB, SPEAR1340_I2S_REC_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "b2000000.i2s");

	clk = clk_register_gate(NULL, "kbd_clk", "apb_clk", 0,
			SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_KBD_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "e0300000.kbd");

	/* RAS clks */
	clk = clk_register_mux(NULL, "gen_synth0_1_mux_clk",
			gen_synth0_1_parents, ARRAY_SIZE(gen_synth0_1_parents),
			0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT0_1_CLK_SHIFT,
			SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
	clk_register_clkdev(clk, "gen_synth0_1_clk", NULL);

	clk = clk_register_mux(NULL, "gen_synth2_3_mux_clk",
			gen_synth2_3_parents, ARRAY_SIZE(gen_synth2_3_parents),
			0, SPEAR1340_PLL_CFG, SPEAR1340_GEN_SYNT2_3_CLK_SHIFT,
			SPEAR1340_GEN_SYNT_CLK_MASK, 0, &_lock);
	clk_register_clkdev(clk, "gen_synth2_3_clk", NULL);

	/* NOTE(review): the frac clocks below name their parents
	 * "gen_synth0_1_clk"/"gen_synth2_3_clk", but the muxes above are
	 * registered as "..._mux_clk" — verify parent resolution works */
	clk = clk_register_frac("gen_synth0_clk", "gen_synth0_1_clk", 0,
			SPEAR1340_GEN_CLK_SYNT0, gen_rtbl, ARRAY_SIZE(gen_rtbl),
			&_lock);
	clk_register_clkdev(clk, "gen_synth0_clk", NULL);

	clk = clk_register_frac("gen_synth1_clk", "gen_synth0_1_clk", 0,
			SPEAR1340_GEN_CLK_SYNT1, gen_rtbl, ARRAY_SIZE(gen_rtbl),
			&_lock);
	clk_register_clkdev(clk, "gen_synth1_clk", NULL);

	clk = clk_register_frac("gen_synth2_clk", "gen_synth2_3_clk", 0,
			SPEAR1340_GEN_CLK_SYNT2, gen_rtbl, ARRAY_SIZE(gen_rtbl),
			&_lock);
	clk_register_clkdev(clk, "gen_synth2_clk", NULL);

	clk = clk_register_frac("gen_synth3_clk", "gen_synth2_3_clk", 0,
			SPEAR1340_GEN_CLK_SYNT3, gen_rtbl, ARRAY_SIZE(gen_rtbl),
			&_lock);
	clk_register_clkdev(clk, "gen_synth3_clk", NULL);

	clk = clk_register_gate(NULL, "mali_clk", "gen_synth3_clk", 0,
			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_MALI_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "mali");

	clk = clk_register_gate(NULL, "cec0_clk", "ahb_clk", 0,
			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CEC0_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "spear_cec.0");

	clk = clk_register_gate(NULL, "cec1_clk", "ahb_clk", 0,
			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CEC1_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "spear_cec.1");

	clk = clk_register_mux(NULL, "spdif_out_mux_clk", spdif_out_parents,
			ARRAY_SIZE(spdif_out_parents), 0,
			SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_OUT_CLK_SHIFT,
			SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
	clk_register_clkdev(clk, "spdif_out_mux_clk", NULL);

	clk = clk_register_gate(NULL, "spdif_out_clk", "spdif_out_mux_clk", 0,
			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_OUT_CLK_ENB,
			0, &_lock);
	clk_register_clkdev(clk, NULL, "spdif-out");

	clk = clk_register_mux(NULL, "spdif_in_mux_clk", spdif_in_parents,
			ARRAY_SIZE(spdif_in_parents), 0,
			SPEAR1340_PERIP_CLK_CFG, SPEAR1340_SPDIF_IN_CLK_SHIFT,
			SPEAR1340_SPDIF_CLK_MASK, 0, &_lock);
	clk_register_clkdev(clk, "spdif_in_mux_clk", NULL);

	clk = clk_register_gate(NULL, "spdif_in_clk", "spdif_in_mux_clk", 0,
			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_SPDIF_IN_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "spdif-in");

	/* NOTE(review): the parent clocks "acp_mux_clk", "plgpio_mux_clk",
	 * "video_dec_mux_clk", "video_enc_mux_clk", "video_in_mux_clk",
	 * "cam[0-3]_mux_clk" and "pwm_mux_clk" used below are not registered
	 * anywhere in the visible portion of this file — confirm they are
	 * provided elsewhere, otherwise these gates will have no parent */
	clk = clk_register_gate(NULL, "acp_clk", "acp_mux_clk", 0,
			SPEAR1340_PERIP2_CLK_ENB, SPEAR1340_ACP_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "acp_clk");

	clk = clk_register_gate(NULL, "plgpio_clk", "plgpio_mux_clk", 0,
			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PLGPIO_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "plgpio");

	clk = clk_register_gate(NULL, "video_dec_clk", "video_dec_mux_clk", 0,
			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_DEC_CLK_ENB,
			0, &_lock);
	clk_register_clkdev(clk, NULL, "video_dec");

	clk = clk_register_gate(NULL, "video_enc_clk", "video_enc_mux_clk", 0,
			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_ENC_CLK_ENB,
			0, &_lock);
	clk_register_clkdev(clk, NULL, "video_enc");

	clk = clk_register_gate(NULL, "video_in_clk", "video_in_mux_clk", 0,
			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_VIDEO_IN_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "spear_vip");

	clk = clk_register_gate(NULL, "cam0_clk", "cam0_mux_clk", 0,
			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM0_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "spear_camif.0");

	clk = clk_register_gate(NULL, "cam1_clk", "cam1_mux_clk", 0,
			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM1_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "spear_camif.1");

	clk = clk_register_gate(NULL, "cam2_clk", "cam2_mux_clk", 0,
			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM2_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "spear_camif.2");

	clk = clk_register_gate(NULL, "cam3_clk", "cam3_mux_clk", 0,
			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_CAM3_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "spear_camif.3");

	clk = clk_register_gate(NULL, "pwm_clk", "pwm_mux_clk", 0,
			SPEAR1340_PERIP3_CLK_ENB, SPEAR1340_PWM_CLK_ENB, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "pwm");
}
diff --git a/drivers/clk/spear/spear3xx_clock.c b/drivers/clk/spear/spear3xx_clock.c
new file mode 100644
index 000000000000..440bb3e4c971
--- /dev/null
+++ b/drivers/clk/spear/spear3xx_clock.c
@@ -0,0 +1,612 @@
1/*
2 * SPEAr3xx machines clock framework source file
3 *
4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/clk.h>
13#include <linux/clkdev.h>
14#include <linux/err.h>
15#include <linux/io.h>
16#include <linux/of_platform.h>
17#include <linux/spinlock_types.h>
18#include <mach/misc_regs.h>
19#include "clk.h"
20
/* protects concurrent read-modify-write of the shared misc clock registers */
static DEFINE_SPINLOCK(_lock);

/*
 * Misc register offsets (relative to MISC_BASE) and their bit fields.
 * The *_SHIFT/*_MASK pairs select mux/ratio fields; the *_ENB values are
 * bit positions within the corresponding enable register.
 */
#define PLL1_CTR			(MISC_BASE + 0x008)
#define PLL1_FRQ			(MISC_BASE + 0x00C)
#define PLL2_CTR			(MISC_BASE + 0x014)
#define PLL2_FRQ			(MISC_BASE + 0x018)
#define PLL_CLK_CFG			(MISC_BASE + 0x020)
	/* PLL_CLK_CFG register masks */
	#define MCTR_CLK_SHIFT		28
	#define MCTR_CLK_MASK		3

#define CORE_CLK_CFG			(MISC_BASE + 0x024)
	/* CORE CLK CFG register masks */
	#define GEN_SYNTH2_3_CLK_SHIFT	18
	#define GEN_SYNTH2_3_CLK_MASK	1

	#define HCLK_RATIO_SHIFT	10
	#define HCLK_RATIO_MASK		2
	#define PCLK_RATIO_SHIFT	8
	#define PCLK_RATIO_MASK		2

#define PERIP_CLK_CFG			(MISC_BASE + 0x028)
	/* PERIP_CLK_CFG register masks */
	#define UART_CLK_SHIFT		4
	#define UART_CLK_MASK		1
	#define FIRDA_CLK_SHIFT		5
	#define FIRDA_CLK_MASK		2
	#define GPT0_CLK_SHIFT		8
	#define GPT1_CLK_SHIFT		11
	#define GPT2_CLK_SHIFT		12
	#define GPT_CLK_MASK		1

#define PERIP1_CLK_ENB			(MISC_BASE + 0x02C)
	/* PERIP1_CLK_ENB register masks (gate bit positions) */
	#define UART_CLK_ENB		3
	#define SSP_CLK_ENB		5
	#define I2C_CLK_ENB		7
	#define JPEG_CLK_ENB		8
	#define FIRDA_CLK_ENB		10
	#define GPT1_CLK_ENB		11
	#define GPT2_CLK_ENB		12
	#define ADC_CLK_ENB		15
	#define RTC_CLK_ENB		17
	#define GPIO_CLK_ENB		18
	#define DMA_CLK_ENB		19
	#define SMI_CLK_ENB		21
	#define GMAC_CLK_ENB		23
	#define USBD_CLK_ENB		24
	#define USBH_CLK_ENB		25
	#define C3_CLK_ENB		31

#define RAS_CLK_ENB			(MISC_BASE + 0x034)
	#define RAS_AHB_CLK_ENB		0
	#define RAS_PLL1_CLK_ENB	1
	#define RAS_APB_CLK_ENB		2
	#define RAS_32K_CLK_ENB		3
	#define RAS_24M_CLK_ENB		4
	#define RAS_48M_CLK_ENB		5
	#define RAS_PLL2_CLK_ENB	7
	#define RAS_SYNT0_CLK_ENB	8
	#define RAS_SYNT1_CLK_ENB	9
	#define RAS_SYNT2_CLK_ENB	10
	#define RAS_SYNT3_CLK_ENB	11

#define PRSC0_CLK_CFG			(MISC_BASE + 0x044)
#define PRSC1_CLK_CFG			(MISC_BASE + 0x048)
#define PRSC2_CLK_CFG			(MISC_BASE + 0x04C)
#define AMEM_CLK_CFG			(MISC_BASE + 0x050)
	#define AMEM_CLK_ENB		0

#define CLCD_CLK_SYNT			(MISC_BASE + 0x05C)
#define FIRDA_CLK_SYNT			(MISC_BASE + 0x060)
#define UART_CLK_SYNT			(MISC_BASE + 0x064)
#define GMAC_CLK_SYNT			(MISC_BASE + 0x068)
#define GEN0_CLK_SYNT			(MISC_BASE + 0x06C)
#define GEN1_CLK_SYNT			(MISC_BASE + 0x070)
#define GEN2_CLK_SYNT			(MISC_BASE + 0x074)
#define GEN3_CLK_SYNT			(MISC_BASE + 0x078)
99
/* pll rate configuration table, in ascending order of rates */
static struct pll_rate_tbl pll_rtbl[] = {
	{.mode = 0, .m = 0x53, .n = 0x0C, .p = 0x1}, /* vco 332 & pll 166 MHz */
	{.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* vco 532 & pll 266 MHz */
	{.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* vco 664 & pll 332 MHz */
};

/* aux rate configuration table, in ascending order of rates */
static struct aux_rate_tbl aux_rtbl[] = {
	/* For PLL1 = 332 MHz */
	{.xscale = 2, .yscale = 27, .eq = 0}, /* 12.296 MHz */
	{.xscale = 2, .yscale = 8, .eq = 0}, /* 41.5 MHz */
	{.xscale = 2, .yscale = 4, .eq = 0}, /* 83 MHz */
	{.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
};

/* gpt rate configuration table, in ascending order of rates */
static struct gpt_rate_tbl gpt_rtbl[] = {
	/* For pll1 = 332 MHz */
	{.mscale = 4, .nscale = 0}, /* 41.5 MHz */
	{.mscale = 2, .nscale = 0}, /* 55.3 MHz */
	{.mscale = 1, .nscale = 0}, /* 83 MHz */
};

/*
 * clock parents
 *
 * Parent-name tables for the SPEAr3xx muxes; array order matches the
 * hardware mux selector value, "none" marks unconnected positions.
 */
static const char *uart0_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", };
static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk",
};
static const char *gpt0_parents[] = { "pll3_48m_clk", "gpt0_synth_clk", };
static const char *gpt1_parents[] = { "pll3_48m_clk", "gpt1_synth_clk", };
static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", };
static const char *gen2_3_parents[] = { "pll1_clk", "pll2_clk", };
static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
	"pll2_clk", };
134
#ifdef CONFIG_MACH_SPEAR300
/*
 * spear300_clk_init - register the SPEAr300 RAS peripheral clocks
 *
 * Every RAS clock here is a plain 1:1 child of a shared RAS bus/PLL
 * clock, so register them table-driven: one fixed-factor clock plus a
 * clkdev lookup binding it to its consuming device.
 */
static void __init spear300_clk_init(void)
{
	const struct {
		const char *name;	/* clock name */
		const char *parent;	/* RAS parent clock */
		const char *dev_id;	/* consuming device */
	} ras_clks[] = {
		{ "clcd_clk", "ras_pll3_48m_clk", "60000000.clcd" },
		{ "fsmc_clk", "ras_ahb_clk", "94000000.flash" },
		{ "sdhci_clk", "ras_ahb_clk", "70000000.sdhci" },
		{ "gpio1_clk", "ras_apb_clk", "a9000000.gpio" },
		{ "kbd_clk", "ras_apb_clk", "a0000000.kbd" },
	};
	unsigned int i;

	for (i = 0; i < sizeof(ras_clks) / sizeof(ras_clks[0]); i++) {
		struct clk *clk;

		/* factor 1/1: the child simply mirrors the parent's rate */
		clk = clk_register_fixed_factor(NULL, ras_clks[i].name,
				ras_clks[i].parent, 0, 1, 1);
		clk_register_clkdev(clk, NULL, ras_clks[i].dev_id);
	}
}
#endif
161
/* SPEAr310 RAS clock registration */
#ifdef CONFIG_MACH_SPEAR310
/*
 * spear310_clk_init - register the SPEAr310 RAS peripheral clocks
 *
 * All entries are 1:1 fixed-factor children of the shared RAS clocks;
 * each is registered and then wired to its consumer via clkdev, either
 * by connection id (con_id) or by device id (dev_id).
 */
static void __init spear310_clk_init(void)
{
	const struct {
		const char *name;	/* clock name */
		const char *parent;	/* RAS parent clock */
		const char *con_id;	/* clkdev connection id (may be NULL) */
		const char *dev_id;	/* clkdev device id (may be NULL) */
	} ras_clks[] = {
		{ "emi_clk", "ras_ahb_clk", "emi", NULL },
		{ "fsmc_clk", "ras_ahb_clk", NULL, "44000000.flash" },
		{ "tdm_clk", "ras_ahb_clk", NULL, "tdm" },
		{ "uart1_clk", "ras_apb_clk", NULL, "b2000000.serial" },
		{ "uart2_clk", "ras_apb_clk", NULL, "b2080000.serial" },
		{ "uart3_clk", "ras_apb_clk", NULL, "b2100000.serial" },
		{ "uart4_clk", "ras_apb_clk", NULL, "b2180000.serial" },
		{ "uart5_clk", "ras_apb_clk", NULL, "b2200000.serial" },
	};
	unsigned int i;

	for (i = 0; i < sizeof(ras_clks) / sizeof(ras_clks[0]); i++) {
		struct clk *clk;

		/* factor 1/1: the child simply mirrors the parent's rate */
		clk = clk_register_fixed_factor(NULL, ras_clks[i].name,
				ras_clks[i].parent, 0, 1, 1);
		clk_register_clkdev(clk, ras_clks[i].con_id,
				ras_clks[i].dev_id);
	}
}
#endif
201
/* array of all spear 320 clock lookups */
#ifdef CONFIG_MACH_SPEAR320
	/*
	 * Parent-select fields inside SPEAR320_CONTROL_REG /
	 * SPEAR320_EXT_CTRL_REG (registers defined in the mach header):
	 * *_SHIFT is the bit offset, *_MASK the field width as passed to
	 * clk_register_mux(), *_VAL_* the selector values for each parent.
	 */
	#define SMII_PCLK_SHIFT			18
	#define SMII_PCLK_MASK			2
	#define SMII_PCLK_VAL_PAD		0x0
	#define SMII_PCLK_VAL_PLL2		0x1
	#define SMII_PCLK_VAL_SYNTH0		0x2
	#define SDHCI_PCLK_SHIFT		15
	#define SDHCI_PCLK_MASK			1
	#define SDHCI_PCLK_VAL_48M		0x0
	#define SDHCI_PCLK_VAL_SYNTH3		0x1
	#define I2S_REF_PCLK_SHIFT		8
	#define I2S_REF_PCLK_MASK		1
	#define I2S_REF_PCLK_SYNTH_VAL		0x1
	#define I2S_REF_PCLK_PLL2_VAL		0x0
	#define UART1_PCLK_SHIFT		6
	#define UART1_PCLK_MASK			1
	#define SPEAR320_UARTX_PCLK_VAL_SYNTH1	0x0
	#define SPEAR320_UARTX_PCLK_VAL_APB	0x1

/* Parent-name tables for the muxes below; order must match the *_VAL_* selectors */
static const char *i2s_ref_parents[] = { "ras_pll2_clk",
	"ras_gen2_synth_gate_clk", };
static const char *sdhci_parents[] = { "ras_pll3_48m_clk",
	"ras_gen3_synth_gate_clk",
};
/*
 * NOTE(review): first entry is "smii_125m_pad" but the root clock below is
 * registered under the name "smii_125m_pad_clk" (only its clkdev con_id is
 * "smii_125m_pad") — verify the mux parent lookup actually resolves.
 */
static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk",
	"ras_gen0_synth_gate_clk", };
static const char *uartx_parents[] = { "ras_gen1_synth_gate_clk", "ras_apb_clk",
};

/*
 * Register the SPEAr320-specific (RAS area) clocks: a 125 MHz pad root for
 * SMII, simple fixed-factor children of the RAS bus clocks, and several
 * parent-select muxes (I2S ref, SDHCI, SMII, UART1..6, RS485) driven by the
 * SPEAR320 control/ext-control registers.
 */
static void __init spear320_clk_init(void)
{
	struct clk *clk;

	/* 125 MHz clock coming in on the SMII pad */
	clk = clk_register_fixed_rate(NULL, "smii_125m_pad_clk", NULL,
			CLK_IS_ROOT, 125000000);
	clk_register_clkdev(clk, "smii_125m_pad", NULL);

	clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_48m_clk", 0,
			1, 1);
	clk_register_clkdev(clk, NULL, "90000000.clcd");

	/* 1:1 children of the RAS AHB clock */
	clk = clk_register_fixed_factor(NULL, "emi_clk", "ras_ahb_clk", 0, 1,
			1);
	clk_register_clkdev(clk, "emi", NULL);

	clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1,
			1);
	clk_register_clkdev(clk, NULL, "4c000000.flash");

	clk = clk_register_fixed_factor(NULL, "i2c1_clk", "ras_ahb_clk", 0, 1,
			1);
	clk_register_clkdev(clk, NULL, "a7000000.i2c");

	clk = clk_register_fixed_factor(NULL, "pwm_clk", "ras_ahb_clk", 0, 1,
			1);
	clk_register_clkdev(clk, "pwm", NULL);

	clk = clk_register_fixed_factor(NULL, "ssp1_clk", "ras_ahb_clk", 0, 1,
			1);
	clk_register_clkdev(clk, NULL, "a5000000.spi");

	clk = clk_register_fixed_factor(NULL, "ssp2_clk", "ras_ahb_clk", 0, 1,
			1);
	clk_register_clkdev(clk, NULL, "a6000000.spi");

	/* 1:1 children of the RAS APB clock */
	clk = clk_register_fixed_factor(NULL, "can0_clk", "ras_apb_clk", 0, 1,
			1);
	clk_register_clkdev(clk, NULL, "c_can_platform.0");

	clk = clk_register_fixed_factor(NULL, "can1_clk", "ras_apb_clk", 0, 1,
			1);
	clk_register_clkdev(clk, NULL, "c_can_platform.1");

	clk = clk_register_fixed_factor(NULL, "i2s_clk", "ras_apb_clk", 0, 1,
			1);
	clk_register_clkdev(clk, NULL, "i2s");

	/* I2S reference: RAS PLL2 or gen2 synth, selected in CONTROL_REG */
	clk = clk_register_mux(NULL, "i2s_ref_clk", i2s_ref_parents,
			ARRAY_SIZE(i2s_ref_parents), 0, SPEAR320_CONTROL_REG,
			I2S_REF_PCLK_SHIFT, I2S_REF_PCLK_MASK, 0, &_lock);
	clk_register_clkdev(clk, "i2s_ref_clk", NULL);

	/* I2S serial clock is a fixed /4 of the reference */
	clk = clk_register_fixed_factor(NULL, "i2s_sclk", "i2s_ref_clk", 0, 1,
			4);
	clk_register_clkdev(clk, "i2s_sclk", NULL);

	/* RS485 and UART1..6 all pick gen1 synth vs RAS APB clock */
	clk = clk_register_mux(NULL, "rs485_clk", uartx_parents,
			ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
			SPEAR320_RS485_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "a9300000.serial");

	clk = clk_register_mux(NULL, "sdhci_clk", sdhci_parents,
			ARRAY_SIZE(sdhci_parents), 0, SPEAR320_CONTROL_REG,
			SDHCI_PCLK_SHIFT, SDHCI_PCLK_MASK, 0, &_lock);
	clk_register_clkdev(clk, NULL, "70000000.sdhci");

	clk = clk_register_mux(NULL, "smii_pclk", smii0_parents,
			ARRAY_SIZE(smii0_parents), 0, SPEAR320_CONTROL_REG,
			SMII_PCLK_SHIFT, SMII_PCLK_MASK, 0, &_lock);
	clk_register_clkdev(clk, NULL, "smii_pclk");

	clk = clk_register_fixed_factor(NULL, "smii_clk", "smii_pclk", 0, 1, 1);
	clk_register_clkdev(clk, NULL, "smii");

	/* UART1 is selected in CONTROL_REG, UART2..6 in EXT_CTRL_REG */
	clk = clk_register_mux(NULL, "uart1_clk", uartx_parents,
			ARRAY_SIZE(uartx_parents), 0, SPEAR320_CONTROL_REG,
			UART1_PCLK_SHIFT, UART1_PCLK_MASK, 0, &_lock);
	clk_register_clkdev(clk, NULL, "a3000000.serial");

	clk = clk_register_mux(NULL, "uart2_clk", uartx_parents,
			ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
			SPEAR320_UART2_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "a4000000.serial");

	clk = clk_register_mux(NULL, "uart3_clk", uartx_parents,
			ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
			SPEAR320_UART3_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "a9100000.serial");

	clk = clk_register_mux(NULL, "uart4_clk", uartx_parents,
			ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
			SPEAR320_UART4_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "a9200000.serial");

	clk = clk_register_mux(NULL, "uart5_clk", uartx_parents,
			ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
			SPEAR320_UART5_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "60000000.serial");

	clk = clk_register_mux(NULL, "uart6_clk", uartx_parents,
			ARRAY_SIZE(uartx_parents), 0, SPEAR320_EXT_CTRL_REG,
			SPEAR320_UART6_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0,
			&_lock);
	clk_register_clkdev(clk, NULL, "60100000.serial");
}
#endif
344
/*
 * Build the complete SPEAr3xx clock tree and register clkdev lookups.
 *
 * Order matters: roots (osc_32k/osc_24m) first, then PLLs/VCOs, then bus
 * clocks (ahb/apb), then peripheral gates and synthesizers, then the RAS
 * (Re-configurable Array Subsystem) gates that the SoC-specific
 * spear3x0_clk_init() functions build on.  Called once at early init.
 */
void __init spear3xx_clk_init(void)
{
	struct clk *clk, *clk1;

	/* Dummy 0 Hz pclk required by AMBA bus devices */
	clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
	clk_register_clkdev(clk, "apb_pclk", NULL);

	/* Root oscillators */
	clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
			32000);
	clk_register_clkdev(clk, "osc_32k_clk", NULL);

	clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT,
			24000000);
	clk_register_clkdev(clk, "osc_24m_clk", NULL);

	/* clock derived from 32 KHz osc clk */
	clk = clk_register_gate(NULL, "rtc-spear", "osc_32k_clk", 0,
			PERIP1_CLK_ENB, RTC_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, NULL, "fc900000.rtc");

	/* clock derived from 24 MHz osc clk */
	/* PLL3 output is a fixed 48 MHz */
	clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0,
			48000000);
	clk_register_clkdev(clk, "pll3_48m_clk", NULL);

	clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_24m_clk", 0, 1,
			1);
	clk_register_clkdev(clk, NULL, "fc880000.wdt");

	/* VCO/PLL pairs 1 and 2; clk1 receives the PLL (post-divider) output */
	clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL,
			"osc_24m_clk", 0, PLL1_CTR, PLL1_FRQ, pll_rtbl,
			ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
	clk_register_clkdev(clk, "vco1_clk", NULL);
	clk_register_clkdev(clk1, "pll1_clk", NULL);

	clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL,
			"osc_24m_clk", 0, PLL2_CTR, PLL2_FRQ, pll_rtbl,
			ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
	clk_register_clkdev(clk, "vco2_clk", NULL);
	clk_register_clkdev(clk1, "pll2_clk", NULL);

	/* clock derived from pll1 clk */
	clk = clk_register_fixed_factor(NULL, "cpu_clk", "pll1_clk", 0, 1, 1);
	clk_register_clkdev(clk, "cpu_clk", NULL);

	/* AHB divides down from PLL1 via the HCLK ratio field */
	clk = clk_register_divider(NULL, "ahb_clk", "pll1_clk",
			CLK_SET_RATE_PARENT, CORE_CLK_CFG, HCLK_RATIO_SHIFT,
			HCLK_RATIO_MASK, 0, &_lock);
	clk_register_clkdev(clk, "ahb_clk", NULL);

	/* UART: aux synthesizer -> mux (48M or synth) -> gate */
	clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
			"pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl,
			ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
	clk_register_clkdev(clk, "uart_synth_clk", NULL);
	clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);

	clk = clk_register_mux(NULL, "uart0_mux_clk", uart0_parents,
			ARRAY_SIZE(uart0_parents), 0, PERIP_CLK_CFG,
			UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
	clk_register_clkdev(clk, "uart0_mux_clk", NULL);

	clk = clk_register_gate(NULL, "uart0", "uart0_mux_clk", 0,
			PERIP1_CLK_ENB, UART_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, NULL, "d0000000.serial");

	/* FIRDA: same synth -> mux -> gate pattern as UART */
	clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk",
			"pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl,
			ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
	clk_register_clkdev(clk, "firda_synth_clk", NULL);
	clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL);

	clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents,
			ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
			FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
	clk_register_clkdev(clk, "firda_mux_clk", NULL);

	clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0,
			PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, NULL, "firda");

	/* gpt clocks */
	/* NOTE(review): gpt0 synth return value is dropped and gpt0 has no
	 * gate, unlike gpt1/gpt2 — confirm this asymmetry is intentional. */
	clk_register_gpt("gpt0_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
			gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
	clk = clk_register_mux(NULL, "gpt0_clk", gpt0_parents,
			ARRAY_SIZE(gpt0_parents), 0, PERIP_CLK_CFG,
			GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
	clk_register_clkdev(clk, NULL, "gpt0");

	clk_register_gpt("gpt1_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
			gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
	clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt1_parents,
			ARRAY_SIZE(gpt1_parents), 0, PERIP_CLK_CFG,
			GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
	clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
	clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
			PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, NULL, "gpt1");

	clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
			gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
	clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents,
			ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
			GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
	clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
	clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
			PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, NULL, "gpt2");

	/* general synths clocks */
	clk = clk_register_aux("gen0_synth_clk", "gen0_synth_gate_clk",
			"pll1_clk", 0, GEN0_CLK_SYNT, NULL, aux_rtbl,
			ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
	clk_register_clkdev(clk, "gen0_synth_clk", NULL);
	clk_register_clkdev(clk1, "gen0_synth_gate_clk", NULL);

	clk = clk_register_aux("gen1_synth_clk", "gen1_synth_gate_clk",
			"pll1_clk", 0, GEN1_CLK_SYNT, NULL, aux_rtbl,
			ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
	clk_register_clkdev(clk, "gen1_synth_clk", NULL);
	clk_register_clkdev(clk1, "gen1_synth_gate_clk", NULL);

	/* gen2/gen3 synths share a selectable parent */
	clk = clk_register_mux(NULL, "gen2_3_parent_clk", gen2_3_parents,
			ARRAY_SIZE(gen2_3_parents), 0, CORE_CLK_CFG,
			GEN_SYNTH2_3_CLK_SHIFT, GEN_SYNTH2_3_CLK_MASK, 0,
			&_lock);
	clk_register_clkdev(clk, "gen2_3_parent_clk", NULL);

	clk = clk_register_aux("gen2_synth_clk", "gen2_synth_gate_clk",
			"gen2_3_parent_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl,
			ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
	clk_register_clkdev(clk, "gen2_synth_clk", NULL);
	clk_register_clkdev(clk1, "gen2_synth_gate_clk", NULL);

	clk = clk_register_aux("gen3_synth_clk", "gen3_synth_gate_clk",
			"gen2_3_parent_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl,
			ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
	clk_register_clkdev(clk, "gen3_synth_clk", NULL);
	clk_register_clkdev(clk1, "gen3_synth_gate_clk", NULL);

	/* clock derived from pll3 clk */
	clk = clk_register_gate(NULL, "usbh_clk", "pll3_48m_clk", 0,
			PERIP1_CLK_ENB, USBH_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, "usbh_clk", NULL);

	/* Both USB host ports share the single usbh gate */
	clk = clk_register_fixed_factor(NULL, "usbh.0_clk", "usbh_clk", 0, 1,
			1);
	clk_register_clkdev(clk, "usbh.0_clk", NULL);

	clk = clk_register_fixed_factor(NULL, "usbh.1_clk", "usbh_clk", 0, 1,
			1);
	clk_register_clkdev(clk, "usbh.1_clk", NULL);

	clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0,
			PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, NULL, "designware_udc");

	/* clock derived from ahb clk */
	/* 2x AHB clock, used as one of the DDR parent options */
	clk = clk_register_fixed_factor(NULL, "ahbmult2_clk", "ahb_clk", 0, 2,
			1);
	clk_register_clkdev(clk, "ahbmult2_clk", NULL);

	clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
			ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT,
			MCTR_CLK_MASK, 0, &_lock);
	clk_register_clkdev(clk, "ddr_clk", NULL);

	clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
			CLK_SET_RATE_PARENT, CORE_CLK_CFG, PCLK_RATIO_SHIFT,
			PCLK_RATIO_MASK, 0, &_lock);
	clk_register_clkdev(clk, "apb_clk", NULL);

	clk = clk_register_gate(NULL, "amem_clk", "ahb_clk", 0, AMEM_CLK_CFG,
			AMEM_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, "amem_clk", NULL);

	clk = clk_register_gate(NULL, "c3_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
			C3_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, NULL, "c3_clk");

	clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
			DMA_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, NULL, "fc400000.dma");

	clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
			GMAC_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, NULL, "e0800000.eth");

	clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
			I2C_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, NULL, "d0180000.i2c");

	clk = clk_register_gate(NULL, "jpeg_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
			JPEG_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, NULL, "jpeg");

	clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
			SMI_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, NULL, "fc000000.flash");

	/* clock derived from apb clk */
	clk = clk_register_gate(NULL, "adc_clk", "apb_clk", 0, PERIP1_CLK_ENB,
			ADC_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, NULL, "adc");

	clk = clk_register_gate(NULL, "gpio0_clk", "apb_clk", 0, PERIP1_CLK_ENB,
			GPIO_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, NULL, "fc980000.gpio");

	clk = clk_register_gate(NULL, "ssp0_clk", "apb_clk", 0, PERIP1_CLK_ENB,
			SSP_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, NULL, "d0100000.spi");

	/* RAS clk enable */
	/* Gated copies of core clocks feeding the reconfigurable array;
	 * the SoC-specific init functions hang their clocks off these. */
	clk = clk_register_gate(NULL, "ras_ahb_clk", "ahb_clk", 0, RAS_CLK_ENB,
			RAS_AHB_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, "ras_ahb_clk", NULL);

	clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0, RAS_CLK_ENB,
			RAS_APB_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, "ras_apb_clk", NULL);

	clk = clk_register_gate(NULL, "ras_32k_clk", "osc_32k_clk", 0,
			RAS_CLK_ENB, RAS_32K_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, "ras_32k_clk", NULL);

	clk = clk_register_gate(NULL, "ras_24m_clk", "osc_24m_clk", 0,
			RAS_CLK_ENB, RAS_24M_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, "ras_24m_clk", NULL);

	clk = clk_register_gate(NULL, "ras_pll1_clk", "pll1_clk", 0,
			RAS_CLK_ENB, RAS_PLL1_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, "ras_pll1_clk", NULL);

	clk = clk_register_gate(NULL, "ras_pll2_clk", "pll2_clk", 0,
			RAS_CLK_ENB, RAS_PLL2_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, "ras_pll2_clk", NULL);

	clk = clk_register_gate(NULL, "ras_pll3_48m_clk", "pll3_48m_clk", 0,
			RAS_CLK_ENB, RAS_48M_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, "ras_pll3_48m_clk", NULL);

	clk = clk_register_gate(NULL, "ras_gen0_synth_gate_clk",
			"gen0_synth_gate_clk", 0, RAS_CLK_ENB,
			RAS_SYNT0_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, "ras_gen0_synth_gate_clk", NULL);

	clk = clk_register_gate(NULL, "ras_gen1_synth_gate_clk",
			"gen1_synth_gate_clk", 0, RAS_CLK_ENB,
			RAS_SYNT1_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, "ras_gen1_synth_gate_clk", NULL);

	clk = clk_register_gate(NULL, "ras_gen2_synth_gate_clk",
			"gen2_synth_gate_clk", 0, RAS_CLK_ENB,
			RAS_SYNT2_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, "ras_gen2_synth_gate_clk", NULL);

	clk = clk_register_gate(NULL, "ras_gen3_synth_gate_clk",
			"gen3_synth_gate_clk", 0, RAS_CLK_ENB,
			RAS_SYNT3_CLK_ENB, 0, &_lock);
	clk_register_clkdev(clk, "ras_gen3_synth_gate_clk", NULL);

	/* Finally add the clocks specific to the SoC we are running on */
	if (of_machine_is_compatible("st,spear300"))
		spear300_clk_init();
	else if (of_machine_is_compatible("st,spear310"))
		spear310_clk_init();
	else if (of_machine_is_compatible("st,spear320"))
		spear320_clk_init();
}
diff --git a/drivers/clk/spear/spear6xx_clock.c b/drivers/clk/spear/spear6xx_clock.c
new file mode 100644
index 000000000000..f9a20b382304
--- /dev/null
+++ b/drivers/clk/spear/spear6xx_clock.c
@@ -0,0 +1,342 @@
1/*
2 * SPEAr6xx machines clock framework source file
3 *
4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/clk.h>
13#include <linux/clkdev.h>
14#include <linux/io.h>
15#include <linux/spinlock_types.h>
16#include <mach/misc_regs.h>
17#include "clk.h"
18
/* Protects all register read-modify-write done by the registered clocks */
static DEFINE_SPINLOCK(_lock);

/* Misc-register offsets used by the SPEAr6xx clock tree */
#define PLL1_CTR			(MISC_BASE + 0x008)
#define PLL1_FRQ			(MISC_BASE + 0x00C)
#define PLL2_CTR			(MISC_BASE + 0x014)
#define PLL2_FRQ			(MISC_BASE + 0x018)
#define PLL_CLK_CFG			(MISC_BASE + 0x020)
	/* PLL_CLK_CFG register masks */
	#define MCTR_CLK_SHIFT		28
	#define MCTR_CLK_MASK		3

#define CORE_CLK_CFG			(MISC_BASE + 0x024)
	/* CORE CLK CFG register masks */
	#define HCLK_RATIO_SHIFT	10
	#define HCLK_RATIO_MASK		2
	#define PCLK_RATIO_SHIFT	8
	#define PCLK_RATIO_MASK		2

#define PERIP_CLK_CFG			(MISC_BASE + 0x028)
	/* PERIP_CLK_CFG register masks (mux parent-select fields) */
	#define CLCD_CLK_SHIFT		2
	#define CLCD_CLK_MASK		2
	#define UART_CLK_SHIFT		4
	#define UART_CLK_MASK		1
	#define FIRDA_CLK_SHIFT		5
	#define FIRDA_CLK_MASK		2
	#define GPT0_CLK_SHIFT		8
	#define GPT1_CLK_SHIFT		10
	#define GPT2_CLK_SHIFT		11
	#define GPT3_CLK_SHIFT		12
	#define GPT_CLK_MASK		1

#define PERIP1_CLK_ENB			(MISC_BASE + 0x02C)
	/* PERIP1_CLK_ENB register masks (gate bit numbers) */
	#define UART0_CLK_ENB		3
	#define UART1_CLK_ENB		4
	#define SSP0_CLK_ENB		5
	#define SSP1_CLK_ENB		6
	#define I2C_CLK_ENB		7
	#define JPEG_CLK_ENB		8
	#define FSMC_CLK_ENB		9
	#define FIRDA_CLK_ENB		10
	#define GPT2_CLK_ENB		11
	#define GPT3_CLK_ENB		12
	#define GPIO2_CLK_ENB		13
	#define SSP2_CLK_ENB		14
	#define ADC_CLK_ENB		15
	/* NOTE(review): same bit as GPT2_CLK_ENB (11), and bit 16 is unused
	 * here — looks like a possible typo; verify against the datasheet. */
	#define GPT1_CLK_ENB		11
	#define RTC_CLK_ENB		17
	#define GPIO1_CLK_ENB		18
	#define DMA_CLK_ENB		19
	#define SMI_CLK_ENB		21
	#define CLCD_CLK_ENB		22
	#define GMAC_CLK_ENB		23
	#define USBD_CLK_ENB		24
	#define USBH0_CLK_ENB		25
	#define USBH1_CLK_ENB		26

/* GPT prescaler configuration registers */
#define PRSC0_CLK_CFG			(MISC_BASE + 0x044)
#define PRSC1_CLK_CFG			(MISC_BASE + 0x048)
#define PRSC2_CLK_CFG			(MISC_BASE + 0x04C)

/* Auxiliary synthesizer configuration registers */
#define CLCD_CLK_SYNT			(MISC_BASE + 0x05C)
#define FIRDA_CLK_SYNT			(MISC_BASE + 0x060)
#define UART_CLK_SYNT			(MISC_BASE + 0x064)

/* vco rate configuration table, in ascending order of rates */
static struct pll_rate_tbl pll_rtbl[] = {
	{.mode = 0, .m = 0x53, .n = 0x0F, .p = 0x1}, /* vco 332 & pll 166 MHz */
	{.mode = 0, .m = 0x85, .n = 0x0F, .p = 0x1}, /* vco 532 & pll 266 MHz */
	{.mode = 0, .m = 0xA6, .n = 0x0F, .p = 0x1}, /* vco 664 & pll 332 MHz */
};

/* aux rate configuration table, in ascending order of rates */
static struct aux_rate_tbl aux_rtbl[] = {
	/* For PLL1 = 332 MHz */
	{.xscale = 2, .yscale = 8, .eq = 0}, /* 41.5 MHz */
	{.xscale = 2, .yscale = 4, .eq = 0}, /* 83 MHz */
	{.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */
};

/* Parent-name tables for the muxes registered in spear6xx_clk_init() */
static const char *clcd_parents[] = { "pll3_48m_clk", "clcd_synth_gate_clk", };
static const char *firda_parents[] = { "pll3_48m_clk", "firda_synth_gate_clk",
};
static const char *uart_parents[] = { "pll3_48m_clk", "uart_synth_gate_clk", };
static const char *gpt0_1_parents[] = { "pll3_48m_clk", "gpt0_1_synth_clk", };
static const char *gpt2_parents[] = { "pll3_48m_clk", "gpt2_synth_clk", };
static const char *gpt3_parents[] = { "pll3_48m_clk", "gpt3_synth_clk", };
/* "none" fills the unused selector value between ahbmult2 and pll2 */
static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none",
	"pll2_clk", };

/* gpt rate configuration table, in ascending order of rates */
static struct gpt_rate_tbl gpt_rtbl[] = {
	/* For pll1 = 332 MHz */
	{.mscale = 4, .nscale = 0}, /* 41.5 MHz */
	{.mscale = 2, .nscale = 0}, /* 55.3 MHz */
	{.mscale = 1, .nscale = 0}, /* 83 MHz */
};
117
118void __init spear6xx_clk_init(void)
119{
120 struct clk *clk, *clk1;
121
122 clk = clk_register_fixed_rate(NULL, "apb_pclk", NULL, CLK_IS_ROOT, 0);
123 clk_register_clkdev(clk, "apb_pclk", NULL);
124
125 clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT,
126 32000);
127 clk_register_clkdev(clk, "osc_32k_clk", NULL);
128
129 clk = clk_register_fixed_rate(NULL, "osc_30m_clk", NULL, CLK_IS_ROOT,
130 30000000);
131 clk_register_clkdev(clk, "osc_30m_clk", NULL);
132
133 /* clock derived from 32 KHz osc clk */
134 clk = clk_register_gate(NULL, "rtc_spear", "osc_32k_clk", 0,
135 PERIP1_CLK_ENB, RTC_CLK_ENB, 0, &_lock);
136 clk_register_clkdev(clk, NULL, "rtc-spear");
137
138 /* clock derived from 30 MHz osc clk */
139 clk = clk_register_fixed_rate(NULL, "pll3_48m_clk", "osc_24m_clk", 0,
140 48000000);
141 clk_register_clkdev(clk, "pll3_48m_clk", NULL);
142
143 clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "osc_30m_clk",
144 0, PLL1_CTR, PLL1_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl),
145 &_lock, &clk1, NULL);
146 clk_register_clkdev(clk, "vco1_clk", NULL);
147 clk_register_clkdev(clk1, "pll1_clk", NULL);
148
149 clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL,
150 "osc_30m_clk", 0, PLL2_CTR, PLL2_FRQ, pll_rtbl,
151 ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL);
152 clk_register_clkdev(clk, "vco2_clk", NULL);
153 clk_register_clkdev(clk1, "pll2_clk", NULL);
154
155 clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_30m_clk", 0, 1,
156 1);
157 clk_register_clkdev(clk, NULL, "wdt");
158
159 /* clock derived from pll1 clk */
160 clk = clk_register_fixed_factor(NULL, "cpu_clk", "pll1_clk", 0, 1, 1);
161 clk_register_clkdev(clk, "cpu_clk", NULL);
162
163 clk = clk_register_divider(NULL, "ahb_clk", "pll1_clk",
164 CLK_SET_RATE_PARENT, CORE_CLK_CFG, HCLK_RATIO_SHIFT,
165 HCLK_RATIO_MASK, 0, &_lock);
166 clk_register_clkdev(clk, "ahb_clk", NULL);
167
168 clk = clk_register_aux("uart_synth_clk", "uart_synth_gate_clk",
169 "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl,
170 ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
171 clk_register_clkdev(clk, "uart_synth_clk", NULL);
172 clk_register_clkdev(clk1, "uart_synth_gate_clk", NULL);
173
174 clk = clk_register_mux(NULL, "uart_mux_clk", uart_parents,
175 ARRAY_SIZE(uart_parents), 0, PERIP_CLK_CFG,
176 UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock);
177 clk_register_clkdev(clk, "uart_mux_clk", NULL);
178
179 clk = clk_register_gate(NULL, "uart0", "uart_mux_clk", 0,
180 PERIP1_CLK_ENB, UART0_CLK_ENB, 0, &_lock);
181 clk_register_clkdev(clk, NULL, "d0000000.serial");
182
183 clk = clk_register_gate(NULL, "uart1", "uart_mux_clk", 0,
184 PERIP1_CLK_ENB, UART1_CLK_ENB, 0, &_lock);
185 clk_register_clkdev(clk, NULL, "d0080000.serial");
186
187 clk = clk_register_aux("firda_synth_clk", "firda_synth_gate_clk",
188 "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl,
189 ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
190 clk_register_clkdev(clk, "firda_synth_clk", NULL);
191 clk_register_clkdev(clk1, "firda_synth_gate_clk", NULL);
192
193 clk = clk_register_mux(NULL, "firda_mux_clk", firda_parents,
194 ARRAY_SIZE(firda_parents), 0, PERIP_CLK_CFG,
195 FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock);
196 clk_register_clkdev(clk, "firda_mux_clk", NULL);
197
198 clk = clk_register_gate(NULL, "firda_clk", "firda_mux_clk", 0,
199 PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock);
200 clk_register_clkdev(clk, NULL, "firda");
201
202 clk = clk_register_aux("clcd_synth_clk", "clcd_synth_gate_clk",
203 "pll1_clk", 0, CLCD_CLK_SYNT, NULL, aux_rtbl,
204 ARRAY_SIZE(aux_rtbl), &_lock, &clk1);
205 clk_register_clkdev(clk, "clcd_synth_clk", NULL);
206 clk_register_clkdev(clk1, "clcd_synth_gate_clk", NULL);
207
208 clk = clk_register_mux(NULL, "clcd_mux_clk", clcd_parents,
209 ARRAY_SIZE(clcd_parents), 0, PERIP_CLK_CFG,
210 CLCD_CLK_SHIFT, CLCD_CLK_MASK, 0, &_lock);
211 clk_register_clkdev(clk, "clcd_mux_clk", NULL);
212
213 clk = clk_register_gate(NULL, "clcd_clk", "clcd_mux_clk", 0,
214 PERIP1_CLK_ENB, CLCD_CLK_ENB, 0, &_lock);
215 clk_register_clkdev(clk, NULL, "clcd");
216
217 /* gpt clocks */
218 clk = clk_register_gpt("gpt0_1_synth_clk", "pll1_clk", 0, PRSC0_CLK_CFG,
219 gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
220 clk_register_clkdev(clk, "gpt0_1_synth_clk", NULL);
221
222 clk = clk_register_mux(NULL, "gpt0_mux_clk", gpt0_1_parents,
223 ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
224 GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
225 clk_register_clkdev(clk, NULL, "gpt0");
226
227 clk = clk_register_mux(NULL, "gpt1_mux_clk", gpt0_1_parents,
228 ARRAY_SIZE(gpt0_1_parents), 0, PERIP_CLK_CFG,
229 GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
230 clk_register_clkdev(clk, "gpt1_mux_clk", NULL);
231
232 clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mux_clk", 0,
233 PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock);
234 clk_register_clkdev(clk, NULL, "gpt1");
235
236 clk = clk_register_gpt("gpt2_synth_clk", "pll1_clk", 0, PRSC1_CLK_CFG,
237 gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
238 clk_register_clkdev(clk, "gpt2_synth_clk", NULL);
239
240 clk = clk_register_mux(NULL, "gpt2_mux_clk", gpt2_parents,
241 ARRAY_SIZE(gpt2_parents), 0, PERIP_CLK_CFG,
242 GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
243 clk_register_clkdev(clk, "gpt2_mux_clk", NULL);
244
245 clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mux_clk", 0,
246 PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock);
247 clk_register_clkdev(clk, NULL, "gpt2");
248
249 clk = clk_register_gpt("gpt3_synth_clk", "pll1_clk", 0, PRSC2_CLK_CFG,
250 gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock);
251 clk_register_clkdev(clk, "gpt3_synth_clk", NULL);
252
253 clk = clk_register_mux(NULL, "gpt3_mux_clk", gpt3_parents,
254 ARRAY_SIZE(gpt3_parents), 0, PERIP_CLK_CFG,
255 GPT3_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock);
256 clk_register_clkdev(clk, "gpt3_mux_clk", NULL);
257
258 clk = clk_register_gate(NULL, "gpt3_clk", "gpt3_mux_clk", 0,
259 PERIP1_CLK_ENB, GPT3_CLK_ENB, 0, &_lock);
260 clk_register_clkdev(clk, NULL, "gpt3");
261
262 /* clock derived from pll3 clk */
263 clk = clk_register_gate(NULL, "usbh0_clk", "pll3_48m_clk", 0,
264 PERIP1_CLK_ENB, USBH0_CLK_ENB, 0, &_lock);
265 clk_register_clkdev(clk, NULL, "usbh.0_clk");
266
267 clk = clk_register_gate(NULL, "usbh1_clk", "pll3_48m_clk", 0,
268 PERIP1_CLK_ENB, USBH1_CLK_ENB, 0, &_lock);
269 clk_register_clkdev(clk, NULL, "usbh.1_clk");
270
271 clk = clk_register_gate(NULL, "usbd_clk", "pll3_48m_clk", 0,
272 PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock);
273 clk_register_clkdev(clk, NULL, "designware_udc");
274
275 /* clock derived from ahb clk */
276 clk = clk_register_fixed_factor(NULL, "ahbmult2_clk", "ahb_clk", 0, 2,
277 1);
278 clk_register_clkdev(clk, "ahbmult2_clk", NULL);
279
280 clk = clk_register_mux(NULL, "ddr_clk", ddr_parents,
281 ARRAY_SIZE(ddr_parents),
282 0, PLL_CLK_CFG, MCTR_CLK_SHIFT, MCTR_CLK_MASK, 0,
283 &_lock);
284 clk_register_clkdev(clk, "ddr_clk", NULL);
285
286 clk = clk_register_divider(NULL, "apb_clk", "ahb_clk",
287 CLK_SET_RATE_PARENT, CORE_CLK_CFG, PCLK_RATIO_SHIFT,
288 PCLK_RATIO_MASK, 0, &_lock);
289 clk_register_clkdev(clk, "apb_clk", NULL);
290
291 clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
292 DMA_CLK_ENB, 0, &_lock);
293 clk_register_clkdev(clk, NULL, "fc400000.dma");
294
295 clk = clk_register_gate(NULL, "fsmc_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
296 FSMC_CLK_ENB, 0, &_lock);
297 clk_register_clkdev(clk, NULL, "d1800000.flash");
298
299 clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
300 GMAC_CLK_ENB, 0, &_lock);
301 clk_register_clkdev(clk, NULL, "gmac");
302
303 clk = clk_register_gate(NULL, "i2c_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
304 I2C_CLK_ENB, 0, &_lock);
305 clk_register_clkdev(clk, NULL, "d0200000.i2c");
306
307 clk = clk_register_gate(NULL, "jpeg_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
308 JPEG_CLK_ENB, 0, &_lock);
309 clk_register_clkdev(clk, NULL, "jpeg");
310
311 clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0, PERIP1_CLK_ENB,
312 SMI_CLK_ENB, 0, &_lock);
313 clk_register_clkdev(clk, NULL, "fc000000.flash");
314
315 /* clock derived from apb clk */
316 clk = clk_register_gate(NULL, "adc_clk", "apb_clk", 0, PERIP1_CLK_ENB,
317 ADC_CLK_ENB, 0, &_lock);
318 clk_register_clkdev(clk, NULL, "adc");
319
320 clk = clk_register_fixed_factor(NULL, "gpio0_clk", "apb_clk", 0, 1, 1);
321 clk_register_clkdev(clk, NULL, "f0100000.gpio");
322
323 clk = clk_register_gate(NULL, "gpio1_clk", "apb_clk", 0, PERIP1_CLK_ENB,
324 GPIO1_CLK_ENB, 0, &_lock);
325 clk_register_clkdev(clk, NULL, "fc980000.gpio");
326
327 clk = clk_register_gate(NULL, "gpio2_clk", "apb_clk", 0, PERIP1_CLK_ENB,
328 GPIO2_CLK_ENB, 0, &_lock);
329 clk_register_clkdev(clk, NULL, "d8100000.gpio");
330
331 clk = clk_register_gate(NULL, "ssp0_clk", "apb_clk", 0, PERIP1_CLK_ENB,
332 SSP0_CLK_ENB, 0, &_lock);
333 clk_register_clkdev(clk, NULL, "ssp-pl022.0");
334
335 clk = clk_register_gate(NULL, "ssp1_clk", "apb_clk", 0, PERIP1_CLK_ENB,
336 SSP1_CLK_ENB, 0, &_lock);
337 clk_register_clkdev(clk, NULL, "ssp-pl022.1");
338
339 clk = clk_register_gate(NULL, "ssp2_clk", "apb_clk", 0, PERIP1_CLK_ENB,
340 SSP2_CLK_ENB, 0, &_lock);
341 clk_register_clkdev(clk, NULL, "ssp-pl022.2");
342}
diff --git a/drivers/crypto/mv_cesa.c b/drivers/crypto/mv_cesa.c
index e6ecc5f23943..1cc6b3f3e262 100644
--- a/drivers/crypto/mv_cesa.c
+++ b/drivers/crypto/mv_cesa.c
@@ -16,6 +16,7 @@
16#include <linux/scatterlist.h> 16#include <linux/scatterlist.h>
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/clk.h>
19#include <crypto/internal/hash.h> 20#include <crypto/internal/hash.h>
20#include <crypto/sha.h> 21#include <crypto/sha.h>
21 22
@@ -79,6 +80,7 @@ struct crypto_priv {
79 void __iomem *reg; 80 void __iomem *reg;
80 void __iomem *sram; 81 void __iomem *sram;
81 int irq; 82 int irq;
83 struct clk *clk;
82 struct task_struct *queue_th; 84 struct task_struct *queue_th;
83 85
84 /* the lock protects queue and eng_st */ 86 /* the lock protects queue and eng_st */
@@ -1053,6 +1055,12 @@ static int mv_probe(struct platform_device *pdev)
1053 if (ret) 1055 if (ret)
1054 goto err_thread; 1056 goto err_thread;
1055 1057
1058 /* Not all platforms can gate the clock, so it is not
1059 an error if the clock does not exists. */
1060 cp->clk = clk_get(&pdev->dev, NULL);
1061 if (!IS_ERR(cp->clk))
1062 clk_prepare_enable(cp->clk);
1063
1056 writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK); 1064 writel(SEC_INT_ACCEL0_DONE, cpg->reg + SEC_ACCEL_INT_MASK);
1057 writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG); 1065 writel(SEC_CFG_STOP_DIG_ERR, cpg->reg + SEC_ACCEL_CFG);
1058 writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0); 1066 writel(SRAM_CONFIG, cpg->reg + SEC_ACCEL_DESC_P0);
@@ -1118,6 +1126,12 @@ static int mv_remove(struct platform_device *pdev)
1118 memset(cp->sram, 0, cp->sram_size); 1126 memset(cp->sram, 0, cp->sram_size);
1119 iounmap(cp->sram); 1127 iounmap(cp->sram);
1120 iounmap(cp->reg); 1128 iounmap(cp->reg);
1129
1130 if (!IS_ERR(cp->clk)) {
1131 clk_disable_unprepare(cp->clk);
1132 clk_put(cp->clk);
1133 }
1134
1121 kfree(cp); 1135 kfree(cp);
1122 cpg = NULL; 1136 cpg = NULL;
1123 return 0; 1137 return 0;
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index ef378b5b17e4..aadeb5be9dba 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -238,6 +238,7 @@ config IMX_DMA
238config MXS_DMA 238config MXS_DMA
239 bool "MXS DMA support" 239 bool "MXS DMA support"
240 depends on SOC_IMX23 || SOC_IMX28 240 depends on SOC_IMX23 || SOC_IMX28
241 select STMP_DEVICE
241 select DMA_ENGINE 242 select DMA_ENGINE
242 help 243 help
243 Support the MXS DMA engine. This engine including APBH-DMA 244 Support the MXS DMA engine. This engine including APBH-DMA
diff --git a/drivers/dma/amba-pl08x.c b/drivers/dma/amba-pl08x.c
index 3d704abd7912..49ecbbb8932d 100644
--- a/drivers/dma/amba-pl08x.c
+++ b/drivers/dma/amba-pl08x.c
@@ -95,10 +95,14 @@ static struct amba_driver pl08x_amba_driver;
95 * struct vendor_data - vendor-specific config parameters for PL08x derivatives 95 * struct vendor_data - vendor-specific config parameters for PL08x derivatives
96 * @channels: the number of channels available in this variant 96 * @channels: the number of channels available in this variant
97 * @dualmaster: whether this version supports dual AHB masters or not. 97 * @dualmaster: whether this version supports dual AHB masters or not.
98 * @nomadik: whether the channels have Nomadik security extension bits
99 * that need to be checked for permission before use and some registers are
100 * missing
98 */ 101 */
99struct vendor_data { 102struct vendor_data {
100 u8 channels; 103 u8 channels;
101 bool dualmaster; 104 bool dualmaster;
105 bool nomadik;
102}; 106};
103 107
104/* 108/*
@@ -385,7 +389,7 @@ pl08x_get_phy_channel(struct pl08x_driver_data *pl08x,
385 389
386 spin_lock_irqsave(&ch->lock, flags); 390 spin_lock_irqsave(&ch->lock, flags);
387 391
388 if (!ch->serving) { 392 if (!ch->locked && !ch->serving) {
389 ch->serving = virt_chan; 393 ch->serving = virt_chan;
390 ch->signal = -1; 394 ch->signal = -1;
391 spin_unlock_irqrestore(&ch->lock, flags); 395 spin_unlock_irqrestore(&ch->lock, flags);
@@ -1324,7 +1328,7 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1324 int ret, tmp; 1328 int ret, tmp;
1325 1329
1326 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n", 1330 dev_dbg(&pl08x->adev->dev, "%s prepare transaction of %d bytes from %s\n",
1327 __func__, sgl->length, plchan->name); 1331 __func__, sg_dma_len(sgl), plchan->name);
1328 1332
1329 txd = pl08x_get_txd(plchan, flags); 1333 txd = pl08x_get_txd(plchan, flags);
1330 if (!txd) { 1334 if (!txd) {
@@ -1378,11 +1382,11 @@ static struct dma_async_tx_descriptor *pl08x_prep_slave_sg(
1378 1382
1379 dsg->len = sg_dma_len(sg); 1383 dsg->len = sg_dma_len(sg);
1380 if (direction == DMA_MEM_TO_DEV) { 1384 if (direction == DMA_MEM_TO_DEV) {
1381 dsg->src_addr = sg_phys(sg); 1385 dsg->src_addr = sg_dma_address(sg);
1382 dsg->dst_addr = slave_addr; 1386 dsg->dst_addr = slave_addr;
1383 } else { 1387 } else {
1384 dsg->src_addr = slave_addr; 1388 dsg->src_addr = slave_addr;
1385 dsg->dst_addr = sg_phys(sg); 1389 dsg->dst_addr = sg_dma_address(sg);
1386 } 1390 }
1387 } 1391 }
1388 1392
@@ -1484,6 +1488,9 @@ bool pl08x_filter_id(struct dma_chan *chan, void *chan_id)
1484 */ 1488 */
1485static void pl08x_ensure_on(struct pl08x_driver_data *pl08x) 1489static void pl08x_ensure_on(struct pl08x_driver_data *pl08x)
1486{ 1490{
1491 /* The Nomadik variant does not have the config register */
1492 if (pl08x->vd->nomadik)
1493 return;
1487 writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG); 1494 writel(PL080_CONFIG_ENABLE, pl08x->base + PL080_CONFIG);
1488} 1495}
1489 1496
@@ -1616,7 +1623,7 @@ static irqreturn_t pl08x_irq(int irq, void *dev)
1616 __func__, err); 1623 __func__, err);
1617 writel(err, pl08x->base + PL080_ERR_CLEAR); 1624 writel(err, pl08x->base + PL080_ERR_CLEAR);
1618 } 1625 }
1619 tc = readl(pl08x->base + PL080_INT_STATUS); 1626 tc = readl(pl08x->base + PL080_TC_STATUS);
1620 if (tc) 1627 if (tc)
1621 writel(tc, pl08x->base + PL080_TC_CLEAR); 1628 writel(tc, pl08x->base + PL080_TC_CLEAR);
1622 1629
@@ -1773,8 +1780,10 @@ static int pl08x_debugfs_show(struct seq_file *s, void *data)
1773 spin_lock_irqsave(&ch->lock, flags); 1780 spin_lock_irqsave(&ch->lock, flags);
1774 virt_chan = ch->serving; 1781 virt_chan = ch->serving;
1775 1782
1776 seq_printf(s, "%d\t\t%s\n", 1783 seq_printf(s, "%d\t\t%s%s\n",
1777 ch->id, virt_chan ? virt_chan->name : "(none)"); 1784 ch->id,
1785 virt_chan ? virt_chan->name : "(none)",
1786 ch->locked ? " LOCKED" : "");
1778 1787
1779 spin_unlock_irqrestore(&ch->lock, flags); 1788 spin_unlock_irqrestore(&ch->lock, flags);
1780 } 1789 }
@@ -1918,7 +1927,7 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1918 } 1927 }
1919 1928
1920 /* Initialize physical channels */ 1929 /* Initialize physical channels */
1921 pl08x->phy_chans = kmalloc((vd->channels * sizeof(*pl08x->phy_chans)), 1930 pl08x->phy_chans = kzalloc((vd->channels * sizeof(*pl08x->phy_chans)),
1922 GFP_KERNEL); 1931 GFP_KERNEL);
1923 if (!pl08x->phy_chans) { 1932 if (!pl08x->phy_chans) {
1924 dev_err(&adev->dev, "%s failed to allocate " 1933 dev_err(&adev->dev, "%s failed to allocate "
@@ -1933,8 +1942,23 @@ static int pl08x_probe(struct amba_device *adev, const struct amba_id *id)
1933 ch->id = i; 1942 ch->id = i;
1934 ch->base = pl08x->base + PL080_Cx_BASE(i); 1943 ch->base = pl08x->base + PL080_Cx_BASE(i);
1935 spin_lock_init(&ch->lock); 1944 spin_lock_init(&ch->lock);
1936 ch->serving = NULL;
1937 ch->signal = -1; 1945 ch->signal = -1;
1946
1947 /*
1948 * Nomadik variants can have channels that are locked
1949 * down for the secure world only. Lock up these channels
1950 * by perpetually serving a dummy virtual channel.
1951 */
1952 if (vd->nomadik) {
1953 u32 val;
1954
1955 val = readl(ch->base + PL080_CH_CONFIG);
1956 if (val & (PL080N_CONFIG_ITPROT | PL080N_CONFIG_SECPROT)) {
1957 dev_info(&adev->dev, "physical channel %d reserved for secure access only\n", i);
1958 ch->locked = true;
1959 }
1960 }
1961
1938 dev_dbg(&adev->dev, "physical channel %d is %s\n", 1962 dev_dbg(&adev->dev, "physical channel %d is %s\n",
1939 i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE"); 1963 i, pl08x_phy_channel_busy(ch) ? "BUSY" : "FREE");
1940 } 1964 }
@@ -2017,6 +2041,12 @@ static struct vendor_data vendor_pl080 = {
2017 .dualmaster = true, 2041 .dualmaster = true,
2018}; 2042};
2019 2043
2044static struct vendor_data vendor_nomadik = {
2045 .channels = 8,
2046 .dualmaster = true,
2047 .nomadik = true,
2048};
2049
2020static struct vendor_data vendor_pl081 = { 2050static struct vendor_data vendor_pl081 = {
2021 .channels = 2, 2051 .channels = 2,
2022 .dualmaster = false, 2052 .dualmaster = false,
@@ -2037,9 +2067,9 @@ static struct amba_id pl08x_ids[] = {
2037 }, 2067 },
2038 /* Nomadik 8815 PL080 variant */ 2068 /* Nomadik 8815 PL080 variant */
2039 { 2069 {
2040 .id = 0x00280880, 2070 .id = 0x00280080,
2041 .mask = 0x00ffffff, 2071 .mask = 0x00ffffff,
2042 .data = &vendor_pl080, 2072 .data = &vendor_nomadik,
2043 }, 2073 },
2044 { 0, 0 }, 2074 { 0, 0 },
2045}; 2075};
diff --git a/drivers/dma/at_hdmac.c b/drivers/dma/at_hdmac.c
index bf0d7e4e345b..7292aa87b2dd 100644
--- a/drivers/dma/at_hdmac.c
+++ b/drivers/dma/at_hdmac.c
@@ -39,7 +39,6 @@
39 */ 39 */
40 40
41#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO) 41#define ATC_DEFAULT_CFG (ATC_FIFOCFG_HALFFIFO)
42#define ATC_DEFAULT_CTRLA (0)
43#define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \ 42#define ATC_DEFAULT_CTRLB (ATC_SIF(AT_DMA_MEM_IF) \
44 |ATC_DIF(AT_DMA_MEM_IF)) 43 |ATC_DIF(AT_DMA_MEM_IF))
45 44
@@ -574,7 +573,6 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
574 return NULL; 573 return NULL;
575 } 574 }
576 575
577 ctrla = ATC_DEFAULT_CTRLA;
578 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN 576 ctrlb = ATC_DEFAULT_CTRLB | ATC_IEN
579 | ATC_SRC_ADDR_MODE_INCR 577 | ATC_SRC_ADDR_MODE_INCR
580 | ATC_DST_ADDR_MODE_INCR 578 | ATC_DST_ADDR_MODE_INCR
@@ -585,13 +583,13 @@ atc_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
585 * of the most common optimization. 583 * of the most common optimization.
586 */ 584 */
587 if (!((src | dest | len) & 3)) { 585 if (!((src | dest | len) & 3)) {
588 ctrla |= ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD; 586 ctrla = ATC_SRC_WIDTH_WORD | ATC_DST_WIDTH_WORD;
589 src_width = dst_width = 2; 587 src_width = dst_width = 2;
590 } else if (!((src | dest | len) & 1)) { 588 } else if (!((src | dest | len) & 1)) {
591 ctrla |= ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD; 589 ctrla = ATC_SRC_WIDTH_HALFWORD | ATC_DST_WIDTH_HALFWORD;
592 src_width = dst_width = 1; 590 src_width = dst_width = 1;
593 } else { 591 } else {
594 ctrla |= ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE; 592 ctrla = ATC_SRC_WIDTH_BYTE | ATC_DST_WIDTH_BYTE;
595 src_width = dst_width = 0; 593 src_width = dst_width = 0;
596 } 594 }
597 595
@@ -668,7 +666,8 @@ atc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
668 return NULL; 666 return NULL;
669 } 667 }
670 668
671 ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla; 669 ctrla = ATC_SCSIZE(sconfig->src_maxburst)
670 | ATC_DCSIZE(sconfig->dst_maxburst);
672 ctrlb = ATC_IEN; 671 ctrlb = ATC_IEN;
673 672
674 switch (direction) { 673 switch (direction) {
@@ -796,12 +795,12 @@ atc_dma_cyclic_fill_desc(struct dma_chan *chan, struct at_desc *desc,
796 enum dma_transfer_direction direction) 795 enum dma_transfer_direction direction)
797{ 796{
798 struct at_dma_chan *atchan = to_at_dma_chan(chan); 797 struct at_dma_chan *atchan = to_at_dma_chan(chan);
799 struct at_dma_slave *atslave = chan->private;
800 struct dma_slave_config *sconfig = &atchan->dma_sconfig; 798 struct dma_slave_config *sconfig = &atchan->dma_sconfig;
801 u32 ctrla; 799 u32 ctrla;
802 800
803 /* prepare common CRTLA value */ 801 /* prepare common CRTLA value */
804 ctrla = ATC_DEFAULT_CTRLA | atslave->ctrla 802 ctrla = ATC_SCSIZE(sconfig->src_maxburst)
803 | ATC_DCSIZE(sconfig->dst_maxburst)
805 | ATC_DST_WIDTH(reg_width) 804 | ATC_DST_WIDTH(reg_width)
806 | ATC_SRC_WIDTH(reg_width) 805 | ATC_SRC_WIDTH(reg_width)
807 | period_len >> reg_width; 806 | period_len >> reg_width;
diff --git a/drivers/dma/at_hdmac_regs.h b/drivers/dma/at_hdmac_regs.h
index 897a8bcaec90..8a6c8e8b2940 100644
--- a/drivers/dma/at_hdmac_regs.h
+++ b/drivers/dma/at_hdmac_regs.h
@@ -87,7 +87,26 @@
87/* Bitfields in CTRLA */ 87/* Bitfields in CTRLA */
88#define ATC_BTSIZE_MAX 0xFFFFUL /* Maximum Buffer Transfer Size */ 88#define ATC_BTSIZE_MAX 0xFFFFUL /* Maximum Buffer Transfer Size */
89#define ATC_BTSIZE(x) (ATC_BTSIZE_MAX & (x)) /* Buffer Transfer Size */ 89#define ATC_BTSIZE(x) (ATC_BTSIZE_MAX & (x)) /* Buffer Transfer Size */
90/* Chunck Tranfer size definitions are in at_hdmac.h */ 90#define ATC_SCSIZE_MASK (0x7 << 16) /* Source Chunk Transfer Size */
91#define ATC_SCSIZE(x) (ATC_SCSIZE_MASK & ((x) << 16))
92#define ATC_SCSIZE_1 (0x0 << 16)
93#define ATC_SCSIZE_4 (0x1 << 16)
94#define ATC_SCSIZE_8 (0x2 << 16)
95#define ATC_SCSIZE_16 (0x3 << 16)
96#define ATC_SCSIZE_32 (0x4 << 16)
97#define ATC_SCSIZE_64 (0x5 << 16)
98#define ATC_SCSIZE_128 (0x6 << 16)
99#define ATC_SCSIZE_256 (0x7 << 16)
100#define ATC_DCSIZE_MASK (0x7 << 20) /* Destination Chunk Transfer Size */
101#define ATC_DCSIZE(x) (ATC_DCSIZE_MASK & ((x) << 20))
102#define ATC_DCSIZE_1 (0x0 << 20)
103#define ATC_DCSIZE_4 (0x1 << 20)
104#define ATC_DCSIZE_8 (0x2 << 20)
105#define ATC_DCSIZE_16 (0x3 << 20)
106#define ATC_DCSIZE_32 (0x4 << 20)
107#define ATC_DCSIZE_64 (0x5 << 20)
108#define ATC_DCSIZE_128 (0x6 << 20)
109#define ATC_DCSIZE_256 (0x7 << 20)
91#define ATC_SRC_WIDTH_MASK (0x3 << 24) /* Source Single Transfer Size */ 110#define ATC_SRC_WIDTH_MASK (0x3 << 24) /* Source Single Transfer Size */
92#define ATC_SRC_WIDTH(x) ((x) << 24) 111#define ATC_SRC_WIDTH(x) ((x) << 24)
93#define ATC_SRC_WIDTH_BYTE (0x0 << 24) 112#define ATC_SRC_WIDTH_BYTE (0x0 << 24)
diff --git a/drivers/dma/coh901318.c b/drivers/dma/coh901318.c
index 750925f9638b..e67b4e06a918 100644
--- a/drivers/dma/coh901318.c
+++ b/drivers/dma/coh901318.c
@@ -1033,7 +1033,7 @@ coh901318_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
1033 1033
1034 if (!sgl) 1034 if (!sgl)
1035 goto out; 1035 goto out;
1036 if (sgl->length == 0) 1036 if (sg_dma_len(sgl) == 0)
1037 goto out; 1037 goto out;
1038 1038
1039 spin_lock_irqsave(&cohc->lock, flg); 1039 spin_lock_irqsave(&cohc->lock, flg);
diff --git a/drivers/dma/coh901318_lli.c b/drivers/dma/coh901318_lli.c
index 6c0e2d4c6682..780e0429b38c 100644
--- a/drivers/dma/coh901318_lli.c
+++ b/drivers/dma/coh901318_lli.c
@@ -270,10 +270,10 @@ coh901318_lli_fill_sg(struct coh901318_pool *pool,
270 270
271 if (dir == DMA_MEM_TO_DEV) 271 if (dir == DMA_MEM_TO_DEV)
272 /* increment source address */ 272 /* increment source address */
273 src = sg_phys(sg); 273 src = sg_dma_address(sg);
274 else 274 else
275 /* increment destination address */ 275 /* increment destination address */
276 dst = sg_phys(sg); 276 dst = sg_dma_address(sg);
277 277
278 bytes_to_transfer = sg_dma_len(sg); 278 bytes_to_transfer = sg_dma_len(sg);
279 279
diff --git a/drivers/dma/dw_dmac.c b/drivers/dma/dw_dmac.c
index 7439079f5eed..e23dc82d43ac 100644
--- a/drivers/dma/dw_dmac.c
+++ b/drivers/dma/dw_dmac.c
@@ -17,6 +17,7 @@
17#include <linux/init.h> 17#include <linux/init.h>
18#include <linux/interrupt.h> 18#include <linux/interrupt.h>
19#include <linux/io.h> 19#include <linux/io.h>
20#include <linux/of.h>
20#include <linux/mm.h> 21#include <linux/mm.h>
21#include <linux/module.h> 22#include <linux/module.h>
22#include <linux/platform_device.h> 23#include <linux/platform_device.h>
@@ -742,7 +743,7 @@ dwc_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
742 struct dw_desc *desc; 743 struct dw_desc *desc;
743 u32 len, dlen, mem; 744 u32 len, dlen, mem;
744 745
745 mem = sg_phys(sg); 746 mem = sg_dma_address(sg);
746 len = sg_dma_len(sg); 747 len = sg_dma_len(sg);
747 748
748 if (!((mem | len) & 7)) 749 if (!((mem | len) & 7))
@@ -809,7 +810,7 @@ slave_sg_todev_fill_desc:
809 struct dw_desc *desc; 810 struct dw_desc *desc;
810 u32 len, dlen, mem; 811 u32 len, dlen, mem;
811 812
812 mem = sg_phys(sg); 813 mem = sg_dma_address(sg);
813 len = sg_dma_len(sg); 814 len = sg_dma_len(sg);
814 815
815 if (!((mem | len) & 7)) 816 if (!((mem | len) & 7))
@@ -1429,7 +1430,7 @@ static int __init dw_probe(struct platform_device *pdev)
1429 err = PTR_ERR(dw->clk); 1430 err = PTR_ERR(dw->clk);
1430 goto err_clk; 1431 goto err_clk;
1431 } 1432 }
1432 clk_enable(dw->clk); 1433 clk_prepare_enable(dw->clk);
1433 1434
1434 /* force dma off, just in case */ 1435 /* force dma off, just in case */
1435 dw_dma_off(dw); 1436 dw_dma_off(dw);
@@ -1510,7 +1511,7 @@ static int __init dw_probe(struct platform_device *pdev)
1510 return 0; 1511 return 0;
1511 1512
1512err_irq: 1513err_irq:
1513 clk_disable(dw->clk); 1514 clk_disable_unprepare(dw->clk);
1514 clk_put(dw->clk); 1515 clk_put(dw->clk);
1515err_clk: 1516err_clk:
1516 iounmap(dw->regs); 1517 iounmap(dw->regs);
@@ -1540,7 +1541,7 @@ static int __exit dw_remove(struct platform_device *pdev)
1540 channel_clear_bit(dw, CH_EN, dwc->mask); 1541 channel_clear_bit(dw, CH_EN, dwc->mask);
1541 } 1542 }
1542 1543
1543 clk_disable(dw->clk); 1544 clk_disable_unprepare(dw->clk);
1544 clk_put(dw->clk); 1545 clk_put(dw->clk);
1545 1546
1546 iounmap(dw->regs); 1547 iounmap(dw->regs);
@@ -1559,7 +1560,7 @@ static void dw_shutdown(struct platform_device *pdev)
1559 struct dw_dma *dw = platform_get_drvdata(pdev); 1560 struct dw_dma *dw = platform_get_drvdata(pdev);
1560 1561
1561 dw_dma_off(platform_get_drvdata(pdev)); 1562 dw_dma_off(platform_get_drvdata(pdev));
1562 clk_disable(dw->clk); 1563 clk_disable_unprepare(dw->clk);
1563} 1564}
1564 1565
1565static int dw_suspend_noirq(struct device *dev) 1566static int dw_suspend_noirq(struct device *dev)
@@ -1568,7 +1569,7 @@ static int dw_suspend_noirq(struct device *dev)
1568 struct dw_dma *dw = platform_get_drvdata(pdev); 1569 struct dw_dma *dw = platform_get_drvdata(pdev);
1569 1570
1570 dw_dma_off(platform_get_drvdata(pdev)); 1571 dw_dma_off(platform_get_drvdata(pdev));
1571 clk_disable(dw->clk); 1572 clk_disable_unprepare(dw->clk);
1572 1573
1573 return 0; 1574 return 0;
1574} 1575}
@@ -1578,7 +1579,7 @@ static int dw_resume_noirq(struct device *dev)
1578 struct platform_device *pdev = to_platform_device(dev); 1579 struct platform_device *pdev = to_platform_device(dev);
1579 struct dw_dma *dw = platform_get_drvdata(pdev); 1580 struct dw_dma *dw = platform_get_drvdata(pdev);
1580 1581
1581 clk_enable(dw->clk); 1582 clk_prepare_enable(dw->clk);
1582 dma_writel(dw, CFG, DW_CFG_DMA_EN); 1583 dma_writel(dw, CFG, DW_CFG_DMA_EN);
1583 return 0; 1584 return 0;
1584} 1585}
@@ -1592,12 +1593,21 @@ static const struct dev_pm_ops dw_dev_pm_ops = {
1592 .poweroff_noirq = dw_suspend_noirq, 1593 .poweroff_noirq = dw_suspend_noirq,
1593}; 1594};
1594 1595
1596#ifdef CONFIG_OF
1597static const struct of_device_id dw_dma_id_table[] = {
1598 { .compatible = "snps,dma-spear1340" },
1599 {}
1600};
1601MODULE_DEVICE_TABLE(of, dw_dma_id_table);
1602#endif
1603
1595static struct platform_driver dw_driver = { 1604static struct platform_driver dw_driver = {
1596 .remove = __exit_p(dw_remove), 1605 .remove = __exit_p(dw_remove),
1597 .shutdown = dw_shutdown, 1606 .shutdown = dw_shutdown,
1598 .driver = { 1607 .driver = {
1599 .name = "dw_dmac", 1608 .name = "dw_dmac",
1600 .pm = &dw_dev_pm_ops, 1609 .pm = &dw_dev_pm_ops,
1610 .of_match_table = of_match_ptr(dw_dma_id_table),
1601 }, 1611 },
1602}; 1612};
1603 1613
diff --git a/drivers/dma/ep93xx_dma.c b/drivers/dma/ep93xx_dma.c
index f6e9b572b998..c64917ec313d 100644
--- a/drivers/dma/ep93xx_dma.c
+++ b/drivers/dma/ep93xx_dma.c
@@ -71,6 +71,7 @@
71#define M2M_CONTROL_TM_SHIFT 13 71#define M2M_CONTROL_TM_SHIFT 13
72#define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT) 72#define M2M_CONTROL_TM_TX (1 << M2M_CONTROL_TM_SHIFT)
73#define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT) 73#define M2M_CONTROL_TM_RX (2 << M2M_CONTROL_TM_SHIFT)
74#define M2M_CONTROL_NFBINT BIT(21)
74#define M2M_CONTROL_RSS_SHIFT 22 75#define M2M_CONTROL_RSS_SHIFT 22
75#define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT) 76#define M2M_CONTROL_RSS_SSPRX (1 << M2M_CONTROL_RSS_SHIFT)
76#define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT) 77#define M2M_CONTROL_RSS_SSPTX (2 << M2M_CONTROL_RSS_SHIFT)
@@ -79,7 +80,22 @@
79#define M2M_CONTROL_PWSC_SHIFT 25 80#define M2M_CONTROL_PWSC_SHIFT 25
80 81
81#define M2M_INTERRUPT 0x0004 82#define M2M_INTERRUPT 0x0004
82#define M2M_INTERRUPT_DONEINT BIT(1) 83#define M2M_INTERRUPT_MASK 6
84
85#define M2M_STATUS 0x000c
86#define M2M_STATUS_CTL_SHIFT 1
87#define M2M_STATUS_CTL_IDLE (0 << M2M_STATUS_CTL_SHIFT)
88#define M2M_STATUS_CTL_STALL (1 << M2M_STATUS_CTL_SHIFT)
89#define M2M_STATUS_CTL_MEMRD (2 << M2M_STATUS_CTL_SHIFT)
90#define M2M_STATUS_CTL_MEMWR (3 << M2M_STATUS_CTL_SHIFT)
91#define M2M_STATUS_CTL_BWCWAIT (4 << M2M_STATUS_CTL_SHIFT)
92#define M2M_STATUS_CTL_MASK (7 << M2M_STATUS_CTL_SHIFT)
93#define M2M_STATUS_BUF_SHIFT 4
94#define M2M_STATUS_BUF_NO (0 << M2M_STATUS_BUF_SHIFT)
95#define M2M_STATUS_BUF_ON (1 << M2M_STATUS_BUF_SHIFT)
96#define M2M_STATUS_BUF_NEXT (2 << M2M_STATUS_BUF_SHIFT)
97#define M2M_STATUS_BUF_MASK (3 << M2M_STATUS_BUF_SHIFT)
98#define M2M_STATUS_DONE BIT(6)
83 99
84#define M2M_BCR0 0x0010 100#define M2M_BCR0 0x0010
85#define M2M_BCR1 0x0014 101#define M2M_BCR1 0x0014
@@ -426,15 +442,6 @@ static int m2p_hw_interrupt(struct ep93xx_dma_chan *edmac)
426 442
427/* 443/*
428 * M2M DMA implementation 444 * M2M DMA implementation
429 *
430 * For the M2M transfers we don't use NFB at all. This is because it simply
431 * doesn't work well with memcpy transfers. When you submit both buffers it is
432 * extremely unlikely that you get an NFB interrupt, but it instead reports
433 * DONE interrupt and both buffers are already transferred which means that we
434 * weren't able to update the next buffer.
435 *
436 * So for now we "simulate" NFB by just submitting buffer after buffer
437 * without double buffering.
438 */ 445 */
439 446
440static int m2m_hw_setup(struct ep93xx_dma_chan *edmac) 447static int m2m_hw_setup(struct ep93xx_dma_chan *edmac)
@@ -543,6 +550,11 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
543 m2m_fill_desc(edmac); 550 m2m_fill_desc(edmac);
544 control |= M2M_CONTROL_DONEINT; 551 control |= M2M_CONTROL_DONEINT;
545 552
553 if (ep93xx_dma_advance_active(edmac)) {
554 m2m_fill_desc(edmac);
555 control |= M2M_CONTROL_NFBINT;
556 }
557
546 /* 558 /*
547 * Now we can finally enable the channel. For M2M channel this must be 559 * Now we can finally enable the channel. For M2M channel this must be
548 * done _after_ the BCRx registers are programmed. 560 * done _after_ the BCRx registers are programmed.
@@ -560,32 +572,89 @@ static void m2m_hw_submit(struct ep93xx_dma_chan *edmac)
560 } 572 }
561} 573}
562 574
575/*
576 * According to EP93xx User's Guide, we should receive DONE interrupt when all
577 * M2M DMA controller transactions complete normally. This is not always the
578 * case - sometimes EP93xx M2M DMA asserts DONE interrupt when the DMA channel
579 * is still running (channel Buffer FSM in DMA_BUF_ON state, and channel
580 * Control FSM in DMA_MEM_RD state, observed at least in IDE-DMA operation).
581 * In effect, disabling the channel when only DONE bit is set could stop
582 * currently running DMA transfer. To avoid this, we use Buffer FSM and
583 * Control FSM to check current state of DMA channel.
584 */
563static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac) 585static int m2m_hw_interrupt(struct ep93xx_dma_chan *edmac)
564{ 586{
587 u32 status = readl(edmac->regs + M2M_STATUS);
588 u32 ctl_fsm = status & M2M_STATUS_CTL_MASK;
589 u32 buf_fsm = status & M2M_STATUS_BUF_MASK;
590 bool done = status & M2M_STATUS_DONE;
591 bool last_done;
565 u32 control; 592 u32 control;
593 struct ep93xx_dma_desc *desc;
566 594
567 if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_DONEINT)) 595 /* Accept only DONE and NFB interrupts */
596 if (!(readl(edmac->regs + M2M_INTERRUPT) & M2M_INTERRUPT_MASK))
568 return INTERRUPT_UNKNOWN; 597 return INTERRUPT_UNKNOWN;
569 598
570 /* Clear the DONE bit */ 599 if (done) {
571 writel(0, edmac->regs + M2M_INTERRUPT); 600 /* Clear the DONE bit */
601 writel(0, edmac->regs + M2M_INTERRUPT);
602 }
572 603
573 /* Disable interrupts and the channel */ 604 /*
574 control = readl(edmac->regs + M2M_CONTROL); 605 * Check whether we are done with descriptors or not. This, together
575 control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_ENABLE); 606 * with DMA channel state, determines action to take in interrupt.
576 writel(control, edmac->regs + M2M_CONTROL); 607 */
608 desc = ep93xx_dma_get_active(edmac);
609 last_done = !desc || desc->txd.cookie;
577 610
578 /* 611 /*
579 * Since we only get DONE interrupt we have to find out ourselves 612 * Use M2M DMA Buffer FSM and Control FSM to check current state of
580 * whether there still is something to process. So we try to advance 613 * DMA channel. Using DONE and NFB bits from channel status register
581 * the chain an see whether it succeeds. 614 * or bits from channel interrupt register is not reliable.
582 */ 615 */
583 if (ep93xx_dma_advance_active(edmac)) { 616 if (!last_done &&
584 edmac->edma->hw_submit(edmac); 617 (buf_fsm == M2M_STATUS_BUF_NO ||
585 return INTERRUPT_NEXT_BUFFER; 618 buf_fsm == M2M_STATUS_BUF_ON)) {
619 /*
620 * Two buffers are ready for update when Buffer FSM is in
621 * DMA_NO_BUF state. Only one buffer can be prepared without
622 * disabling the channel or polling the DONE bit.
623 * To simplify things, always prepare only one buffer.
624 */
625 if (ep93xx_dma_advance_active(edmac)) {
626 m2m_fill_desc(edmac);
627 if (done && !edmac->chan.private) {
628 /* Software trigger for memcpy channel */
629 control = readl(edmac->regs + M2M_CONTROL);
630 control |= M2M_CONTROL_START;
631 writel(control, edmac->regs + M2M_CONTROL);
632 }
633 return INTERRUPT_NEXT_BUFFER;
634 } else {
635 last_done = true;
636 }
637 }
638
639 /*
640 * Disable the channel only when Buffer FSM is in DMA_NO_BUF state
641 * and Control FSM is in DMA_STALL state.
642 */
643 if (last_done &&
644 buf_fsm == M2M_STATUS_BUF_NO &&
645 ctl_fsm == M2M_STATUS_CTL_STALL) {
646 /* Disable interrupts and the channel */
647 control = readl(edmac->regs + M2M_CONTROL);
648 control &= ~(M2M_CONTROL_DONEINT | M2M_CONTROL_NFBINT
649 | M2M_CONTROL_ENABLE);
650 writel(control, edmac->regs + M2M_CONTROL);
651 return INTERRUPT_DONE;
586 } 652 }
587 653
588 return INTERRUPT_DONE; 654 /*
655 * Nothing to do this time.
656 */
657 return INTERRUPT_NEXT_BUFFER;
589} 658}
590 659
591/* 660/*
diff --git a/drivers/dma/imx-dma.c b/drivers/dma/imx-dma.c
index bb787d8e1529..fcfeb3cd8d31 100644
--- a/drivers/dma/imx-dma.c
+++ b/drivers/dma/imx-dma.c
@@ -227,7 +227,7 @@ static inline int imxdma_sg_next(struct imxdma_desc *d)
227 struct scatterlist *sg = d->sg; 227 struct scatterlist *sg = d->sg;
228 unsigned long now; 228 unsigned long now;
229 229
230 now = min(d->len, sg->length); 230 now = min(d->len, sg_dma_len(sg));
231 if (d->len != IMX_DMA_LENGTH_LOOP) 231 if (d->len != IMX_DMA_LENGTH_LOOP)
232 d->len -= now; 232 d->len -= now;
233 233
@@ -763,16 +763,16 @@ static struct dma_async_tx_descriptor *imxdma_prep_slave_sg(
763 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node); 763 desc = list_first_entry(&imxdmac->ld_free, struct imxdma_desc, node);
764 764
765 for_each_sg(sgl, sg, sg_len, i) { 765 for_each_sg(sgl, sg, sg_len, i) {
766 dma_length += sg->length; 766 dma_length += sg_dma_len(sg);
767 } 767 }
768 768
769 switch (imxdmac->word_size) { 769 switch (imxdmac->word_size) {
770 case DMA_SLAVE_BUSWIDTH_4_BYTES: 770 case DMA_SLAVE_BUSWIDTH_4_BYTES:
771 if (sgl->length & 3 || sgl->dma_address & 3) 771 if (sg_dma_len(sgl) & 3 || sgl->dma_address & 3)
772 return NULL; 772 return NULL;
773 break; 773 break;
774 case DMA_SLAVE_BUSWIDTH_2_BYTES: 774 case DMA_SLAVE_BUSWIDTH_2_BYTES:
775 if (sgl->length & 1 || sgl->dma_address & 1) 775 if (sg_dma_len(sgl) & 1 || sgl->dma_address & 1)
776 return NULL; 776 return NULL;
777 break; 777 break;
778 case DMA_SLAVE_BUSWIDTH_1_BYTE: 778 case DMA_SLAVE_BUSWIDTH_1_BYTE:
@@ -831,13 +831,13 @@ static struct dma_async_tx_descriptor *imxdma_prep_dma_cyclic(
831 imxdmac->sg_list[i].page_link = 0; 831 imxdmac->sg_list[i].page_link = 0;
832 imxdmac->sg_list[i].offset = 0; 832 imxdmac->sg_list[i].offset = 0;
833 imxdmac->sg_list[i].dma_address = dma_addr; 833 imxdmac->sg_list[i].dma_address = dma_addr;
834 imxdmac->sg_list[i].length = period_len; 834 sg_dma_len(&imxdmac->sg_list[i]) = period_len;
835 dma_addr += period_len; 835 dma_addr += period_len;
836 } 836 }
837 837
838 /* close the loop */ 838 /* close the loop */
839 imxdmac->sg_list[periods].offset = 0; 839 imxdmac->sg_list[periods].offset = 0;
840 imxdmac->sg_list[periods].length = 0; 840 sg_dma_len(&imxdmac->sg_list[periods]) = 0;
841 imxdmac->sg_list[periods].page_link = 841 imxdmac->sg_list[periods].page_link =
842 ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02; 842 ((unsigned long)imxdmac->sg_list | 0x01) & ~0x02;
843 843
diff --git a/drivers/dma/imx-sdma.c b/drivers/dma/imx-sdma.c
index d3e38e28bb6b..fb4f4990f5eb 100644
--- a/drivers/dma/imx-sdma.c
+++ b/drivers/dma/imx-sdma.c
@@ -24,7 +24,7 @@
24#include <linux/mm.h> 24#include <linux/mm.h>
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/wait.h> 27#include <linux/delay.h>
28#include <linux/sched.h> 28#include <linux/sched.h>
29#include <linux/semaphore.h> 29#include <linux/semaphore.h>
30#include <linux/spinlock.h> 30#include <linux/spinlock.h>
@@ -271,6 +271,7 @@ struct sdma_channel {
271 enum dma_status status; 271 enum dma_status status;
272 unsigned int chn_count; 272 unsigned int chn_count;
273 unsigned int chn_real_count; 273 unsigned int chn_real_count;
274 struct tasklet_struct tasklet;
274}; 275};
275 276
276#define IMX_DMA_SG_LOOP BIT(0) 277#define IMX_DMA_SG_LOOP BIT(0)
@@ -322,8 +323,9 @@ struct sdma_engine {
322 struct sdma_context_data *context; 323 struct sdma_context_data *context;
323 dma_addr_t context_phys; 324 dma_addr_t context_phys;
324 struct dma_device dma_device; 325 struct dma_device dma_device;
325 struct clk *clk; 326 struct clk *clk_ipg;
326 struct mutex channel_0_lock; 327 struct clk *clk_ahb;
328 spinlock_t channel_0_lock;
327 struct sdma_script_start_addrs *script_addrs; 329 struct sdma_script_start_addrs *script_addrs;
328}; 330};
329 331
@@ -401,19 +403,27 @@ static void sdma_enable_channel(struct sdma_engine *sdma, int channel)
401} 403}
402 404
403/* 405/*
404 * sdma_run_channel - run a channel and wait till it's done 406 * sdma_run_channel0 - run a channel and wait till it's done
405 */ 407 */
406static int sdma_run_channel(struct sdma_channel *sdmac) 408static int sdma_run_channel0(struct sdma_engine *sdma)
407{ 409{
408 struct sdma_engine *sdma = sdmac->sdma;
409 int channel = sdmac->channel;
410 int ret; 410 int ret;
411 unsigned long timeout = 500;
411 412
412 init_completion(&sdmac->done); 413 sdma_enable_channel(sdma, 0);
413 414
414 sdma_enable_channel(sdma, channel); 415 while (!(ret = readl_relaxed(sdma->regs + SDMA_H_INTR) & 1)) {
416 if (timeout-- <= 0)
417 break;
418 udelay(1);
419 }
415 420
416 ret = wait_for_completion_timeout(&sdmac->done, HZ); 421 if (ret) {
422 /* Clear the interrupt status */
423 writel_relaxed(ret, sdma->regs + SDMA_H_INTR);
424 } else {
425 dev_err(sdma->dev, "Timeout waiting for CH0 ready\n");
426 }
417 427
418 return ret ? 0 : -ETIMEDOUT; 428 return ret ? 0 : -ETIMEDOUT;
419} 429}
@@ -425,17 +435,17 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
425 void *buf_virt; 435 void *buf_virt;
426 dma_addr_t buf_phys; 436 dma_addr_t buf_phys;
427 int ret; 437 int ret;
428 438 unsigned long flags;
429 mutex_lock(&sdma->channel_0_lock);
430 439
431 buf_virt = dma_alloc_coherent(NULL, 440 buf_virt = dma_alloc_coherent(NULL,
432 size, 441 size,
433 &buf_phys, GFP_KERNEL); 442 &buf_phys, GFP_KERNEL);
434 if (!buf_virt) { 443 if (!buf_virt) {
435 ret = -ENOMEM; 444 return -ENOMEM;
436 goto err_out;
437 } 445 }
438 446
447 spin_lock_irqsave(&sdma->channel_0_lock, flags);
448
439 bd0->mode.command = C0_SETPM; 449 bd0->mode.command = C0_SETPM;
440 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD; 450 bd0->mode.status = BD_DONE | BD_INTR | BD_WRAP | BD_EXTD;
441 bd0->mode.count = size / 2; 451 bd0->mode.count = size / 2;
@@ -444,12 +454,11 @@ static int sdma_load_script(struct sdma_engine *sdma, void *buf, int size,
444 454
445 memcpy(buf_virt, buf, size); 455 memcpy(buf_virt, buf, size);
446 456
447 ret = sdma_run_channel(&sdma->channel[0]); 457 ret = sdma_run_channel0(sdma);
448 458
449 dma_free_coherent(NULL, size, buf_virt, buf_phys); 459 spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
450 460
451err_out: 461 dma_free_coherent(NULL, size, buf_virt, buf_phys);
452 mutex_unlock(&sdma->channel_0_lock);
453 462
454 return ret; 463 return ret;
455} 464}
@@ -534,13 +543,11 @@ static void mxc_sdma_handle_channel_normal(struct sdma_channel *sdmac)
534 sdmac->desc.callback(sdmac->desc.callback_param); 543 sdmac->desc.callback(sdmac->desc.callback_param);
535} 544}
536 545
537static void mxc_sdma_handle_channel(struct sdma_channel *sdmac) 546static void sdma_tasklet(unsigned long data)
538{ 547{
539 complete(&sdmac->done); 548 struct sdma_channel *sdmac = (struct sdma_channel *) data;
540 549
541 /* not interested in channel 0 interrupts */ 550 complete(&sdmac->done);
542 if (sdmac->channel == 0)
543 return;
544 551
545 if (sdmac->flags & IMX_DMA_SG_LOOP) 552 if (sdmac->flags & IMX_DMA_SG_LOOP)
546 sdma_handle_channel_loop(sdmac); 553 sdma_handle_channel_loop(sdmac);
@@ -554,13 +561,15 @@ static irqreturn_t sdma_int_handler(int irq, void *dev_id)
554 unsigned long stat; 561 unsigned long stat;
555 562
556 stat = readl_relaxed(sdma->regs + SDMA_H_INTR); 563 stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
564 /* not interested in channel 0 interrupts */
565 stat &= ~1;
557 writel_relaxed(stat, sdma->regs + SDMA_H_INTR); 566 writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
558 567
559 while (stat) { 568 while (stat) {
560 int channel = fls(stat) - 1; 569 int channel = fls(stat) - 1;
561 struct sdma_channel *sdmac = &sdma->channel[channel]; 570 struct sdma_channel *sdmac = &sdma->channel[channel];
562 571
563 mxc_sdma_handle_channel(sdmac); 572 tasklet_schedule(&sdmac->tasklet);
564 573
565 __clear_bit(channel, &stat); 574 __clear_bit(channel, &stat);
566 } 575 }
@@ -659,6 +668,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
659 struct sdma_context_data *context = sdma->context; 668 struct sdma_context_data *context = sdma->context;
660 struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd; 669 struct sdma_buffer_descriptor *bd0 = sdma->channel[0].bd;
661 int ret; 670 int ret;
671 unsigned long flags;
662 672
663 if (sdmac->direction == DMA_DEV_TO_MEM) { 673 if (sdmac->direction == DMA_DEV_TO_MEM) {
664 load_address = sdmac->pc_from_device; 674 load_address = sdmac->pc_from_device;
@@ -676,7 +686,7 @@ static int sdma_load_context(struct sdma_channel *sdmac)
676 dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]); 686 dev_dbg(sdma->dev, "event_mask0 = 0x%08x\n", (u32)sdmac->event_mask[0]);
677 dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]); 687 dev_dbg(sdma->dev, "event_mask1 = 0x%08x\n", (u32)sdmac->event_mask[1]);
678 688
679 mutex_lock(&sdma->channel_0_lock); 689 spin_lock_irqsave(&sdma->channel_0_lock, flags);
680 690
681 memset(context, 0, sizeof(*context)); 691 memset(context, 0, sizeof(*context));
682 context->channel_state.pc = load_address; 692 context->channel_state.pc = load_address;
@@ -695,10 +705,9 @@ static int sdma_load_context(struct sdma_channel *sdmac)
695 bd0->mode.count = sizeof(*context) / 4; 705 bd0->mode.count = sizeof(*context) / 4;
696 bd0->buffer_addr = sdma->context_phys; 706 bd0->buffer_addr = sdma->context_phys;
697 bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel; 707 bd0->ext_buffer_addr = 2048 + (sizeof(*context) / 4) * channel;
708 ret = sdma_run_channel0(sdma);
698 709
699 ret = sdma_run_channel(&sdma->channel[0]); 710 spin_unlock_irqrestore(&sdma->channel_0_lock, flags);
700
701 mutex_unlock(&sdma->channel_0_lock);
702 711
703 return ret; 712 return ret;
704} 713}
@@ -859,7 +868,8 @@ static int sdma_alloc_chan_resources(struct dma_chan *chan)
859 sdmac->peripheral_type = data->peripheral_type; 868 sdmac->peripheral_type = data->peripheral_type;
860 sdmac->event_id0 = data->dma_request; 869 sdmac->event_id0 = data->dma_request;
861 870
862 clk_enable(sdmac->sdma->clk); 871 clk_enable(sdmac->sdma->clk_ipg);
872 clk_enable(sdmac->sdma->clk_ahb);
863 873
864 ret = sdma_request_channel(sdmac); 874 ret = sdma_request_channel(sdmac);
865 if (ret) 875 if (ret)
@@ -896,7 +906,8 @@ static void sdma_free_chan_resources(struct dma_chan *chan)
896 906
897 dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys); 907 dma_free_coherent(NULL, PAGE_SIZE, sdmac->bd, sdmac->bd_phys);
898 908
899 clk_disable(sdma->clk); 909 clk_disable(sdma->clk_ipg);
910 clk_disable(sdma->clk_ahb);
900} 911}
901 912
902static struct dma_async_tx_descriptor *sdma_prep_slave_sg( 913static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
@@ -938,7 +949,7 @@ static struct dma_async_tx_descriptor *sdma_prep_slave_sg(
938 949
939 bd->buffer_addr = sg->dma_address; 950 bd->buffer_addr = sg->dma_address;
940 951
941 count = sg->length; 952 count = sg_dma_len(sg);
942 953
943 if (count > 0xffff) { 954 if (count > 0xffff) {
944 dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n", 955 dev_err(sdma->dev, "SDMA channel %d: maximum bytes for sg entry exceeded: %d > %d\n",
@@ -1169,12 +1180,14 @@ static void sdma_load_firmware(const struct firmware *fw, void *context)
1169 addr = (void *)header + header->script_addrs_start; 1180 addr = (void *)header + header->script_addrs_start;
1170 ram_code = (void *)header + header->ram_code_start; 1181 ram_code = (void *)header + header->ram_code_start;
1171 1182
1172 clk_enable(sdma->clk); 1183 clk_enable(sdma->clk_ipg);
1184 clk_enable(sdma->clk_ahb);
1173 /* download the RAM image for SDMA */ 1185 /* download the RAM image for SDMA */
1174 sdma_load_script(sdma, ram_code, 1186 sdma_load_script(sdma, ram_code,
1175 header->ram_code_size, 1187 header->ram_code_size,
1176 addr->ram_code_start_addr); 1188 addr->ram_code_start_addr);
1177 clk_disable(sdma->clk); 1189 clk_disable(sdma->clk_ipg);
1190 clk_disable(sdma->clk_ahb);
1178 1191
1179 sdma_add_scripts(sdma, addr); 1192 sdma_add_scripts(sdma, addr);
1180 1193
@@ -1216,7 +1229,8 @@ static int __init sdma_init(struct sdma_engine *sdma)
1216 return -ENODEV; 1229 return -ENODEV;
1217 } 1230 }
1218 1231
1219 clk_enable(sdma->clk); 1232 clk_enable(sdma->clk_ipg);
1233 clk_enable(sdma->clk_ahb);
1220 1234
1221 /* Be sure SDMA has not started yet */ 1235 /* Be sure SDMA has not started yet */
1222 writel_relaxed(0, sdma->regs + SDMA_H_C0PTR); 1236 writel_relaxed(0, sdma->regs + SDMA_H_C0PTR);
@@ -1269,12 +1283,14 @@ static int __init sdma_init(struct sdma_engine *sdma)
1269 /* Initializes channel's priorities */ 1283 /* Initializes channel's priorities */
1270 sdma_set_channel_priority(&sdma->channel[0], 7); 1284 sdma_set_channel_priority(&sdma->channel[0], 7);
1271 1285
1272 clk_disable(sdma->clk); 1286 clk_disable(sdma->clk_ipg);
1287 clk_disable(sdma->clk_ahb);
1273 1288
1274 return 0; 1289 return 0;
1275 1290
1276err_dma_alloc: 1291err_dma_alloc:
1277 clk_disable(sdma->clk); 1292 clk_disable(sdma->clk_ipg);
1293 clk_disable(sdma->clk_ahb);
1278 dev_err(sdma->dev, "initialisation failed with %d\n", ret); 1294 dev_err(sdma->dev, "initialisation failed with %d\n", ret);
1279 return ret; 1295 return ret;
1280} 1296}
@@ -1297,7 +1313,7 @@ static int __init sdma_probe(struct platform_device *pdev)
1297 if (!sdma) 1313 if (!sdma)
1298 return -ENOMEM; 1314 return -ENOMEM;
1299 1315
1300 mutex_init(&sdma->channel_0_lock); 1316 spin_lock_init(&sdma->channel_0_lock);
1301 1317
1302 sdma->dev = &pdev->dev; 1318 sdma->dev = &pdev->dev;
1303 1319
@@ -1313,12 +1329,21 @@ static int __init sdma_probe(struct platform_device *pdev)
1313 goto err_request_region; 1329 goto err_request_region;
1314 } 1330 }
1315 1331
1316 sdma->clk = clk_get(&pdev->dev, NULL); 1332 sdma->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1317 if (IS_ERR(sdma->clk)) { 1333 if (IS_ERR(sdma->clk_ipg)) {
1318 ret = PTR_ERR(sdma->clk); 1334 ret = PTR_ERR(sdma->clk_ipg);
1319 goto err_clk; 1335 goto err_clk;
1320 } 1336 }
1321 1337
1338 sdma->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1339 if (IS_ERR(sdma->clk_ahb)) {
1340 ret = PTR_ERR(sdma->clk_ahb);
1341 goto err_clk;
1342 }
1343
1344 clk_prepare(sdma->clk_ipg);
1345 clk_prepare(sdma->clk_ahb);
1346
1322 sdma->regs = ioremap(iores->start, resource_size(iores)); 1347 sdma->regs = ioremap(iores->start, resource_size(iores));
1323 if (!sdma->regs) { 1348 if (!sdma->regs) {
1324 ret = -ENOMEM; 1349 ret = -ENOMEM;
@@ -1359,6 +1384,8 @@ static int __init sdma_probe(struct platform_device *pdev)
1359 dma_cookie_init(&sdmac->chan); 1384 dma_cookie_init(&sdmac->chan);
1360 sdmac->channel = i; 1385 sdmac->channel = i;
1361 1386
1387 tasklet_init(&sdmac->tasklet, sdma_tasklet,
1388 (unsigned long) sdmac);
1362 /* 1389 /*
1363 * Add the channel to the DMAC list. Do not add channel 0 though 1390 * Add the channel to the DMAC list. Do not add channel 0 though
1364 * because we need it internally in the SDMA driver. This also means 1391 * because we need it internally in the SDMA driver. This also means
@@ -1426,7 +1453,6 @@ err_alloc:
1426err_request_irq: 1453err_request_irq:
1427 iounmap(sdma->regs); 1454 iounmap(sdma->regs);
1428err_ioremap: 1455err_ioremap:
1429 clk_put(sdma->clk);
1430err_clk: 1456err_clk:
1431 release_mem_region(iores->start, resource_size(iores)); 1457 release_mem_region(iores->start, resource_size(iores));
1432err_request_region: 1458err_request_region:
diff --git a/drivers/dma/intel_mid_dma.c b/drivers/dma/intel_mid_dma.c
index c900ca7aaec4..222e907bfaaa 100644
--- a/drivers/dma/intel_mid_dma.c
+++ b/drivers/dma/intel_mid_dma.c
@@ -394,11 +394,11 @@ static int midc_lli_fill_sg(struct intel_mid_dma_chan *midc,
394 } 394 }
395 } 395 }
396 /*Populate CTL_HI values*/ 396 /*Populate CTL_HI values*/
397 ctl_hi.ctlx.block_ts = get_block_ts(sg->length, 397 ctl_hi.ctlx.block_ts = get_block_ts(sg_dma_len(sg),
398 desc->width, 398 desc->width,
399 midc->dma->block_size); 399 midc->dma->block_size);
400 /*Populate SAR and DAR values*/ 400 /*Populate SAR and DAR values*/
401 sg_phy_addr = sg_phys(sg); 401 sg_phy_addr = sg_dma_address(sg);
402 if (desc->dirn == DMA_MEM_TO_DEV) { 402 if (desc->dirn == DMA_MEM_TO_DEV) {
403 lli_bloc_desc->sar = sg_phy_addr; 403 lli_bloc_desc->sar = sg_phy_addr;
404 lli_bloc_desc->dar = mids->dma_slave.dst_addr; 404 lli_bloc_desc->dar = mids->dma_slave.dst_addr;
@@ -747,7 +747,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
747 txd = intel_mid_dma_prep_memcpy(chan, 747 txd = intel_mid_dma_prep_memcpy(chan,
748 mids->dma_slave.dst_addr, 748 mids->dma_slave.dst_addr,
749 mids->dma_slave.src_addr, 749 mids->dma_slave.src_addr,
750 sgl->length, 750 sg_dma_len(sgl),
751 flags); 751 flags);
752 return txd; 752 return txd;
753 } else { 753 } else {
@@ -759,7 +759,7 @@ static struct dma_async_tx_descriptor *intel_mid_dma_prep_slave_sg(
759 pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n", 759 pr_debug("MDMA: SG Length = %d, direction = %d, Flags = %#lx\n",
760 sg_len, direction, flags); 760 sg_len, direction, flags);
761 761
762 txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sgl->length, flags); 762 txd = intel_mid_dma_prep_memcpy(chan, 0, 0, sg_dma_len(sgl), flags);
763 if (NULL == txd) { 763 if (NULL == txd) {
764 pr_err("MDMA: Prep memcpy failed\n"); 764 pr_err("MDMA: Prep memcpy failed\n");
765 return NULL; 765 return NULL;
diff --git a/drivers/dma/ipu/ipu_idmac.c b/drivers/dma/ipu/ipu_idmac.c
index 62e3f8ec2461..5ec72044ea4c 100644
--- a/drivers/dma/ipu/ipu_idmac.c
+++ b/drivers/dma/ipu/ipu_idmac.c
@@ -1715,7 +1715,7 @@ static int __init ipu_probe(struct platform_device *pdev)
1715 } 1715 }
1716 1716
1717 /* Make sure IPU HSP clock is running */ 1717 /* Make sure IPU HSP clock is running */
1718 clk_enable(ipu_data.ipu_clk); 1718 clk_prepare_enable(ipu_data.ipu_clk);
1719 1719
1720 /* Disable all interrupts */ 1720 /* Disable all interrupts */
1721 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1); 1721 idmac_write_ipureg(&ipu_data, 0, IPU_INT_CTRL_1);
@@ -1747,7 +1747,7 @@ static int __init ipu_probe(struct platform_device *pdev)
1747err_idmac_init: 1747err_idmac_init:
1748err_attach_irq: 1748err_attach_irq:
1749 ipu_irq_detach_irq(&ipu_data, pdev); 1749 ipu_irq_detach_irq(&ipu_data, pdev);
1750 clk_disable(ipu_data.ipu_clk); 1750 clk_disable_unprepare(ipu_data.ipu_clk);
1751 clk_put(ipu_data.ipu_clk); 1751 clk_put(ipu_data.ipu_clk);
1752err_clk_get: 1752err_clk_get:
1753 iounmap(ipu_data.reg_ic); 1753 iounmap(ipu_data.reg_ic);
@@ -1765,7 +1765,7 @@ static int __exit ipu_remove(struct platform_device *pdev)
1765 1765
1766 ipu_idmac_exit(ipu); 1766 ipu_idmac_exit(ipu);
1767 ipu_irq_detach_irq(ipu, pdev); 1767 ipu_irq_detach_irq(ipu, pdev);
1768 clk_disable(ipu->ipu_clk); 1768 clk_disable_unprepare(ipu->ipu_clk);
1769 clk_put(ipu->ipu_clk); 1769 clk_put(ipu->ipu_clk);
1770 iounmap(ipu->reg_ic); 1770 iounmap(ipu->reg_ic);
1771 iounmap(ipu->reg_ipu); 1771 iounmap(ipu->reg_ipu);
diff --git a/drivers/dma/mv_xor.c b/drivers/dma/mv_xor.c
index fa5d55fea46c..0b12e68bf79c 100644
--- a/drivers/dma/mv_xor.c
+++ b/drivers/dma/mv_xor.c
@@ -25,6 +25,7 @@
25#include <linux/interrupt.h> 25#include <linux/interrupt.h>
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27#include <linux/memory.h> 27#include <linux/memory.h>
28#include <linux/clk.h>
28#include <plat/mv_xor.h> 29#include <plat/mv_xor.h>
29 30
30#include "dmaengine.h" 31#include "dmaengine.h"
@@ -1307,11 +1308,25 @@ static int mv_xor_shared_probe(struct platform_device *pdev)
1307 if (dram) 1308 if (dram)
1308 mv_xor_conf_mbus_windows(msp, dram); 1309 mv_xor_conf_mbus_windows(msp, dram);
1309 1310
1311 /* Not all platforms can gate the clock, so it is not
1312 * an error if the clock does not exists.
1313 */
1314 msp->clk = clk_get(&pdev->dev, NULL);
1315 if (!IS_ERR(msp->clk))
1316 clk_prepare_enable(msp->clk);
1317
1310 return 0; 1318 return 0;
1311} 1319}
1312 1320
1313static int mv_xor_shared_remove(struct platform_device *pdev) 1321static int mv_xor_shared_remove(struct platform_device *pdev)
1314{ 1322{
1323 struct mv_xor_shared_private *msp = platform_get_drvdata(pdev);
1324
1325 if (!IS_ERR(msp->clk)) {
1326 clk_disable_unprepare(msp->clk);
1327 clk_put(msp->clk);
1328 }
1329
1315 return 0; 1330 return 0;
1316} 1331}
1317 1332
diff --git a/drivers/dma/mv_xor.h b/drivers/dma/mv_xor.h
index 654876b7ba1d..a5b422f5a8ab 100644
--- a/drivers/dma/mv_xor.h
+++ b/drivers/dma/mv_xor.h
@@ -55,6 +55,7 @@
55struct mv_xor_shared_private { 55struct mv_xor_shared_private {
56 void __iomem *xor_base; 56 void __iomem *xor_base;
57 void __iomem *xor_high_base; 57 void __iomem *xor_high_base;
58 struct clk *clk;
58}; 59};
59 60
60 61
diff --git a/drivers/dma/mxs-dma.c b/drivers/dma/mxs-dma.c
index 655d4ce6ed0d..c96ab15319f2 100644
--- a/drivers/dma/mxs-dma.c
+++ b/drivers/dma/mxs-dma.c
@@ -22,11 +22,14 @@
22#include <linux/platform_device.h> 22#include <linux/platform_device.h>
23#include <linux/dmaengine.h> 23#include <linux/dmaengine.h>
24#include <linux/delay.h> 24#include <linux/delay.h>
25#include <linux/module.h>
25#include <linux/fsl/mxs-dma.h> 26#include <linux/fsl/mxs-dma.h>
27#include <linux/stmp_device.h>
28#include <linux/of.h>
29#include <linux/of_device.h>
26 30
27#include <asm/irq.h> 31#include <asm/irq.h>
28#include <mach/mxs.h> 32#include <mach/mxs.h>
29#include <mach/common.h>
30 33
31#include "dmaengine.h" 34#include "dmaengine.h"
32 35
@@ -36,12 +39,8 @@
36 * dma can program the controller registers of peripheral devices. 39 * dma can program the controller registers of peripheral devices.
37 */ 40 */
38 41
39#define MXS_DMA_APBH 0 42#define dma_is_apbh(mxs_dma) ((mxs_dma)->type == MXS_DMA_APBH)
40#define MXS_DMA_APBX 1 43#define apbh_is_old(mxs_dma) ((mxs_dma)->dev_id == IMX23_DMA)
41#define dma_is_apbh() (mxs_dma->dev_id == MXS_DMA_APBH)
42
43#define APBH_VERSION_LATEST 3
44#define apbh_is_old() (mxs_dma->version < APBH_VERSION_LATEST)
45 44
46#define HW_APBHX_CTRL0 0x000 45#define HW_APBHX_CTRL0 0x000
47#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29) 46#define BM_APBH_CTRL0_APB_BURST8_EN (1 << 29)
@@ -51,13 +50,14 @@
51#define HW_APBHX_CTRL2 0x020 50#define HW_APBHX_CTRL2 0x020
52#define HW_APBHX_CHANNEL_CTRL 0x030 51#define HW_APBHX_CHANNEL_CTRL 0x030
53#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16 52#define BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL 16
54#define HW_APBH_VERSION (cpu_is_mx23() ? 0x3f0 : 0x800) 53/*
55#define HW_APBX_VERSION 0x800 54 * The offset of NXTCMDAR register is different per both dma type and version,
56#define BP_APBHX_VERSION_MAJOR 24 55 * while stride for each channel is all the same 0x70.
57#define HW_APBHX_CHn_NXTCMDAR(n) \ 56 */
58 (((dma_is_apbh() && apbh_is_old()) ? 0x050 : 0x110) + (n) * 0x70) 57#define HW_APBHX_CHn_NXTCMDAR(d, n) \
59#define HW_APBHX_CHn_SEMA(n) \ 58 (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x050 : 0x110) + (n) * 0x70)
60 (((dma_is_apbh() && apbh_is_old()) ? 0x080 : 0x140) + (n) * 0x70) 59#define HW_APBHX_CHn_SEMA(d, n) \
60 (((dma_is_apbh(d) && apbh_is_old(d)) ? 0x080 : 0x140) + (n) * 0x70)
61 61
62/* 62/*
63 * ccw bits definitions 63 * ccw bits definitions
@@ -121,9 +121,19 @@ struct mxs_dma_chan {
121#define MXS_DMA_CHANNELS 16 121#define MXS_DMA_CHANNELS 16
122#define MXS_DMA_CHANNELS_MASK 0xffff 122#define MXS_DMA_CHANNELS_MASK 0xffff
123 123
124enum mxs_dma_devtype {
125 MXS_DMA_APBH,
126 MXS_DMA_APBX,
127};
128
129enum mxs_dma_id {
130 IMX23_DMA,
131 IMX28_DMA,
132};
133
124struct mxs_dma_engine { 134struct mxs_dma_engine {
125 int dev_id; 135 enum mxs_dma_id dev_id;
126 unsigned int version; 136 enum mxs_dma_devtype type;
127 void __iomem *base; 137 void __iomem *base;
128 struct clk *clk; 138 struct clk *clk;
129 struct dma_device dma_device; 139 struct dma_device dma_device;
@@ -131,17 +141,86 @@ struct mxs_dma_engine {
131 struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS]; 141 struct mxs_dma_chan mxs_chans[MXS_DMA_CHANNELS];
132}; 142};
133 143
144struct mxs_dma_type {
145 enum mxs_dma_id id;
146 enum mxs_dma_devtype type;
147};
148
149static struct mxs_dma_type mxs_dma_types[] = {
150 {
151 .id = IMX23_DMA,
152 .type = MXS_DMA_APBH,
153 }, {
154 .id = IMX23_DMA,
155 .type = MXS_DMA_APBX,
156 }, {
157 .id = IMX28_DMA,
158 .type = MXS_DMA_APBH,
159 }, {
160 .id = IMX28_DMA,
161 .type = MXS_DMA_APBX,
162 }
163};
164
165static struct platform_device_id mxs_dma_ids[] = {
166 {
167 .name = "imx23-dma-apbh",
168 .driver_data = (kernel_ulong_t) &mxs_dma_types[0],
169 }, {
170 .name = "imx23-dma-apbx",
171 .driver_data = (kernel_ulong_t) &mxs_dma_types[1],
172 }, {
173 .name = "imx28-dma-apbh",
174 .driver_data = (kernel_ulong_t) &mxs_dma_types[2],
175 }, {
176 .name = "imx28-dma-apbx",
177 .driver_data = (kernel_ulong_t) &mxs_dma_types[3],
178 }, {
179 /* end of list */
180 }
181};
182
183static const struct of_device_id mxs_dma_dt_ids[] = {
184 { .compatible = "fsl,imx23-dma-apbh", .data = &mxs_dma_ids[0], },
185 { .compatible = "fsl,imx23-dma-apbx", .data = &mxs_dma_ids[1], },
186 { .compatible = "fsl,imx28-dma-apbh", .data = &mxs_dma_ids[2], },
187 { .compatible = "fsl,imx28-dma-apbx", .data = &mxs_dma_ids[3], },
188 { /* sentinel */ }
189};
190MODULE_DEVICE_TABLE(of, mxs_dma_dt_ids);
191
192static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
193{
194 return container_of(chan, struct mxs_dma_chan, chan);
195}
196
197int mxs_dma_is_apbh(struct dma_chan *chan)
198{
199 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
200 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
201
202 return dma_is_apbh(mxs_dma);
203}
204
205int mxs_dma_is_apbx(struct dma_chan *chan)
206{
207 struct mxs_dma_chan *mxs_chan = to_mxs_dma_chan(chan);
208 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
209
210 return !dma_is_apbh(mxs_dma);
211}
212
134static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan) 213static void mxs_dma_reset_chan(struct mxs_dma_chan *mxs_chan)
135{ 214{
136 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma; 215 struct mxs_dma_engine *mxs_dma = mxs_chan->mxs_dma;
137 int chan_id = mxs_chan->chan.chan_id; 216 int chan_id = mxs_chan->chan.chan_id;
138 217
139 if (dma_is_apbh() && apbh_is_old()) 218 if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
140 writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL), 219 writel(1 << (chan_id + BP_APBH_CTRL0_RESET_CHANNEL),
141 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); 220 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
142 else 221 else
143 writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL), 222 writel(1 << (chan_id + BP_APBHX_CHANNEL_CTRL_RESET_CHANNEL),
144 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR); 223 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
145} 224}
146 225
147static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan) 226static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
@@ -151,10 +230,10 @@ static void mxs_dma_enable_chan(struct mxs_dma_chan *mxs_chan)
151 230
152 /* set cmd_addr up */ 231 /* set cmd_addr up */
153 writel(mxs_chan->ccw_phys, 232 writel(mxs_chan->ccw_phys,
154 mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(chan_id)); 233 mxs_dma->base + HW_APBHX_CHn_NXTCMDAR(mxs_dma, chan_id));
155 234
156 /* write 1 to SEMA to kick off the channel */ 235 /* write 1 to SEMA to kick off the channel */
157 writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(chan_id)); 236 writel(1, mxs_dma->base + HW_APBHX_CHn_SEMA(mxs_dma, chan_id));
158} 237}
159 238
160static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan) 239static void mxs_dma_disable_chan(struct mxs_dma_chan *mxs_chan)
@@ -168,12 +247,12 @@ static void mxs_dma_pause_chan(struct mxs_dma_chan *mxs_chan)
168 int chan_id = mxs_chan->chan.chan_id; 247 int chan_id = mxs_chan->chan.chan_id;
169 248
170 /* freeze the channel */ 249 /* freeze the channel */
171 if (dma_is_apbh() && apbh_is_old()) 250 if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
172 writel(1 << chan_id, 251 writel(1 << chan_id,
173 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); 252 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
174 else 253 else
175 writel(1 << chan_id, 254 writel(1 << chan_id,
176 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_SET_ADDR); 255 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_SET);
177 256
178 mxs_chan->status = DMA_PAUSED; 257 mxs_chan->status = DMA_PAUSED;
179} 258}
@@ -184,21 +263,16 @@ static void mxs_dma_resume_chan(struct mxs_dma_chan *mxs_chan)
184 int chan_id = mxs_chan->chan.chan_id; 263 int chan_id = mxs_chan->chan.chan_id;
185 264
186 /* unfreeze the channel */ 265 /* unfreeze the channel */
187 if (dma_is_apbh() && apbh_is_old()) 266 if (dma_is_apbh(mxs_dma) && apbh_is_old(mxs_dma))
188 writel(1 << chan_id, 267 writel(1 << chan_id,
189 mxs_dma->base + HW_APBHX_CTRL0 + MXS_CLR_ADDR); 268 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_CLR);
190 else 269 else
191 writel(1 << chan_id, 270 writel(1 << chan_id,
192 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + MXS_CLR_ADDR); 271 mxs_dma->base + HW_APBHX_CHANNEL_CTRL + STMP_OFFSET_REG_CLR);
193 272
194 mxs_chan->status = DMA_IN_PROGRESS; 273 mxs_chan->status = DMA_IN_PROGRESS;
195} 274}
196 275
197static struct mxs_dma_chan *to_mxs_dma_chan(struct dma_chan *chan)
198{
199 return container_of(chan, struct mxs_dma_chan, chan);
200}
201
202static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx) 276static dma_cookie_t mxs_dma_tx_submit(struct dma_async_tx_descriptor *tx)
203{ 277{
204 return dma_cookie_assign(tx); 278 return dma_cookie_assign(tx);
@@ -220,11 +294,11 @@ static irqreturn_t mxs_dma_int_handler(int irq, void *dev_id)
220 /* completion status */ 294 /* completion status */
221 stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1); 295 stat1 = readl(mxs_dma->base + HW_APBHX_CTRL1);
222 stat1 &= MXS_DMA_CHANNELS_MASK; 296 stat1 &= MXS_DMA_CHANNELS_MASK;
223 writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + MXS_CLR_ADDR); 297 writel(stat1, mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_CLR);
224 298
225 /* error status */ 299 /* error status */
226 stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2); 300 stat2 = readl(mxs_dma->base + HW_APBHX_CTRL2);
227 writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + MXS_CLR_ADDR); 301 writel(stat2, mxs_dma->base + HW_APBHX_CTRL2 + STMP_OFFSET_REG_CLR);
228 302
229 /* 303 /*
230 * When both completion and error of termination bits set at the 304 * When both completion and error of termination bits set at the
@@ -415,9 +489,9 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
415 ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND); 489 ccw->bits |= BF_CCW(MXS_DMA_CMD_NO_XFER, COMMAND);
416 } else { 490 } else {
417 for_each_sg(sgl, sg, sg_len, i) { 491 for_each_sg(sgl, sg, sg_len, i) {
418 if (sg->length > MAX_XFER_BYTES) { 492 if (sg_dma_len(sg) > MAX_XFER_BYTES) {
419 dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n", 493 dev_err(mxs_dma->dma_device.dev, "maximum bytes for sg entry exceeded: %d > %d\n",
420 sg->length, MAX_XFER_BYTES); 494 sg_dma_len(sg), MAX_XFER_BYTES);
421 goto err_out; 495 goto err_out;
422 } 496 }
423 497
@@ -425,7 +499,7 @@ static struct dma_async_tx_descriptor *mxs_dma_prep_slave_sg(
425 499
426 ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx; 500 ccw->next = mxs_chan->ccw_phys + sizeof(*ccw) * idx;
427 ccw->bufaddr = sg->dma_address; 501 ccw->bufaddr = sg->dma_address;
428 ccw->xfer_bytes = sg->length; 502 ccw->xfer_bytes = sg_dma_len(sg);
429 503
430 ccw->bits = 0; 504 ccw->bits = 0;
431 ccw->bits |= CCW_CHAIN; 505 ccw->bits |= CCW_CHAIN;
@@ -567,27 +641,21 @@ static int __init mxs_dma_init(struct mxs_dma_engine *mxs_dma)
567 if (ret) 641 if (ret)
568 return ret; 642 return ret;
569 643
570 ret = mxs_reset_block(mxs_dma->base); 644 ret = stmp_reset_block(mxs_dma->base);
571 if (ret) 645 if (ret)
572 goto err_out; 646 goto err_out;
573 647
574 /* only major version matters */
575 mxs_dma->version = readl(mxs_dma->base +
576 ((mxs_dma->dev_id == MXS_DMA_APBX) ?
577 HW_APBX_VERSION : HW_APBH_VERSION)) >>
578 BP_APBHX_VERSION_MAJOR;
579
580 /* enable apbh burst */ 648 /* enable apbh burst */
581 if (dma_is_apbh()) { 649 if (dma_is_apbh(mxs_dma)) {
582 writel(BM_APBH_CTRL0_APB_BURST_EN, 650 writel(BM_APBH_CTRL0_APB_BURST_EN,
583 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); 651 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
584 writel(BM_APBH_CTRL0_APB_BURST8_EN, 652 writel(BM_APBH_CTRL0_APB_BURST8_EN,
585 mxs_dma->base + HW_APBHX_CTRL0 + MXS_SET_ADDR); 653 mxs_dma->base + HW_APBHX_CTRL0 + STMP_OFFSET_REG_SET);
586 } 654 }
587 655
588 /* enable irq for all the channels */ 656 /* enable irq for all the channels */
589 writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS, 657 writel(MXS_DMA_CHANNELS_MASK << MXS_DMA_CHANNELS,
590 mxs_dma->base + HW_APBHX_CTRL1 + MXS_SET_ADDR); 658 mxs_dma->base + HW_APBHX_CTRL1 + STMP_OFFSET_REG_SET);
591 659
592err_out: 660err_out:
593 clk_disable_unprepare(mxs_dma->clk); 661 clk_disable_unprepare(mxs_dma->clk);
@@ -596,8 +664,9 @@ err_out:
596 664
597static int __init mxs_dma_probe(struct platform_device *pdev) 665static int __init mxs_dma_probe(struct platform_device *pdev)
598{ 666{
599 const struct platform_device_id *id_entry = 667 const struct platform_device_id *id_entry;
600 platform_get_device_id(pdev); 668 const struct of_device_id *of_id;
669 const struct mxs_dma_type *dma_type;
601 struct mxs_dma_engine *mxs_dma; 670 struct mxs_dma_engine *mxs_dma;
602 struct resource *iores; 671 struct resource *iores;
603 int ret, i; 672 int ret, i;
@@ -606,7 +675,15 @@ static int __init mxs_dma_probe(struct platform_device *pdev)
606 if (!mxs_dma) 675 if (!mxs_dma)
607 return -ENOMEM; 676 return -ENOMEM;
608 677
609 mxs_dma->dev_id = id_entry->driver_data; 678 of_id = of_match_device(mxs_dma_dt_ids, &pdev->dev);
679 if (of_id)
680 id_entry = of_id->data;
681 else
682 id_entry = platform_get_device_id(pdev);
683
684 dma_type = (struct mxs_dma_type *)id_entry->driver_data;
685 mxs_dma->type = dma_type->type;
686 mxs_dma->dev_id = dma_type->id;
610 687
611 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 688 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
612 689
@@ -689,23 +766,12 @@ err_request_region:
689 return ret; 766 return ret;
690} 767}
691 768
692static struct platform_device_id mxs_dma_type[] = {
693 {
694 .name = "mxs-dma-apbh",
695 .driver_data = MXS_DMA_APBH,
696 }, {
697 .name = "mxs-dma-apbx",
698 .driver_data = MXS_DMA_APBX,
699 }, {
700 /* end of list */
701 }
702};
703
704static struct platform_driver mxs_dma_driver = { 769static struct platform_driver mxs_dma_driver = {
705 .driver = { 770 .driver = {
706 .name = "mxs-dma", 771 .name = "mxs-dma",
772 .of_match_table = mxs_dma_dt_ids,
707 }, 773 },
708 .id_table = mxs_dma_type, 774 .id_table = mxs_dma_ids,
709}; 775};
710 776
711static int __init mxs_dma_module_init(void) 777static int __init mxs_dma_module_init(void)
diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c
index 65c0495a6d40..987ab5cd2617 100644
--- a/drivers/dma/pch_dma.c
+++ b/drivers/dma/pch_dma.c
@@ -621,7 +621,7 @@ static struct dma_async_tx_descriptor *pd_prep_slave_sg(struct dma_chan *chan,
621 goto err_desc_get; 621 goto err_desc_get;
622 622
623 desc->regs.dev_addr = reg; 623 desc->regs.dev_addr = reg;
624 desc->regs.mem_addr = sg_phys(sg); 624 desc->regs.mem_addr = sg_dma_address(sg);
625 desc->regs.size = sg_dma_len(sg); 625 desc->regs.size = sg_dma_len(sg);
626 desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ; 626 desc->regs.next = DMA_DESC_FOLLOW_WITHOUT_IRQ;
627 627
diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c
index fa3fb21e60be..cbcc28e79be6 100644
--- a/drivers/dma/pl330.c
+++ b/drivers/dma/pl330.c
@@ -21,7 +21,6 @@
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/dma-mapping.h> 22#include <linux/dma-mapping.h>
23#include <linux/dmaengine.h> 23#include <linux/dmaengine.h>
24#include <linux/interrupt.h>
25#include <linux/amba/bus.h> 24#include <linux/amba/bus.h>
26#include <linux/amba/pl330.h> 25#include <linux/amba/pl330.h>
27#include <linux/pm_runtime.h> 26#include <linux/pm_runtime.h>
diff --git a/drivers/dma/ste_dma40.c b/drivers/dma/ste_dma40.c
index 2ed1ac3513f3..000d309602b2 100644
--- a/drivers/dma/ste_dma40.c
+++ b/drivers/dma/ste_dma40.c
@@ -2362,7 +2362,7 @@ dma40_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t dma_addr,
2362 } 2362 }
2363 2363
2364 sg[periods].offset = 0; 2364 sg[periods].offset = 0;
2365 sg[periods].length = 0; 2365 sg_dma_len(&sg[periods]) = 0;
2366 sg[periods].page_link = 2366 sg[periods].page_link =
2367 ((unsigned long)sg | 0x01) & ~0x02; 2367 ((unsigned long)sg | 0x01) & ~0x02;
2368 2368
diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c
index 7ef73c919c5d..7be9b7288e90 100644
--- a/drivers/edac/amd64_edac.c
+++ b/drivers/edac/amd64_edac.c
@@ -715,25 +715,6 @@ static inline u64 input_addr_to_sys_addr(struct mem_ctl_info *mci,
715 input_addr_to_dram_addr(mci, input_addr)); 715 input_addr_to_dram_addr(mci, input_addr));
716} 716}
717 717
718/*
719 * Find the minimum and maximum InputAddr values that map to the given @csrow.
720 * Pass back these values in *input_addr_min and *input_addr_max.
721 */
722static void find_csrow_limits(struct mem_ctl_info *mci, int csrow,
723 u64 *input_addr_min, u64 *input_addr_max)
724{
725 struct amd64_pvt *pvt;
726 u64 base, mask;
727
728 pvt = mci->pvt_info;
729 BUG_ON((csrow < 0) || (csrow >= pvt->csels[0].b_cnt));
730
731 get_cs_base_and_mask(pvt, csrow, 0, &base, &mask);
732
733 *input_addr_min = base & ~mask;
734 *input_addr_max = base | mask;
735}
736
737/* Map the Error address to a PAGE and PAGE OFFSET. */ 718/* Map the Error address to a PAGE and PAGE OFFSET. */
738static inline void error_address_to_page_and_offset(u64 error_address, 719static inline void error_address_to_page_and_offset(u64 error_address,
739 u32 *page, u32 *offset) 720 u32 *page, u32 *offset)
@@ -1058,6 +1039,37 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1058 int channel, csrow; 1039 int channel, csrow;
1059 u32 page, offset; 1040 u32 page, offset;
1060 1041
1042 error_address_to_page_and_offset(sys_addr, &page, &offset);
1043
1044 /*
1045 * Find out which node the error address belongs to. This may be
1046 * different from the node that detected the error.
1047 */
1048 src_mci = find_mc_by_sys_addr(mci, sys_addr);
1049 if (!src_mci) {
1050 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1051 (unsigned long)sys_addr);
1052 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1053 page, offset, syndrome,
1054 -1, -1, -1,
1055 EDAC_MOD_STR,
1056 "failed to map error addr to a node",
1057 NULL);
1058 return;
1059 }
1060
1061 /* Now map the sys_addr to a CSROW */
1062 csrow = sys_addr_to_csrow(src_mci, sys_addr);
1063 if (csrow < 0) {
1064 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1065 page, offset, syndrome,
1066 -1, -1, -1,
1067 EDAC_MOD_STR,
1068 "failed to map error addr to a csrow",
1069 NULL);
1070 return;
1071 }
1072
1061 /* CHIPKILL enabled */ 1073 /* CHIPKILL enabled */
1062 if (pvt->nbcfg & NBCFG_CHIPKILL) { 1074 if (pvt->nbcfg & NBCFG_CHIPKILL) {
1063 channel = get_channel_from_ecc_syndrome(mci, syndrome); 1075 channel = get_channel_from_ecc_syndrome(mci, syndrome);
@@ -1067,9 +1079,15 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1067 * 2 DIMMs is in error. So we need to ID 'both' of them 1079 * 2 DIMMs is in error. So we need to ID 'both' of them
1068 * as suspect. 1080 * as suspect.
1069 */ 1081 */
1070 amd64_mc_warn(mci, "unknown syndrome 0x%04x - possible " 1082 amd64_mc_warn(src_mci, "unknown syndrome 0x%04x - "
1071 "error reporting race\n", syndrome); 1083 "possible error reporting race\n",
1072 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); 1084 syndrome);
1085 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1086 page, offset, syndrome,
1087 csrow, -1, -1,
1088 EDAC_MOD_STR,
1089 "unknown syndrome - possible error reporting race",
1090 NULL);
1073 return; 1091 return;
1074 } 1092 }
1075 } else { 1093 } else {
@@ -1084,28 +1102,10 @@ static void k8_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1084 channel = ((sys_addr & BIT(3)) != 0); 1102 channel = ((sys_addr & BIT(3)) != 0);
1085 } 1103 }
1086 1104
1087 /* 1105 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, src_mci,
1088 * Find out which node the error address belongs to. This may be 1106 page, offset, syndrome,
1089 * different from the node that detected the error. 1107 csrow, channel, -1,
1090 */ 1108 EDAC_MOD_STR, "", NULL);
1091 src_mci = find_mc_by_sys_addr(mci, sys_addr);
1092 if (!src_mci) {
1093 amd64_mc_err(mci, "failed to map error addr 0x%lx to a node\n",
1094 (unsigned long)sys_addr);
1095 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR);
1096 return;
1097 }
1098
1099 /* Now map the sys_addr to a CSROW */
1100 csrow = sys_addr_to_csrow(src_mci, sys_addr);
1101 if (csrow < 0) {
1102 edac_mc_handle_ce_no_info(src_mci, EDAC_MOD_STR);
1103 } else {
1104 error_address_to_page_and_offset(sys_addr, &page, &offset);
1105
1106 edac_mc_handle_ce(src_mci, page, offset, syndrome, csrow,
1107 channel, EDAC_MOD_STR);
1108 }
1109} 1109}
1110 1110
1111static int ddr2_cs_size(unsigned i, bool dct_width) 1111static int ddr2_cs_size(unsigned i, bool dct_width)
@@ -1611,15 +1611,20 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1611 u32 page, offset; 1611 u32 page, offset;
1612 int nid, csrow, chan = 0; 1612 int nid, csrow, chan = 0;
1613 1613
1614 error_address_to_page_and_offset(sys_addr, &page, &offset);
1615
1614 csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan); 1616 csrow = f1x_translate_sysaddr_to_cs(pvt, sys_addr, &nid, &chan);
1615 1617
1616 if (csrow < 0) { 1618 if (csrow < 0) {
1617 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); 1619 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1620 page, offset, syndrome,
1621 -1, -1, -1,
1622 EDAC_MOD_STR,
1623 "failed to map error addr to a csrow",
1624 NULL);
1618 return; 1625 return;
1619 } 1626 }
1620 1627
1621 error_address_to_page_and_offset(sys_addr, &page, &offset);
1622
1623 /* 1628 /*
1624 * We need the syndromes for channel detection only when we're 1629 * We need the syndromes for channel detection only when we're
1625 * ganged. Otherwise @chan should already contain the channel at 1630 * ganged. Otherwise @chan should already contain the channel at
@@ -1628,16 +1633,10 @@ static void f1x_map_sysaddr_to_csrow(struct mem_ctl_info *mci, u64 sys_addr,
1628 if (dct_ganging_enabled(pvt)) 1633 if (dct_ganging_enabled(pvt))
1629 chan = get_channel_from_ecc_syndrome(mci, syndrome); 1634 chan = get_channel_from_ecc_syndrome(mci, syndrome);
1630 1635
1631 if (chan >= 0) 1636 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1632 edac_mc_handle_ce(mci, page, offset, syndrome, csrow, chan, 1637 page, offset, syndrome,
1633 EDAC_MOD_STR); 1638 csrow, chan, -1,
1634 else 1639 EDAC_MOD_STR, "", NULL);
1635 /*
1636 * Channel unknown, report all channels on this CSROW as failed.
1637 */
1638 for (chan = 0; chan < mci->csrows[csrow].nr_channels; chan++)
1639 edac_mc_handle_ce(mci, page, offset, syndrome,
1640 csrow, chan, EDAC_MOD_STR);
1641} 1640}
1642 1641
1643/* 1642/*
@@ -1918,7 +1917,12 @@ static void amd64_handle_ce(struct mem_ctl_info *mci, struct mce *m)
1918 /* Ensure that the Error Address is VALID */ 1917 /* Ensure that the Error Address is VALID */
1919 if (!(m->status & MCI_STATUS_ADDRV)) { 1918 if (!(m->status & MCI_STATUS_ADDRV)) {
1920 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); 1919 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1921 edac_mc_handle_ce_no_info(mci, EDAC_MOD_STR); 1920 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
1921 0, 0, 0,
1922 -1, -1, -1,
1923 EDAC_MOD_STR,
1924 "HW has no ERROR_ADDRESS available",
1925 NULL);
1922 return; 1926 return;
1923 } 1927 }
1924 1928
@@ -1942,11 +1946,17 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
1942 1946
1943 if (!(m->status & MCI_STATUS_ADDRV)) { 1947 if (!(m->status & MCI_STATUS_ADDRV)) {
1944 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n"); 1948 amd64_mc_err(mci, "HW has no ERROR_ADDRESS available\n");
1945 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); 1949 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
1950 0, 0, 0,
1951 -1, -1, -1,
1952 EDAC_MOD_STR,
1953 "HW has no ERROR_ADDRESS available",
1954 NULL);
1946 return; 1955 return;
1947 } 1956 }
1948 1957
1949 sys_addr = get_error_address(m); 1958 sys_addr = get_error_address(m);
1959 error_address_to_page_and_offset(sys_addr, &page, &offset);
1950 1960
1951 /* 1961 /*
1952 * Find out which node the error address belongs to. This may be 1962 * Find out which node the error address belongs to. This may be
@@ -1956,7 +1966,11 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
1956 if (!src_mci) { 1966 if (!src_mci) {
1957 amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n", 1967 amd64_mc_err(mci, "ERROR ADDRESS (0x%lx) NOT mapped to a MC\n",
1958 (unsigned long)sys_addr); 1968 (unsigned long)sys_addr);
1959 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); 1969 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
1970 page, offset, 0,
1971 -1, -1, -1,
1972 EDAC_MOD_STR,
1973 "ERROR ADDRESS NOT mapped to a MC", NULL);
1960 return; 1974 return;
1961 } 1975 }
1962 1976
@@ -1966,10 +1980,17 @@ static void amd64_handle_ue(struct mem_ctl_info *mci, struct mce *m)
1966 if (csrow < 0) { 1980 if (csrow < 0) {
1967 amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n", 1981 amd64_mc_err(mci, "ERROR_ADDRESS (0x%lx) NOT mapped to CS\n",
1968 (unsigned long)sys_addr); 1982 (unsigned long)sys_addr);
1969 edac_mc_handle_ue_no_info(log_mci, EDAC_MOD_STR); 1983 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
1984 page, offset, 0,
1985 -1, -1, -1,
1986 EDAC_MOD_STR,
1987 "ERROR ADDRESS NOT mapped to CS",
1988 NULL);
1970 } else { 1989 } else {
1971 error_address_to_page_and_offset(sys_addr, &page, &offset); 1990 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
1972 edac_mc_handle_ue(log_mci, page, offset, csrow, EDAC_MOD_STR); 1991 page, offset, 0,
1992 csrow, -1, -1,
1993 EDAC_MOD_STR, "", NULL);
1973 } 1994 }
1974} 1995}
1975 1996
@@ -2171,7 +2192,7 @@ static u32 amd64_csrow_nr_pages(struct amd64_pvt *pvt, u8 dct, int csrow_nr)
2171 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT); 2192 nr_pages = pvt->ops->dbam_to_cs(pvt, dct, cs_mode) << (20 - PAGE_SHIFT);
2172 2193
2173 debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode); 2194 debugf0(" (csrow=%d) DBAM map index= %d\n", csrow_nr, cs_mode);
2174 debugf0(" nr_pages= %u channel-count = %d\n", 2195 debugf0(" nr_pages/channel= %u channel-count = %d\n",
2175 nr_pages, pvt->channel_count); 2196 nr_pages, pvt->channel_count);
2176 2197
2177 return nr_pages; 2198 return nr_pages;
@@ -2185,9 +2206,12 @@ static int init_csrows(struct mem_ctl_info *mci)
2185{ 2206{
2186 struct csrow_info *csrow; 2207 struct csrow_info *csrow;
2187 struct amd64_pvt *pvt = mci->pvt_info; 2208 struct amd64_pvt *pvt = mci->pvt_info;
2188 u64 input_addr_min, input_addr_max, sys_addr, base, mask; 2209 u64 base, mask;
2189 u32 val; 2210 u32 val;
2190 int i, empty = 1; 2211 int i, j, empty = 1;
2212 enum mem_type mtype;
2213 enum edac_type edac_mode;
2214 int nr_pages = 0;
2191 2215
2192 amd64_read_pci_cfg(pvt->F3, NBCFG, &val); 2216 amd64_read_pci_cfg(pvt->F3, NBCFG, &val);
2193 2217
@@ -2211,41 +2235,32 @@ static int init_csrows(struct mem_ctl_info *mci)
2211 2235
2212 empty = 0; 2236 empty = 0;
2213 if (csrow_enabled(i, 0, pvt)) 2237 if (csrow_enabled(i, 0, pvt))
2214 csrow->nr_pages = amd64_csrow_nr_pages(pvt, 0, i); 2238 nr_pages = amd64_csrow_nr_pages(pvt, 0, i);
2215 if (csrow_enabled(i, 1, pvt)) 2239 if (csrow_enabled(i, 1, pvt))
2216 csrow->nr_pages += amd64_csrow_nr_pages(pvt, 1, i); 2240 nr_pages += amd64_csrow_nr_pages(pvt, 1, i);
2217 find_csrow_limits(mci, i, &input_addr_min, &input_addr_max);
2218 sys_addr = input_addr_to_sys_addr(mci, input_addr_min);
2219 csrow->first_page = (u32) (sys_addr >> PAGE_SHIFT);
2220 sys_addr = input_addr_to_sys_addr(mci, input_addr_max);
2221 csrow->last_page = (u32) (sys_addr >> PAGE_SHIFT);
2222 2241
2223 get_cs_base_and_mask(pvt, i, 0, &base, &mask); 2242 get_cs_base_and_mask(pvt, i, 0, &base, &mask);
2224 csrow->page_mask = ~mask;
2225 /* 8 bytes of resolution */ 2243 /* 8 bytes of resolution */
2226 2244
2227 csrow->mtype = amd64_determine_memory_type(pvt, i); 2245 mtype = amd64_determine_memory_type(pvt, i);
2228 2246
2229 debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i); 2247 debugf1(" for MC node %d csrow %d:\n", pvt->mc_node_id, i);
2230 debugf1(" input_addr_min: 0x%lx input_addr_max: 0x%lx\n", 2248 debugf1(" nr_pages: %u\n", nr_pages * pvt->channel_count);
2231 (unsigned long)input_addr_min,
2232 (unsigned long)input_addr_max);
2233 debugf1(" sys_addr: 0x%lx page_mask: 0x%lx\n",
2234 (unsigned long)sys_addr, csrow->page_mask);
2235 debugf1(" nr_pages: %u first_page: 0x%lx "
2236 "last_page: 0x%lx\n",
2237 (unsigned)csrow->nr_pages,
2238 csrow->first_page, csrow->last_page);
2239 2249
2240 /* 2250 /*
2241 * determine whether CHIPKILL or JUST ECC or NO ECC is operating 2251 * determine whether CHIPKILL or JUST ECC or NO ECC is operating
2242 */ 2252 */
2243 if (pvt->nbcfg & NBCFG_ECC_ENABLE) 2253 if (pvt->nbcfg & NBCFG_ECC_ENABLE)
2244 csrow->edac_mode = 2254 edac_mode = (pvt->nbcfg & NBCFG_CHIPKILL) ?
2245 (pvt->nbcfg & NBCFG_CHIPKILL) ? 2255 EDAC_S4ECD4ED : EDAC_SECDED;
2246 EDAC_S4ECD4ED : EDAC_SECDED;
2247 else 2256 else
2248 csrow->edac_mode = EDAC_NONE; 2257 edac_mode = EDAC_NONE;
2258
2259 for (j = 0; j < pvt->channel_count; j++) {
2260 csrow->channels[j].dimm->mtype = mtype;
2261 csrow->channels[j].dimm->edac_mode = edac_mode;
2262 csrow->channels[j].dimm->nr_pages = nr_pages;
2263 }
2249 } 2264 }
2250 2265
2251 return empty; 2266 return empty;
@@ -2540,6 +2555,7 @@ static int amd64_init_one_instance(struct pci_dev *F2)
2540 struct amd64_pvt *pvt = NULL; 2555 struct amd64_pvt *pvt = NULL;
2541 struct amd64_family_type *fam_type = NULL; 2556 struct amd64_family_type *fam_type = NULL;
2542 struct mem_ctl_info *mci = NULL; 2557 struct mem_ctl_info *mci = NULL;
2558 struct edac_mc_layer layers[2];
2543 int err = 0, ret; 2559 int err = 0, ret;
2544 u8 nid = get_node_id(F2); 2560 u8 nid = get_node_id(F2);
2545 2561
@@ -2574,7 +2590,13 @@ static int amd64_init_one_instance(struct pci_dev *F2)
2574 goto err_siblings; 2590 goto err_siblings;
2575 2591
2576 ret = -ENOMEM; 2592 ret = -ENOMEM;
2577 mci = edac_mc_alloc(0, pvt->csels[0].b_cnt, pvt->channel_count, nid); 2593 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
2594 layers[0].size = pvt->csels[0].b_cnt;
2595 layers[0].is_virt_csrow = true;
2596 layers[1].type = EDAC_MC_LAYER_CHANNEL;
2597 layers[1].size = pvt->channel_count;
2598 layers[1].is_virt_csrow = false;
2599 mci = edac_mc_alloc(nid, ARRAY_SIZE(layers), layers, 0);
2578 if (!mci) 2600 if (!mci)
2579 goto err_siblings; 2601 goto err_siblings;
2580 2602
diff --git a/drivers/edac/amd76x_edac.c b/drivers/edac/amd76x_edac.c
index f8fd3c807bde..9774d443fa57 100644
--- a/drivers/edac/amd76x_edac.c
+++ b/drivers/edac/amd76x_edac.c
@@ -29,7 +29,6 @@
29 edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg) 29 edac_mc_chipset_printk(mci, level, "amd76x", fmt, ##arg)
30 30
31#define AMD76X_NR_CSROWS 8 31#define AMD76X_NR_CSROWS 8
32#define AMD76X_NR_CHANS 1
33#define AMD76X_NR_DIMMS 4 32#define AMD76X_NR_DIMMS 4
34 33
35/* AMD 76x register addresses - device 0 function 0 - PCI bridge */ 34/* AMD 76x register addresses - device 0 function 0 - PCI bridge */
@@ -146,8 +145,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
146 145
147 if (handle_errors) { 146 if (handle_errors) {
148 row = (info->ecc_mode_status >> 4) & 0xf; 147 row = (info->ecc_mode_status >> 4) & 0xf;
149 edac_mc_handle_ue(mci, mci->csrows[row].first_page, 0, 148 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
150 row, mci->ctl_name); 149 mci->csrows[row].first_page, 0, 0,
150 row, 0, -1,
151 mci->ctl_name, "", NULL);
151 } 152 }
152 } 153 }
153 154
@@ -159,8 +160,10 @@ static int amd76x_process_error_info(struct mem_ctl_info *mci,
159 160
160 if (handle_errors) { 161 if (handle_errors) {
161 row = info->ecc_mode_status & 0xf; 162 row = info->ecc_mode_status & 0xf;
162 edac_mc_handle_ce(mci, mci->csrows[row].first_page, 0, 163 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
163 0, row, 0, mci->ctl_name); 164 mci->csrows[row].first_page, 0, 0,
165 row, 0, -1,
166 mci->ctl_name, "", NULL);
164 } 167 }
165 } 168 }
166 169
@@ -186,11 +189,13 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
186 enum edac_type edac_mode) 189 enum edac_type edac_mode)
187{ 190{
188 struct csrow_info *csrow; 191 struct csrow_info *csrow;
192 struct dimm_info *dimm;
189 u32 mba, mba_base, mba_mask, dms; 193 u32 mba, mba_base, mba_mask, dms;
190 int index; 194 int index;
191 195
192 for (index = 0; index < mci->nr_csrows; index++) { 196 for (index = 0; index < mci->nr_csrows; index++) {
193 csrow = &mci->csrows[index]; 197 csrow = &mci->csrows[index];
198 dimm = csrow->channels[0].dimm;
194 199
195 /* find the DRAM Chip Select Base address and mask */ 200 /* find the DRAM Chip Select Base address and mask */
196 pci_read_config_dword(pdev, 201 pci_read_config_dword(pdev,
@@ -203,13 +208,13 @@ static void amd76x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
203 mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL; 208 mba_mask = ((mba & 0xff80) << 16) | 0x7fffffUL;
204 pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms); 209 pci_read_config_dword(pdev, AMD76X_DRAM_MODE_STATUS, &dms);
205 csrow->first_page = mba_base >> PAGE_SHIFT; 210 csrow->first_page = mba_base >> PAGE_SHIFT;
206 csrow->nr_pages = (mba_mask + 1) >> PAGE_SHIFT; 211 dimm->nr_pages = (mba_mask + 1) >> PAGE_SHIFT;
207 csrow->last_page = csrow->first_page + csrow->nr_pages - 1; 212 csrow->last_page = csrow->first_page + dimm->nr_pages - 1;
208 csrow->page_mask = mba_mask >> PAGE_SHIFT; 213 csrow->page_mask = mba_mask >> PAGE_SHIFT;
209 csrow->grain = csrow->nr_pages << PAGE_SHIFT; 214 dimm->grain = dimm->nr_pages << PAGE_SHIFT;
210 csrow->mtype = MEM_RDDR; 215 dimm->mtype = MEM_RDDR;
211 csrow->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN; 216 dimm->dtype = ((dms >> index) & 0x1) ? DEV_X4 : DEV_UNKNOWN;
212 csrow->edac_mode = edac_mode; 217 dimm->edac_mode = edac_mode;
213 } 218 }
214} 219}
215 220
@@ -230,7 +235,8 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
230 EDAC_SECDED, 235 EDAC_SECDED,
231 EDAC_SECDED 236 EDAC_SECDED
232 }; 237 };
233 struct mem_ctl_info *mci = NULL; 238 struct mem_ctl_info *mci;
239 struct edac_mc_layer layers[2];
234 u32 ems; 240 u32 ems;
235 u32 ems_mode; 241 u32 ems_mode;
236 struct amd76x_error_info discard; 242 struct amd76x_error_info discard;
@@ -238,11 +244,17 @@ static int amd76x_probe1(struct pci_dev *pdev, int dev_idx)
238 debugf0("%s()\n", __func__); 244 debugf0("%s()\n", __func__);
239 pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems); 245 pci_read_config_dword(pdev, AMD76X_ECC_MODE_STATUS, &ems);
240 ems_mode = (ems >> 10) & 0x3; 246 ems_mode = (ems >> 10) & 0x3;
241 mci = edac_mc_alloc(0, AMD76X_NR_CSROWS, AMD76X_NR_CHANS, 0);
242 247
243 if (mci == NULL) { 248 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
249 layers[0].size = AMD76X_NR_CSROWS;
250 layers[0].is_virt_csrow = true;
251 layers[1].type = EDAC_MC_LAYER_CHANNEL;
252 layers[1].size = 1;
253 layers[1].is_virt_csrow = false;
254 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
255
256 if (mci == NULL)
244 return -ENOMEM; 257 return -ENOMEM;
245 }
246 258
247 debugf0("%s(): mci = %p\n", __func__, mci); 259 debugf0("%s(): mci = %p\n", __func__, mci);
248 mci->dev = &pdev->dev; 260 mci->dev = &pdev->dev;
diff --git a/drivers/edac/cell_edac.c b/drivers/edac/cell_edac.c
index 9a6a274e6925..69ee6aab5c71 100644
--- a/drivers/edac/cell_edac.c
+++ b/drivers/edac/cell_edac.c
@@ -48,8 +48,9 @@ static void cell_edac_count_ce(struct mem_ctl_info *mci, int chan, u64 ar)
48 syndrome = (ar & 0x000000001fe00000ul) >> 21; 48 syndrome = (ar & 0x000000001fe00000ul) >> 21;
49 49
50 /* TODO: Decoding of the error address */ 50 /* TODO: Decoding of the error address */
51 edac_mc_handle_ce(mci, csrow->first_page + pfn, offset, 51 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
52 syndrome, 0, chan, ""); 52 csrow->first_page + pfn, offset, syndrome,
53 0, chan, -1, "", "", NULL);
53} 54}
54 55
55static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar) 56static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
@@ -69,7 +70,9 @@ static void cell_edac_count_ue(struct mem_ctl_info *mci, int chan, u64 ar)
69 offset = address & ~PAGE_MASK; 70 offset = address & ~PAGE_MASK;
70 71
71 /* TODO: Decoding of the error address */ 72 /* TODO: Decoding of the error address */
72 edac_mc_handle_ue(mci, csrow->first_page + pfn, offset, 0, ""); 73 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
74 csrow->first_page + pfn, offset, 0,
75 0, chan, -1, "", "", NULL);
73} 76}
74 77
75static void cell_edac_check(struct mem_ctl_info *mci) 78static void cell_edac_check(struct mem_ctl_info *mci)
@@ -124,8 +127,11 @@ static void cell_edac_check(struct mem_ctl_info *mci)
124static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci) 127static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
125{ 128{
126 struct csrow_info *csrow = &mci->csrows[0]; 129 struct csrow_info *csrow = &mci->csrows[0];
130 struct dimm_info *dimm;
127 struct cell_edac_priv *priv = mci->pvt_info; 131 struct cell_edac_priv *priv = mci->pvt_info;
128 struct device_node *np; 132 struct device_node *np;
133 int j;
134 u32 nr_pages;
129 135
130 for (np = NULL; 136 for (np = NULL;
131 (np = of_find_node_by_name(np, "memory")) != NULL;) { 137 (np = of_find_node_by_name(np, "memory")) != NULL;) {
@@ -140,15 +146,20 @@ static void __devinit cell_edac_init_csrows(struct mem_ctl_info *mci)
140 if (of_node_to_nid(np) != priv->node) 146 if (of_node_to_nid(np) != priv->node)
141 continue; 147 continue;
142 csrow->first_page = r.start >> PAGE_SHIFT; 148 csrow->first_page = r.start >> PAGE_SHIFT;
143 csrow->nr_pages = resource_size(&r) >> PAGE_SHIFT; 149 nr_pages = resource_size(&r) >> PAGE_SHIFT;
144 csrow->last_page = csrow->first_page + csrow->nr_pages - 1; 150 csrow->last_page = csrow->first_page + nr_pages - 1;
145 csrow->mtype = MEM_XDR; 151
146 csrow->edac_mode = EDAC_SECDED; 152 for (j = 0; j < csrow->nr_channels; j++) {
153 dimm = csrow->channels[j].dimm;
154 dimm->mtype = MEM_XDR;
155 dimm->edac_mode = EDAC_SECDED;
156 dimm->nr_pages = nr_pages / csrow->nr_channels;
157 }
147 dev_dbg(mci->dev, 158 dev_dbg(mci->dev,
148 "Initialized on node %d, chanmask=0x%x," 159 "Initialized on node %d, chanmask=0x%x,"
149 " first_page=0x%lx, nr_pages=0x%x\n", 160 " first_page=0x%lx, nr_pages=0x%x\n",
150 priv->node, priv->chanmask, 161 priv->node, priv->chanmask,
151 csrow->first_page, csrow->nr_pages); 162 csrow->first_page, nr_pages);
152 break; 163 break;
153 } 164 }
154} 165}
@@ -157,9 +168,10 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
157{ 168{
158 struct cbe_mic_tm_regs __iomem *regs; 169 struct cbe_mic_tm_regs __iomem *regs;
159 struct mem_ctl_info *mci; 170 struct mem_ctl_info *mci;
171 struct edac_mc_layer layers[2];
160 struct cell_edac_priv *priv; 172 struct cell_edac_priv *priv;
161 u64 reg; 173 u64 reg;
162 int rc, chanmask; 174 int rc, chanmask, num_chans;
163 175
164 regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id)); 176 regs = cbe_get_cpu_mic_tm_regs(cbe_node_to_cpu(pdev->id));
165 if (regs == NULL) 177 if (regs == NULL)
@@ -184,8 +196,16 @@ static int __devinit cell_edac_probe(struct platform_device *pdev)
184 in_be64(&regs->mic_fir)); 196 in_be64(&regs->mic_fir));
185 197
186 /* Allocate & init EDAC MC data structure */ 198 /* Allocate & init EDAC MC data structure */
187 mci = edac_mc_alloc(sizeof(struct cell_edac_priv), 1, 199 num_chans = chanmask == 3 ? 2 : 1;
188 chanmask == 3 ? 2 : 1, pdev->id); 200
201 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
202 layers[0].size = 1;
203 layers[0].is_virt_csrow = true;
204 layers[1].type = EDAC_MC_LAYER_CHANNEL;
205 layers[1].size = num_chans;
206 layers[1].is_virt_csrow = false;
207 mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
208 sizeof(struct cell_edac_priv));
189 if (mci == NULL) 209 if (mci == NULL)
190 return -ENOMEM; 210 return -ENOMEM;
191 priv = mci->pvt_info; 211 priv = mci->pvt_info;
diff --git a/drivers/edac/cpc925_edac.c b/drivers/edac/cpc925_edac.c
index a774c0ddaf5b..e22030a9de66 100644
--- a/drivers/edac/cpc925_edac.c
+++ b/drivers/edac/cpc925_edac.c
@@ -329,9 +329,10 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
329{ 329{
330 struct cpc925_mc_pdata *pdata = mci->pvt_info; 330 struct cpc925_mc_pdata *pdata = mci->pvt_info;
331 struct csrow_info *csrow; 331 struct csrow_info *csrow;
332 int index; 332 struct dimm_info *dimm;
333 int index, j;
333 u32 mbmr, mbbar, bba; 334 u32 mbmr, mbbar, bba;
334 unsigned long row_size, last_nr_pages = 0; 335 unsigned long row_size, nr_pages, last_nr_pages = 0;
335 336
336 get_total_mem(pdata); 337 get_total_mem(pdata);
337 338
@@ -350,36 +351,41 @@ static void cpc925_init_csrows(struct mem_ctl_info *mci)
350 351
351 row_size = bba * (1UL << 28); /* 256M */ 352 row_size = bba * (1UL << 28); /* 256M */
352 csrow->first_page = last_nr_pages; 353 csrow->first_page = last_nr_pages;
353 csrow->nr_pages = row_size >> PAGE_SHIFT; 354 nr_pages = row_size >> PAGE_SHIFT;
354 csrow->last_page = csrow->first_page + csrow->nr_pages - 1; 355 csrow->last_page = csrow->first_page + nr_pages - 1;
355 last_nr_pages = csrow->last_page + 1; 356 last_nr_pages = csrow->last_page + 1;
356 357
357 csrow->mtype = MEM_RDDR; 358 for (j = 0; j < csrow->nr_channels; j++) {
358 csrow->edac_mode = EDAC_SECDED; 359 dimm = csrow->channels[j].dimm;
359 360
360 switch (csrow->nr_channels) { 361 dimm->nr_pages = nr_pages / csrow->nr_channels;
361 case 1: /* Single channel */ 362 dimm->mtype = MEM_RDDR;
362 csrow->grain = 32; /* four-beat burst of 32 bytes */ 363 dimm->edac_mode = EDAC_SECDED;
363 break; 364
364 case 2: /* Dual channel */ 365 switch (csrow->nr_channels) {
365 default: 366 case 1: /* Single channel */
366 csrow->grain = 64; /* four-beat burst of 64 bytes */ 367 dimm->grain = 32; /* four-beat burst of 32 bytes */
367 break; 368 break;
368 } 369 case 2: /* Dual channel */
369 370 default:
370 switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) { 371 dimm->grain = 64; /* four-beat burst of 64 bytes */
371 case 6: /* 0110, no way to differentiate X8 VS X16 */ 372 break;
372 case 5: /* 0101 */ 373 }
373 case 8: /* 1000 */ 374
374 csrow->dtype = DEV_X16; 375 switch ((mbmr & MBMR_MODE_MASK) >> MBMR_MODE_SHIFT) {
375 break; 376 case 6: /* 0110, no way to differentiate X8 VS X16 */
376 case 7: /* 0111 */ 377 case 5: /* 0101 */
377 case 9: /* 1001 */ 378 case 8: /* 1000 */
378 csrow->dtype = DEV_X8; 379 dimm->dtype = DEV_X16;
379 break; 380 break;
380 default: 381 case 7: /* 0111 */
381 csrow->dtype = DEV_UNKNOWN; 382 case 9: /* 1001 */
382 break; 383 dimm->dtype = DEV_X8;
384 break;
385 default:
386 dimm->dtype = DEV_UNKNOWN;
387 break;
388 }
383 } 389 }
384 } 390 }
385} 391}
@@ -549,13 +555,18 @@ static void cpc925_mc_check(struct mem_ctl_info *mci)
549 if (apiexcp & CECC_EXCP_DETECTED) { 555 if (apiexcp & CECC_EXCP_DETECTED) {
550 cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n"); 556 cpc925_mc_printk(mci, KERN_INFO, "DRAM CECC Fault\n");
551 channel = cpc925_mc_find_channel(mci, syndrome); 557 channel = cpc925_mc_find_channel(mci, syndrome);
552 edac_mc_handle_ce(mci, pfn, offset, syndrome, 558 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
553 csrow, channel, mci->ctl_name); 559 pfn, offset, syndrome,
560 csrow, channel, -1,
561 mci->ctl_name, "", NULL);
554 } 562 }
555 563
556 if (apiexcp & UECC_EXCP_DETECTED) { 564 if (apiexcp & UECC_EXCP_DETECTED) {
557 cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n"); 565 cpc925_mc_printk(mci, KERN_INFO, "DRAM UECC Fault\n");
558 edac_mc_handle_ue(mci, pfn, offset, csrow, mci->ctl_name); 566 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
567 pfn, offset, 0,
568 csrow, -1, -1,
569 mci->ctl_name, "", NULL);
559 } 570 }
560 571
561 cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n"); 572 cpc925_mc_printk(mci, KERN_INFO, "Dump registers:\n");
@@ -927,6 +938,7 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
927{ 938{
928 static int edac_mc_idx; 939 static int edac_mc_idx;
929 struct mem_ctl_info *mci; 940 struct mem_ctl_info *mci;
941 struct edac_mc_layer layers[2];
930 void __iomem *vbase; 942 void __iomem *vbase;
931 struct cpc925_mc_pdata *pdata; 943 struct cpc925_mc_pdata *pdata;
932 struct resource *r; 944 struct resource *r;
@@ -962,9 +974,16 @@ static int __devinit cpc925_probe(struct platform_device *pdev)
962 goto err2; 974 goto err2;
963 } 975 }
964 976
965 nr_channels = cpc925_mc_get_channels(vbase); 977 nr_channels = cpc925_mc_get_channels(vbase) + 1;
966 mci = edac_mc_alloc(sizeof(struct cpc925_mc_pdata), 978
967 CPC925_NR_CSROWS, nr_channels + 1, edac_mc_idx); 979 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
980 layers[0].size = CPC925_NR_CSROWS;
981 layers[0].is_virt_csrow = true;
982 layers[1].type = EDAC_MC_LAYER_CHANNEL;
983 layers[1].size = nr_channels;
984 layers[1].is_virt_csrow = false;
985 mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
986 sizeof(struct cpc925_mc_pdata));
968 if (!mci) { 987 if (!mci) {
969 cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n"); 988 cpc925_printk(KERN_ERR, "No memory for mem_ctl_info\n");
970 res = -ENOMEM; 989 res = -ENOMEM;
diff --git a/drivers/edac/e752x_edac.c b/drivers/edac/e752x_edac.c
index 41223261ede9..3186512c9739 100644
--- a/drivers/edac/e752x_edac.c
+++ b/drivers/edac/e752x_edac.c
@@ -4,7 +4,11 @@
4 * This file may be distributed under the terms of the 4 * This file may be distributed under the terms of the
5 * GNU General Public License. 5 * GNU General Public License.
6 * 6 *
7 * See "enum e752x_chips" below for supported chipsets 7 * Implement support for the e7520, E7525, e7320 and i3100 memory controllers.
8 *
9 * Datasheets:
10 * http://www.intel.in/content/www/in/en/chipsets/e7525-memory-controller-hub-datasheet.html
11 * ftp://download.intel.com/design/intarch/datashts/31345803.pdf
8 * 12 *
9 * Written by Tom Zimmerman 13 * Written by Tom Zimmerman
10 * 14 *
@@ -13,8 +17,6 @@
13 * Wang Zhenyu at intel.com 17 * Wang Zhenyu at intel.com
14 * Dave Jiang at mvista.com 18 * Dave Jiang at mvista.com
15 * 19 *
16 * $Id: edac_e752x.c,v 1.5.2.11 2005/10/05 00:43:44 dsp_llnl Exp $
17 *
18 */ 20 */
19 21
20#include <linux/module.h> 22#include <linux/module.h>
@@ -187,6 +189,25 @@ enum e752x_chips {
187 I3100 = 3 189 I3100 = 3
188}; 190};
189 191
192/*
193 * Those chips Support single-rank and dual-rank memories only.
194 *
195 * On e752x chips, the odd rows are present only on dual-rank memories.
196 * Dividing the rank by two will provide the dimm#
197 *
198 * i3100 MC has a different mapping: it supports only 4 ranks.
199 *
200 * The mapping is (from 1 to n):
201 * slot single-ranked double-ranked
202 * dimm #1 -> rank #4 NA
203 * dimm #2 -> rank #3 NA
204 * dimm #3 -> rank #2 Ranks 2 and 3
205 * dimm #4 -> rank $1 Ranks 1 and 4
206 *
207 * FIXME: The current mapping for i3100 considers that it supports up to 8
208 * ranks/chanel, but datasheet says that the MC supports only 4 ranks.
209 */
210
190struct e752x_pvt { 211struct e752x_pvt {
191 struct pci_dev *bridge_ck; 212 struct pci_dev *bridge_ck;
192 struct pci_dev *dev_d0f0; 213 struct pci_dev *dev_d0f0;
@@ -350,8 +371,10 @@ static void do_process_ce(struct mem_ctl_info *mci, u16 error_one,
350 channel = !(error_one & 1); 371 channel = !(error_one & 1);
351 372
352 /* e752x mc reads 34:6 of the DRAM linear address */ 373 /* e752x mc reads 34:6 of the DRAM linear address */
353 edac_mc_handle_ce(mci, page, offset_in_page(sec1_add << 4), 374 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
354 sec1_syndrome, row, channel, "e752x CE"); 375 page, offset_in_page(sec1_add << 4), sec1_syndrome,
376 row, channel, -1,
377 "e752x CE", "", NULL);
355} 378}
356 379
357static inline void process_ce(struct mem_ctl_info *mci, u16 error_one, 380static inline void process_ce(struct mem_ctl_info *mci, u16 error_one,
@@ -385,9 +408,12 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
385 edac_mc_find_csrow_by_page(mci, block_page); 408 edac_mc_find_csrow_by_page(mci, block_page);
386 409
387 /* e752x mc reads 34:6 of the DRAM linear address */ 410 /* e752x mc reads 34:6 of the DRAM linear address */
388 edac_mc_handle_ue(mci, block_page, 411 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
389 offset_in_page(error_2b << 4), 412 block_page,
390 row, "e752x UE from Read"); 413 offset_in_page(error_2b << 4), 0,
414 row, -1, -1,
415 "e752x UE from Read", "", NULL);
416
391 } 417 }
392 if (error_one & 0x0404) { 418 if (error_one & 0x0404) {
393 error_2b = scrb_add; 419 error_2b = scrb_add;
@@ -401,9 +427,11 @@ static void do_process_ue(struct mem_ctl_info *mci, u16 error_one,
401 edac_mc_find_csrow_by_page(mci, block_page); 427 edac_mc_find_csrow_by_page(mci, block_page);
402 428
403 /* e752x mc reads 34:6 of the DRAM linear address */ 429 /* e752x mc reads 34:6 of the DRAM linear address */
404 edac_mc_handle_ue(mci, block_page, 430 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
405 offset_in_page(error_2b << 4), 431 block_page,
406 row, "e752x UE from Scruber"); 432 offset_in_page(error_2b << 4), 0,
433 row, -1, -1,
434 "e752x UE from Scruber", "", NULL);
407 } 435 }
408} 436}
409 437
@@ -426,7 +454,9 @@ static inline void process_ue_no_info_wr(struct mem_ctl_info *mci,
426 return; 454 return;
427 455
428 debugf3("%s()\n", __func__); 456 debugf3("%s()\n", __func__);
429 edac_mc_handle_ue_no_info(mci, "e752x UE log memory write"); 457 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
458 -1, -1, -1,
459 "e752x UE log memory write", "", NULL);
430} 460}
431 461
432static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error, 462static void do_process_ded_retry(struct mem_ctl_info *mci, u16 error,
@@ -1044,7 +1074,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
1044 int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */ 1074 int drc_drbg; /* DRB granularity 0=64mb, 1=128mb */
1045 int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */ 1075 int drc_ddim; /* DRAM Data Integrity Mode 0=none, 2=edac */
1046 u8 value; 1076 u8 value;
1047 u32 dra, drc, cumul_size; 1077 u32 dra, drc, cumul_size, i, nr_pages;
1048 1078
1049 dra = 0; 1079 dra = 0;
1050 for (index = 0; index < 4; index++) { 1080 for (index = 0; index < 4; index++) {
@@ -1053,7 +1083,7 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
1053 dra |= dra_reg << (index * 8); 1083 dra |= dra_reg << (index * 8);
1054 } 1084 }
1055 pci_read_config_dword(pdev, E752X_DRC, &drc); 1085 pci_read_config_dword(pdev, E752X_DRC, &drc);
1056 drc_chan = dual_channel_active(ddrcsr); 1086 drc_chan = dual_channel_active(ddrcsr) ? 1 : 0;
1057 drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */ 1087 drc_drbg = drc_chan + 1; /* 128 in dual mode, 64 in single */
1058 drc_ddim = (drc >> 20) & 0x3; 1088 drc_ddim = (drc >> 20) & 0x3;
1059 1089
@@ -1078,26 +1108,33 @@ static void e752x_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
1078 1108
1079 csrow->first_page = last_cumul_size; 1109 csrow->first_page = last_cumul_size;
1080 csrow->last_page = cumul_size - 1; 1110 csrow->last_page = cumul_size - 1;
1081 csrow->nr_pages = cumul_size - last_cumul_size; 1111 nr_pages = cumul_size - last_cumul_size;
1082 last_cumul_size = cumul_size; 1112 last_cumul_size = cumul_size;
1083 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ 1113
1084 csrow->mtype = MEM_RDDR; /* only one type supported */ 1114 for (i = 0; i < csrow->nr_channels; i++) {
1085 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; 1115 struct dimm_info *dimm = csrow->channels[i].dimm;
1086 1116
1087 /* 1117 debugf3("Initializing rank at (%i,%i)\n", index, i);
1088 * if single channel or x8 devices then SECDED 1118 dimm->nr_pages = nr_pages / csrow->nr_channels;
1089 * if dual channel and x4 then S4ECD4ED 1119 dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
1090 */ 1120 dimm->mtype = MEM_RDDR; /* only one type supported */
1091 if (drc_ddim) { 1121 dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
1092 if (drc_chan && mem_dev) { 1122
1093 csrow->edac_mode = EDAC_S4ECD4ED; 1123 /*
1094 mci->edac_cap |= EDAC_FLAG_S4ECD4ED; 1124 * if single channel or x8 devices then SECDED
1095 } else { 1125 * if dual channel and x4 then S4ECD4ED
1096 csrow->edac_mode = EDAC_SECDED; 1126 */
1097 mci->edac_cap |= EDAC_FLAG_SECDED; 1127 if (drc_ddim) {
1098 } 1128 if (drc_chan && mem_dev) {
1099 } else 1129 dimm->edac_mode = EDAC_S4ECD4ED;
1100 csrow->edac_mode = EDAC_NONE; 1130 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
1131 } else {
1132 dimm->edac_mode = EDAC_SECDED;
1133 mci->edac_cap |= EDAC_FLAG_SECDED;
1134 }
1135 } else
1136 dimm->edac_mode = EDAC_NONE;
1137 }
1101 } 1138 }
1102} 1139}
1103 1140
@@ -1226,6 +1263,7 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1226 u16 pci_data; 1263 u16 pci_data;
1227 u8 stat8; 1264 u8 stat8;
1228 struct mem_ctl_info *mci; 1265 struct mem_ctl_info *mci;
1266 struct edac_mc_layer layers[2];
1229 struct e752x_pvt *pvt; 1267 struct e752x_pvt *pvt;
1230 u16 ddrcsr; 1268 u16 ddrcsr;
1231 int drc_chan; /* Number of channels 0=1chan,1=2chan */ 1269 int drc_chan; /* Number of channels 0=1chan,1=2chan */
@@ -1252,11 +1290,15 @@ static int e752x_probe1(struct pci_dev *pdev, int dev_idx)
1252 /* Dual channel = 1, Single channel = 0 */ 1290 /* Dual channel = 1, Single channel = 0 */
1253 drc_chan = dual_channel_active(ddrcsr); 1291 drc_chan = dual_channel_active(ddrcsr);
1254 1292
1255 mci = edac_mc_alloc(sizeof(*pvt), E752X_NR_CSROWS, drc_chan + 1, 0); 1293 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
1256 1294 layers[0].size = E752X_NR_CSROWS;
1257 if (mci == NULL) { 1295 layers[0].is_virt_csrow = true;
1296 layers[1].type = EDAC_MC_LAYER_CHANNEL;
1297 layers[1].size = drc_chan + 1;
1298 layers[1].is_virt_csrow = false;
1299 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1300 if (mci == NULL)
1258 return -ENOMEM; 1301 return -ENOMEM;
1259 }
1260 1302
1261 debugf3("%s(): init mci\n", __func__); 1303 debugf3("%s(): init mci\n", __func__);
1262 mci->mtype_cap = MEM_FLAG_RDDR; 1304 mci->mtype_cap = MEM_FLAG_RDDR;
diff --git a/drivers/edac/e7xxx_edac.c b/drivers/edac/e7xxx_edac.c
index 68dea87b72e6..9a9c1a546797 100644
--- a/drivers/edac/e7xxx_edac.c
+++ b/drivers/edac/e7xxx_edac.c
@@ -10,6 +10,9 @@
10 * Based on work by Dan Hollis <goemon at anime dot net> and others. 10 * Based on work by Dan Hollis <goemon at anime dot net> and others.
11 * http://www.anime.net/~goemon/linux-ecc/ 11 * http://www.anime.net/~goemon/linux-ecc/
12 * 12 *
13 * Datasheet:
14 * http://www.intel.com/content/www/us/en/chipsets/e7501-chipset-memory-controller-hub-datasheet.html
15 *
13 * Contributors: 16 * Contributors:
14 * Eric Biederman (Linux Networx) 17 * Eric Biederman (Linux Networx)
15 * Tom Zimmerman (Linux Networx) 18 * Tom Zimmerman (Linux Networx)
@@ -71,7 +74,7 @@
71#endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */ 74#endif /* PCI_DEVICE_ID_INTEL_7505_1_ERR */
72 75
73#define E7XXX_NR_CSROWS 8 /* number of csrows */ 76#define E7XXX_NR_CSROWS 8 /* number of csrows */
74#define E7XXX_NR_DIMMS 8 /* FIXME - is this correct? */ 77#define E7XXX_NR_DIMMS 8 /* 2 channels, 4 dimms/channel */
75 78
76/* E7XXX register addresses - device 0 function 0 */ 79/* E7XXX register addresses - device 0 function 0 */
77#define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */ 80#define E7XXX_DRB 0x60 /* DRAM row boundary register (8b) */
@@ -216,13 +219,15 @@ static void process_ce(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
216 row = edac_mc_find_csrow_by_page(mci, page); 219 row = edac_mc_find_csrow_by_page(mci, page);
217 /* convert syndrome to channel */ 220 /* convert syndrome to channel */
218 channel = e7xxx_find_channel(syndrome); 221 channel = e7xxx_find_channel(syndrome);
219 edac_mc_handle_ce(mci, page, 0, syndrome, row, channel, "e7xxx CE"); 222 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, page, 0, syndrome,
223 row, channel, -1, "e7xxx CE", "", NULL);
220} 224}
221 225
222static void process_ce_no_info(struct mem_ctl_info *mci) 226static void process_ce_no_info(struct mem_ctl_info *mci)
223{ 227{
224 debugf3("%s()\n", __func__); 228 debugf3("%s()\n", __func__);
225 edac_mc_handle_ce_no_info(mci, "e7xxx CE log register overflow"); 229 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1,
230 "e7xxx CE log register overflow", "", NULL);
226} 231}
227 232
228static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info) 233static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
@@ -236,13 +241,17 @@ static void process_ue(struct mem_ctl_info *mci, struct e7xxx_error_info *info)
236 /* FIXME - should use PAGE_SHIFT */ 241 /* FIXME - should use PAGE_SHIFT */
237 block_page = error_2b >> 6; /* convert to 4k address */ 242 block_page = error_2b >> 6; /* convert to 4k address */
238 row = edac_mc_find_csrow_by_page(mci, block_page); 243 row = edac_mc_find_csrow_by_page(mci, block_page);
239 edac_mc_handle_ue(mci, block_page, 0, row, "e7xxx UE"); 244
245 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, block_page, 0, 0,
246 row, -1, -1, "e7xxx UE", "", NULL);
240} 247}
241 248
242static void process_ue_no_info(struct mem_ctl_info *mci) 249static void process_ue_no_info(struct mem_ctl_info *mci)
243{ 250{
244 debugf3("%s()\n", __func__); 251 debugf3("%s()\n", __func__);
245 edac_mc_handle_ue_no_info(mci, "e7xxx UE log register overflow"); 252
253 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0, -1, -1, -1,
254 "e7xxx UE log register overflow", "", NULL);
246} 255}
247 256
248static void e7xxx_get_error_info(struct mem_ctl_info *mci, 257static void e7xxx_get_error_info(struct mem_ctl_info *mci,
@@ -347,11 +356,12 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
347 int dev_idx, u32 drc) 356 int dev_idx, u32 drc)
348{ 357{
349 unsigned long last_cumul_size; 358 unsigned long last_cumul_size;
350 int index; 359 int index, j;
351 u8 value; 360 u8 value;
352 u32 dra, cumul_size; 361 u32 dra, cumul_size, nr_pages;
353 int drc_chan, drc_drbg, drc_ddim, mem_dev; 362 int drc_chan, drc_drbg, drc_ddim, mem_dev;
354 struct csrow_info *csrow; 363 struct csrow_info *csrow;
364 struct dimm_info *dimm;
355 365
356 pci_read_config_dword(pdev, E7XXX_DRA, &dra); 366 pci_read_config_dword(pdev, E7XXX_DRA, &dra);
357 drc_chan = dual_channel_active(drc, dev_idx); 367 drc_chan = dual_channel_active(drc, dev_idx);
@@ -379,26 +389,32 @@ static void e7xxx_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
379 389
380 csrow->first_page = last_cumul_size; 390 csrow->first_page = last_cumul_size;
381 csrow->last_page = cumul_size - 1; 391 csrow->last_page = cumul_size - 1;
382 csrow->nr_pages = cumul_size - last_cumul_size; 392 nr_pages = cumul_size - last_cumul_size;
383 last_cumul_size = cumul_size; 393 last_cumul_size = cumul_size;
384 csrow->grain = 1 << 12; /* 4KiB - resolution of CELOG */ 394
385 csrow->mtype = MEM_RDDR; /* only one type supported */ 395 for (j = 0; j < drc_chan + 1; j++) {
386 csrow->dtype = mem_dev ? DEV_X4 : DEV_X8; 396 dimm = csrow->channels[j].dimm;
387 397
388 /* 398 dimm->nr_pages = nr_pages / (drc_chan + 1);
389 * if single channel or x8 devices then SECDED 399 dimm->grain = 1 << 12; /* 4KiB - resolution of CELOG */
390 * if dual channel and x4 then S4ECD4ED 400 dimm->mtype = MEM_RDDR; /* only one type supported */
391 */ 401 dimm->dtype = mem_dev ? DEV_X4 : DEV_X8;
392 if (drc_ddim) { 402
393 if (drc_chan && mem_dev) { 403 /*
394 csrow->edac_mode = EDAC_S4ECD4ED; 404 * if single channel or x8 devices then SECDED
395 mci->edac_cap |= EDAC_FLAG_S4ECD4ED; 405 * if dual channel and x4 then S4ECD4ED
396 } else { 406 */
397 csrow->edac_mode = EDAC_SECDED; 407 if (drc_ddim) {
398 mci->edac_cap |= EDAC_FLAG_SECDED; 408 if (drc_chan && mem_dev) {
399 } 409 dimm->edac_mode = EDAC_S4ECD4ED;
400 } else 410 mci->edac_cap |= EDAC_FLAG_S4ECD4ED;
401 csrow->edac_mode = EDAC_NONE; 411 } else {
412 dimm->edac_mode = EDAC_SECDED;
413 mci->edac_cap |= EDAC_FLAG_SECDED;
414 }
415 } else
416 dimm->edac_mode = EDAC_NONE;
417 }
402 } 418 }
403} 419}
404 420
@@ -406,6 +422,7 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
406{ 422{
407 u16 pci_data; 423 u16 pci_data;
408 struct mem_ctl_info *mci = NULL; 424 struct mem_ctl_info *mci = NULL;
425 struct edac_mc_layer layers[2];
409 struct e7xxx_pvt *pvt = NULL; 426 struct e7xxx_pvt *pvt = NULL;
410 u32 drc; 427 u32 drc;
411 int drc_chan; 428 int drc_chan;
@@ -416,8 +433,21 @@ static int e7xxx_probe1(struct pci_dev *pdev, int dev_idx)
416 pci_read_config_dword(pdev, E7XXX_DRC, &drc); 433 pci_read_config_dword(pdev, E7XXX_DRC, &drc);
417 434
418 drc_chan = dual_channel_active(drc, dev_idx); 435 drc_chan = dual_channel_active(drc, dev_idx);
419 mci = edac_mc_alloc(sizeof(*pvt), E7XXX_NR_CSROWS, drc_chan + 1, 0); 436 /*
420 437 * According with the datasheet, this device has a maximum of
438 * 4 DIMMS per channel, either single-rank or dual-rank. So, the
439 * total amount of dimms is 8 (E7XXX_NR_DIMMS).
440 * That means that the DIMM is mapped as CSROWs, and the channel
441 * will map the rank. So, an error to either channel should be
442 * attributed to the same dimm.
443 */
444 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
445 layers[0].size = E7XXX_NR_CSROWS;
446 layers[0].is_virt_csrow = true;
447 layers[1].type = EDAC_MC_LAYER_CHANNEL;
448 layers[1].size = drc_chan + 1;
449 layers[1].is_virt_csrow = false;
450 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
421 if (mci == NULL) 451 if (mci == NULL)
422 return -ENOMEM; 452 return -ENOMEM;
423 453
diff --git a/drivers/edac/edac_core.h b/drivers/edac/edac_core.h
index 5b739411d62f..117490d4f835 100644
--- a/drivers/edac/edac_core.h
+++ b/drivers/edac/edac_core.h
@@ -447,8 +447,10 @@ static inline void pci_write_bits32(struct pci_dev *pdev, int offset,
447 447
448#endif /* CONFIG_PCI */ 448#endif /* CONFIG_PCI */
449 449
450extern struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, 450struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
451 unsigned nr_chans, int edac_index); 451 unsigned n_layers,
452 struct edac_mc_layer *layers,
453 unsigned sz_pvt);
452extern int edac_mc_add_mc(struct mem_ctl_info *mci); 454extern int edac_mc_add_mc(struct mem_ctl_info *mci);
453extern void edac_mc_free(struct mem_ctl_info *mci); 455extern void edac_mc_free(struct mem_ctl_info *mci);
454extern struct mem_ctl_info *edac_mc_find(int idx); 456extern struct mem_ctl_info *edac_mc_find(int idx);
@@ -456,35 +458,17 @@ extern struct mem_ctl_info *find_mci_by_dev(struct device *dev);
456extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev); 458extern struct mem_ctl_info *edac_mc_del_mc(struct device *dev);
457extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, 459extern int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci,
458 unsigned long page); 460 unsigned long page);
459 461void edac_mc_handle_error(const enum hw_event_mc_err_type type,
460/* 462 struct mem_ctl_info *mci,
461 * The no info errors are used when error overflows are reported. 463 const unsigned long page_frame_number,
462 * There are a limited number of error logging registers that can 464 const unsigned long offset_in_page,
463 * be exausted. When all registers are exhausted and an additional 465 const unsigned long syndrome,
464 * error occurs then an error overflow register records that an 466 const int layer0,
465 * error occurred and the type of error, but doesn't have any 467 const int layer1,
466 * further information. The ce/ue versions make for cleaner 468 const int layer2,
467 * reporting logic and function interface - reduces conditional 469 const char *msg,
468 * statement clutter and extra function arguments. 470 const char *other_detail,
469 */ 471 const void *mcelog);
470extern void edac_mc_handle_ce(struct mem_ctl_info *mci,
471 unsigned long page_frame_number,
472 unsigned long offset_in_page,
473 unsigned long syndrome, int row, int channel,
474 const char *msg);
475extern void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci,
476 const char *msg);
477extern void edac_mc_handle_ue(struct mem_ctl_info *mci,
478 unsigned long page_frame_number,
479 unsigned long offset_in_page, int row,
480 const char *msg);
481extern void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci,
482 const char *msg);
483extern void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, unsigned int csrow,
484 unsigned int channel0, unsigned int channel1,
485 char *msg);
486extern void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, unsigned int csrow,
487 unsigned int channel, char *msg);
488 472
489/* 473/*
490 * edac_device APIs 474 * edac_device APIs
@@ -496,6 +480,7 @@ extern void edac_device_handle_ue(struct edac_device_ctl_info *edac_dev,
496extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev, 480extern void edac_device_handle_ce(struct edac_device_ctl_info *edac_dev,
497 int inst_nr, int block_nr, const char *msg); 481 int inst_nr, int block_nr, const char *msg);
498extern int edac_device_alloc_index(void); 482extern int edac_device_alloc_index(void);
483extern const char *edac_layer_name[];
499 484
500/* 485/*
501 * edac_pci APIs 486 * edac_pci APIs
diff --git a/drivers/edac/edac_device.c b/drivers/edac/edac_device.c
index 45b8f4bdd773..ee3f1f810c1e 100644
--- a/drivers/edac/edac_device.c
+++ b/drivers/edac/edac_device.c
@@ -79,7 +79,7 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
79 unsigned total_size; 79 unsigned total_size;
80 unsigned count; 80 unsigned count;
81 unsigned instance, block, attr; 81 unsigned instance, block, attr;
82 void *pvt; 82 void *pvt, *p;
83 int err; 83 int err;
84 84
85 debugf4("%s() instances=%d blocks=%d\n", 85 debugf4("%s() instances=%d blocks=%d\n",
@@ -92,35 +92,30 @@ struct edac_device_ctl_info *edac_device_alloc_ctl_info(
92 * to be at least as stringent as what the compiler would 92 * to be at least as stringent as what the compiler would
93 * provide if we could simply hardcode everything into a single struct. 93 * provide if we could simply hardcode everything into a single struct.
94 */ 94 */
95 dev_ctl = (struct edac_device_ctl_info *)NULL; 95 p = NULL;
96 dev_ctl = edac_align_ptr(&p, sizeof(*dev_ctl), 1);
96 97
97 /* Calc the 'end' offset past end of ONE ctl_info structure 98 /* Calc the 'end' offset past end of ONE ctl_info structure
98 * which will become the start of the 'instance' array 99 * which will become the start of the 'instance' array
99 */ 100 */
100 dev_inst = edac_align_ptr(&dev_ctl[1], sizeof(*dev_inst)); 101 dev_inst = edac_align_ptr(&p, sizeof(*dev_inst), nr_instances);
101 102
102 /* Calc the 'end' offset past the instance array within the ctl_info 103 /* Calc the 'end' offset past the instance array within the ctl_info
103 * which will become the start of the block array 104 * which will become the start of the block array
104 */ 105 */
105 dev_blk = edac_align_ptr(&dev_inst[nr_instances], sizeof(*dev_blk)); 106 count = nr_instances * nr_blocks;
107 dev_blk = edac_align_ptr(&p, sizeof(*dev_blk), count);
106 108
107 /* Calc the 'end' offset past the dev_blk array 109 /* Calc the 'end' offset past the dev_blk array
108 * which will become the start of the attrib array, if any. 110 * which will become the start of the attrib array, if any.
109 */ 111 */
110 count = nr_instances * nr_blocks; 112 /* calc how many nr_attrib we need */
111 dev_attrib = edac_align_ptr(&dev_blk[count], sizeof(*dev_attrib)); 113 if (nr_attrib > 0)
112
113 /* Check for case of when an attribute array is specified */
114 if (nr_attrib > 0) {
115 /* calc how many nr_attrib we need */
116 count *= nr_attrib; 114 count *= nr_attrib;
115 dev_attrib = edac_align_ptr(&p, sizeof(*dev_attrib), count);
117 116
118 /* Calc the 'end' offset past the attributes array */ 117 /* Calc the 'end' offset past the attributes array */
119 pvt = edac_align_ptr(&dev_attrib[count], sz_private); 118 pvt = edac_align_ptr(&p, sz_private, 1);
120 } else {
121 /* no attribute array specified */
122 pvt = edac_align_ptr(dev_attrib, sz_private);
123 }
124 119
125 /* 'pvt' now points to where the private data area is. 120 /* 'pvt' now points to where the private data area is.
126 * At this point 'pvt' (like dev_inst,dev_blk and dev_attrib) 121 * At this point 'pvt' (like dev_inst,dev_blk and dev_attrib)
diff --git a/drivers/edac/edac_mc.c b/drivers/edac/edac_mc.c
index feef7733fae7..10f375032e96 100644
--- a/drivers/edac/edac_mc.c
+++ b/drivers/edac/edac_mc.c
@@ -43,9 +43,26 @@ static void edac_mc_dump_channel(struct rank_info *chan)
43{ 43{
44 debugf4("\tchannel = %p\n", chan); 44 debugf4("\tchannel = %p\n", chan);
45 debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx); 45 debugf4("\tchannel->chan_idx = %d\n", chan->chan_idx);
46 debugf4("\tchannel->ce_count = %d\n", chan->ce_count);
47 debugf4("\tchannel->label = '%s'\n", chan->label);
48 debugf4("\tchannel->csrow = %p\n\n", chan->csrow); 46 debugf4("\tchannel->csrow = %p\n\n", chan->csrow);
47 debugf4("\tchannel->dimm = %p\n", chan->dimm);
48}
49
50static void edac_mc_dump_dimm(struct dimm_info *dimm)
51{
52 int i;
53
54 debugf4("\tdimm = %p\n", dimm);
55 debugf4("\tdimm->label = '%s'\n", dimm->label);
56 debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
57 debugf4("\tdimm location ");
58 for (i = 0; i < dimm->mci->n_layers; i++) {
59 printk(KERN_CONT "%d", dimm->location[i]);
60 if (i < dimm->mci->n_layers - 1)
61 printk(KERN_CONT ".");
62 }
63 printk(KERN_CONT "\n");
64 debugf4("\tdimm->grain = %d\n", dimm->grain);
65 debugf4("\tdimm->nr_pages = 0x%x\n", dimm->nr_pages);
49} 66}
50 67
51static void edac_mc_dump_csrow(struct csrow_info *csrow) 68static void edac_mc_dump_csrow(struct csrow_info *csrow)
@@ -55,7 +72,6 @@ static void edac_mc_dump_csrow(struct csrow_info *csrow)
55 debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page); 72 debugf4("\tcsrow->first_page = 0x%lx\n", csrow->first_page);
56 debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page); 73 debugf4("\tcsrow->last_page = 0x%lx\n", csrow->last_page);
57 debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask); 74 debugf4("\tcsrow->page_mask = 0x%lx\n", csrow->page_mask);
58 debugf4("\tcsrow->nr_pages = 0x%x\n", csrow->nr_pages);
59 debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels); 75 debugf4("\tcsrow->nr_channels = %d\n", csrow->nr_channels);
60 debugf4("\tcsrow->channels = %p\n", csrow->channels); 76 debugf4("\tcsrow->channels = %p\n", csrow->channels);
61 debugf4("\tcsrow->mci = %p\n\n", csrow->mci); 77 debugf4("\tcsrow->mci = %p\n\n", csrow->mci);
@@ -70,6 +86,8 @@ static void edac_mc_dump_mci(struct mem_ctl_info *mci)
70 debugf4("\tmci->edac_check = %p\n", mci->edac_check); 86 debugf4("\tmci->edac_check = %p\n", mci->edac_check);
71 debugf3("\tmci->nr_csrows = %d, csrows = %p\n", 87 debugf3("\tmci->nr_csrows = %d, csrows = %p\n",
72 mci->nr_csrows, mci->csrows); 88 mci->nr_csrows, mci->csrows);
89 debugf3("\tmci->nr_dimms = %d, dimms = %p\n",
90 mci->tot_dimms, mci->dimms);
73 debugf3("\tdev = %p\n", mci->dev); 91 debugf3("\tdev = %p\n", mci->dev);
74 debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name); 92 debugf3("\tmod_name:ctl_name = %s:%s\n", mci->mod_name, mci->ctl_name);
75 debugf3("\tpvt_info = %p\n\n", mci->pvt_info); 93 debugf3("\tpvt_info = %p\n\n", mci->pvt_info);
@@ -101,18 +119,37 @@ const char *edac_mem_types[] = {
101}; 119};
102EXPORT_SYMBOL_GPL(edac_mem_types); 120EXPORT_SYMBOL_GPL(edac_mem_types);
103 121
104/* 'ptr' points to a possibly unaligned item X such that sizeof(X) is 'size'. 122/**
105 * Adjust 'ptr' so that its alignment is at least as stringent as what the 123 * edac_align_ptr - Prepares the pointer offsets for a single-shot allocation
106 * compiler would provide for X and return the aligned result. 124 * @p: pointer to a pointer with the memory offset to be used. At
125 * return, this will be incremented to point to the next offset
126 * @size: Size of the data structure to be reserved
127 * @n_elems: Number of elements that should be reserved
107 * 128 *
108 * If 'size' is a constant, the compiler will optimize this whole function 129 * If 'size' is a constant, the compiler will optimize this whole function
109 * down to either a no-op or the addition of a constant to the value of 'ptr'. 130 * down to either a no-op or the addition of a constant to the value of '*p'.
131 *
132 * The 'p' pointer is absolutely needed to keep the proper advancing
133 * further in memory to the proper offsets when allocating the struct along
134 * with its embedded structs, as edac_device_alloc_ctl_info() does it
135 * above, for example.
136 *
137 * At return, the pointer 'p' will be incremented to be used on a next call
138 * to this function.
110 */ 139 */
111void *edac_align_ptr(void *ptr, unsigned size) 140void *edac_align_ptr(void **p, unsigned size, int n_elems)
112{ 141{
113 unsigned align, r; 142 unsigned align, r;
143 void *ptr = *p;
144
145 *p += size * n_elems;
114 146
115 /* Here we assume that the alignment of a "long long" is the most 147 /*
148 * 'p' can possibly be an unaligned item X such that sizeof(X) is
149 * 'size'. Adjust 'p' so that its alignment is at least as
150 * stringent as what the compiler would provide for X and return
151 * the aligned result.
152 * Here we assume that the alignment of a "long long" is the most
116 * stringent alignment that the compiler will ever provide by default. 153 * stringent alignment that the compiler will ever provide by default.
117 * As far as I know, this is a reasonable assumption. 154 * As far as I know, this is a reasonable assumption.
118 */ 155 */
@@ -132,14 +169,18 @@ void *edac_align_ptr(void *ptr, unsigned size)
132 if (r == 0) 169 if (r == 0)
133 return (char *)ptr; 170 return (char *)ptr;
134 171
172 *p += align - r;
173
135 return (void *)(((unsigned long)ptr) + align - r); 174 return (void *)(((unsigned long)ptr) + align - r);
136} 175}
137 176
138/** 177/**
139 * edac_mc_alloc: Allocate a struct mem_ctl_info structure 178 * edac_mc_alloc: Allocate and partially fill a struct mem_ctl_info structure
140 * @size_pvt: size of private storage needed 179 * @mc_num: Memory controller number
141 * @nr_csrows: Number of CWROWS needed for this MC 180 * @n_layers: Number of MC hierarchy layers
142 * @nr_chans: Number of channels for the MC 181 * layers: Describes each layer as seen by the Memory Controller
182 * @size_pvt: size of private storage needed
183 *
143 * 184 *
144 * Everything is kmalloc'ed as one big chunk - more efficient. 185 * Everything is kmalloc'ed as one big chunk - more efficient.
145 * Only can be used if all structures have the same lifetime - otherwise 186 * Only can be used if all structures have the same lifetime - otherwise
@@ -147,32 +188,77 @@ void *edac_align_ptr(void *ptr, unsigned size)
147 * 188 *
148 * Use edac_mc_free() to free mc structures allocated by this function. 189 * Use edac_mc_free() to free mc structures allocated by this function.
149 * 190 *
191 * NOTE: drivers handle multi-rank memories in different ways: in some
192 * drivers, one multi-rank memory stick is mapped as one entry, while, in
193 * others, a single multi-rank memory stick would be mapped into several
194 * entries. Currently, this function will allocate multiple struct dimm_info
195 * on such scenarios, as grouping the multiple ranks require drivers change.
196 *
150 * Returns: 197 * Returns:
151 * NULL allocation failed 198 * On failure: NULL
152 * struct mem_ctl_info pointer 199 * On success: struct mem_ctl_info pointer
153 */ 200 */
154struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows, 201struct mem_ctl_info *edac_mc_alloc(unsigned mc_num,
155 unsigned nr_chans, int edac_index) 202 unsigned n_layers,
203 struct edac_mc_layer *layers,
204 unsigned sz_pvt)
156{ 205{
157 struct mem_ctl_info *mci; 206 struct mem_ctl_info *mci;
158 struct csrow_info *csi, *csrow; 207 struct edac_mc_layer *layer;
208 struct csrow_info *csi, *csr;
159 struct rank_info *chi, *chp, *chan; 209 struct rank_info *chi, *chp, *chan;
160 void *pvt; 210 struct dimm_info *dimm;
161 unsigned size; 211 u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
162 int row, chn; 212 unsigned pos[EDAC_MAX_LAYERS];
163 int err; 213 unsigned size, tot_dimms = 1, count = 1;
214 unsigned tot_csrows = 1, tot_channels = 1, tot_errcount = 0;
215 void *pvt, *p, *ptr = NULL;
216 int i, j, err, row, chn, n, len;
217 bool per_rank = false;
218
219 BUG_ON(n_layers > EDAC_MAX_LAYERS || n_layers == 0);
220 /*
221 * Calculate the total amount of dimms and csrows/cschannels while
222 * in the old API emulation mode
223 */
224 for (i = 0; i < n_layers; i++) {
225 tot_dimms *= layers[i].size;
226 if (layers[i].is_virt_csrow)
227 tot_csrows *= layers[i].size;
228 else
229 tot_channels *= layers[i].size;
230
231 if (layers[i].type == EDAC_MC_LAYER_CHIP_SELECT)
232 per_rank = true;
233 }
164 234
165 /* Figure out the offsets of the various items from the start of an mc 235 /* Figure out the offsets of the various items from the start of an mc
166 * structure. We want the alignment of each item to be at least as 236 * structure. We want the alignment of each item to be at least as
167 * stringent as what the compiler would provide if we could simply 237 * stringent as what the compiler would provide if we could simply
168 * hardcode everything into a single struct. 238 * hardcode everything into a single struct.
169 */ 239 */
170 mci = (struct mem_ctl_info *)0; 240 mci = edac_align_ptr(&ptr, sizeof(*mci), 1);
171 csi = edac_align_ptr(&mci[1], sizeof(*csi)); 241 layer = edac_align_ptr(&ptr, sizeof(*layer), n_layers);
172 chi = edac_align_ptr(&csi[nr_csrows], sizeof(*chi)); 242 csi = edac_align_ptr(&ptr, sizeof(*csi), tot_csrows);
173 pvt = edac_align_ptr(&chi[nr_chans * nr_csrows], sz_pvt); 243 chi = edac_align_ptr(&ptr, sizeof(*chi), tot_csrows * tot_channels);
244 dimm = edac_align_ptr(&ptr, sizeof(*dimm), tot_dimms);
245 for (i = 0; i < n_layers; i++) {
246 count *= layers[i].size;
247 debugf4("%s: errcount layer %d size %d\n", __func__, i, count);
248 ce_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
249 ue_per_layer[i] = edac_align_ptr(&ptr, sizeof(u32), count);
250 tot_errcount += 2 * count;
251 }
252
253 debugf4("%s: allocating %d error counters\n", __func__, tot_errcount);
254 pvt = edac_align_ptr(&ptr, sz_pvt, 1);
174 size = ((unsigned long)pvt) + sz_pvt; 255 size = ((unsigned long)pvt) + sz_pvt;
175 256
257 debugf1("%s(): allocating %u bytes for mci data (%d %s, %d csrows/channels)\n",
258 __func__, size,
259 tot_dimms,
260 per_rank ? "ranks" : "dimms",
261 tot_csrows * tot_channels);
176 mci = kzalloc(size, GFP_KERNEL); 262 mci = kzalloc(size, GFP_KERNEL);
177 if (mci == NULL) 263 if (mci == NULL)
178 return NULL; 264 return NULL;
@@ -180,28 +266,103 @@ struct mem_ctl_info *edac_mc_alloc(unsigned sz_pvt, unsigned nr_csrows,
180 /* Adjust pointers so they point within the memory we just allocated 266 /* Adjust pointers so they point within the memory we just allocated
181 * rather than an imaginary chunk of memory located at address 0. 267 * rather than an imaginary chunk of memory located at address 0.
182 */ 268 */
269 layer = (struct edac_mc_layer *)(((char *)mci) + ((unsigned long)layer));
183 csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi)); 270 csi = (struct csrow_info *)(((char *)mci) + ((unsigned long)csi));
184 chi = (struct rank_info *)(((char *)mci) + ((unsigned long)chi)); 271 chi = (struct rank_info *)(((char *)mci) + ((unsigned long)chi));
272 dimm = (struct dimm_info *)(((char *)mci) + ((unsigned long)dimm));
273 for (i = 0; i < n_layers; i++) {
274 mci->ce_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ce_per_layer[i]));
275 mci->ue_per_layer[i] = (u32 *)((char *)mci + ((unsigned long)ue_per_layer[i]));
276 }
185 pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL; 277 pvt = sz_pvt ? (((char *)mci) + ((unsigned long)pvt)) : NULL;
186 278
187 /* setup index and various internal pointers */ 279 /* setup index and various internal pointers */
188 mci->mc_idx = edac_index; 280 mci->mc_idx = mc_num;
189 mci->csrows = csi; 281 mci->csrows = csi;
282 mci->dimms = dimm;
283 mci->tot_dimms = tot_dimms;
190 mci->pvt_info = pvt; 284 mci->pvt_info = pvt;
191 mci->nr_csrows = nr_csrows; 285 mci->n_layers = n_layers;
192 286 mci->layers = layer;
193 for (row = 0; row < nr_csrows; row++) { 287 memcpy(mci->layers, layers, sizeof(*layer) * n_layers);
194 csrow = &csi[row]; 288 mci->nr_csrows = tot_csrows;
195 csrow->csrow_idx = row; 289 mci->num_cschannel = tot_channels;
196 csrow->mci = mci; 290 mci->mem_is_per_rank = per_rank;
197 csrow->nr_channels = nr_chans;
198 chp = &chi[row * nr_chans];
199 csrow->channels = chp;
200 291
201 for (chn = 0; chn < nr_chans; chn++) { 292 /*
293 * Fill the csrow struct
294 */
295 for (row = 0; row < tot_csrows; row++) {
296 csr = &csi[row];
297 csr->csrow_idx = row;
298 csr->mci = mci;
299 csr->nr_channels = tot_channels;
300 chp = &chi[row * tot_channels];
301 csr->channels = chp;
302
303 for (chn = 0; chn < tot_channels; chn++) {
202 chan = &chp[chn]; 304 chan = &chp[chn];
203 chan->chan_idx = chn; 305 chan->chan_idx = chn;
204 chan->csrow = csrow; 306 chan->csrow = csr;
307 }
308 }
309
310 /*
311 * Fill the dimm struct
312 */
313 memset(&pos, 0, sizeof(pos));
314 row = 0;
315 chn = 0;
316 debugf4("%s: initializing %d %s\n", __func__, tot_dimms,
317 per_rank ? "ranks" : "dimms");
318 for (i = 0; i < tot_dimms; i++) {
319 chan = &csi[row].channels[chn];
320 dimm = EDAC_DIMM_PTR(layer, mci->dimms, n_layers,
321 pos[0], pos[1], pos[2]);
322 dimm->mci = mci;
323
324 debugf2("%s: %d: %s%zd (%d:%d:%d): row %d, chan %d\n", __func__,
325 i, per_rank ? "rank" : "dimm", (dimm - mci->dimms),
326 pos[0], pos[1], pos[2], row, chn);
327
328 /*
329 * Copy DIMM location and initialize it.
330 */
331 len = sizeof(dimm->label);
332 p = dimm->label;
333 n = snprintf(p, len, "mc#%u", mc_num);
334 p += n;
335 len -= n;
336 for (j = 0; j < n_layers; j++) {
337 n = snprintf(p, len, "%s#%u",
338 edac_layer_name[layers[j].type],
339 pos[j]);
340 p += n;
341 len -= n;
342 dimm->location[j] = pos[j];
343
344 if (len <= 0)
345 break;
346 }
347
348 /* Link it to the csrows old API data */
349 chan->dimm = dimm;
350 dimm->csrow = row;
351 dimm->cschannel = chn;
352
353 /* Increment csrow location */
354 row++;
355 if (row == tot_csrows) {
356 row = 0;
357 chn++;
358 }
359
360 /* Increment dimm location */
361 for (j = n_layers - 1; j >= 0; j--) {
362 pos[j]++;
363 if (pos[j] < layers[j].size)
364 break;
365 pos[j] = 0;
205 } 366 }
206 } 367 }
207 368
@@ -490,7 +651,6 @@ EXPORT_SYMBOL(edac_mc_find);
490 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and 651 * edac_mc_add_mc: Insert the 'mci' structure into the mci global list and
491 * create sysfs entries associated with mci structure 652 * create sysfs entries associated with mci structure
492 * @mci: pointer to the mci structure to be added to the list 653 * @mci: pointer to the mci structure to be added to the list
493 * @mc_idx: A unique numeric identifier to be assigned to the 'mci' structure.
494 * 654 *
495 * Return: 655 * Return:
496 * 0 Success 656 * 0 Success
@@ -517,6 +677,8 @@ int edac_mc_add_mc(struct mem_ctl_info *mci)
517 edac_mc_dump_channel(&mci->csrows[i]. 677 edac_mc_dump_channel(&mci->csrows[i].
518 channels[j]); 678 channels[j]);
519 } 679 }
680 for (i = 0; i < mci->tot_dimms; i++)
681 edac_mc_dump_dimm(&mci->dimms[i]);
520 } 682 }
521#endif 683#endif
522 mutex_lock(&mem_ctls_mutex); 684 mutex_lock(&mem_ctls_mutex);
@@ -636,15 +798,19 @@ static void edac_mc_scrub_block(unsigned long page, unsigned long offset,
636int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page) 798int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
637{ 799{
638 struct csrow_info *csrows = mci->csrows; 800 struct csrow_info *csrows = mci->csrows;
639 int row, i; 801 int row, i, j, n;
640 802
641 debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page); 803 debugf1("MC%d: %s(): 0x%lx\n", mci->mc_idx, __func__, page);
642 row = -1; 804 row = -1;
643 805
644 for (i = 0; i < mci->nr_csrows; i++) { 806 for (i = 0; i < mci->nr_csrows; i++) {
645 struct csrow_info *csrow = &csrows[i]; 807 struct csrow_info *csrow = &csrows[i];
646 808 n = 0;
647 if (csrow->nr_pages == 0) 809 for (j = 0; j < csrow->nr_channels; j++) {
810 struct dimm_info *dimm = csrow->channels[j].dimm;
811 n += dimm->nr_pages;
812 }
813 if (n == 0)
648 continue; 814 continue;
649 815
650 debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) " 816 debugf3("MC%d: %s(): first(0x%lx) page(0x%lx) last(0x%lx) "
@@ -670,249 +836,307 @@ int edac_mc_find_csrow_by_page(struct mem_ctl_info *mci, unsigned long page)
670} 836}
671EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page); 837EXPORT_SYMBOL_GPL(edac_mc_find_csrow_by_page);
672 838
673/* FIXME - setable log (warning/emerg) levels */ 839const char *edac_layer_name[] = {
674/* FIXME - integrate with evlog: http://evlog.sourceforge.net/ */ 840 [EDAC_MC_LAYER_BRANCH] = "branch",
675void edac_mc_handle_ce(struct mem_ctl_info *mci, 841 [EDAC_MC_LAYER_CHANNEL] = "channel",
676 unsigned long page_frame_number, 842 [EDAC_MC_LAYER_SLOT] = "slot",
677 unsigned long offset_in_page, unsigned long syndrome, 843 [EDAC_MC_LAYER_CHIP_SELECT] = "csrow",
678 int row, int channel, const char *msg) 844};
679{ 845EXPORT_SYMBOL_GPL(edac_layer_name);
680 unsigned long remapped_page;
681 846
682 debugf3("MC%d: %s()\n", mci->mc_idx, __func__); 847static void edac_inc_ce_error(struct mem_ctl_info *mci,
848 bool enable_per_layer_report,
849 const int pos[EDAC_MAX_LAYERS])
850{
851 int i, index = 0;
683 852
684 /* FIXME - maybe make panic on INTERNAL ERROR an option */ 853 mci->ce_mc++;
685 if (row >= mci->nr_csrows || row < 0) {
686 /* something is wrong */
687 edac_mc_printk(mci, KERN_ERR,
688 "INTERNAL ERROR: row out of range "
689 "(%d >= %d)\n", row, mci->nr_csrows);
690 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
691 return;
692 }
693 854
694 if (channel >= mci->csrows[row].nr_channels || channel < 0) { 855 if (!enable_per_layer_report) {
695 /* something is wrong */ 856 mci->ce_noinfo_count++;
696 edac_mc_printk(mci, KERN_ERR,
697 "INTERNAL ERROR: channel out of range "
698 "(%d >= %d)\n", channel,
699 mci->csrows[row].nr_channels);
700 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
701 return; 857 return;
702 } 858 }
703 859
704 if (edac_mc_get_log_ce()) 860 for (i = 0; i < mci->n_layers; i++) {
705 /* FIXME - put in DIMM location */ 861 if (pos[i] < 0)
706 edac_mc_printk(mci, KERN_WARNING, 862 break;
707 "CE page 0x%lx, offset 0x%lx, grain %d, syndrome " 863 index += pos[i];
708 "0x%lx, row %d, channel %d, label \"%s\": %s\n", 864 mci->ce_per_layer[i][index]++;
709 page_frame_number, offset_in_page,
710 mci->csrows[row].grain, syndrome, row, channel,
711 mci->csrows[row].channels[channel].label, msg);
712
713 mci->ce_count++;
714 mci->csrows[row].ce_count++;
715 mci->csrows[row].channels[channel].ce_count++;
716
717 if (mci->scrub_mode & SCRUB_SW_SRC) {
718 /*
719 * Some MC's can remap memory so that it is still available
720 * at a different address when PCI devices map into memory.
721 * MC's that can't do this lose the memory where PCI devices
722 * are mapped. This mapping is MC dependent and so we call
723 * back into the MC driver for it to map the MC page to
724 * a physical (CPU) page which can then be mapped to a virtual
725 * page - which can then be scrubbed.
726 */
727 remapped_page = mci->ctl_page_to_phys ?
728 mci->ctl_page_to_phys(mci, page_frame_number) :
729 page_frame_number;
730 865
731 edac_mc_scrub_block(remapped_page, offset_in_page, 866 if (i < mci->n_layers - 1)
732 mci->csrows[row].grain); 867 index *= mci->layers[i + 1].size;
733 } 868 }
734} 869}
735EXPORT_SYMBOL_GPL(edac_mc_handle_ce);
736 870
737void edac_mc_handle_ce_no_info(struct mem_ctl_info *mci, const char *msg) 871static void edac_inc_ue_error(struct mem_ctl_info *mci,
872 bool enable_per_layer_report,
873 const int pos[EDAC_MAX_LAYERS])
738{ 874{
739 if (edac_mc_get_log_ce()) 875 int i, index = 0;
740 edac_mc_printk(mci, KERN_WARNING,
741 "CE - no information available: %s\n", msg);
742 876
743 mci->ce_noinfo_count++; 877 mci->ue_mc++;
744 mci->ce_count++;
745}
746EXPORT_SYMBOL_GPL(edac_mc_handle_ce_no_info);
747 878
748void edac_mc_handle_ue(struct mem_ctl_info *mci, 879 if (!enable_per_layer_report) {
749 unsigned long page_frame_number, 880 mci->ce_noinfo_count++;
750 unsigned long offset_in_page, int row, const char *msg)
751{
752 int len = EDAC_MC_LABEL_LEN * 4;
753 char labels[len + 1];
754 char *pos = labels;
755 int chan;
756 int chars;
757
758 debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
759
760 /* FIXME - maybe make panic on INTERNAL ERROR an option */
761 if (row >= mci->nr_csrows || row < 0) {
762 /* something is wrong */
763 edac_mc_printk(mci, KERN_ERR,
764 "INTERNAL ERROR: row out of range "
765 "(%d >= %d)\n", row, mci->nr_csrows);
766 edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
767 return; 881 return;
768 } 882 }
769 883
770 chars = snprintf(pos, len + 1, "%s", 884 for (i = 0; i < mci->n_layers; i++) {
771 mci->csrows[row].channels[0].label); 885 if (pos[i] < 0)
772 len -= chars; 886 break;
773 pos += chars; 887 index += pos[i];
888 mci->ue_per_layer[i][index]++;
774 889
775 for (chan = 1; (chan < mci->csrows[row].nr_channels) && (len > 0); 890 if (i < mci->n_layers - 1)
776 chan++) { 891 index *= mci->layers[i + 1].size;
777 chars = snprintf(pos, len + 1, ":%s",
778 mci->csrows[row].channels[chan].label);
779 len -= chars;
780 pos += chars;
781 } 892 }
893}
782 894
783 if (edac_mc_get_log_ue()) 895static void edac_ce_error(struct mem_ctl_info *mci,
784 edac_mc_printk(mci, KERN_EMERG, 896 const int pos[EDAC_MAX_LAYERS],
785 "UE page 0x%lx, offset 0x%lx, grain %d, row %d, " 897 const char *msg,
786 "labels \"%s\": %s\n", page_frame_number, 898 const char *location,
787 offset_in_page, mci->csrows[row].grain, row, 899 const char *label,
788 labels, msg); 900 const char *detail,
901 const char *other_detail,
902 const bool enable_per_layer_report,
903 const unsigned long page_frame_number,
904 const unsigned long offset_in_page,
905 u32 grain)
906{
907 unsigned long remapped_page;
789 908
790 if (edac_mc_get_panic_on_ue()) 909 if (edac_mc_get_log_ce()) {
791 panic("EDAC MC%d: UE page 0x%lx, offset 0x%lx, grain %d, " 910 if (other_detail && *other_detail)
792 "row %d, labels \"%s\": %s\n", mci->mc_idx, 911 edac_mc_printk(mci, KERN_WARNING,
793 page_frame_number, offset_in_page, 912 "CE %s on %s (%s%s - %s)\n",
794 mci->csrows[row].grain, row, labels, msg); 913 msg, label, location,
914 detail, other_detail);
915 else
916 edac_mc_printk(mci, KERN_WARNING,
917 "CE %s on %s (%s%s)\n",
918 msg, label, location,
919 detail);
920 }
921 edac_inc_ce_error(mci, enable_per_layer_report, pos);
795 922
796 mci->ue_count++; 923 if (mci->scrub_mode & SCRUB_SW_SRC) {
797 mci->csrows[row].ue_count++; 924 /*
925 * Some memory controllers (called MCs below) can remap
926 * memory so that it is still available at a different
927 * address when PCI devices map into memory.
928 * MC's that can't do this, lose the memory where PCI
929 * devices are mapped. This mapping is MC-dependent
930 * and so we call back into the MC driver for it to
931 * map the MC page to a physical (CPU) page which can
932 * then be mapped to a virtual page - which can then
933 * be scrubbed.
934 */
935 remapped_page = mci->ctl_page_to_phys ?
936 mci->ctl_page_to_phys(mci, page_frame_number) :
937 page_frame_number;
938
939 edac_mc_scrub_block(remapped_page,
940 offset_in_page, grain);
941 }
798} 942}
799EXPORT_SYMBOL_GPL(edac_mc_handle_ue);
800 943
801void edac_mc_handle_ue_no_info(struct mem_ctl_info *mci, const char *msg) 944static void edac_ue_error(struct mem_ctl_info *mci,
945 const int pos[EDAC_MAX_LAYERS],
946 const char *msg,
947 const char *location,
948 const char *label,
949 const char *detail,
950 const char *other_detail,
951 const bool enable_per_layer_report)
802{ 952{
803 if (edac_mc_get_panic_on_ue()) 953 if (edac_mc_get_log_ue()) {
804 panic("EDAC MC%d: Uncorrected Error", mci->mc_idx); 954 if (other_detail && *other_detail)
955 edac_mc_printk(mci, KERN_WARNING,
956 "UE %s on %s (%s%s - %s)\n",
957 msg, label, location, detail,
958 other_detail);
959 else
960 edac_mc_printk(mci, KERN_WARNING,
961 "UE %s on %s (%s%s)\n",
962 msg, label, location, detail);
963 }
805 964
806 if (edac_mc_get_log_ue()) 965 if (edac_mc_get_panic_on_ue()) {
807 edac_mc_printk(mci, KERN_WARNING, 966 if (other_detail && *other_detail)
808 "UE - no information available: %s\n", msg); 967 panic("UE %s on %s (%s%s - %s)\n",
809 mci->ue_noinfo_count++; 968 msg, label, location, detail, other_detail);
810 mci->ue_count++; 969 else
970 panic("UE %s on %s (%s%s)\n",
971 msg, label, location, detail);
972 }
973
974 edac_inc_ue_error(mci, enable_per_layer_report, pos);
811} 975}
812EXPORT_SYMBOL_GPL(edac_mc_handle_ue_no_info);
813 976
814/************************************************************* 977#define OTHER_LABEL " or "
815 * On Fully Buffered DIMM modules, this help function is 978void edac_mc_handle_error(const enum hw_event_mc_err_type type,
816 * called to process UE events 979 struct mem_ctl_info *mci,
817 */ 980 const unsigned long page_frame_number,
818void edac_mc_handle_fbd_ue(struct mem_ctl_info *mci, 981 const unsigned long offset_in_page,
819 unsigned int csrow, 982 const unsigned long syndrome,
820 unsigned int channela, 983 const int layer0,
821 unsigned int channelb, char *msg) 984 const int layer1,
985 const int layer2,
986 const char *msg,
987 const char *other_detail,
988 const void *mcelog)
822{ 989{
823 int len = EDAC_MC_LABEL_LEN * 4; 990 /* FIXME: too much for stack: move it to some pre-alocated area */
824 char labels[len + 1]; 991 char detail[80], location[80];
825 char *pos = labels; 992 char label[(EDAC_MC_LABEL_LEN + 1 + sizeof(OTHER_LABEL)) * mci->tot_dimms];
826 int chars; 993 char *p;
994 int row = -1, chan = -1;
995 int pos[EDAC_MAX_LAYERS] = { layer0, layer1, layer2 };
996 int i;
997 u32 grain;
998 bool enable_per_layer_report = false;
827 999
828 if (csrow >= mci->nr_csrows) { 1000 debugf3("MC%d: %s()\n", mci->mc_idx, __func__);
829 /* something is wrong */
830 edac_mc_printk(mci, KERN_ERR,
831 "INTERNAL ERROR: row out of range (%d >= %d)\n",
832 csrow, mci->nr_csrows);
833 edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR");
834 return;
835 }
836 1001
837 if (channela >= mci->csrows[csrow].nr_channels) { 1002 /*
838 /* something is wrong */ 1003 * Check if the event report is consistent and if the memory
839 edac_mc_printk(mci, KERN_ERR, 1004 * location is known. If it is known, enable_per_layer_report will be
840 "INTERNAL ERROR: channel-a out of range " 1005 * true, the DIMM(s) label info will be filled and the per-layer
841 "(%d >= %d)\n", 1006 * error counters will be incremented.
842 channela, mci->csrows[csrow].nr_channels); 1007 */
843 edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR"); 1008 for (i = 0; i < mci->n_layers; i++) {
844 return; 1009 if (pos[i] >= (int)mci->layers[i].size) {
1010 if (type == HW_EVENT_ERR_CORRECTED)
1011 p = "CE";
1012 else
1013 p = "UE";
1014
1015 edac_mc_printk(mci, KERN_ERR,
1016 "INTERNAL ERROR: %s value is out of range (%d >= %d)\n",
1017 edac_layer_name[mci->layers[i].type],
1018 pos[i], mci->layers[i].size);
1019 /*
1020 * Instead of just returning it, let's use what's
1021 * known about the error. The increment routines and
1022 * the DIMM filter logic will do the right thing by
1023 * pointing the likely damaged DIMMs.
1024 */
1025 pos[i] = -1;
1026 }
1027 if (pos[i] >= 0)
1028 enable_per_layer_report = true;
845 } 1029 }
846 1030
847 if (channelb >= mci->csrows[csrow].nr_channels) { 1031 /*
848 /* something is wrong */ 1032 * Get the dimm label/grain that applies to the match criteria.
849 edac_mc_printk(mci, KERN_ERR, 1033 * As the error algorithm may not be able to point to just one memory
850 "INTERNAL ERROR: channel-b out of range " 1034 * stick, the logic here will get all possible labels that could
851 "(%d >= %d)\n", 1035 * pottentially be affected by the error.
852 channelb, mci->csrows[csrow].nr_channels); 1036 * On FB-DIMM memory controllers, for uncorrected errors, it is common
853 edac_mc_handle_ue_no_info(mci, "INTERNAL ERROR"); 1037 * to have only the MC channel and the MC dimm (also called "branch")
854 return; 1038 * but the channel is not known, as the memory is arranged in pairs,
855 } 1039 * where each memory belongs to a separate channel within the same
1040 * branch.
1041 */
1042 grain = 0;
1043 p = label;
1044 *p = '\0';
1045 for (i = 0; i < mci->tot_dimms; i++) {
1046 struct dimm_info *dimm = &mci->dimms[i];
856 1047
857 mci->ue_count++; 1048 if (layer0 >= 0 && layer0 != dimm->location[0])
858 mci->csrows[csrow].ue_count++; 1049 continue;
1050 if (layer1 >= 0 && layer1 != dimm->location[1])
1051 continue;
1052 if (layer2 >= 0 && layer2 != dimm->location[2])
1053 continue;
859 1054
860 /* Generate the DIMM labels from the specified channels */ 1055 /* get the max grain, over the error match range */
861 chars = snprintf(pos, len + 1, "%s", 1056 if (dimm->grain > grain)
862 mci->csrows[csrow].channels[channela].label); 1057 grain = dimm->grain;
863 len -= chars;
864 pos += chars;
865 chars = snprintf(pos, len + 1, "-%s",
866 mci->csrows[csrow].channels[channelb].label);
867 1058
868 if (edac_mc_get_log_ue()) 1059 /*
869 edac_mc_printk(mci, KERN_EMERG, 1060 * If the error is memory-controller wide, there's no need to
870 "UE row %d, channel-a= %d channel-b= %d " 1061 * seek for the affected DIMMs because the whole
871 "labels \"%s\": %s\n", csrow, channela, channelb, 1062 * channel/memory controller/... may be affected.
872 labels, msg); 1063 * Also, don't show errors for empty DIMM slots.
1064 */
1065 if (enable_per_layer_report && dimm->nr_pages) {
1066 if (p != label) {
1067 strcpy(p, OTHER_LABEL);
1068 p += strlen(OTHER_LABEL);
1069 }
1070 strcpy(p, dimm->label);
1071 p += strlen(p);
1072 *p = '\0';
1073
1074 /*
1075 * get csrow/channel of the DIMM, in order to allow
1076 * incrementing the compat API counters
1077 */
1078 debugf4("%s: %s csrows map: (%d,%d)\n",
1079 __func__,
1080 mci->mem_is_per_rank ? "rank" : "dimm",
1081 dimm->csrow, dimm->cschannel);
1082
1083 if (row == -1)
1084 row = dimm->csrow;
1085 else if (row >= 0 && row != dimm->csrow)
1086 row = -2;
1087
1088 if (chan == -1)
1089 chan = dimm->cschannel;
1090 else if (chan >= 0 && chan != dimm->cschannel)
1091 chan = -2;
1092 }
1093 }
873 1094
874 if (edac_mc_get_panic_on_ue()) 1095 if (!enable_per_layer_report) {
875 panic("UE row %d, channel-a= %d channel-b= %d " 1096 strcpy(label, "any memory");
876 "labels \"%s\": %s\n", csrow, channela, 1097 } else {
877 channelb, labels, msg); 1098 debugf4("%s: csrow/channel to increment: (%d,%d)\n",
878} 1099 __func__, row, chan);
879EXPORT_SYMBOL(edac_mc_handle_fbd_ue); 1100 if (p == label)
1101 strcpy(label, "unknown memory");
1102 if (type == HW_EVENT_ERR_CORRECTED) {
1103 if (row >= 0) {
1104 mci->csrows[row].ce_count++;
1105 if (chan >= 0)
1106 mci->csrows[row].channels[chan].ce_count++;
1107 }
1108 } else
1109 if (row >= 0)
1110 mci->csrows[row].ue_count++;
1111 }
880 1112
881/************************************************************* 1113 /* Fill the RAM location data */
882 * On Fully Buffered DIMM modules, this help function is 1114 p = location;
883 * called to process CE events 1115 for (i = 0; i < mci->n_layers; i++) {
884 */ 1116 if (pos[i] < 0)
885void edac_mc_handle_fbd_ce(struct mem_ctl_info *mci, 1117 continue;
886 unsigned int csrow, unsigned int channel, char *msg)
887{
888 1118
889 /* Ensure boundary values */ 1119 p += sprintf(p, "%s:%d ",
890 if (csrow >= mci->nr_csrows) { 1120 edac_layer_name[mci->layers[i].type],
891 /* something is wrong */ 1121 pos[i]);
892 edac_mc_printk(mci, KERN_ERR,
893 "INTERNAL ERROR: row out of range (%d >= %d)\n",
894 csrow, mci->nr_csrows);
895 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
896 return;
897 }
898 if (channel >= mci->csrows[csrow].nr_channels) {
899 /* something is wrong */
900 edac_mc_printk(mci, KERN_ERR,
901 "INTERNAL ERROR: channel out of range (%d >= %d)\n",
902 channel, mci->csrows[csrow].nr_channels);
903 edac_mc_handle_ce_no_info(mci, "INTERNAL ERROR");
904 return;
905 } 1122 }
906 1123
907 if (edac_mc_get_log_ce()) 1124 /* Memory type dependent details about the error */
908 /* FIXME - put in DIMM location */ 1125 if (type == HW_EVENT_ERR_CORRECTED) {
909 edac_mc_printk(mci, KERN_WARNING, 1126 snprintf(detail, sizeof(detail),
910 "CE row %d, channel %d, label \"%s\": %s\n", 1127 "page:0x%lx offset:0x%lx grain:%d syndrome:0x%lx",
911 csrow, channel, 1128 page_frame_number, offset_in_page,
912 mci->csrows[csrow].channels[channel].label, msg); 1129 grain, syndrome);
1130 edac_ce_error(mci, pos, msg, location, label, detail,
1131 other_detail, enable_per_layer_report,
1132 page_frame_number, offset_in_page, grain);
1133 } else {
1134 snprintf(detail, sizeof(detail),
1135 "page:0x%lx offset:0x%lx grain:%d",
1136 page_frame_number, offset_in_page, grain);
913 1137
914 mci->ce_count++; 1138 edac_ue_error(mci, pos, msg, location, label, detail,
915 mci->csrows[csrow].ce_count++; 1139 other_detail, enable_per_layer_report);
916 mci->csrows[csrow].channels[channel].ce_count++; 1140 }
917} 1141}
918EXPORT_SYMBOL(edac_mc_handle_fbd_ce); 1142EXPORT_SYMBOL_GPL(edac_mc_handle_error);
diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
index e9a28f576d14..f6a29b0eedc8 100644
--- a/drivers/edac/edac_mc_sysfs.c
+++ b/drivers/edac/edac_mc_sysfs.c
@@ -144,25 +144,31 @@ static ssize_t csrow_ce_count_show(struct csrow_info *csrow, char *data,
144static ssize_t csrow_size_show(struct csrow_info *csrow, char *data, 144static ssize_t csrow_size_show(struct csrow_info *csrow, char *data,
145 int private) 145 int private)
146{ 146{
147 return sprintf(data, "%u\n", PAGES_TO_MiB(csrow->nr_pages)); 147 int i;
148 u32 nr_pages = 0;
149
150 for (i = 0; i < csrow->nr_channels; i++)
151 nr_pages += csrow->channels[i].dimm->nr_pages;
152
153 return sprintf(data, "%u\n", PAGES_TO_MiB(nr_pages));
148} 154}
149 155
150static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data, 156static ssize_t csrow_mem_type_show(struct csrow_info *csrow, char *data,
151 int private) 157 int private)
152{ 158{
153 return sprintf(data, "%s\n", mem_types[csrow->mtype]); 159 return sprintf(data, "%s\n", mem_types[csrow->channels[0].dimm->mtype]);
154} 160}
155 161
156static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data, 162static ssize_t csrow_dev_type_show(struct csrow_info *csrow, char *data,
157 int private) 163 int private)
158{ 164{
159 return sprintf(data, "%s\n", dev_types[csrow->dtype]); 165 return sprintf(data, "%s\n", dev_types[csrow->channels[0].dimm->dtype]);
160} 166}
161 167
162static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data, 168static ssize_t csrow_edac_mode_show(struct csrow_info *csrow, char *data,
163 int private) 169 int private)
164{ 170{
165 return sprintf(data, "%s\n", edac_caps[csrow->edac_mode]); 171 return sprintf(data, "%s\n", edac_caps[csrow->channels[0].dimm->edac_mode]);
166} 172}
167 173
168/* show/store functions for DIMM Label attributes */ 174/* show/store functions for DIMM Label attributes */
@@ -170,11 +176,11 @@ static ssize_t channel_dimm_label_show(struct csrow_info *csrow,
170 char *data, int channel) 176 char *data, int channel)
171{ 177{
172 /* if field has not been initialized, there is nothing to send */ 178 /* if field has not been initialized, there is nothing to send */
173 if (!csrow->channels[channel].label[0]) 179 if (!csrow->channels[channel].dimm->label[0])
174 return 0; 180 return 0;
175 181
176 return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n", 182 return snprintf(data, EDAC_MC_LABEL_LEN, "%s\n",
177 csrow->channels[channel].label); 183 csrow->channels[channel].dimm->label);
178} 184}
179 185
180static ssize_t channel_dimm_label_store(struct csrow_info *csrow, 186static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
@@ -184,8 +190,8 @@ static ssize_t channel_dimm_label_store(struct csrow_info *csrow,
184 ssize_t max_size = 0; 190 ssize_t max_size = 0;
185 191
186 max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1); 192 max_size = min((ssize_t) count, (ssize_t) EDAC_MC_LABEL_LEN - 1);
187 strncpy(csrow->channels[channel].label, data, max_size); 193 strncpy(csrow->channels[channel].dimm->label, data, max_size);
188 csrow->channels[channel].label[max_size] = '\0'; 194 csrow->channels[channel].dimm->label[max_size] = '\0';
189 195
190 return max_size; 196 return max_size;
191} 197}
@@ -419,8 +425,8 @@ static ssize_t mci_reset_counters_store(struct mem_ctl_info *mci,
419 425
420 mci->ue_noinfo_count = 0; 426 mci->ue_noinfo_count = 0;
421 mci->ce_noinfo_count = 0; 427 mci->ce_noinfo_count = 0;
422 mci->ue_count = 0; 428 mci->ue_mc = 0;
423 mci->ce_count = 0; 429 mci->ce_mc = 0;
424 430
425 for (row = 0; row < mci->nr_csrows; row++) { 431 for (row = 0; row < mci->nr_csrows; row++) {
426 struct csrow_info *ri = &mci->csrows[row]; 432 struct csrow_info *ri = &mci->csrows[row];
@@ -489,12 +495,12 @@ static ssize_t mci_sdram_scrub_rate_show(struct mem_ctl_info *mci, char *data)
489/* default attribute files for the MCI object */ 495/* default attribute files for the MCI object */
490static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data) 496static ssize_t mci_ue_count_show(struct mem_ctl_info *mci, char *data)
491{ 497{
492 return sprintf(data, "%d\n", mci->ue_count); 498 return sprintf(data, "%d\n", mci->ue_mc);
493} 499}
494 500
495static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data) 501static ssize_t mci_ce_count_show(struct mem_ctl_info *mci, char *data)
496{ 502{
497 return sprintf(data, "%d\n", mci->ce_count); 503 return sprintf(data, "%d\n", mci->ce_mc);
498} 504}
499 505
500static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data) 506static ssize_t mci_ce_noinfo_show(struct mem_ctl_info *mci, char *data)
@@ -519,16 +525,16 @@ static ssize_t mci_ctl_name_show(struct mem_ctl_info *mci, char *data)
519 525
520static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data) 526static ssize_t mci_size_mb_show(struct mem_ctl_info *mci, char *data)
521{ 527{
522 int total_pages, csrow_idx; 528 int total_pages = 0, csrow_idx, j;
523 529
524 for (total_pages = csrow_idx = 0; csrow_idx < mci->nr_csrows; 530 for (csrow_idx = 0; csrow_idx < mci->nr_csrows; csrow_idx++) {
525 csrow_idx++) {
526 struct csrow_info *csrow = &mci->csrows[csrow_idx]; 531 struct csrow_info *csrow = &mci->csrows[csrow_idx];
527 532
528 if (!csrow->nr_pages) 533 for (j = 0; j < csrow->nr_channels; j++) {
529 continue; 534 struct dimm_info *dimm = csrow->channels[j].dimm;
530 535
531 total_pages += csrow->nr_pages; 536 total_pages += dimm->nr_pages;
537 }
532 } 538 }
533 539
534 return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages)); 540 return sprintf(data, "%u\n", PAGES_TO_MiB(total_pages));
@@ -900,7 +906,7 @@ static void edac_remove_mci_instance_attributes(struct mem_ctl_info *mci,
900 */ 906 */
901int edac_create_sysfs_mci_device(struct mem_ctl_info *mci) 907int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
902{ 908{
903 int i; 909 int i, j;
904 int err; 910 int err;
905 struct csrow_info *csrow; 911 struct csrow_info *csrow;
906 struct kobject *kobj_mci = &mci->edac_mci_kobj; 912 struct kobject *kobj_mci = &mci->edac_mci_kobj;
@@ -934,10 +940,13 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
934 /* Make directories for each CSROW object under the mc<id> kobject 940 /* Make directories for each CSROW object under the mc<id> kobject
935 */ 941 */
936 for (i = 0; i < mci->nr_csrows; i++) { 942 for (i = 0; i < mci->nr_csrows; i++) {
943 int nr_pages = 0;
944
937 csrow = &mci->csrows[i]; 945 csrow = &mci->csrows[i];
946 for (j = 0; j < csrow->nr_channels; j++)
947 nr_pages += csrow->channels[j].dimm->nr_pages;
938 948
939 /* Only expose populated CSROWs */ 949 if (nr_pages > 0) {
940 if (csrow->nr_pages > 0) {
941 err = edac_create_csrow_object(mci, csrow, i); 950 err = edac_create_csrow_object(mci, csrow, i);
942 if (err) { 951 if (err) {
943 debugf1("%s() failure: create csrow %d obj\n", 952 debugf1("%s() failure: create csrow %d obj\n",
@@ -949,12 +958,15 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
949 958
950 return 0; 959 return 0;
951 960
952 /* CSROW error: backout what has already been registered, */
953fail1: 961fail1:
954 for (i--; i >= 0; i--) { 962 for (i--; i >= 0; i--) {
955 if (csrow->nr_pages > 0) { 963 int nr_pages = 0;
964
965 csrow = &mci->csrows[i];
966 for (j = 0; j < csrow->nr_channels; j++)
967 nr_pages += csrow->channels[j].dimm->nr_pages;
968 if (nr_pages > 0)
956 kobject_put(&mci->csrows[i].kobj); 969 kobject_put(&mci->csrows[i].kobj);
957 }
958 } 970 }
959 971
960 /* remove the mci instance's attributes, if any */ 972 /* remove the mci instance's attributes, if any */
@@ -973,14 +985,20 @@ fail0:
973 */ 985 */
974void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci) 986void edac_remove_sysfs_mci_device(struct mem_ctl_info *mci)
975{ 987{
976 int i; 988 struct csrow_info *csrow;
989 int i, j;
977 990
978 debugf0("%s()\n", __func__); 991 debugf0("%s()\n", __func__);
979 992
980 /* remove all csrow kobjects */ 993 /* remove all csrow kobjects */
981 debugf4("%s() unregister this mci kobj\n", __func__); 994 debugf4("%s() unregister this mci kobj\n", __func__);
982 for (i = 0; i < mci->nr_csrows; i++) { 995 for (i = 0; i < mci->nr_csrows; i++) {
983 if (mci->csrows[i].nr_pages > 0) { 996 int nr_pages = 0;
997
998 csrow = &mci->csrows[i];
999 for (j = 0; j < csrow->nr_channels; j++)
1000 nr_pages += csrow->channels[j].dimm->nr_pages;
1001 if (nr_pages > 0) {
984 debugf0("%s() unreg csrow-%d\n", __func__, i); 1002 debugf0("%s() unreg csrow-%d\n", __func__, i);
985 kobject_put(&mci->csrows[i].kobj); 1003 kobject_put(&mci->csrows[i].kobj);
986 } 1004 }
diff --git a/drivers/edac/edac_module.h b/drivers/edac/edac_module.h
index 00f81b47a51f..0ea7d14cb930 100644
--- a/drivers/edac/edac_module.h
+++ b/drivers/edac/edac_module.h
@@ -50,7 +50,7 @@ extern void edac_device_reset_delay_period(struct edac_device_ctl_info
50 *edac_dev, unsigned long value); 50 *edac_dev, unsigned long value);
51extern void edac_mc_reset_delay_period(int value); 51extern void edac_mc_reset_delay_period(int value);
52 52
53extern void *edac_align_ptr(void *ptr, unsigned size); 53extern void *edac_align_ptr(void **p, unsigned size, int n_elems);
54 54
55/* 55/*
56 * EDAC PCI functions 56 * EDAC PCI functions
diff --git a/drivers/edac/edac_pci.c b/drivers/edac/edac_pci.c
index 63af1c5673d1..f1ac86649886 100644
--- a/drivers/edac/edac_pci.c
+++ b/drivers/edac/edac_pci.c
@@ -42,13 +42,13 @@ struct edac_pci_ctl_info *edac_pci_alloc_ctl_info(unsigned int sz_pvt,
42 const char *edac_pci_name) 42 const char *edac_pci_name)
43{ 43{
44 struct edac_pci_ctl_info *pci; 44 struct edac_pci_ctl_info *pci;
45 void *pvt; 45 void *p = NULL, *pvt;
46 unsigned int size; 46 unsigned int size;
47 47
48 debugf1("%s()\n", __func__); 48 debugf1("%s()\n", __func__);
49 49
50 pci = (struct edac_pci_ctl_info *)0; 50 pci = edac_align_ptr(&p, sizeof(*pci), 1);
51 pvt = edac_align_ptr(&pci[1], sz_pvt); 51 pvt = edac_align_ptr(&p, 1, sz_pvt);
52 size = ((unsigned long)pvt) + sz_pvt; 52 size = ((unsigned long)pvt) + sz_pvt;
53 53
54 /* Alloc the needed control struct memory */ 54 /* Alloc the needed control struct memory */
diff --git a/drivers/edac/i3000_edac.c b/drivers/edac/i3000_edac.c
index 277689a68841..8ad1744faacd 100644
--- a/drivers/edac/i3000_edac.c
+++ b/drivers/edac/i3000_edac.c
@@ -245,7 +245,9 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
245 return 1; 245 return 1;
246 246
247 if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) { 247 if ((info->errsts ^ info->errsts2) & I3000_ERRSTS_BITS) {
248 edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); 248 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
249 -1, -1, -1,
250 "UE overwrote CE", "", NULL);
249 info->errsts = info->errsts2; 251 info->errsts = info->errsts2;
250 } 252 }
251 253
@@ -256,10 +258,15 @@ static int i3000_process_error_info(struct mem_ctl_info *mci,
256 row = edac_mc_find_csrow_by_page(mci, pfn); 258 row = edac_mc_find_csrow_by_page(mci, pfn);
257 259
258 if (info->errsts & I3000_ERRSTS_UE) 260 if (info->errsts & I3000_ERRSTS_UE)
259 edac_mc_handle_ue(mci, pfn, offset, row, "i3000 UE"); 261 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
262 pfn, offset, 0,
263 row, -1, -1,
264 "i3000 UE", "", NULL);
260 else 265 else
261 edac_mc_handle_ce(mci, pfn, offset, info->derrsyn, row, 266 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
262 multi_chan ? channel : 0, "i3000 CE"); 267 pfn, offset, info->derrsyn,
268 row, multi_chan ? channel : 0, -1,
269 "i3000 CE", "", NULL);
263 270
264 return 1; 271 return 1;
265} 272}
@@ -304,9 +311,10 @@ static int i3000_is_interleaved(const unsigned char *c0dra,
304static int i3000_probe1(struct pci_dev *pdev, int dev_idx) 311static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
305{ 312{
306 int rc; 313 int rc;
307 int i; 314 int i, j;
308 struct mem_ctl_info *mci = NULL; 315 struct mem_ctl_info *mci = NULL;
309 unsigned long last_cumul_size; 316 struct edac_mc_layer layers[2];
317 unsigned long last_cumul_size, nr_pages;
310 int interleaved, nr_channels; 318 int interleaved, nr_channels;
311 unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS]; 319 unsigned char dra[I3000_RANKS / 2], drb[I3000_RANKS];
312 unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2]; 320 unsigned char *c0dra = dra, *c1dra = &dra[I3000_RANKS_PER_CHANNEL / 2];
@@ -347,7 +355,14 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
347 */ 355 */
348 interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb); 356 interleaved = i3000_is_interleaved(c0dra, c1dra, c0drb, c1drb);
349 nr_channels = interleaved ? 2 : 1; 357 nr_channels = interleaved ? 2 : 1;
350 mci = edac_mc_alloc(0, I3000_RANKS / nr_channels, nr_channels, 0); 358
359 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
360 layers[0].size = I3000_RANKS / nr_channels;
361 layers[0].is_virt_csrow = true;
362 layers[1].type = EDAC_MC_LAYER_CHANNEL;
363 layers[1].size = nr_channels;
364 layers[1].is_virt_csrow = false;
365 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
351 if (!mci) 366 if (!mci)
352 return -ENOMEM; 367 return -ENOMEM;
353 368
@@ -386,19 +401,23 @@ static int i3000_probe1(struct pci_dev *pdev, int dev_idx)
386 cumul_size <<= 1; 401 cumul_size <<= 1;
387 debugf3("MC: %s(): (%d) cumul_size 0x%x\n", 402 debugf3("MC: %s(): (%d) cumul_size 0x%x\n",
388 __func__, i, cumul_size); 403 __func__, i, cumul_size);
389 if (cumul_size == last_cumul_size) { 404 if (cumul_size == last_cumul_size)
390 csrow->mtype = MEM_EMPTY;
391 continue; 405 continue;
392 }
393 406
394 csrow->first_page = last_cumul_size; 407 csrow->first_page = last_cumul_size;
395 csrow->last_page = cumul_size - 1; 408 csrow->last_page = cumul_size - 1;
396 csrow->nr_pages = cumul_size - last_cumul_size; 409 nr_pages = cumul_size - last_cumul_size;
397 last_cumul_size = cumul_size; 410 last_cumul_size = cumul_size;
398 csrow->grain = I3000_DEAP_GRAIN; 411
399 csrow->mtype = MEM_DDR2; 412 for (j = 0; j < nr_channels; j++) {
400 csrow->dtype = DEV_UNKNOWN; 413 struct dimm_info *dimm = csrow->channels[j].dimm;
401 csrow->edac_mode = EDAC_UNKNOWN; 414
415 dimm->nr_pages = nr_pages / nr_channels;
416 dimm->grain = I3000_DEAP_GRAIN;
417 dimm->mtype = MEM_DDR2;
418 dimm->dtype = DEV_UNKNOWN;
419 dimm->edac_mode = EDAC_UNKNOWN;
420 }
402 } 421 }
403 422
404 /* 423 /*
diff --git a/drivers/edac/i3200_edac.c b/drivers/edac/i3200_edac.c
index 046808c6357d..bbe43ef71823 100644
--- a/drivers/edac/i3200_edac.c
+++ b/drivers/edac/i3200_edac.c
@@ -23,6 +23,7 @@
23 23
24#define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0 24#define PCI_DEVICE_ID_INTEL_3200_HB 0x29f0
25 25
26#define I3200_DIMMS 4
26#define I3200_RANKS 8 27#define I3200_RANKS 8
27#define I3200_RANKS_PER_CHANNEL 4 28#define I3200_RANKS_PER_CHANNEL 4
28#define I3200_CHANNELS 2 29#define I3200_CHANNELS 2
@@ -217,21 +218,25 @@ static void i3200_process_error_info(struct mem_ctl_info *mci,
217 return; 218 return;
218 219
219 if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) { 220 if ((info->errsts ^ info->errsts2) & I3200_ERRSTS_BITS) {
220 edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); 221 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
222 -1, -1, -1, "UE overwrote CE", "", NULL);
221 info->errsts = info->errsts2; 223 info->errsts = info->errsts2;
222 } 224 }
223 225
224 for (channel = 0; channel < nr_channels; channel++) { 226 for (channel = 0; channel < nr_channels; channel++) {
225 log = info->eccerrlog[channel]; 227 log = info->eccerrlog[channel];
226 if (log & I3200_ECCERRLOG_UE) { 228 if (log & I3200_ECCERRLOG_UE) {
227 edac_mc_handle_ue(mci, 0, 0, 229 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
228 eccerrlog_row(channel, log), 230 0, 0, 0,
229 "i3200 UE"); 231 eccerrlog_row(channel, log),
232 -1, -1,
233 "i3000 UE", "", NULL);
230 } else if (log & I3200_ECCERRLOG_CE) { 234 } else if (log & I3200_ECCERRLOG_CE) {
231 edac_mc_handle_ce(mci, 0, 0, 235 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
232 eccerrlog_syndrome(log), 236 0, 0, eccerrlog_syndrome(log),
233 eccerrlog_row(channel, log), 0, 237 eccerrlog_row(channel, log),
234 "i3200 CE"); 238 -1, -1,
239 "i3000 UE", "", NULL);
235 } 240 }
236 } 241 }
237} 242}
@@ -319,9 +324,9 @@ static unsigned long drb_to_nr_pages(
319static int i3200_probe1(struct pci_dev *pdev, int dev_idx) 324static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
320{ 325{
321 int rc; 326 int rc;
322 int i; 327 int i, j;
323 struct mem_ctl_info *mci = NULL; 328 struct mem_ctl_info *mci = NULL;
324 unsigned long last_page; 329 struct edac_mc_layer layers[2];
325 u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL]; 330 u16 drbs[I3200_CHANNELS][I3200_RANKS_PER_CHANNEL];
326 bool stacked; 331 bool stacked;
327 void __iomem *window; 332 void __iomem *window;
@@ -336,8 +341,14 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
336 i3200_get_drbs(window, drbs); 341 i3200_get_drbs(window, drbs);
337 nr_channels = how_many_channels(pdev); 342 nr_channels = how_many_channels(pdev);
338 343
339 mci = edac_mc_alloc(sizeof(struct i3200_priv), I3200_RANKS, 344 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
340 nr_channels, 0); 345 layers[0].size = I3200_DIMMS;
346 layers[0].is_virt_csrow = true;
347 layers[1].type = EDAC_MC_LAYER_CHANNEL;
348 layers[1].size = nr_channels;
349 layers[1].is_virt_csrow = false;
350 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
351 sizeof(struct i3200_priv));
341 if (!mci) 352 if (!mci)
342 return -ENOMEM; 353 return -ENOMEM;
343 354
@@ -366,7 +377,6 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
366 * cumulative; the last one will contain the total memory 377 * cumulative; the last one will contain the total memory
367 * contained in all ranks. 378 * contained in all ranks.
368 */ 379 */
369 last_page = -1UL;
370 for (i = 0; i < mci->nr_csrows; i++) { 380 for (i = 0; i < mci->nr_csrows; i++) {
371 unsigned long nr_pages; 381 unsigned long nr_pages;
372 struct csrow_info *csrow = &mci->csrows[i]; 382 struct csrow_info *csrow = &mci->csrows[i];
@@ -375,20 +385,18 @@ static int i3200_probe1(struct pci_dev *pdev, int dev_idx)
375 i / I3200_RANKS_PER_CHANNEL, 385 i / I3200_RANKS_PER_CHANNEL,
376 i % I3200_RANKS_PER_CHANNEL); 386 i % I3200_RANKS_PER_CHANNEL);
377 387
378 if (nr_pages == 0) { 388 if (nr_pages == 0)
379 csrow->mtype = MEM_EMPTY;
380 continue; 389 continue;
381 }
382 390
383 csrow->first_page = last_page + 1; 391 for (j = 0; j < nr_channels; j++) {
384 last_page += nr_pages; 392 struct dimm_info *dimm = csrow->channels[j].dimm;
385 csrow->last_page = last_page;
386 csrow->nr_pages = nr_pages;
387 393
388 csrow->grain = nr_pages << PAGE_SHIFT; 394 dimm->nr_pages = nr_pages / nr_channels;
389 csrow->mtype = MEM_DDR2; 395 dimm->grain = nr_pages << PAGE_SHIFT;
390 csrow->dtype = DEV_UNKNOWN; 396 dimm->mtype = MEM_DDR2;
391 csrow->edac_mode = EDAC_UNKNOWN; 397 dimm->dtype = DEV_UNKNOWN;
398 dimm->edac_mode = EDAC_UNKNOWN;
399 }
392 } 400 }
393 401
394 i3200_clear_error_info(mci); 402 i3200_clear_error_info(mci);
diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c
index a2680d8e744b..11ea835f155a 100644
--- a/drivers/edac/i5000_edac.c
+++ b/drivers/edac/i5000_edac.c
@@ -270,7 +270,8 @@
270#define MTR3 0x8C 270#define MTR3 0x8C
271 271
272#define NUM_MTRS 4 272#define NUM_MTRS 4
273#define CHANNELS_PER_BRANCH (2) 273#define CHANNELS_PER_BRANCH 2
274#define MAX_BRANCHES 2
274 275
275/* Defines to extract the vaious fields from the 276/* Defines to extract the vaious fields from the
276 * MTRx - Memory Technology Registers 277 * MTRx - Memory Technology Registers
@@ -473,7 +474,6 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
473 char msg[EDAC_MC_LABEL_LEN + 1 + 160]; 474 char msg[EDAC_MC_LABEL_LEN + 1 + 160];
474 char *specific = NULL; 475 char *specific = NULL;
475 u32 allErrors; 476 u32 allErrors;
476 int branch;
477 int channel; 477 int channel;
478 int bank; 478 int bank;
479 int rank; 479 int rank;
@@ -485,8 +485,7 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
485 if (!allErrors) 485 if (!allErrors)
486 return; /* if no error, return now */ 486 return; /* if no error, return now */
487 487
488 branch = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd); 488 channel = EXTRACT_FBDCHAN_INDX(info->ferr_fat_fbd);
489 channel = branch;
490 489
491 /* Use the NON-Recoverable macros to extract data */ 490 /* Use the NON-Recoverable macros to extract data */
492 bank = NREC_BANK(info->nrecmema); 491 bank = NREC_BANK(info->nrecmema);
@@ -495,9 +494,9 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
495 ras = NREC_RAS(info->nrecmemb); 494 ras = NREC_RAS(info->nrecmemb);
496 cas = NREC_CAS(info->nrecmemb); 495 cas = NREC_CAS(info->nrecmemb);
497 496
498 debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d " 497 debugf0("\t\tCSROW= %d Channel= %d "
499 "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", 498 "(DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
500 rank, channel, channel + 1, branch >> 1, bank, 499 rank, channel, bank,
501 rdwr ? "Write" : "Read", ras, cas); 500 rdwr ? "Write" : "Read", ras, cas);
502 501
503 /* Only 1 bit will be on */ 502 /* Only 1 bit will be on */
@@ -533,13 +532,14 @@ static void i5000_process_fatal_error_info(struct mem_ctl_info *mci,
533 532
534 /* Form out message */ 533 /* Form out message */
535 snprintf(msg, sizeof(msg), 534 snprintf(msg, sizeof(msg),
536 "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d CAS=%d " 535 "Bank=%d RAS=%d CAS=%d FATAL Err=0x%x (%s)",
537 "FATAL Err=0x%x (%s))", 536 bank, ras, cas, allErrors, specific);
538 branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
539 allErrors, specific);
540 537
541 /* Call the helper to output message */ 538 /* Call the helper to output message */
542 edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg); 539 edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0,
540 channel >> 1, channel & 1, rank,
541 rdwr ? "Write error" : "Read error",
542 msg, NULL);
543} 543}
544 544
545/* 545/*
@@ -633,13 +633,14 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
633 633
634 /* Form out message */ 634 /* Form out message */
635 snprintf(msg, sizeof(msg), 635 snprintf(msg, sizeof(msg),
636 "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d " 636 "Rank=%d Bank=%d RAS=%d CAS=%d, UE Err=0x%x (%s)",
637 "CAS=%d, UE Err=0x%x (%s))", 637 rank, bank, ras, cas, ue_errors, specific);
638 branch >> 1, bank, rdwr ? "Write" : "Read", ras, cas,
639 ue_errors, specific);
640 638
641 /* Call the helper to output message */ 639 /* Call the helper to output message */
642 edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg); 640 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
641 channel >> 1, -1, rank,
642 rdwr ? "Write error" : "Read error",
643 msg, NULL);
643 } 644 }
644 645
645 /* Check correctable errors */ 646 /* Check correctable errors */
@@ -685,13 +686,16 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
685 686
686 /* Form out message */ 687 /* Form out message */
687 snprintf(msg, sizeof(msg), 688 snprintf(msg, sizeof(msg),
688 "(Branch=%d DRAM-Bank=%d RDWR=%s RAS=%d " 689 "Rank=%d Bank=%d RDWR=%s RAS=%d "
689 "CAS=%d, CE Err=0x%x (%s))", branch >> 1, bank, 690 "CAS=%d, CE Err=0x%x (%s))", branch >> 1, bank,
690 rdwr ? "Write" : "Read", ras, cas, ce_errors, 691 rdwr ? "Write" : "Read", ras, cas, ce_errors,
691 specific); 692 specific);
692 693
693 /* Call the helper to output message */ 694 /* Call the helper to output message */
694 edac_mc_handle_fbd_ce(mci, rank, channel, msg); 695 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
696 channel >> 1, channel % 2, rank,
697 rdwr ? "Write error" : "Read error",
698 msg, NULL);
695 } 699 }
696 700
697 if (!misc_messages) 701 if (!misc_messages)
@@ -731,11 +735,12 @@ static void i5000_process_nonfatal_error_info(struct mem_ctl_info *mci,
731 735
732 /* Form out message */ 736 /* Form out message */
733 snprintf(msg, sizeof(msg), 737 snprintf(msg, sizeof(msg),
734 "(Branch=%d Err=%#x (%s))", branch >> 1, 738 "Err=%#x (%s)", misc_errors, specific);
735 misc_errors, specific);
736 739
737 /* Call the helper to output message */ 740 /* Call the helper to output message */
738 edac_mc_handle_fbd_ce(mci, 0, 0, msg); 741 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
742 branch >> 1, -1, -1,
743 "Misc error", msg, NULL);
739 } 744 }
740} 745}
741 746
@@ -956,14 +961,14 @@ static int determine_amb_present_reg(struct i5000_pvt *pvt, int channel)
956 * 961 *
957 * return the proper MTR register as determine by the csrow and channel desired 962 * return the proper MTR register as determine by the csrow and channel desired
958 */ 963 */
959static int determine_mtr(struct i5000_pvt *pvt, int csrow, int channel) 964static int determine_mtr(struct i5000_pvt *pvt, int slot, int channel)
960{ 965{
961 int mtr; 966 int mtr;
962 967
963 if (channel < CHANNELS_PER_BRANCH) 968 if (channel < CHANNELS_PER_BRANCH)
964 mtr = pvt->b0_mtr[csrow >> 1]; 969 mtr = pvt->b0_mtr[slot];
965 else 970 else
966 mtr = pvt->b1_mtr[csrow >> 1]; 971 mtr = pvt->b1_mtr[slot];
967 972
968 return mtr; 973 return mtr;
969} 974}
@@ -988,37 +993,34 @@ static void decode_mtr(int slot_row, u16 mtr)
988 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); 993 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
989} 994}
990 995
991static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel, 996static void handle_channel(struct i5000_pvt *pvt, int slot, int channel,
992 struct i5000_dimm_info *dinfo) 997 struct i5000_dimm_info *dinfo)
993{ 998{
994 int mtr; 999 int mtr;
995 int amb_present_reg; 1000 int amb_present_reg;
996 int addrBits; 1001 int addrBits;
997 1002
998 mtr = determine_mtr(pvt, csrow, channel); 1003 mtr = determine_mtr(pvt, slot, channel);
999 if (MTR_DIMMS_PRESENT(mtr)) { 1004 if (MTR_DIMMS_PRESENT(mtr)) {
1000 amb_present_reg = determine_amb_present_reg(pvt, channel); 1005 amb_present_reg = determine_amb_present_reg(pvt, channel);
1001 1006
1002 /* Determine if there is a DIMM present in this DIMM slot */ 1007 /* Determine if there is a DIMM present in this DIMM slot */
1003 if (amb_present_reg & (1 << (csrow >> 1))) { 1008 if (amb_present_reg) {
1004 dinfo->dual_rank = MTR_DIMM_RANK(mtr); 1009 dinfo->dual_rank = MTR_DIMM_RANK(mtr);
1005 1010
1006 if (!((dinfo->dual_rank == 0) && 1011 /* Start with the number of bits for a Bank
1007 ((csrow & 0x1) == 0x1))) { 1012 * on the DRAM */
1008 /* Start with the number of bits for a Bank 1013 addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
1009 * on the DRAM */ 1014 /* Add the number of ROW bits */
1010 addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr); 1015 addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr);
1011 /* Add thenumber of ROW bits */ 1016 /* add the number of COLUMN bits */
1012 addrBits += MTR_DIMM_ROWS_ADDR_BITS(mtr); 1017 addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr);
1013 /* add the number of COLUMN bits */ 1018
1014 addrBits += MTR_DIMM_COLS_ADDR_BITS(mtr); 1019 addrBits += 6; /* add 64 bits per DIMM */
1015 1020 addrBits -= 20; /* divide by 2^^20 */
1016 addrBits += 6; /* add 64 bits per DIMM */ 1021 addrBits -= 3; /* 8 bits per bytes */
1017 addrBits -= 20; /* divide by 2^^20 */ 1022
1018 addrBits -= 3; /* 8 bits per bytes */ 1023 dinfo->megabytes = 1 << addrBits;
1019
1020 dinfo->megabytes = 1 << addrBits;
1021 }
1022 } 1024 }
1023 } 1025 }
1024} 1026}
@@ -1032,10 +1034,9 @@ static void handle_channel(struct i5000_pvt *pvt, int csrow, int channel,
1032static void calculate_dimm_size(struct i5000_pvt *pvt) 1034static void calculate_dimm_size(struct i5000_pvt *pvt)
1033{ 1035{
1034 struct i5000_dimm_info *dinfo; 1036 struct i5000_dimm_info *dinfo;
1035 int csrow, max_csrows; 1037 int slot, channel, branch;
1036 char *p, *mem_buffer; 1038 char *p, *mem_buffer;
1037 int space, n; 1039 int space, n;
1038 int channel;
1039 1040
1040 /* ================= Generate some debug output ================= */ 1041 /* ================= Generate some debug output ================= */
1041 space = PAGE_SIZE; 1042 space = PAGE_SIZE;
@@ -1046,22 +1047,17 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
1046 return; 1047 return;
1047 } 1048 }
1048 1049
1049 n = snprintf(p, space, "\n"); 1050 /* Scan all the actual slots
1050 p += n;
1051 space -= n;
1052
1053 /* Scan all the actual CSROWS (which is # of DIMMS * 2)
1054 * and calculate the information for each DIMM 1051 * and calculate the information for each DIMM
1055 * Start with the highest csrow first, to display it first 1052 * Start with the highest slot first, to display it first
1056 * and work toward the 0th csrow 1053 * and work toward the 0th slot
1057 */ 1054 */
1058 max_csrows = pvt->maxdimmperch * 2; 1055 for (slot = pvt->maxdimmperch - 1; slot >= 0; slot--) {
1059 for (csrow = max_csrows - 1; csrow >= 0; csrow--) {
1060 1056
1061 /* on an odd csrow, first output a 'boundary' marker, 1057 /* on an odd slot, first output a 'boundary' marker,
1062 * then reset the message buffer */ 1058 * then reset the message buffer */
1063 if (csrow & 0x1) { 1059 if (slot & 0x1) {
1064 n = snprintf(p, space, "---------------------------" 1060 n = snprintf(p, space, "--------------------------"
1065 "--------------------------------"); 1061 "--------------------------------");
1066 p += n; 1062 p += n;
1067 space -= n; 1063 space -= n;
@@ -1069,30 +1065,39 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
1069 p = mem_buffer; 1065 p = mem_buffer;
1070 space = PAGE_SIZE; 1066 space = PAGE_SIZE;
1071 } 1067 }
1072 n = snprintf(p, space, "csrow %2d ", csrow); 1068 n = snprintf(p, space, "slot %2d ", slot);
1073 p += n; 1069 p += n;
1074 space -= n; 1070 space -= n;
1075 1071
1076 for (channel = 0; channel < pvt->maxch; channel++) { 1072 for (channel = 0; channel < pvt->maxch; channel++) {
1077 dinfo = &pvt->dimm_info[csrow][channel]; 1073 dinfo = &pvt->dimm_info[slot][channel];
1078 handle_channel(pvt, csrow, channel, dinfo); 1074 handle_channel(pvt, slot, channel, dinfo);
1079 n = snprintf(p, space, "%4d MB | ", dinfo->megabytes); 1075 if (dinfo->megabytes)
1076 n = snprintf(p, space, "%4d MB %dR| ",
1077 dinfo->megabytes, dinfo->dual_rank + 1);
1078 else
1079 n = snprintf(p, space, "%4d MB | ", 0);
1080 p += n; 1080 p += n;
1081 space -= n; 1081 space -= n;
1082 } 1082 }
1083 n = snprintf(p, space, "\n");
1084 p += n; 1083 p += n;
1085 space -= n; 1084 space -= n;
1085 debugf2("%s\n", mem_buffer);
1086 p = mem_buffer;
1087 space = PAGE_SIZE;
1086 } 1088 }
1087 1089
1088 /* Output the last bottom 'boundary' marker */ 1090 /* Output the last bottom 'boundary' marker */
1089 n = snprintf(p, space, "---------------------------" 1091 n = snprintf(p, space, "--------------------------"
1090 "--------------------------------\n"); 1092 "--------------------------------");
1091 p += n; 1093 p += n;
1092 space -= n; 1094 space -= n;
1095 debugf2("%s\n", mem_buffer);
1096 p = mem_buffer;
1097 space = PAGE_SIZE;
1093 1098
1094 /* now output the 'channel' labels */ 1099 /* now output the 'channel' labels */
1095 n = snprintf(p, space, " "); 1100 n = snprintf(p, space, " ");
1096 p += n; 1101 p += n;
1097 space -= n; 1102 space -= n;
1098 for (channel = 0; channel < pvt->maxch; channel++) { 1103 for (channel = 0; channel < pvt->maxch; channel++) {
@@ -1100,9 +1105,17 @@ static void calculate_dimm_size(struct i5000_pvt *pvt)
1100 p += n; 1105 p += n;
1101 space -= n; 1106 space -= n;
1102 } 1107 }
1103 n = snprintf(p, space, "\n"); 1108 debugf2("%s\n", mem_buffer);
1109 p = mem_buffer;
1110 space = PAGE_SIZE;
1111
1112 n = snprintf(p, space, " ");
1104 p += n; 1113 p += n;
1105 space -= n; 1114 for (branch = 0; branch < MAX_BRANCHES; branch++) {
1115 n = snprintf(p, space, " branch %d | ", branch);
1116 p += n;
1117 space -= n;
1118 }
1106 1119
1107 /* output the last message and free buffer */ 1120 /* output the last message and free buffer */
1108 debugf2("%s\n", mem_buffer); 1121 debugf2("%s\n", mem_buffer);
@@ -1235,13 +1248,13 @@ static void i5000_get_mc_regs(struct mem_ctl_info *mci)
1235static int i5000_init_csrows(struct mem_ctl_info *mci) 1248static int i5000_init_csrows(struct mem_ctl_info *mci)
1236{ 1249{
1237 struct i5000_pvt *pvt; 1250 struct i5000_pvt *pvt;
1238 struct csrow_info *p_csrow; 1251 struct dimm_info *dimm;
1239 int empty, channel_count; 1252 int empty, channel_count;
1240 int max_csrows; 1253 int max_csrows;
1241 int mtr, mtr1; 1254 int mtr;
1242 int csrow_megs; 1255 int csrow_megs;
1243 int channel; 1256 int channel;
1244 int csrow; 1257 int slot;
1245 1258
1246 pvt = mci->pvt_info; 1259 pvt = mci->pvt_info;
1247 1260
@@ -1250,43 +1263,40 @@ static int i5000_init_csrows(struct mem_ctl_info *mci)
1250 1263
1251 empty = 1; /* Assume NO memory */ 1264 empty = 1; /* Assume NO memory */
1252 1265
1253 for (csrow = 0; csrow < max_csrows; csrow++) { 1266 /*
1254 p_csrow = &mci->csrows[csrow]; 1267 * FIXME: The memory layout used to map slot/channel into the
1255 1268 * real memory architecture is weird: branch+slot are "csrows"
1256 p_csrow->csrow_idx = csrow; 1269 * and channel is channel. That required an extra array (dimm_info)
1257 1270 * to map the dimms. A good cleanup would be to remove this array,
1258 /* use branch 0 for the basis */ 1271 * and do a loop here with branch, channel, slot
1259 mtr = pvt->b0_mtr[csrow >> 1]; 1272 */
1260 mtr1 = pvt->b1_mtr[csrow >> 1]; 1273 for (slot = 0; slot < max_csrows; slot++) {
1261 1274 for (channel = 0; channel < pvt->maxch; channel++) {
1262 /* if no DIMMS on this row, continue */
1263 if (!MTR_DIMMS_PRESENT(mtr) && !MTR_DIMMS_PRESENT(mtr1))
1264 continue;
1265 1275
1266 /* FAKE OUT VALUES, FIXME */ 1276 mtr = determine_mtr(pvt, slot, channel);
1267 p_csrow->first_page = 0 + csrow * 20;
1268 p_csrow->last_page = 9 + csrow * 20;
1269 p_csrow->page_mask = 0xFFF;
1270 1277
1271 p_csrow->grain = 8; 1278 if (!MTR_DIMMS_PRESENT(mtr))
1279 continue;
1272 1280
1273 csrow_megs = 0; 1281 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
1274 for (channel = 0; channel < pvt->maxch; channel++) { 1282 channel / MAX_BRANCHES,
1275 csrow_megs += pvt->dimm_info[csrow][channel].megabytes; 1283 channel % MAX_BRANCHES, slot);
1276 }
1277 1284
1278 p_csrow->nr_pages = csrow_megs << 8; 1285 csrow_megs = pvt->dimm_info[slot][channel].megabytes;
1286 dimm->grain = 8;
1279 1287
1280 /* Assume DDR2 for now */ 1288 /* Assume DDR2 for now */
1281 p_csrow->mtype = MEM_FB_DDR2; 1289 dimm->mtype = MEM_FB_DDR2;
1282 1290
1283 /* ask what device type on this row */ 1291 /* ask what device type on this row */
1284 if (MTR_DRAM_WIDTH(mtr)) 1292 if (MTR_DRAM_WIDTH(mtr))
1285 p_csrow->dtype = DEV_X8; 1293 dimm->dtype = DEV_X8;
1286 else 1294 else
1287 p_csrow->dtype = DEV_X4; 1295 dimm->dtype = DEV_X4;
1288 1296
1289 p_csrow->edac_mode = EDAC_S8ECD8ED; 1297 dimm->edac_mode = EDAC_S8ECD8ED;
1298 dimm->nr_pages = csrow_megs << 8;
1299 }
1290 1300
1291 empty = 0; 1301 empty = 0;
1292 } 1302 }
@@ -1317,7 +1327,7 @@ static void i5000_enable_error_reporting(struct mem_ctl_info *mci)
1317} 1327}
1318 1328
1319/* 1329/*
1320 * i5000_get_dimm_and_channel_counts(pdev, &num_csrows, &num_channels) 1330 * i5000_get_dimm_and_channel_counts(pdev, &nr_csrows, &num_channels)
1321 * 1331 *
1322 * ask the device how many channels are present and how many CSROWS 1332 * ask the device how many channels are present and how many CSROWS
1323 * as well 1333 * as well
@@ -1332,7 +1342,7 @@ static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
1332 * supported on this memory controller 1342 * supported on this memory controller
1333 */ 1343 */
1334 pci_read_config_byte(pdev, MAXDIMMPERCH, &value); 1344 pci_read_config_byte(pdev, MAXDIMMPERCH, &value);
1335 *num_dimms_per_channel = (int)value *2; 1345 *num_dimms_per_channel = (int)value;
1336 1346
1337 pci_read_config_byte(pdev, MAXCH, &value); 1347 pci_read_config_byte(pdev, MAXCH, &value);
1338 *num_channels = (int)value; 1348 *num_channels = (int)value;
@@ -1348,10 +1358,10 @@ static void i5000_get_dimm_and_channel_counts(struct pci_dev *pdev,
1348static int i5000_probe1(struct pci_dev *pdev, int dev_idx) 1358static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1349{ 1359{
1350 struct mem_ctl_info *mci; 1360 struct mem_ctl_info *mci;
1361 struct edac_mc_layer layers[3];
1351 struct i5000_pvt *pvt; 1362 struct i5000_pvt *pvt;
1352 int num_channels; 1363 int num_channels;
1353 int num_dimms_per_channel; 1364 int num_dimms_per_channel;
1354 int num_csrows;
1355 1365
1356 debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n", 1366 debugf0("MC: %s: %s(), pdev bus %u dev=0x%x fn=0x%x\n",
1357 __FILE__, __func__, 1367 __FILE__, __func__,
@@ -1377,14 +1387,22 @@ static int i5000_probe1(struct pci_dev *pdev, int dev_idx)
1377 */ 1387 */
1378 i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel, 1388 i5000_get_dimm_and_channel_counts(pdev, &num_dimms_per_channel,
1379 &num_channels); 1389 &num_channels);
1380 num_csrows = num_dimms_per_channel * 2;
1381 1390
1382 debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n", 1391 debugf0("MC: %s(): Number of Branches=2 Channels= %d DIMMS= %d\n",
1383 __func__, num_channels, num_dimms_per_channel, num_csrows); 1392 __func__, num_channels, num_dimms_per_channel);
1384 1393
1385 /* allocate a new MC control structure */ 1394 /* allocate a new MC control structure */
1386 mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0);
1387 1395
1396 layers[0].type = EDAC_MC_LAYER_BRANCH;
1397 layers[0].size = MAX_BRANCHES;
1398 layers[0].is_virt_csrow = false;
1399 layers[1].type = EDAC_MC_LAYER_CHANNEL;
1400 layers[1].size = num_channels / MAX_BRANCHES;
1401 layers[1].is_virt_csrow = false;
1402 layers[2].type = EDAC_MC_LAYER_SLOT;
1403 layers[2].size = num_dimms_per_channel;
1404 layers[2].is_virt_csrow = true;
1405 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1388 if (mci == NULL) 1406 if (mci == NULL)
1389 return -ENOMEM; 1407 return -ENOMEM;
1390 1408
diff --git a/drivers/edac/i5100_edac.c b/drivers/edac/i5100_edac.c
index d500749464ea..e9e7c2a29dc3 100644
--- a/drivers/edac/i5100_edac.c
+++ b/drivers/edac/i5100_edac.c
@@ -14,6 +14,11 @@
14 * rows for each respective channel are laid out one after another, 14 * rows for each respective channel are laid out one after another,
15 * the first half belonging to channel 0, the second half belonging 15 * the first half belonging to channel 0, the second half belonging
16 * to channel 1. 16 * to channel 1.
17 *
18 * This driver is for DDR2 DIMMs, and it uses chip select to select among the
19 * several ranks. However, instead of showing memories as ranks, it outputs
20 * them as DIMM's. An internal table creates the association between ranks
21 * and DIMM's.
17 */ 22 */
18#include <linux/module.h> 23#include <linux/module.h>
19#include <linux/init.h> 24#include <linux/init.h>
@@ -410,14 +415,6 @@ static int i5100_csrow_to_chan(const struct mem_ctl_info *mci, int csrow)
410 return csrow / priv->ranksperchan; 415 return csrow / priv->ranksperchan;
411} 416}
412 417
413static unsigned i5100_rank_to_csrow(const struct mem_ctl_info *mci,
414 int chan, int rank)
415{
416 const struct i5100_priv *priv = mci->pvt_info;
417
418 return chan * priv->ranksperchan + rank;
419}
420
421static void i5100_handle_ce(struct mem_ctl_info *mci, 418static void i5100_handle_ce(struct mem_ctl_info *mci,
422 int chan, 419 int chan,
423 unsigned bank, 420 unsigned bank,
@@ -427,17 +424,17 @@ static void i5100_handle_ce(struct mem_ctl_info *mci,
427 unsigned ras, 424 unsigned ras,
428 const char *msg) 425 const char *msg)
429{ 426{
430 const int csrow = i5100_rank_to_csrow(mci, chan, rank); 427 char detail[80];
431 428
432 printk(KERN_ERR 429 /* Form out message */
433 "CE chan %d, bank %u, rank %u, syndrome 0x%lx, " 430 snprintf(detail, sizeof(detail),
434 "cas %u, ras %u, csrow %u, label \"%s\": %s\n", 431 "bank %u, cas %u, ras %u\n",
435 chan, bank, rank, syndrome, cas, ras, 432 bank, cas, ras);
436 csrow, mci->csrows[csrow].channels[0].label, msg);
437 433
438 mci->ce_count++; 434 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
439 mci->csrows[csrow].ce_count++; 435 0, 0, syndrome,
440 mci->csrows[csrow].channels[0].ce_count++; 436 chan, rank, -1,
437 msg, detail, NULL);
441} 438}
442 439
443static void i5100_handle_ue(struct mem_ctl_info *mci, 440static void i5100_handle_ue(struct mem_ctl_info *mci,
@@ -449,16 +446,17 @@ static void i5100_handle_ue(struct mem_ctl_info *mci,
449 unsigned ras, 446 unsigned ras,
450 const char *msg) 447 const char *msg)
451{ 448{
452 const int csrow = i5100_rank_to_csrow(mci, chan, rank); 449 char detail[80];
453 450
454 printk(KERN_ERR 451 /* Form out message */
455 "UE chan %d, bank %u, rank %u, syndrome 0x%lx, " 452 snprintf(detail, sizeof(detail),
456 "cas %u, ras %u, csrow %u, label \"%s\": %s\n", 453 "bank %u, cas %u, ras %u\n",
457 chan, bank, rank, syndrome, cas, ras, 454 bank, cas, ras);
458 csrow, mci->csrows[csrow].channels[0].label, msg);
459 455
460 mci->ue_count++; 456 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
461 mci->csrows[csrow].ue_count++; 457 0, 0, syndrome,
458 chan, rank, -1,
459 msg, detail, NULL);
462} 460}
463 461
464static void i5100_read_log(struct mem_ctl_info *mci, int chan, 462static void i5100_read_log(struct mem_ctl_info *mci, int chan,
@@ -835,10 +833,10 @@ static void __devinit i5100_init_interleaving(struct pci_dev *pdev,
835static void __devinit i5100_init_csrows(struct mem_ctl_info *mci) 833static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
836{ 834{
837 int i; 835 int i;
838 unsigned long total_pages = 0UL;
839 struct i5100_priv *priv = mci->pvt_info; 836 struct i5100_priv *priv = mci->pvt_info;
840 837
841 for (i = 0; i < mci->nr_csrows; i++) { 838 for (i = 0; i < mci->tot_dimms; i++) {
839 struct dimm_info *dimm;
842 const unsigned long npages = i5100_npages(mci, i); 840 const unsigned long npages = i5100_npages(mci, i);
843 const unsigned chan = i5100_csrow_to_chan(mci, i); 841 const unsigned chan = i5100_csrow_to_chan(mci, i);
844 const unsigned rank = i5100_csrow_to_rank(mci, i); 842 const unsigned rank = i5100_csrow_to_rank(mci, i);
@@ -846,33 +844,23 @@ static void __devinit i5100_init_csrows(struct mem_ctl_info *mci)
846 if (!npages) 844 if (!npages)
847 continue; 845 continue;
848 846
849 /* 847 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
850 * FIXME: these two are totally bogus -- I don't see how to 848 chan, rank, 0);
851 * map them correctly to this structure... 849
852 */ 850 dimm->nr_pages = npages;
853 mci->csrows[i].first_page = total_pages; 851 if (npages) {
854 mci->csrows[i].last_page = total_pages + npages - 1; 852 dimm->grain = 32;
855 mci->csrows[i].page_mask = 0UL; 853 dimm->dtype = (priv->mtr[chan][rank].width == 4) ?
856 854 DEV_X4 : DEV_X8;
857 mci->csrows[i].nr_pages = npages; 855 dimm->mtype = MEM_RDDR2;
858 mci->csrows[i].grain = 32; 856 dimm->edac_mode = EDAC_SECDED;
859 mci->csrows[i].csrow_idx = i; 857 snprintf(dimm->label, sizeof(dimm->label),
860 mci->csrows[i].dtype = 858 "DIMM%u",
861 (priv->mtr[chan][rank].width == 4) ? DEV_X4 : DEV_X8; 859 i5100_rank_to_slot(mci, chan, rank));
862 mci->csrows[i].ue_count = 0; 860 }
863 mci->csrows[i].ce_count = 0; 861
864 mci->csrows[i].mtype = MEM_RDDR2; 862 debugf2("dimm channel %d, rank %d, size %ld\n",
865 mci->csrows[i].edac_mode = EDAC_SECDED; 863 chan, rank, (long)PAGES_TO_MiB(npages));
866 mci->csrows[i].mci = mci;
867 mci->csrows[i].nr_channels = 1;
868 mci->csrows[i].channels[0].chan_idx = 0;
869 mci->csrows[i].channels[0].ce_count = 0;
870 mci->csrows[i].channels[0].csrow = mci->csrows + i;
871 snprintf(mci->csrows[i].channels[0].label,
872 sizeof(mci->csrows[i].channels[0].label),
873 "DIMM%u", i5100_rank_to_slot(mci, chan, rank));
874
875 total_pages += npages;
876 } 864 }
877} 865}
878 866
@@ -881,6 +869,7 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
881{ 869{
882 int rc; 870 int rc;
883 struct mem_ctl_info *mci; 871 struct mem_ctl_info *mci;
872 struct edac_mc_layer layers[2];
884 struct i5100_priv *priv; 873 struct i5100_priv *priv;
885 struct pci_dev *ch0mm, *ch1mm; 874 struct pci_dev *ch0mm, *ch1mm;
886 int ret = 0; 875 int ret = 0;
@@ -941,7 +930,14 @@ static int __devinit i5100_init_one(struct pci_dev *pdev,
941 goto bail_ch1; 930 goto bail_ch1;
942 } 931 }
943 932
944 mci = edac_mc_alloc(sizeof(*priv), ranksperch * 2, 1, 0); 933 layers[0].type = EDAC_MC_LAYER_CHANNEL;
934 layers[0].size = 2;
935 layers[0].is_virt_csrow = false;
936 layers[1].type = EDAC_MC_LAYER_SLOT;
937 layers[1].size = ranksperch;
938 layers[1].is_virt_csrow = true;
939 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers,
940 sizeof(*priv));
945 if (!mci) { 941 if (!mci) {
946 ret = -ENOMEM; 942 ret = -ENOMEM;
947 goto bail_disable_ch1; 943 goto bail_disable_ch1;
diff --git a/drivers/edac/i5400_edac.c b/drivers/edac/i5400_edac.c
index 1869a1018fb5..6640c29e1885 100644
--- a/drivers/edac/i5400_edac.c
+++ b/drivers/edac/i5400_edac.c
@@ -18,6 +18,10 @@
18 * Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet 18 * Intel 5400 Chipset Memory Controller Hub (MCH) - Datasheet
19 * http://developer.intel.com/design/chipsets/datashts/313070.htm 19 * http://developer.intel.com/design/chipsets/datashts/313070.htm
20 * 20 *
21 * This Memory Controller manages DDR2 FB-DIMMs. It has 2 branches, each with
22 * 2 channels operating in lockstep no-mirror mode. Each channel can have up to
23 * 4 dimm's, each with up to 8GB.
24 *
21 */ 25 */
22 26
23#include <linux/module.h> 27#include <linux/module.h>
@@ -44,12 +48,10 @@
44 edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg) 48 edac_mc_chipset_printk(mci, level, "i5400", fmt, ##arg)
45 49
46/* Limits for i5400 */ 50/* Limits for i5400 */
47#define NUM_MTRS_PER_BRANCH 4 51#define MAX_BRANCHES 2
48#define CHANNELS_PER_BRANCH 2 52#define CHANNELS_PER_BRANCH 2
49#define MAX_DIMMS_PER_CHANNEL NUM_MTRS_PER_BRANCH 53#define DIMMS_PER_CHANNEL 4
50#define MAX_CHANNELS 4 54#define MAX_CHANNELS (MAX_BRANCHES * CHANNELS_PER_BRANCH)
51/* max possible csrows per channel */
52#define MAX_CSROWS (MAX_DIMMS_PER_CHANNEL)
53 55
54/* Device 16, 56/* Device 16,
55 * Function 0: System Address 57 * Function 0: System Address
@@ -347,16 +349,16 @@ struct i5400_pvt {
347 349
348 u16 mir0, mir1; 350 u16 mir0, mir1;
349 351
350 u16 b0_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technlogy Reg */ 352 u16 b0_mtr[DIMMS_PER_CHANNEL]; /* Memory Technlogy Reg */
351 u16 b0_ambpresent0; /* Branch 0, Channel 0 */ 353 u16 b0_ambpresent0; /* Branch 0, Channel 0 */
352 u16 b0_ambpresent1; /* Brnach 0, Channel 1 */ 354 u16 b0_ambpresent1; /* Brnach 0, Channel 1 */
353 355
354 u16 b1_mtr[NUM_MTRS_PER_BRANCH]; /* Memory Technlogy Reg */ 356 u16 b1_mtr[DIMMS_PER_CHANNEL]; /* Memory Technlogy Reg */
355 u16 b1_ambpresent0; /* Branch 1, Channel 8 */ 357 u16 b1_ambpresent0; /* Branch 1, Channel 8 */
356 u16 b1_ambpresent1; /* Branch 1, Channel 1 */ 358 u16 b1_ambpresent1; /* Branch 1, Channel 1 */
357 359
358 /* DIMM information matrix, allocating architecture maximums */ 360 /* DIMM information matrix, allocating architecture maximums */
359 struct i5400_dimm_info dimm_info[MAX_CSROWS][MAX_CHANNELS]; 361 struct i5400_dimm_info dimm_info[DIMMS_PER_CHANNEL][MAX_CHANNELS];
360 362
361 /* Actual values for this controller */ 363 /* Actual values for this controller */
362 int maxch; /* Max channels */ 364 int maxch; /* Max channels */
@@ -532,13 +534,15 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
532 int ras, cas; 534 int ras, cas;
533 int errnum; 535 int errnum;
534 char *type = NULL; 536 char *type = NULL;
537 enum hw_event_mc_err_type tp_event = HW_EVENT_ERR_UNCORRECTED;
535 538
536 if (!allErrors) 539 if (!allErrors)
537 return; /* if no error, return now */ 540 return; /* if no error, return now */
538 541
539 if (allErrors & ERROR_FAT_MASK) 542 if (allErrors & ERROR_FAT_MASK) {
540 type = "FATAL"; 543 type = "FATAL";
541 else if (allErrors & FERR_NF_UNCORRECTABLE) 544 tp_event = HW_EVENT_ERR_FATAL;
545 } else if (allErrors & FERR_NF_UNCORRECTABLE)
542 type = "NON-FATAL uncorrected"; 546 type = "NON-FATAL uncorrected";
543 else 547 else
544 type = "NON-FATAL recoverable"; 548 type = "NON-FATAL recoverable";
@@ -556,7 +560,7 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
556 ras = nrec_ras(info); 560 ras = nrec_ras(info);
557 cas = nrec_cas(info); 561 cas = nrec_cas(info);
558 562
559 debugf0("\t\tCSROW= %d Channels= %d,%d (Branch= %d " 563 debugf0("\t\tDIMM= %d Channels= %d,%d (Branch= %d "
560 "DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n", 564 "DRAM Bank= %d Buffer ID = %d rdwr= %s ras= %d cas= %d)\n",
561 rank, channel, channel + 1, branch >> 1, bank, 565 rank, channel, channel + 1, branch >> 1, bank,
562 buf_id, rdwr_str(rdwr), ras, cas); 566 buf_id, rdwr_str(rdwr), ras, cas);
@@ -566,13 +570,13 @@ static void i5400_proccess_non_recoverable_info(struct mem_ctl_info *mci,
566 570
567 /* Form out message */ 571 /* Form out message */
568 snprintf(msg, sizeof(msg), 572 snprintf(msg, sizeof(msg),
569 "%s (Branch=%d DRAM-Bank=%d Buffer ID = %d RDWR=%s " 573 "Bank=%d Buffer ID = %d RAS=%d CAS=%d Err=0x%lx (%s)",
570 "RAS=%d CAS=%d %s Err=0x%lx (%s))", 574 bank, buf_id, ras, cas, allErrors, error_name[errnum]);
571 type, branch >> 1, bank, buf_id, rdwr_str(rdwr), ras, cas,
572 type, allErrors, error_name[errnum]);
573 575
574 /* Call the helper to output message */ 576 edac_mc_handle_error(tp_event, mci, 0, 0, 0,
575 edac_mc_handle_fbd_ue(mci, rank, channel, channel + 1, msg); 577 branch >> 1, -1, rank,
578 rdwr ? "Write error" : "Read error",
579 msg, NULL);
576} 580}
577 581
578/* 582/*
@@ -630,7 +634,7 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
630 /* Only 1 bit will be on */ 634 /* Only 1 bit will be on */
631 errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name)); 635 errnum = find_first_bit(&allErrors, ARRAY_SIZE(error_name));
632 636
633 debugf0("\t\tCSROW= %d Channel= %d (Branch %d " 637 debugf0("\t\tDIMM= %d Channel= %d (Branch %d "
634 "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n", 638 "DRAM Bank= %d rdwr= %s ras= %d cas= %d)\n",
635 rank, channel, branch >> 1, bank, 639 rank, channel, branch >> 1, bank,
636 rdwr_str(rdwr), ras, cas); 640 rdwr_str(rdwr), ras, cas);
@@ -642,8 +646,10 @@ static void i5400_process_nonfatal_error_info(struct mem_ctl_info *mci,
642 branch >> 1, bank, rdwr_str(rdwr), ras, cas, 646 branch >> 1, bank, rdwr_str(rdwr), ras, cas,
643 allErrors, error_name[errnum]); 647 allErrors, error_name[errnum]);
644 648
645 /* Call the helper to output message */ 649 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
646 edac_mc_handle_fbd_ce(mci, rank, channel, msg); 650 branch >> 1, channel % 2, rank,
651 rdwr ? "Write error" : "Read error",
652 msg, NULL);
647 653
648 return; 654 return;
649 } 655 }
@@ -831,8 +837,8 @@ static int i5400_get_devices(struct mem_ctl_info *mci, int dev_idx)
831/* 837/*
832 * determine_amb_present 838 * determine_amb_present
833 * 839 *
834 * the information is contained in NUM_MTRS_PER_BRANCH different 840 * the information is contained in DIMMS_PER_CHANNEL different
835 * registers determining which of the NUM_MTRS_PER_BRANCH requires 841 * registers determining which of the DIMMS_PER_CHANNEL requires
836 * knowing which channel is in question 842 * knowing which channel is in question
837 * 843 *
838 * 2 branches, each with 2 channels 844 * 2 branches, each with 2 channels
@@ -861,11 +867,11 @@ static int determine_amb_present_reg(struct i5400_pvt *pvt, int channel)
861} 867}
862 868
863/* 869/*
864 * determine_mtr(pvt, csrow, channel) 870 * determine_mtr(pvt, dimm, channel)
865 * 871 *
866 * return the proper MTR register as determine by the csrow and desired channel 872 * return the proper MTR register as determine by the dimm and desired channel
867 */ 873 */
868static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel) 874static int determine_mtr(struct i5400_pvt *pvt, int dimm, int channel)
869{ 875{
870 int mtr; 876 int mtr;
871 int n; 877 int n;
@@ -873,11 +879,11 @@ static int determine_mtr(struct i5400_pvt *pvt, int csrow, int channel)
873 /* There is one MTR for each slot pair of FB-DIMMs, 879 /* There is one MTR for each slot pair of FB-DIMMs,
874 Each slot pair may be at branch 0 or branch 1. 880 Each slot pair may be at branch 0 or branch 1.
875 */ 881 */
876 n = csrow; 882 n = dimm;
877 883
878 if (n >= NUM_MTRS_PER_BRANCH) { 884 if (n >= DIMMS_PER_CHANNEL) {
879 debugf0("ERROR: trying to access an invalid csrow: %d\n", 885 debugf0("ERROR: trying to access an invalid dimm: %d\n",
880 csrow); 886 dimm);
881 return 0; 887 return 0;
882 } 888 }
883 889
@@ -913,19 +919,19 @@ static void decode_mtr(int slot_row, u16 mtr)
913 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); 919 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
914} 920}
915 921
916static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel, 922static void handle_channel(struct i5400_pvt *pvt, int dimm, int channel,
917 struct i5400_dimm_info *dinfo) 923 struct i5400_dimm_info *dinfo)
918{ 924{
919 int mtr; 925 int mtr;
920 int amb_present_reg; 926 int amb_present_reg;
921 int addrBits; 927 int addrBits;
922 928
923 mtr = determine_mtr(pvt, csrow, channel); 929 mtr = determine_mtr(pvt, dimm, channel);
924 if (MTR_DIMMS_PRESENT(mtr)) { 930 if (MTR_DIMMS_PRESENT(mtr)) {
925 amb_present_reg = determine_amb_present_reg(pvt, channel); 931 amb_present_reg = determine_amb_present_reg(pvt, channel);
926 932
927 /* Determine if there is a DIMM present in this DIMM slot */ 933 /* Determine if there is a DIMM present in this DIMM slot */
928 if (amb_present_reg & (1 << csrow)) { 934 if (amb_present_reg & (1 << dimm)) {
929 /* Start with the number of bits for a Bank 935 /* Start with the number of bits for a Bank
930 * on the DRAM */ 936 * on the DRAM */
931 addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr); 937 addrBits = MTR_DRAM_BANKS_ADDR_BITS(mtr);
@@ -954,10 +960,10 @@ static void handle_channel(struct i5400_pvt *pvt, int csrow, int channel,
954static void calculate_dimm_size(struct i5400_pvt *pvt) 960static void calculate_dimm_size(struct i5400_pvt *pvt)
955{ 961{
956 struct i5400_dimm_info *dinfo; 962 struct i5400_dimm_info *dinfo;
957 int csrow, max_csrows; 963 int dimm, max_dimms;
958 char *p, *mem_buffer; 964 char *p, *mem_buffer;
959 int space, n; 965 int space, n;
960 int channel; 966 int channel, branch;
961 967
962 /* ================= Generate some debug output ================= */ 968 /* ================= Generate some debug output ================= */
963 space = PAGE_SIZE; 969 space = PAGE_SIZE;
@@ -968,32 +974,32 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
968 return; 974 return;
969 } 975 }
970 976
971 /* Scan all the actual CSROWS 977 /* Scan all the actual DIMMS
972 * and calculate the information for each DIMM 978 * and calculate the information for each DIMM
973 * Start with the highest csrow first, to display it first 979 * Start with the highest dimm first, to display it first
974 * and work toward the 0th csrow 980 * and work toward the 0th dimm
975 */ 981 */
976 max_csrows = pvt->maxdimmperch; 982 max_dimms = pvt->maxdimmperch;
977 for (csrow = max_csrows - 1; csrow >= 0; csrow--) { 983 for (dimm = max_dimms - 1; dimm >= 0; dimm--) {
978 984
979 /* on an odd csrow, first output a 'boundary' marker, 985 /* on an odd dimm, first output a 'boundary' marker,
980 * then reset the message buffer */ 986 * then reset the message buffer */
981 if (csrow & 0x1) { 987 if (dimm & 0x1) {
982 n = snprintf(p, space, "---------------------------" 988 n = snprintf(p, space, "---------------------------"
983 "--------------------------------"); 989 "-------------------------------");
984 p += n; 990 p += n;
985 space -= n; 991 space -= n;
986 debugf2("%s\n", mem_buffer); 992 debugf2("%s\n", mem_buffer);
987 p = mem_buffer; 993 p = mem_buffer;
988 space = PAGE_SIZE; 994 space = PAGE_SIZE;
989 } 995 }
990 n = snprintf(p, space, "csrow %2d ", csrow); 996 n = snprintf(p, space, "dimm %2d ", dimm);
991 p += n; 997 p += n;
992 space -= n; 998 space -= n;
993 999
994 for (channel = 0; channel < pvt->maxch; channel++) { 1000 for (channel = 0; channel < pvt->maxch; channel++) {
995 dinfo = &pvt->dimm_info[csrow][channel]; 1001 dinfo = &pvt->dimm_info[dimm][channel];
996 handle_channel(pvt, csrow, channel, dinfo); 1002 handle_channel(pvt, dimm, channel, dinfo);
997 n = snprintf(p, space, "%4d MB | ", dinfo->megabytes); 1003 n = snprintf(p, space, "%4d MB | ", dinfo->megabytes);
998 p += n; 1004 p += n;
999 space -= n; 1005 space -= n;
@@ -1005,7 +1011,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
1005 1011
1006 /* Output the last bottom 'boundary' marker */ 1012 /* Output the last bottom 'boundary' marker */
1007 n = snprintf(p, space, "---------------------------" 1013 n = snprintf(p, space, "---------------------------"
1008 "--------------------------------"); 1014 "-------------------------------");
1009 p += n; 1015 p += n;
1010 space -= n; 1016 space -= n;
1011 debugf2("%s\n", mem_buffer); 1017 debugf2("%s\n", mem_buffer);
@@ -1013,7 +1019,7 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
1013 space = PAGE_SIZE; 1019 space = PAGE_SIZE;
1014 1020
1015 /* now output the 'channel' labels */ 1021 /* now output the 'channel' labels */
1016 n = snprintf(p, space, " "); 1022 n = snprintf(p, space, " ");
1017 p += n; 1023 p += n;
1018 space -= n; 1024 space -= n;
1019 for (channel = 0; channel < pvt->maxch; channel++) { 1025 for (channel = 0; channel < pvt->maxch; channel++) {
@@ -1022,6 +1028,19 @@ static void calculate_dimm_size(struct i5400_pvt *pvt)
1022 space -= n; 1028 space -= n;
1023 } 1029 }
1024 1030
1031 space -= n;
1032 debugf2("%s\n", mem_buffer);
1033 p = mem_buffer;
1034 space = PAGE_SIZE;
1035
1036 n = snprintf(p, space, " ");
1037 p += n;
1038 for (branch = 0; branch < MAX_BRANCHES; branch++) {
1039 n = snprintf(p, space, " branch %d | ", branch);
1040 p += n;
1041 space -= n;
1042 }
1043
1025 /* output the last message and free buffer */ 1044 /* output the last message and free buffer */
1026 debugf2("%s\n", mem_buffer); 1045 debugf2("%s\n", mem_buffer);
1027 kfree(mem_buffer); 1046 kfree(mem_buffer);
@@ -1080,7 +1099,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1080 debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0); 1099 debugf2("MIR1: limit= 0x%x WAY1= %u WAY0= %x\n", limit, way1, way0);
1081 1100
1082 /* Get the set of MTR[0-3] regs by each branch */ 1101 /* Get the set of MTR[0-3] regs by each branch */
1083 for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) { 1102 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++) {
1084 int where = MTR0 + (slot_row * sizeof(u16)); 1103 int where = MTR0 + (slot_row * sizeof(u16));
1085 1104
1086 /* Branch 0 set of MTR registers */ 1105 /* Branch 0 set of MTR registers */
@@ -1105,7 +1124,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1105 /* Read and dump branch 0's MTRs */ 1124 /* Read and dump branch 0's MTRs */
1106 debugf2("\nMemory Technology Registers:\n"); 1125 debugf2("\nMemory Technology Registers:\n");
1107 debugf2(" Branch 0:\n"); 1126 debugf2(" Branch 0:\n");
1108 for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) 1127 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
1109 decode_mtr(slot_row, pvt->b0_mtr[slot_row]); 1128 decode_mtr(slot_row, pvt->b0_mtr[slot_row]);
1110 1129
1111 pci_read_config_word(pvt->branch_0, AMBPRESENT_0, 1130 pci_read_config_word(pvt->branch_0, AMBPRESENT_0,
@@ -1122,7 +1141,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1122 } else { 1141 } else {
1123 /* Read and dump branch 1's MTRs */ 1142 /* Read and dump branch 1's MTRs */
1124 debugf2(" Branch 1:\n"); 1143 debugf2(" Branch 1:\n");
1125 for (slot_row = 0; slot_row < NUM_MTRS_PER_BRANCH; slot_row++) 1144 for (slot_row = 0; slot_row < DIMMS_PER_CHANNEL; slot_row++)
1126 decode_mtr(slot_row, pvt->b1_mtr[slot_row]); 1145 decode_mtr(slot_row, pvt->b1_mtr[slot_row]);
1127 1146
1128 pci_read_config_word(pvt->branch_1, AMBPRESENT_0, 1147 pci_read_config_word(pvt->branch_1, AMBPRESENT_0,
@@ -1141,7 +1160,7 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1141} 1160}
1142 1161
1143/* 1162/*
1144 * i5400_init_csrows Initialize the 'csrows' table within 1163 * i5400_init_dimms Initialize the 'dimms' table within
1145 * the mci control structure with the 1164 * the mci control structure with the
1146 * addressing of memory. 1165 * addressing of memory.
1147 * 1166 *
@@ -1149,64 +1168,68 @@ static void i5400_get_mc_regs(struct mem_ctl_info *mci)
1149 * 0 success 1168 * 0 success
1150 * 1 no actual memory found on this MC 1169 * 1 no actual memory found on this MC
1151 */ 1170 */
1152static int i5400_init_csrows(struct mem_ctl_info *mci) 1171static int i5400_init_dimms(struct mem_ctl_info *mci)
1153{ 1172{
1154 struct i5400_pvt *pvt; 1173 struct i5400_pvt *pvt;
1155 struct csrow_info *p_csrow; 1174 struct dimm_info *dimm;
1156 int empty, channel_count; 1175 int ndimms, channel_count;
1157 int max_csrows; 1176 int max_dimms;
1158 int mtr; 1177 int mtr;
1159 int csrow_megs; 1178 int size_mb;
1160 int channel; 1179 int channel, slot;
1161 int csrow;
1162 1180
1163 pvt = mci->pvt_info; 1181 pvt = mci->pvt_info;
1164 1182
1165 channel_count = pvt->maxch; 1183 channel_count = pvt->maxch;
1166 max_csrows = pvt->maxdimmperch; 1184 max_dimms = pvt->maxdimmperch;
1167 1185
1168 empty = 1; /* Assume NO memory */ 1186 ndimms = 0;
1169 1187
1170 for (csrow = 0; csrow < max_csrows; csrow++) { 1188 /*
1171 p_csrow = &mci->csrows[csrow]; 1189 * FIXME: remove pvt->dimm_info[slot][channel] and use the 3
1172 1190 * layers here.
1173 p_csrow->csrow_idx = csrow; 1191 */
1174 1192 for (channel = 0; channel < mci->layers[0].size * mci->layers[1].size;
1175 /* use branch 0 for the basis */ 1193 channel++) {
1176 mtr = determine_mtr(pvt, csrow, 0); 1194 for (slot = 0; slot < mci->layers[2].size; slot++) {
1177 1195 mtr = determine_mtr(pvt, slot, channel);
1178 /* if no DIMMS on this row, continue */ 1196
1179 if (!MTR_DIMMS_PRESENT(mtr)) 1197 /* if no DIMMS on this slot, continue */
1180 continue; 1198 if (!MTR_DIMMS_PRESENT(mtr))
1181 1199 continue;
1182 /* FAKE OUT VALUES, FIXME */ 1200
1183 p_csrow->first_page = 0 + csrow * 20; 1201 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
1184 p_csrow->last_page = 9 + csrow * 20; 1202 channel / 2, channel % 2, slot);
1185 p_csrow->page_mask = 0xFFF; 1203
1186 1204 size_mb = pvt->dimm_info[slot][channel].megabytes;
1187 p_csrow->grain = 8; 1205
1188 1206 debugf2("%s: dimm%zd (branch %d channel %d slot %d): %d.%03d GB\n",
1189 csrow_megs = 0; 1207 __func__, dimm - mci->dimms,
1190 for (channel = 0; channel < pvt->maxch; channel++) 1208 channel / 2, channel % 2, slot,
1191 csrow_megs += pvt->dimm_info[csrow][channel].megabytes; 1209 size_mb / 1000, size_mb % 1000);
1192 1210
1193 p_csrow->nr_pages = csrow_megs << 8; 1211 dimm->nr_pages = size_mb << 8;
1194 1212 dimm->grain = 8;
1195 /* Assume DDR2 for now */ 1213 dimm->dtype = MTR_DRAM_WIDTH(mtr) ? DEV_X8 : DEV_X4;
1196 p_csrow->mtype = MEM_FB_DDR2; 1214 dimm->mtype = MEM_FB_DDR2;
1197 1215 /*
1198 /* ask what device type on this row */ 1216 * The eccc mechanism is SDDC (aka SECC), with
1199 if (MTR_DRAM_WIDTH(mtr)) 1217 * is similar to Chipkill.
1200 p_csrow->dtype = DEV_X8; 1218 */
1201 else 1219 dimm->edac_mode = MTR_DRAM_WIDTH(mtr) ?
1202 p_csrow->dtype = DEV_X4; 1220 EDAC_S8ECD8ED : EDAC_S4ECD4ED;
1203 1221 ndimms++;
1204 p_csrow->edac_mode = EDAC_S8ECD8ED; 1222 }
1205
1206 empty = 0;
1207 } 1223 }
1208 1224
1209 return empty; 1225 /*
1226 * When just one memory is provided, it should be at location (0,0,0).
1227 * With such single-DIMM mode, the SDCC algorithm degrades to SECDEC+.
1228 */
1229 if (ndimms == 1)
1230 mci->dimms[0].edac_mode = EDAC_SECDED;
1231
1232 return (ndimms == 0);
1210} 1233}
1211 1234
1212/* 1235/*
@@ -1242,9 +1265,7 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1242{ 1265{
1243 struct mem_ctl_info *mci; 1266 struct mem_ctl_info *mci;
1244 struct i5400_pvt *pvt; 1267 struct i5400_pvt *pvt;
1245 int num_channels; 1268 struct edac_mc_layer layers[3];
1246 int num_dimms_per_channel;
1247 int num_csrows;
1248 1269
1249 if (dev_idx >= ARRAY_SIZE(i5400_devs)) 1270 if (dev_idx >= ARRAY_SIZE(i5400_devs))
1250 return -EINVAL; 1271 return -EINVAL;
@@ -1258,23 +1279,21 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1258 if (PCI_FUNC(pdev->devfn) != 0) 1279 if (PCI_FUNC(pdev->devfn) != 0)
1259 return -ENODEV; 1280 return -ENODEV;
1260 1281
1261 /* As we don't have a motherboard identification routine to determine 1282 /*
1262 * actual number of slots/dimms per channel, we thus utilize the 1283 * allocate a new MC control structure
1263 * resource as specified by the chipset. Thus, we might have 1284 *
1264 * have more DIMMs per channel than actually on the mobo, but this 1285 * This drivers uses the DIMM slot as "csrow" and the rest as "channel".
1265 * allows the driver to support up to the chipset max, without
1266 * some fancy mobo determination.
1267 */ 1286 */
1268 num_dimms_per_channel = MAX_DIMMS_PER_CHANNEL; 1287 layers[0].type = EDAC_MC_LAYER_BRANCH;
1269 num_channels = MAX_CHANNELS; 1288 layers[0].size = MAX_BRANCHES;
1270 num_csrows = num_dimms_per_channel; 1289 layers[0].is_virt_csrow = false;
1271 1290 layers[1].type = EDAC_MC_LAYER_CHANNEL;
1272 debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n", 1291 layers[1].size = CHANNELS_PER_BRANCH;
1273 __func__, num_channels, num_dimms_per_channel, num_csrows); 1292 layers[1].is_virt_csrow = false;
1274 1293 layers[2].type = EDAC_MC_LAYER_SLOT;
1275 /* allocate a new MC control structure */ 1294 layers[2].size = DIMMS_PER_CHANNEL;
1276 mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0); 1295 layers[2].is_virt_csrow = true;
1277 1296 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1278 if (mci == NULL) 1297 if (mci == NULL)
1279 return -ENOMEM; 1298 return -ENOMEM;
1280 1299
@@ -1284,8 +1303,8 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1284 1303
1285 pvt = mci->pvt_info; 1304 pvt = mci->pvt_info;
1286 pvt->system_address = pdev; /* Record this device in our private */ 1305 pvt->system_address = pdev; /* Record this device in our private */
1287 pvt->maxch = num_channels; 1306 pvt->maxch = MAX_CHANNELS;
1288 pvt->maxdimmperch = num_dimms_per_channel; 1307 pvt->maxdimmperch = DIMMS_PER_CHANNEL;
1289 1308
1290 /* 'get' the pci devices we want to reserve for our use */ 1309 /* 'get' the pci devices we want to reserve for our use */
1291 if (i5400_get_devices(mci, dev_idx)) 1310 if (i5400_get_devices(mci, dev_idx))
@@ -1307,13 +1326,13 @@ static int i5400_probe1(struct pci_dev *pdev, int dev_idx)
1307 /* Set the function pointer to an actual operation function */ 1326 /* Set the function pointer to an actual operation function */
1308 mci->edac_check = i5400_check_error; 1327 mci->edac_check = i5400_check_error;
1309 1328
1310 /* initialize the MC control structure 'csrows' table 1329 /* initialize the MC control structure 'dimms' table
1311 * with the mapping and control information */ 1330 * with the mapping and control information */
1312 if (i5400_init_csrows(mci)) { 1331 if (i5400_init_dimms(mci)) {
1313 debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n" 1332 debugf0("MC: Setting mci->edac_cap to EDAC_FLAG_NONE\n"
1314 " because i5400_init_csrows() returned nonzero " 1333 " because i5400_init_dimms() returned nonzero "
1315 "value\n"); 1334 "value\n");
1316 mci->edac_cap = EDAC_FLAG_NONE; /* no csrows found */ 1335 mci->edac_cap = EDAC_FLAG_NONE; /* no dimms found */
1317 } else { 1336 } else {
1318 debugf1("MC: Enable error reporting now\n"); 1337 debugf1("MC: Enable error reporting now\n");
1319 i5400_enable_error_reporting(mci); 1338 i5400_enable_error_reporting(mci);
diff --git a/drivers/edac/i7300_edac.c b/drivers/edac/i7300_edac.c
index 3bafa3bca148..97c22fd650ee 100644
--- a/drivers/edac/i7300_edac.c
+++ b/drivers/edac/i7300_edac.c
@@ -464,17 +464,14 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
464 FERR_FAT_FBD, error_reg); 464 FERR_FAT_FBD, error_reg);
465 465
466 snprintf(pvt->tmp_prt_buffer, PAGE_SIZE, 466 snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
467 "FATAL (Branch=%d DRAM-Bank=%d %s " 467 "Bank=%d RAS=%d CAS=%d Err=0x%lx (%s))",
468 "RAS=%d CAS=%d Err=0x%lx (%s))", 468 bank, ras, cas, errors, specific);
469 branch, bank, 469
470 is_wr ? "RDWR" : "RD", 470 edac_mc_handle_error(HW_EVENT_ERR_FATAL, mci, 0, 0, 0,
471 ras, cas, 471 branch, -1, rank,
472 errors, specific); 472 is_wr ? "Write error" : "Read error",
473 473 pvt->tmp_prt_buffer, NULL);
474 /* Call the helper to output message */ 474
475 edac_mc_handle_fbd_ue(mci, rank, branch << 1,
476 (branch << 1) + 1,
477 pvt->tmp_prt_buffer);
478 } 475 }
479 476
480 /* read in the 1st NON-FATAL error register */ 477 /* read in the 1st NON-FATAL error register */
@@ -513,23 +510,14 @@ static void i7300_process_fbd_error(struct mem_ctl_info *mci)
513 510
514 /* Form out message */ 511 /* Form out message */
515 snprintf(pvt->tmp_prt_buffer, PAGE_SIZE, 512 snprintf(pvt->tmp_prt_buffer, PAGE_SIZE,
516 "Corrected error (Branch=%d, Channel %d), " 513 "DRAM-Bank=%d RAS=%d CAS=%d, Err=0x%lx (%s))",
517 " DRAM-Bank=%d %s " 514 bank, ras, cas, errors, specific);
518 "RAS=%d CAS=%d, CE Err=0x%lx, Syndrome=0x%08x(%s))", 515
519 branch, channel, 516 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0,
520 bank, 517 syndrome,
521 is_wr ? "RDWR" : "RD", 518 branch >> 1, channel % 2, rank,
522 ras, cas, 519 is_wr ? "Write error" : "Read error",
523 errors, syndrome, specific); 520 pvt->tmp_prt_buffer, NULL);
524
525 /*
526 * Call the helper to output message
527 * NOTE: Errors are reported per-branch, and not per-channel
528 * Currently, we don't know how to identify the right
529 * channel.
530 */
531 edac_mc_handle_fbd_ce(mci, rank, channel,
532 pvt->tmp_prt_buffer);
533 } 521 }
534 return; 522 return;
535} 523}
@@ -617,8 +605,7 @@ static void i7300_enable_error_reporting(struct mem_ctl_info *mci)
617static int decode_mtr(struct i7300_pvt *pvt, 605static int decode_mtr(struct i7300_pvt *pvt,
618 int slot, int ch, int branch, 606 int slot, int ch, int branch,
619 struct i7300_dimm_info *dinfo, 607 struct i7300_dimm_info *dinfo,
620 struct csrow_info *p_csrow, 608 struct dimm_info *dimm)
621 u32 *nr_pages)
622{ 609{
623 int mtr, ans, addrBits, channel; 610 int mtr, ans, addrBits, channel;
624 611
@@ -650,7 +637,6 @@ static int decode_mtr(struct i7300_pvt *pvt,
650 addrBits -= 3; /* 8 bits per bytes */ 637 addrBits -= 3; /* 8 bits per bytes */
651 638
652 dinfo->megabytes = 1 << addrBits; 639 dinfo->megabytes = 1 << addrBits;
653 *nr_pages = dinfo->megabytes << 8;
654 640
655 debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr)); 641 debugf2("\t\tWIDTH: x%d\n", MTR_DRAM_WIDTH(mtr));
656 642
@@ -663,11 +649,6 @@ static int decode_mtr(struct i7300_pvt *pvt,
663 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]); 649 debugf2("\t\tNUMCOL: %s\n", numcol_toString[MTR_DIMM_COLS(mtr)]);
664 debugf2("\t\tSIZE: %d MB\n", dinfo->megabytes); 650 debugf2("\t\tSIZE: %d MB\n", dinfo->megabytes);
665 651
666 p_csrow->grain = 8;
667 p_csrow->mtype = MEM_FB_DDR2;
668 p_csrow->csrow_idx = slot;
669 p_csrow->page_mask = 0;
670
671 /* 652 /*
672 * The type of error detection actually depends of the 653 * The type of error detection actually depends of the
673 * mode of operation. When it is just one single memory chip, at 654 * mode of operation. When it is just one single memory chip, at
@@ -677,15 +658,18 @@ static int decode_mtr(struct i7300_pvt *pvt,
677 * See datasheet Sections 7.3.6 to 7.3.8 658 * See datasheet Sections 7.3.6 to 7.3.8
678 */ 659 */
679 660
661 dimm->nr_pages = MiB_TO_PAGES(dinfo->megabytes);
662 dimm->grain = 8;
663 dimm->mtype = MEM_FB_DDR2;
680 if (IS_SINGLE_MODE(pvt->mc_settings_a)) { 664 if (IS_SINGLE_MODE(pvt->mc_settings_a)) {
681 p_csrow->edac_mode = EDAC_SECDED; 665 dimm->edac_mode = EDAC_SECDED;
682 debugf2("\t\tECC code is 8-byte-over-32-byte SECDED+ code\n"); 666 debugf2("\t\tECC code is 8-byte-over-32-byte SECDED+ code\n");
683 } else { 667 } else {
684 debugf2("\t\tECC code is on Lockstep mode\n"); 668 debugf2("\t\tECC code is on Lockstep mode\n");
685 if (MTR_DRAM_WIDTH(mtr) == 8) 669 if (MTR_DRAM_WIDTH(mtr) == 8)
686 p_csrow->edac_mode = EDAC_S8ECD8ED; 670 dimm->edac_mode = EDAC_S8ECD8ED;
687 else 671 else
688 p_csrow->edac_mode = EDAC_S4ECD4ED; 672 dimm->edac_mode = EDAC_S4ECD4ED;
689 } 673 }
690 674
691 /* ask what device type on this row */ 675 /* ask what device type on this row */
@@ -694,9 +678,9 @@ static int decode_mtr(struct i7300_pvt *pvt,
694 IS_SCRBALGO_ENHANCED(pvt->mc_settings) ? 678 IS_SCRBALGO_ENHANCED(pvt->mc_settings) ?
695 "enhanced" : "normal"); 679 "enhanced" : "normal");
696 680
697 p_csrow->dtype = DEV_X8; 681 dimm->dtype = DEV_X8;
698 } else 682 } else
699 p_csrow->dtype = DEV_X4; 683 dimm->dtype = DEV_X4;
700 684
701 return mtr; 685 return mtr;
702} 686}
@@ -774,11 +758,10 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
774{ 758{
775 struct i7300_pvt *pvt; 759 struct i7300_pvt *pvt;
776 struct i7300_dimm_info *dinfo; 760 struct i7300_dimm_info *dinfo;
777 struct csrow_info *p_csrow;
778 int rc = -ENODEV; 761 int rc = -ENODEV;
779 int mtr; 762 int mtr;
780 int ch, branch, slot, channel; 763 int ch, branch, slot, channel;
781 u32 last_page = 0, nr_pages; 764 struct dimm_info *dimm;
782 765
783 pvt = mci->pvt_info; 766 pvt = mci->pvt_info;
784 767
@@ -809,25 +792,23 @@ static int i7300_init_csrows(struct mem_ctl_info *mci)
809 pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch], 792 pci_read_config_word(pvt->pci_dev_2x_0_fbd_branch[branch],
810 where, 793 where,
811 &pvt->mtr[slot][branch]); 794 &pvt->mtr[slot][branch]);
812 for (ch = 0; ch < MAX_BRANCHES; ch++) { 795 for (ch = 0; ch < MAX_CH_PER_BRANCH; ch++) {
813 int channel = to_channel(ch, branch); 796 int channel = to_channel(ch, branch);
814 797
798 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms,
799 mci->n_layers, branch, ch, slot);
800
815 dinfo = &pvt->dimm_info[slot][channel]; 801 dinfo = &pvt->dimm_info[slot][channel];
816 p_csrow = &mci->csrows[slot];
817 802
818 mtr = decode_mtr(pvt, slot, ch, branch, 803 mtr = decode_mtr(pvt, slot, ch, branch,
819 dinfo, p_csrow, &nr_pages); 804 dinfo, dimm);
805
820 /* if no DIMMS on this row, continue */ 806 /* if no DIMMS on this row, continue */
821 if (!MTR_DIMMS_PRESENT(mtr)) 807 if (!MTR_DIMMS_PRESENT(mtr))
822 continue; 808 continue;
823 809
824 /* Update per_csrow memory count */
825 p_csrow->nr_pages += nr_pages;
826 p_csrow->first_page = last_page;
827 last_page += nr_pages;
828 p_csrow->last_page = last_page;
829
830 rc = 0; 810 rc = 0;
811
831 } 812 }
832 } 813 }
833 } 814 }
@@ -1042,10 +1023,8 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
1042 const struct pci_device_id *id) 1023 const struct pci_device_id *id)
1043{ 1024{
1044 struct mem_ctl_info *mci; 1025 struct mem_ctl_info *mci;
1026 struct edac_mc_layer layers[3];
1045 struct i7300_pvt *pvt; 1027 struct i7300_pvt *pvt;
1046 int num_channels;
1047 int num_dimms_per_channel;
1048 int num_csrows;
1049 int rc; 1028 int rc;
1050 1029
1051 /* wake up device */ 1030 /* wake up device */
@@ -1062,23 +1041,17 @@ static int __devinit i7300_init_one(struct pci_dev *pdev,
1062 if (PCI_FUNC(pdev->devfn) != 0) 1041 if (PCI_FUNC(pdev->devfn) != 0)
1063 return -ENODEV; 1042 return -ENODEV;
1064 1043
1065 /* As we don't have a motherboard identification routine to determine
1066 * actual number of slots/dimms per channel, we thus utilize the
1067 * resource as specified by the chipset. Thus, we might have
1068 * have more DIMMs per channel than actually on the mobo, but this
1069 * allows the driver to support up to the chipset max, without
1070 * some fancy mobo determination.
1071 */
1072 num_dimms_per_channel = MAX_SLOTS;
1073 num_channels = MAX_CHANNELS;
1074 num_csrows = MAX_SLOTS * MAX_CHANNELS;
1075
1076 debugf0("MC: %s(): Number of - Channels= %d DIMMS= %d CSROWS= %d\n",
1077 __func__, num_channels, num_dimms_per_channel, num_csrows);
1078
1079 /* allocate a new MC control structure */ 1044 /* allocate a new MC control structure */
1080 mci = edac_mc_alloc(sizeof(*pvt), num_csrows, num_channels, 0); 1045 layers[0].type = EDAC_MC_LAYER_BRANCH;
1081 1046 layers[0].size = MAX_BRANCHES;
1047 layers[0].is_virt_csrow = false;
1048 layers[1].type = EDAC_MC_LAYER_CHANNEL;
1049 layers[1].size = MAX_CH_PER_BRANCH;
1050 layers[1].is_virt_csrow = true;
1051 layers[2].type = EDAC_MC_LAYER_SLOT;
1052 layers[2].size = MAX_SLOTS;
1053 layers[2].is_virt_csrow = true;
1054 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
1082 if (mci == NULL) 1055 if (mci == NULL)
1083 return -ENOMEM; 1056 return -ENOMEM;
1084 1057
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index 7f1dfcc4e597..d27778f65a5d 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -221,7 +221,9 @@ struct i7core_inject {
221}; 221};
222 222
223struct i7core_channel { 223struct i7core_channel {
224 u32 ranks; 224 bool is_3dimms_present;
225 bool is_single_4rank;
226 bool has_4rank;
225 u32 dimms; 227 u32 dimms;
226}; 228};
227 229
@@ -257,7 +259,6 @@ struct i7core_pvt {
257 struct i7core_channel channel[NUM_CHANS]; 259 struct i7core_channel channel[NUM_CHANS];
258 260
259 int ce_count_available; 261 int ce_count_available;
260 int csrow_map[NUM_CHANS][MAX_DIMMS];
261 262
262 /* ECC corrected errors counts per udimm */ 263 /* ECC corrected errors counts per udimm */
263 unsigned long udimm_ce_count[MAX_DIMMS]; 264 unsigned long udimm_ce_count[MAX_DIMMS];
@@ -492,116 +493,15 @@ static void free_i7core_dev(struct i7core_dev *i7core_dev)
492/**************************************************************************** 493/****************************************************************************
493 Memory check routines 494 Memory check routines
494 ****************************************************************************/ 495 ****************************************************************************/
495static struct pci_dev *get_pdev_slot_func(u8 socket, unsigned slot,
496 unsigned func)
497{
498 struct i7core_dev *i7core_dev = get_i7core_dev(socket);
499 int i;
500
501 if (!i7core_dev)
502 return NULL;
503
504 for (i = 0; i < i7core_dev->n_devs; i++) {
505 if (!i7core_dev->pdev[i])
506 continue;
507
508 if (PCI_SLOT(i7core_dev->pdev[i]->devfn) == slot &&
509 PCI_FUNC(i7core_dev->pdev[i]->devfn) == func) {
510 return i7core_dev->pdev[i];
511 }
512 }
513
514 return NULL;
515}
516
517/**
518 * i7core_get_active_channels() - gets the number of channels and csrows
519 * @socket: Quick Path Interconnect socket
520 * @channels: Number of channels that will be returned
521 * @csrows: Number of csrows found
522 *
523 * Since EDAC core needs to know in advance the number of available channels
524 * and csrows, in order to allocate memory for csrows/channels, it is needed
525 * to run two similar steps. At the first step, implemented on this function,
526 * it checks the number of csrows/channels present at one socket.
527 * this is used in order to properly allocate the size of mci components.
528 *
529 * It should be noticed that none of the current available datasheets explain
530 * or even mention how csrows are seen by the memory controller. So, we need
531 * to add a fake description for csrows.
532 * So, this driver is attributing one DIMM memory for one csrow.
533 */
534static int i7core_get_active_channels(const u8 socket, unsigned *channels,
535 unsigned *csrows)
536{
537 struct pci_dev *pdev = NULL;
538 int i, j;
539 u32 status, control;
540
541 *channels = 0;
542 *csrows = 0;
543
544 pdev = get_pdev_slot_func(socket, 3, 0);
545 if (!pdev) {
546 i7core_printk(KERN_ERR, "Couldn't find socket %d fn 3.0!!!\n",
547 socket);
548 return -ENODEV;
549 }
550
551 /* Device 3 function 0 reads */
552 pci_read_config_dword(pdev, MC_STATUS, &status);
553 pci_read_config_dword(pdev, MC_CONTROL, &control);
554
555 for (i = 0; i < NUM_CHANS; i++) {
556 u32 dimm_dod[3];
557 /* Check if the channel is active */
558 if (!(control & (1 << (8 + i))))
559 continue;
560
561 /* Check if the channel is disabled */
562 if (status & (1 << i))
563 continue;
564
565 pdev = get_pdev_slot_func(socket, i + 4, 1);
566 if (!pdev) {
567 i7core_printk(KERN_ERR, "Couldn't find socket %d "
568 "fn %d.%d!!!\n",
569 socket, i + 4, 1);
570 return -ENODEV;
571 }
572 /* Devices 4-6 function 1 */
573 pci_read_config_dword(pdev,
574 MC_DOD_CH_DIMM0, &dimm_dod[0]);
575 pci_read_config_dword(pdev,
576 MC_DOD_CH_DIMM1, &dimm_dod[1]);
577 pci_read_config_dword(pdev,
578 MC_DOD_CH_DIMM2, &dimm_dod[2]);
579 496
580 (*channels)++; 497static int get_dimm_config(struct mem_ctl_info *mci)
581
582 for (j = 0; j < 3; j++) {
583 if (!DIMM_PRESENT(dimm_dod[j]))
584 continue;
585 (*csrows)++;
586 }
587 }
588
589 debugf0("Number of active channels on socket %d: %d\n",
590 socket, *channels);
591
592 return 0;
593}
594
595static int get_dimm_config(const struct mem_ctl_info *mci)
596{ 498{
597 struct i7core_pvt *pvt = mci->pvt_info; 499 struct i7core_pvt *pvt = mci->pvt_info;
598 struct csrow_info *csr;
599 struct pci_dev *pdev; 500 struct pci_dev *pdev;
600 int i, j; 501 int i, j;
601 int csrow = 0;
602 unsigned long last_page = 0;
603 enum edac_type mode; 502 enum edac_type mode;
604 enum mem_type mtype; 503 enum mem_type mtype;
504 struct dimm_info *dimm;
605 505
606 /* Get data from the MC register, function 0 */ 506 /* Get data from the MC register, function 0 */
607 pdev = pvt->pci_mcr[0]; 507 pdev = pvt->pci_mcr[0];
@@ -657,21 +557,20 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
657 pci_read_config_dword(pvt->pci_ch[i][0], 557 pci_read_config_dword(pvt->pci_ch[i][0],
658 MC_CHANNEL_DIMM_INIT_PARAMS, &data); 558 MC_CHANNEL_DIMM_INIT_PARAMS, &data);
659 559
660 pvt->channel[i].ranks = (data & QUAD_RANK_PRESENT) ? 560
661 4 : 2; 561 if (data & THREE_DIMMS_PRESENT)
562 pvt->channel[i].is_3dimms_present = true;
563
564 if (data & SINGLE_QUAD_RANK_PRESENT)
565 pvt->channel[i].is_single_4rank = true;
566
567 if (data & QUAD_RANK_PRESENT)
568 pvt->channel[i].has_4rank = true;
662 569
663 if (data & REGISTERED_DIMM) 570 if (data & REGISTERED_DIMM)
664 mtype = MEM_RDDR3; 571 mtype = MEM_RDDR3;
665 else 572 else
666 mtype = MEM_DDR3; 573 mtype = MEM_DDR3;
667#if 0
668 if (data & THREE_DIMMS_PRESENT)
669 pvt->channel[i].dimms = 3;
670 else if (data & SINGLE_QUAD_RANK_PRESENT)
671 pvt->channel[i].dimms = 1;
672 else
673 pvt->channel[i].dimms = 2;
674#endif
675 574
676 /* Devices 4-6 function 1 */ 575 /* Devices 4-6 function 1 */
677 pci_read_config_dword(pvt->pci_ch[i][1], 576 pci_read_config_dword(pvt->pci_ch[i][1],
@@ -682,11 +581,13 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
682 MC_DOD_CH_DIMM2, &dimm_dod[2]); 581 MC_DOD_CH_DIMM2, &dimm_dod[2]);
683 582
684 debugf0("Ch%d phy rd%d, wr%d (0x%08x): " 583 debugf0("Ch%d phy rd%d, wr%d (0x%08x): "
685 "%d ranks, %cDIMMs\n", 584 "%s%s%s%cDIMMs\n",
686 i, 585 i,
687 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i), 586 RDLCH(pvt->info.ch_map, i), WRLCH(pvt->info.ch_map, i),
688 data, 587 data,
689 pvt->channel[i].ranks, 588 pvt->channel[i].is_3dimms_present ? "3DIMMS " : "",
589 pvt->channel[i].is_3dimms_present ? "SINGLE_4R " : "",
590 pvt->channel[i].has_4rank ? "HAS_4R " : "",
690 (data & REGISTERED_DIMM) ? 'R' : 'U'); 591 (data & REGISTERED_DIMM) ? 'R' : 'U');
691 592
692 for (j = 0; j < 3; j++) { 593 for (j = 0; j < 3; j++) {
@@ -696,6 +597,8 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
696 if (!DIMM_PRESENT(dimm_dod[j])) 597 if (!DIMM_PRESENT(dimm_dod[j]))
697 continue; 598 continue;
698 599
600 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
601 i, j, 0);
699 banks = numbank(MC_DOD_NUMBANK(dimm_dod[j])); 602 banks = numbank(MC_DOD_NUMBANK(dimm_dod[j]));
700 ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j])); 603 ranks = numrank(MC_DOD_NUMRANK(dimm_dod[j]));
701 rows = numrow(MC_DOD_NUMROW(dimm_dod[j])); 604 rows = numrow(MC_DOD_NUMROW(dimm_dod[j]));
@@ -704,8 +607,6 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
704 /* DDR3 has 8 I/O banks */ 607 /* DDR3 has 8 I/O banks */
705 size = (rows * cols * banks * ranks) >> (20 - 3); 608 size = (rows * cols * banks * ranks) >> (20 - 3);
706 609
707 pvt->channel[i].dimms++;
708
709 debugf0("\tdimm %d %d Mb offset: %x, " 610 debugf0("\tdimm %d %d Mb offset: %x, "
710 "bank: %d, rank: %d, row: %#x, col: %#x\n", 611 "bank: %d, rank: %d, row: %#x, col: %#x\n",
711 j, size, 612 j, size,
@@ -714,44 +615,28 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
714 615
715 npages = MiB_TO_PAGES(size); 616 npages = MiB_TO_PAGES(size);
716 617
717 csr = &mci->csrows[csrow]; 618 dimm->nr_pages = npages;
718 csr->first_page = last_page + 1;
719 last_page += npages;
720 csr->last_page = last_page;
721 csr->nr_pages = npages;
722
723 csr->page_mask = 0;
724 csr->grain = 8;
725 csr->csrow_idx = csrow;
726 csr->nr_channels = 1;
727
728 csr->channels[0].chan_idx = i;
729 csr->channels[0].ce_count = 0;
730
731 pvt->csrow_map[i][j] = csrow;
732 619
733 switch (banks) { 620 switch (banks) {
734 case 4: 621 case 4:
735 csr->dtype = DEV_X4; 622 dimm->dtype = DEV_X4;
736 break; 623 break;
737 case 8: 624 case 8:
738 csr->dtype = DEV_X8; 625 dimm->dtype = DEV_X8;
739 break; 626 break;
740 case 16: 627 case 16:
741 csr->dtype = DEV_X16; 628 dimm->dtype = DEV_X16;
742 break; 629 break;
743 default: 630 default:
744 csr->dtype = DEV_UNKNOWN; 631 dimm->dtype = DEV_UNKNOWN;
745 } 632 }
746 633
747 csr->edac_mode = mode; 634 snprintf(dimm->label, sizeof(dimm->label),
748 csr->mtype = mtype; 635 "CPU#%uChannel#%u_DIMM#%u",
749 snprintf(csr->channels[0].label, 636 pvt->i7core_dev->socket, i, j);
750 sizeof(csr->channels[0].label), 637 dimm->grain = 8;
751 "CPU#%uChannel#%u_DIMM#%u", 638 dimm->edac_mode = mode;
752 pvt->i7core_dev->socket, i, j); 639 dimm->mtype = mtype;
753
754 csrow++;
755 } 640 }
756 641
757 pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]); 642 pci_read_config_dword(pdev, MC_SAG_CH_0, &value[0]);
@@ -1567,22 +1452,16 @@ error:
1567/**************************************************************************** 1452/****************************************************************************
1568 Error check routines 1453 Error check routines
1569 ****************************************************************************/ 1454 ****************************************************************************/
1570static void i7core_rdimm_update_csrow(struct mem_ctl_info *mci, 1455static void i7core_rdimm_update_errcount(struct mem_ctl_info *mci,
1571 const int chan, 1456 const int chan,
1572 const int dimm, 1457 const int dimm,
1573 const int add) 1458 const int add)
1574{ 1459{
1575 char *msg; 1460 int i;
1576 struct i7core_pvt *pvt = mci->pvt_info;
1577 int row = pvt->csrow_map[chan][dimm], i;
1578 1461
1579 for (i = 0; i < add; i++) { 1462 for (i = 0; i < add; i++) {
1580 msg = kasprintf(GFP_KERNEL, "Corrected error " 1463 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci, 0, 0, 0,
1581 "(Socket=%d channel=%d dimm=%d)", 1464 chan, dimm, -1, "error", "", NULL);
1582 pvt->i7core_dev->socket, chan, dimm);
1583
1584 edac_mc_handle_fbd_ce(mci, row, 0, msg);
1585 kfree (msg);
1586 } 1465 }
1587} 1466}
1588 1467
@@ -1623,11 +1502,11 @@ static void i7core_rdimm_update_ce_count(struct mem_ctl_info *mci,
1623 1502
1624 /*updated the edac core */ 1503 /*updated the edac core */
1625 if (add0 != 0) 1504 if (add0 != 0)
1626 i7core_rdimm_update_csrow(mci, chan, 0, add0); 1505 i7core_rdimm_update_errcount(mci, chan, 0, add0);
1627 if (add1 != 0) 1506 if (add1 != 0)
1628 i7core_rdimm_update_csrow(mci, chan, 1, add1); 1507 i7core_rdimm_update_errcount(mci, chan, 1, add1);
1629 if (add2 != 0) 1508 if (add2 != 0)
1630 i7core_rdimm_update_csrow(mci, chan, 2, add2); 1509 i7core_rdimm_update_errcount(mci, chan, 2, add2);
1631 1510
1632} 1511}
1633 1512
@@ -1747,20 +1626,30 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
1747 const struct mce *m) 1626 const struct mce *m)
1748{ 1627{
1749 struct i7core_pvt *pvt = mci->pvt_info; 1628 struct i7core_pvt *pvt = mci->pvt_info;
1750 char *type, *optype, *err, *msg; 1629 char *type, *optype, *err, msg[80];
1630 enum hw_event_mc_err_type tp_event;
1751 unsigned long error = m->status & 0x1ff0000l; 1631 unsigned long error = m->status & 0x1ff0000l;
1632 bool uncorrected_error = m->mcgstatus & 1ll << 61;
1633 bool ripv = m->mcgstatus & 1;
1752 u32 optypenum = (m->status >> 4) & 0x07; 1634 u32 optypenum = (m->status >> 4) & 0x07;
1753 u32 core_err_cnt = (m->status >> 38) & 0x7fff; 1635 u32 core_err_cnt = (m->status >> 38) & 0x7fff;
1754 u32 dimm = (m->misc >> 16) & 0x3; 1636 u32 dimm = (m->misc >> 16) & 0x3;
1755 u32 channel = (m->misc >> 18) & 0x3; 1637 u32 channel = (m->misc >> 18) & 0x3;
1756 u32 syndrome = m->misc >> 32; 1638 u32 syndrome = m->misc >> 32;
1757 u32 errnum = find_first_bit(&error, 32); 1639 u32 errnum = find_first_bit(&error, 32);
1758 int csrow;
1759 1640
1760 if (m->mcgstatus & 1) 1641 if (uncorrected_error) {
1761 type = "FATAL"; 1642 if (ripv) {
1762 else 1643 type = "FATAL";
1763 type = "NON_FATAL"; 1644 tp_event = HW_EVENT_ERR_FATAL;
1645 } else {
1646 type = "NON_FATAL";
1647 tp_event = HW_EVENT_ERR_UNCORRECTED;
1648 }
1649 } else {
1650 type = "CORRECTED";
1651 tp_event = HW_EVENT_ERR_CORRECTED;
1652 }
1764 1653
1765 switch (optypenum) { 1654 switch (optypenum) {
1766 case 0: 1655 case 0:
@@ -1815,27 +1704,20 @@ static void i7core_mce_output_error(struct mem_ctl_info *mci,
1815 err = "unknown"; 1704 err = "unknown";
1816 } 1705 }
1817 1706
1818 /* FIXME: should convert addr into bank and rank information */ 1707 snprintf(msg, sizeof(msg), "count=%d %s", core_err_cnt, optype);
1819 msg = kasprintf(GFP_ATOMIC,
1820 "%s (addr = 0x%08llx, cpu=%d, Dimm=%d, Channel=%d, "
1821 "syndrome=0x%08x, count=%d, Err=%08llx:%08llx (%s: %s))\n",
1822 type, (long long) m->addr, m->cpu, dimm, channel,
1823 syndrome, core_err_cnt, (long long)m->status,
1824 (long long)m->misc, optype, err);
1825
1826 debugf0("%s", msg);
1827
1828 csrow = pvt->csrow_map[channel][dimm];
1829 1708
1830 /* Call the helper to output message */ 1709 /*
1831 if (m->mcgstatus & 1) 1710 * Call the helper to output message
1832 edac_mc_handle_fbd_ue(mci, csrow, 0, 1711 * FIXME: what to do if core_err_cnt > 1? Currently, it generates
1833 0 /* FIXME: should be channel here */, msg); 1712 * only one event
1834 else if (!pvt->is_registered) 1713 */
1835 edac_mc_handle_fbd_ce(mci, csrow, 1714 if (uncorrected_error || !pvt->is_registered)
1836 0 /* FIXME: should be channel here */, msg); 1715 edac_mc_handle_error(tp_event, mci,
1837 1716 m->addr >> PAGE_SHIFT,
1838 kfree(msg); 1717 m->addr & ~PAGE_MASK,
1718 syndrome,
1719 channel, dimm, -1,
1720 err, msg, m);
1839} 1721}
1840 1722
1841/* 1723/*
@@ -2252,15 +2134,19 @@ static int i7core_register_mci(struct i7core_dev *i7core_dev)
2252{ 2134{
2253 struct mem_ctl_info *mci; 2135 struct mem_ctl_info *mci;
2254 struct i7core_pvt *pvt; 2136 struct i7core_pvt *pvt;
2255 int rc, channels, csrows; 2137 int rc;
2256 2138 struct edac_mc_layer layers[2];
2257 /* Check the number of active and not disabled channels */
2258 rc = i7core_get_active_channels(i7core_dev->socket, &channels, &csrows);
2259 if (unlikely(rc < 0))
2260 return rc;
2261 2139
2262 /* allocate a new MC control structure */ 2140 /* allocate a new MC control structure */
2263 mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, i7core_dev->socket); 2141
2142 layers[0].type = EDAC_MC_LAYER_CHANNEL;
2143 layers[0].size = NUM_CHANS;
2144 layers[0].is_virt_csrow = false;
2145 layers[1].type = EDAC_MC_LAYER_SLOT;
2146 layers[1].size = MAX_DIMMS;
2147 layers[1].is_virt_csrow = true;
2148 mci = edac_mc_alloc(i7core_dev->socket, ARRAY_SIZE(layers), layers,
2149 sizeof(*pvt));
2264 if (unlikely(!mci)) 2150 if (unlikely(!mci))
2265 return -ENOMEM; 2151 return -ENOMEM;
2266 2152
diff --git a/drivers/edac/i82443bxgx_edac.c b/drivers/edac/i82443bxgx_edac.c
index 3bf2b2f490e7..52072c28a8a6 100644
--- a/drivers/edac/i82443bxgx_edac.c
+++ b/drivers/edac/i82443bxgx_edac.c
@@ -12,7 +12,7 @@
12 * 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>. 12 * 440GX fix by Jason Uhlenkott <juhlenko@akamai.com>.
13 * 13 *
14 * Written with reference to 82443BX Host Bridge Datasheet: 14 * Written with reference to 82443BX Host Bridge Datasheet:
15 * http://download.intel.com/design/chipsets/datashts/29063301.pdf 15 * http://download.intel.com/design/chipsets/datashts/29063301.pdf
16 * references to this document given in []. 16 * references to this document given in [].
17 * 17 *
18 * This module doesn't support the 440LX, but it may be possible to 18 * This module doesn't support the 440LX, but it may be possible to
@@ -156,19 +156,19 @@ static int i82443bxgx_edacmc_process_error_info(struct mem_ctl_info *mci,
156 if (info->eap & I82443BXGX_EAP_OFFSET_SBE) { 156 if (info->eap & I82443BXGX_EAP_OFFSET_SBE) {
157 error_found = 1; 157 error_found = 1;
158 if (handle_errors) 158 if (handle_errors)
159 edac_mc_handle_ce(mci, page, pageoffset, 159 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
160 /* 440BX/GX don't make syndrome information 160 page, pageoffset, 0,
161 * available */ 161 edac_mc_find_csrow_by_page(mci, page),
162 0, edac_mc_find_csrow_by_page(mci, page), 0, 162 0, -1, mci->ctl_name, "", NULL);
163 mci->ctl_name);
164 } 163 }
165 164
166 if (info->eap & I82443BXGX_EAP_OFFSET_MBE) { 165 if (info->eap & I82443BXGX_EAP_OFFSET_MBE) {
167 error_found = 1; 166 error_found = 1;
168 if (handle_errors) 167 if (handle_errors)
169 edac_mc_handle_ue(mci, page, pageoffset, 168 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
170 edac_mc_find_csrow_by_page(mci, page), 169 page, pageoffset, 0,
171 mci->ctl_name); 170 edac_mc_find_csrow_by_page(mci, page),
171 0, -1, mci->ctl_name, "", NULL);
172 } 172 }
173 173
174 return error_found; 174 return error_found;
@@ -189,6 +189,7 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
189 enum mem_type mtype) 189 enum mem_type mtype)
190{ 190{
191 struct csrow_info *csrow; 191 struct csrow_info *csrow;
192 struct dimm_info *dimm;
192 int index; 193 int index;
193 u8 drbar, dramc; 194 u8 drbar, dramc;
194 u32 row_base, row_high_limit, row_high_limit_last; 195 u32 row_base, row_high_limit, row_high_limit_last;
@@ -197,6 +198,8 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
197 row_high_limit_last = 0; 198 row_high_limit_last = 0;
198 for (index = 0; index < mci->nr_csrows; index++) { 199 for (index = 0; index < mci->nr_csrows; index++) {
199 csrow = &mci->csrows[index]; 200 csrow = &mci->csrows[index];
201 dimm = csrow->channels[0].dimm;
202
200 pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar); 203 pci_read_config_byte(pdev, I82443BXGX_DRB + index, &drbar);
201 debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n", 204 debugf1("MC%d: %s: %s() Row=%d DRB = %#0x\n",
202 mci->mc_idx, __FILE__, __func__, index, drbar); 205 mci->mc_idx, __FILE__, __func__, index, drbar);
@@ -217,14 +220,14 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
217 row_base = row_high_limit_last; 220 row_base = row_high_limit_last;
218 csrow->first_page = row_base >> PAGE_SHIFT; 221 csrow->first_page = row_base >> PAGE_SHIFT;
219 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; 222 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
220 csrow->nr_pages = csrow->last_page - csrow->first_page + 1; 223 dimm->nr_pages = csrow->last_page - csrow->first_page + 1;
221 /* EAP reports in 4kilobyte granularity [61] */ 224 /* EAP reports in 4kilobyte granularity [61] */
222 csrow->grain = 1 << 12; 225 dimm->grain = 1 << 12;
223 csrow->mtype = mtype; 226 dimm->mtype = mtype;
224 /* I don't think 440BX can tell you device type? FIXME? */ 227 /* I don't think 440BX can tell you device type? FIXME? */
225 csrow->dtype = DEV_UNKNOWN; 228 dimm->dtype = DEV_UNKNOWN;
226 /* Mode is global to all rows on 440BX */ 229 /* Mode is global to all rows on 440BX */
227 csrow->edac_mode = edac_mode; 230 dimm->edac_mode = edac_mode;
228 row_high_limit_last = row_high_limit; 231 row_high_limit_last = row_high_limit;
229 } 232 }
230} 233}
@@ -232,6 +235,7 @@ static void i82443bxgx_init_csrows(struct mem_ctl_info *mci,
232static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx) 235static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
233{ 236{
234 struct mem_ctl_info *mci; 237 struct mem_ctl_info *mci;
238 struct edac_mc_layer layers[2];
235 u8 dramc; 239 u8 dramc;
236 u32 nbxcfg, ecc_mode; 240 u32 nbxcfg, ecc_mode;
237 enum mem_type mtype; 241 enum mem_type mtype;
@@ -245,8 +249,13 @@ static int i82443bxgx_edacmc_probe1(struct pci_dev *pdev, int dev_idx)
245 if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg)) 249 if (pci_read_config_dword(pdev, I82443BXGX_NBXCFG, &nbxcfg))
246 return -EIO; 250 return -EIO;
247 251
248 mci = edac_mc_alloc(0, I82443BXGX_NR_CSROWS, I82443BXGX_NR_CHANS, 0); 252 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
249 253 layers[0].size = I82443BXGX_NR_CSROWS;
254 layers[0].is_virt_csrow = true;
255 layers[1].type = EDAC_MC_LAYER_CHANNEL;
256 layers[1].size = I82443BXGX_NR_CHANS;
257 layers[1].is_virt_csrow = false;
258 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
250 if (mci == NULL) 259 if (mci == NULL)
251 return -ENOMEM; 260 return -ENOMEM;
252 261
diff --git a/drivers/edac/i82860_edac.c b/drivers/edac/i82860_edac.c
index c779092d18d1..08045059d10b 100644
--- a/drivers/edac/i82860_edac.c
+++ b/drivers/edac/i82860_edac.c
@@ -99,6 +99,7 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
99 struct i82860_error_info *info, 99 struct i82860_error_info *info,
100 int handle_errors) 100 int handle_errors)
101{ 101{
102 struct dimm_info *dimm;
102 int row; 103 int row;
103 104
104 if (!(info->errsts2 & 0x0003)) 105 if (!(info->errsts2 & 0x0003))
@@ -108,18 +109,25 @@ static int i82860_process_error_info(struct mem_ctl_info *mci,
108 return 1; 109 return 1;
109 110
110 if ((info->errsts ^ info->errsts2) & 0x0003) { 111 if ((info->errsts ^ info->errsts2) & 0x0003) {
111 edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); 112 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
113 -1, -1, -1, "UE overwrote CE", "", NULL);
112 info->errsts = info->errsts2; 114 info->errsts = info->errsts2;
113 } 115 }
114 116
115 info->eap >>= PAGE_SHIFT; 117 info->eap >>= PAGE_SHIFT;
116 row = edac_mc_find_csrow_by_page(mci, info->eap); 118 row = edac_mc_find_csrow_by_page(mci, info->eap);
119 dimm = mci->csrows[row].channels[0].dimm;
117 120
118 if (info->errsts & 0x0002) 121 if (info->errsts & 0x0002)
119 edac_mc_handle_ue(mci, info->eap, 0, row, "i82860 UE"); 122 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
123 info->eap, 0, 0,
124 dimm->location[0], dimm->location[1], -1,
125 "i82860 UE", "", NULL);
120 else 126 else
121 edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 0, 127 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
122 "i82860 UE"); 128 info->eap, 0, info->derrsyn,
129 dimm->location[0], dimm->location[1], -1,
130 "i82860 CE", "", NULL);
123 131
124 return 1; 132 return 1;
125} 133}
@@ -140,6 +148,7 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
140 u16 value; 148 u16 value;
141 u32 cumul_size; 149 u32 cumul_size;
142 struct csrow_info *csrow; 150 struct csrow_info *csrow;
151 struct dimm_info *dimm;
143 int index; 152 int index;
144 153
145 pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim); 154 pci_read_config_word(pdev, I82860_MCHCFG, &mchcfg_ddim);
@@ -153,6 +162,8 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
153 */ 162 */
154 for (index = 0; index < mci->nr_csrows; index++) { 163 for (index = 0; index < mci->nr_csrows; index++) {
155 csrow = &mci->csrows[index]; 164 csrow = &mci->csrows[index];
165 dimm = csrow->channels[0].dimm;
166
156 pci_read_config_word(pdev, I82860_GBA + index * 2, &value); 167 pci_read_config_word(pdev, I82860_GBA + index * 2, &value);
157 cumul_size = (value & I82860_GBA_MASK) << 168 cumul_size = (value & I82860_GBA_MASK) <<
158 (I82860_GBA_SHIFT - PAGE_SHIFT); 169 (I82860_GBA_SHIFT - PAGE_SHIFT);
@@ -164,30 +175,38 @@ static void i82860_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev)
164 175
165 csrow->first_page = last_cumul_size; 176 csrow->first_page = last_cumul_size;
166 csrow->last_page = cumul_size - 1; 177 csrow->last_page = cumul_size - 1;
167 csrow->nr_pages = cumul_size - last_cumul_size; 178 dimm->nr_pages = cumul_size - last_cumul_size;
168 last_cumul_size = cumul_size; 179 last_cumul_size = cumul_size;
169 csrow->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */ 180 dimm->grain = 1 << 12; /* I82860_EAP has 4KiB reolution */
170 csrow->mtype = MEM_RMBS; 181 dimm->mtype = MEM_RMBS;
171 csrow->dtype = DEV_UNKNOWN; 182 dimm->dtype = DEV_UNKNOWN;
172 csrow->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE; 183 dimm->edac_mode = mchcfg_ddim ? EDAC_SECDED : EDAC_NONE;
173 } 184 }
174} 185}
175 186
176static int i82860_probe1(struct pci_dev *pdev, int dev_idx) 187static int i82860_probe1(struct pci_dev *pdev, int dev_idx)
177{ 188{
178 struct mem_ctl_info *mci; 189 struct mem_ctl_info *mci;
190 struct edac_mc_layer layers[2];
179 struct i82860_error_info discard; 191 struct i82860_error_info discard;
180 192
181 /* RDRAM has channels but these don't map onto the abstractions that 193 /*
182 edac uses. 194 * RDRAM has channels but these don't map onto the csrow abstraction.
183 The device groups from the GRA registers seem to map reasonably 195 * According with the datasheet, there are 2 Rambus channels, supporting
184 well onto the notion of a chip select row. 196 * up to 16 direct RDRAM devices.
185 There are 16 GRA registers and since the name is associated with 197 * The device groups from the GRA registers seem to map reasonably
186 the channel and the GRA registers map to physical devices so we are 198 * well onto the notion of a chip select row.
187 going to make 1 channel for group. 199 * There are 16 GRA registers and since the name is associated with
200 * the channel and the GRA registers map to physical devices so we are
201 * going to make 1 channel for group.
188 */ 202 */
189 mci = edac_mc_alloc(0, 16, 1, 0); 203 layers[0].type = EDAC_MC_LAYER_CHANNEL;
190 204 layers[0].size = 2;
205 layers[0].is_virt_csrow = true;
206 layers[1].type = EDAC_MC_LAYER_SLOT;
207 layers[1].size = 8;
208 layers[1].is_virt_csrow = true;
209 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
191 if (!mci) 210 if (!mci)
192 return -ENOMEM; 211 return -ENOMEM;
193 212
diff --git a/drivers/edac/i82875p_edac.c b/drivers/edac/i82875p_edac.c
index 10f15d85fb5e..b613e31c16e5 100644
--- a/drivers/edac/i82875p_edac.c
+++ b/drivers/edac/i82875p_edac.c
@@ -38,7 +38,8 @@
38#endif /* PCI_DEVICE_ID_INTEL_82875_6 */ 38#endif /* PCI_DEVICE_ID_INTEL_82875_6 */
39 39
40/* four csrows in dual channel, eight in single channel */ 40/* four csrows in dual channel, eight in single channel */
41#define I82875P_NR_CSROWS(nr_chans) (8/(nr_chans)) 41#define I82875P_NR_DIMMS 8
42#define I82875P_NR_CSROWS(nr_chans) (I82875P_NR_DIMMS / (nr_chans))
42 43
43/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */ 44/* Intel 82875p register addresses - device 0 function 0 - DRAM Controller */
44#define I82875P_EAP 0x58 /* Error Address Pointer (32b) 45#define I82875P_EAP 0x58 /* Error Address Pointer (32b)
@@ -235,7 +236,9 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
235 return 1; 236 return 1;
236 237
237 if ((info->errsts ^ info->errsts2) & 0x0081) { 238 if ((info->errsts ^ info->errsts2) & 0x0081) {
238 edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); 239 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
240 -1, -1, -1,
241 "UE overwrote CE", "", NULL);
239 info->errsts = info->errsts2; 242 info->errsts = info->errsts2;
240 } 243 }
241 244
@@ -243,11 +246,15 @@ static int i82875p_process_error_info(struct mem_ctl_info *mci,
243 row = edac_mc_find_csrow_by_page(mci, info->eap); 246 row = edac_mc_find_csrow_by_page(mci, info->eap);
244 247
245 if (info->errsts & 0x0080) 248 if (info->errsts & 0x0080)
246 edac_mc_handle_ue(mci, info->eap, 0, row, "i82875p UE"); 249 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
250 info->eap, 0, 0,
251 row, -1, -1,
252 "i82875p UE", "", NULL);
247 else 253 else
248 edac_mc_handle_ce(mci, info->eap, 0, info->derrsyn, row, 254 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
249 multi_chan ? (info->des & 0x1) : 0, 255 info->eap, 0, info->derrsyn,
250 "i82875p CE"); 256 row, multi_chan ? (info->des & 0x1) : 0,
257 -1, "i82875p CE", "", NULL);
251 258
252 return 1; 259 return 1;
253} 260}
@@ -342,11 +349,13 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
342 void __iomem * ovrfl_window, u32 drc) 349 void __iomem * ovrfl_window, u32 drc)
343{ 350{
344 struct csrow_info *csrow; 351 struct csrow_info *csrow;
352 struct dimm_info *dimm;
353 unsigned nr_chans = dual_channel_active(drc) + 1;
345 unsigned long last_cumul_size; 354 unsigned long last_cumul_size;
346 u8 value; 355 u8 value;
347 u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */ 356 u32 drc_ddim; /* DRAM Data Integrity Mode 0=none,2=edac */
348 u32 cumul_size; 357 u32 cumul_size, nr_pages;
349 int index; 358 int index, j;
350 359
351 drc_ddim = (drc >> 18) & 0x1; 360 drc_ddim = (drc >> 18) & 0x1;
352 last_cumul_size = 0; 361 last_cumul_size = 0;
@@ -369,12 +378,18 @@ static void i82875p_init_csrows(struct mem_ctl_info *mci,
369 378
370 csrow->first_page = last_cumul_size; 379 csrow->first_page = last_cumul_size;
371 csrow->last_page = cumul_size - 1; 380 csrow->last_page = cumul_size - 1;
372 csrow->nr_pages = cumul_size - last_cumul_size; 381 nr_pages = cumul_size - last_cumul_size;
373 last_cumul_size = cumul_size; 382 last_cumul_size = cumul_size;
374 csrow->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */ 383
375 csrow->mtype = MEM_DDR; 384 for (j = 0; j < nr_chans; j++) {
376 csrow->dtype = DEV_UNKNOWN; 385 dimm = csrow->channels[j].dimm;
377 csrow->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE; 386
387 dimm->nr_pages = nr_pages / nr_chans;
388 dimm->grain = 1 << 12; /* I82875P_EAP has 4KiB reolution */
389 dimm->mtype = MEM_DDR;
390 dimm->dtype = DEV_UNKNOWN;
391 dimm->edac_mode = drc_ddim ? EDAC_SECDED : EDAC_NONE;
392 }
378 } 393 }
379} 394}
380 395
@@ -382,6 +397,7 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
382{ 397{
383 int rc = -ENODEV; 398 int rc = -ENODEV;
384 struct mem_ctl_info *mci; 399 struct mem_ctl_info *mci;
400 struct edac_mc_layer layers[2];
385 struct i82875p_pvt *pvt; 401 struct i82875p_pvt *pvt;
386 struct pci_dev *ovrfl_pdev; 402 struct pci_dev *ovrfl_pdev;
387 void __iomem *ovrfl_window; 403 void __iomem *ovrfl_window;
@@ -397,9 +413,14 @@ static int i82875p_probe1(struct pci_dev *pdev, int dev_idx)
397 return -ENODEV; 413 return -ENODEV;
398 drc = readl(ovrfl_window + I82875P_DRC); 414 drc = readl(ovrfl_window + I82875P_DRC);
399 nr_chans = dual_channel_active(drc) + 1; 415 nr_chans = dual_channel_active(drc) + 1;
400 mci = edac_mc_alloc(sizeof(*pvt), I82875P_NR_CSROWS(nr_chans),
401 nr_chans, 0);
402 416
417 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
418 layers[0].size = I82875P_NR_CSROWS(nr_chans);
419 layers[0].is_virt_csrow = true;
420 layers[1].type = EDAC_MC_LAYER_CHANNEL;
421 layers[1].size = nr_chans;
422 layers[1].is_virt_csrow = false;
423 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
403 if (!mci) { 424 if (!mci) {
404 rc = -ENOMEM; 425 rc = -ENOMEM;
405 goto fail0; 426 goto fail0;
diff --git a/drivers/edac/i82975x_edac.c b/drivers/edac/i82975x_edac.c
index 0cd8368f88f8..433332c7cdba 100644
--- a/drivers/edac/i82975x_edac.c
+++ b/drivers/edac/i82975x_edac.c
@@ -29,7 +29,8 @@
29#define PCI_DEVICE_ID_INTEL_82975_0 0x277c 29#define PCI_DEVICE_ID_INTEL_82975_0 0x277c
30#endif /* PCI_DEVICE_ID_INTEL_82975_0 */ 30#endif /* PCI_DEVICE_ID_INTEL_82975_0 */
31 31
32#define I82975X_NR_CSROWS(nr_chans) (8/(nr_chans)) 32#define I82975X_NR_DIMMS 8
33#define I82975X_NR_CSROWS(nr_chans) (I82975X_NR_DIMMS / (nr_chans))
33 34
34/* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */ 35/* Intel 82975X register addresses - device 0 function 0 - DRAM Controller */
35#define I82975X_EAP 0x58 /* Dram Error Address Pointer (32b) 36#define I82975X_EAP 0x58 /* Dram Error Address Pointer (32b)
@@ -287,7 +288,8 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
287 return 1; 288 return 1;
288 289
289 if ((info->errsts ^ info->errsts2) & 0x0003) { 290 if ((info->errsts ^ info->errsts2) & 0x0003) {
290 edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); 291 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
292 -1, -1, -1, "UE overwrote CE", "", NULL);
291 info->errsts = info->errsts2; 293 info->errsts = info->errsts2;
292 } 294 }
293 295
@@ -309,13 +311,18 @@ static int i82975x_process_error_info(struct mem_ctl_info *mci,
309 chan = (mci->csrows[row].nr_channels == 1) ? 0 : info->eap & 1; 311 chan = (mci->csrows[row].nr_channels == 1) ? 0 : info->eap & 1;
310 offst = info->eap 312 offst = info->eap
311 & ((1 << PAGE_SHIFT) - 313 & ((1 << PAGE_SHIFT) -
312 (1 << mci->csrows[row].grain)); 314 (1 << mci->csrows[row].channels[chan].dimm->grain));
313 315
314 if (info->errsts & 0x0002) 316 if (info->errsts & 0x0002)
315 edac_mc_handle_ue(mci, page, offst , row, "i82975x UE"); 317 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
318 page, offst, 0,
319 row, -1, -1,
320 "i82975x UE", "", NULL);
316 else 321 else
317 edac_mc_handle_ce(mci, page, offst, info->derrsyn, row, 322 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
318 chan, "i82975x CE"); 323 page, offst, info->derrsyn,
324 row, chan ? chan : 0, -1,
325 "i82975x CE", "", NULL);
319 326
320 return 1; 327 return 1;
321} 328}
@@ -370,8 +377,10 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
370 struct csrow_info *csrow; 377 struct csrow_info *csrow;
371 unsigned long last_cumul_size; 378 unsigned long last_cumul_size;
372 u8 value; 379 u8 value;
373 u32 cumul_size; 380 u32 cumul_size, nr_pages;
374 int index, chan; 381 int index, chan;
382 struct dimm_info *dimm;
383 enum dev_type dtype;
375 384
376 last_cumul_size = 0; 385 last_cumul_size = 0;
377 386
@@ -400,28 +409,33 @@ static void i82975x_init_csrows(struct mem_ctl_info *mci,
400 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index, 409 debugf3("%s(): (%d) cumul_size 0x%x\n", __func__, index,
401 cumul_size); 410 cumul_size);
402 411
412 nr_pages = cumul_size - last_cumul_size;
413 if (!nr_pages)
414 continue;
415
403 /* 416 /*
404 * Initialise dram labels 417 * Initialise dram labels
405 * index values: 418 * index values:
406 * [0-7] for single-channel; i.e. csrow->nr_channels = 1 419 * [0-7] for single-channel; i.e. csrow->nr_channels = 1
407 * [0-3] for dual-channel; i.e. csrow->nr_channels = 2 420 * [0-3] for dual-channel; i.e. csrow->nr_channels = 2
408 */ 421 */
409 for (chan = 0; chan < csrow->nr_channels; chan++) 422 dtype = i82975x_dram_type(mch_window, index);
410 strncpy(csrow->channels[chan].label, 423 for (chan = 0; chan < csrow->nr_channels; chan++) {
424 dimm = mci->csrows[index].channels[chan].dimm;
425
426 dimm->nr_pages = nr_pages / csrow->nr_channels;
427 strncpy(csrow->channels[chan].dimm->label,
411 labels[(index >> 1) + (chan * 2)], 428 labels[(index >> 1) + (chan * 2)],
412 EDAC_MC_LABEL_LEN); 429 EDAC_MC_LABEL_LEN);
413 430 dimm->grain = 1 << 7; /* 128Byte cache-line resolution */
414 if (cumul_size == last_cumul_size) 431 dimm->dtype = i82975x_dram_type(mch_window, index);
415 continue; /* not populated */ 432 dimm->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
433 dimm->edac_mode = EDAC_SECDED; /* only supported */
434 }
416 435
417 csrow->first_page = last_cumul_size; 436 csrow->first_page = last_cumul_size;
418 csrow->last_page = cumul_size - 1; 437 csrow->last_page = cumul_size - 1;
419 csrow->nr_pages = cumul_size - last_cumul_size;
420 last_cumul_size = cumul_size; 438 last_cumul_size = cumul_size;
421 csrow->grain = 1 << 7; /* 128Byte cache-line resolution */
422 csrow->mtype = MEM_DDR2; /* I82975x supports only DDR2 */
423 csrow->dtype = i82975x_dram_type(mch_window, index);
424 csrow->edac_mode = EDAC_SECDED; /* only supported */
425 } 439 }
426} 440}
427 441
@@ -463,6 +477,7 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
463{ 477{
464 int rc = -ENODEV; 478 int rc = -ENODEV;
465 struct mem_ctl_info *mci; 479 struct mem_ctl_info *mci;
480 struct edac_mc_layer layers[2];
466 struct i82975x_pvt *pvt; 481 struct i82975x_pvt *pvt;
467 void __iomem *mch_window; 482 void __iomem *mch_window;
468 u32 mchbar; 483 u32 mchbar;
@@ -531,8 +546,13 @@ static int i82975x_probe1(struct pci_dev *pdev, int dev_idx)
531 chans = dual_channel_active(mch_window) + 1; 546 chans = dual_channel_active(mch_window) + 1;
532 547
533 /* assuming only one controller, index thus is 0 */ 548 /* assuming only one controller, index thus is 0 */
534 mci = edac_mc_alloc(sizeof(*pvt), I82975X_NR_CSROWS(chans), 549 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
535 chans, 0); 550 layers[0].size = I82975X_NR_DIMMS;
551 layers[0].is_virt_csrow = true;
552 layers[1].type = EDAC_MC_LAYER_CHANNEL;
553 layers[1].size = I82975X_NR_CSROWS(chans);
554 layers[1].is_virt_csrow = false;
555 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, sizeof(*pvt));
536 if (!mci) { 556 if (!mci) {
537 rc = -ENOMEM; 557 rc = -ENOMEM;
538 goto fail1; 558 goto fail1;
diff --git a/drivers/edac/mpc85xx_edac.c b/drivers/edac/mpc85xx_edac.c
index 73464a62adf7..4c402353ba98 100644
--- a/drivers/edac/mpc85xx_edac.c
+++ b/drivers/edac/mpc85xx_edac.c
@@ -854,12 +854,16 @@ static void mpc85xx_mc_check(struct mem_ctl_info *mci)
854 mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n"); 854 mpc85xx_mc_printk(mci, KERN_ERR, "PFN out of range!\n");
855 855
856 if (err_detect & DDR_EDE_SBE) 856 if (err_detect & DDR_EDE_SBE)
857 edac_mc_handle_ce(mci, pfn, err_addr & ~PAGE_MASK, 857 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
858 syndrome, row_index, 0, mci->ctl_name); 858 pfn, err_addr & ~PAGE_MASK, syndrome,
859 row_index, 0, -1,
860 mci->ctl_name, "", NULL);
859 861
860 if (err_detect & DDR_EDE_MBE) 862 if (err_detect & DDR_EDE_MBE)
861 edac_mc_handle_ue(mci, pfn, err_addr & ~PAGE_MASK, 863 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
862 row_index, mci->ctl_name); 864 pfn, err_addr & ~PAGE_MASK, syndrome,
865 row_index, 0, -1,
866 mci->ctl_name, "", NULL);
863 867
864 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect); 868 out_be32(pdata->mc_vbase + MPC85XX_MC_ERR_DETECT, err_detect);
865} 869}
@@ -883,6 +887,7 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
883{ 887{
884 struct mpc85xx_mc_pdata *pdata = mci->pvt_info; 888 struct mpc85xx_mc_pdata *pdata = mci->pvt_info;
885 struct csrow_info *csrow; 889 struct csrow_info *csrow;
890 struct dimm_info *dimm;
886 u32 sdram_ctl; 891 u32 sdram_ctl;
887 u32 sdtype; 892 u32 sdtype;
888 enum mem_type mtype; 893 enum mem_type mtype;
@@ -929,6 +934,8 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
929 u32 end; 934 u32 end;
930 935
931 csrow = &mci->csrows[index]; 936 csrow = &mci->csrows[index];
937 dimm = csrow->channels[0].dimm;
938
932 cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 + 939 cs_bnds = in_be32(pdata->mc_vbase + MPC85XX_MC_CS_BNDS_0 +
933 (index * MPC85XX_MC_CS_BNDS_OFS)); 940 (index * MPC85XX_MC_CS_BNDS_OFS));
934 941
@@ -944,19 +951,21 @@ static void __devinit mpc85xx_init_csrows(struct mem_ctl_info *mci)
944 951
945 csrow->first_page = start; 952 csrow->first_page = start;
946 csrow->last_page = end; 953 csrow->last_page = end;
947 csrow->nr_pages = end + 1 - start; 954
948 csrow->grain = 8; 955 dimm->nr_pages = end + 1 - start;
949 csrow->mtype = mtype; 956 dimm->grain = 8;
950 csrow->dtype = DEV_UNKNOWN; 957 dimm->mtype = mtype;
958 dimm->dtype = DEV_UNKNOWN;
951 if (sdram_ctl & DSC_X32_EN) 959 if (sdram_ctl & DSC_X32_EN)
952 csrow->dtype = DEV_X32; 960 dimm->dtype = DEV_X32;
953 csrow->edac_mode = EDAC_SECDED; 961 dimm->edac_mode = EDAC_SECDED;
954 } 962 }
955} 963}
956 964
957static int __devinit mpc85xx_mc_err_probe(struct platform_device *op) 965static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
958{ 966{
959 struct mem_ctl_info *mci; 967 struct mem_ctl_info *mci;
968 struct edac_mc_layer layers[2];
960 struct mpc85xx_mc_pdata *pdata; 969 struct mpc85xx_mc_pdata *pdata;
961 struct resource r; 970 struct resource r;
962 u32 sdram_ctl; 971 u32 sdram_ctl;
@@ -965,7 +974,13 @@ static int __devinit mpc85xx_mc_err_probe(struct platform_device *op)
965 if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL)) 974 if (!devres_open_group(&op->dev, mpc85xx_mc_err_probe, GFP_KERNEL))
966 return -ENOMEM; 975 return -ENOMEM;
967 976
968 mci = edac_mc_alloc(sizeof(*pdata), 4, 1, edac_mc_idx); 977 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
978 layers[0].size = 4;
979 layers[0].is_virt_csrow = true;
980 layers[1].type = EDAC_MC_LAYER_CHANNEL;
981 layers[1].size = 1;
982 layers[1].is_virt_csrow = false;
983 mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), sizeof(*pdata));
969 if (!mci) { 984 if (!mci) {
970 devres_release_group(&op->dev, mpc85xx_mc_err_probe); 985 devres_release_group(&op->dev, mpc85xx_mc_err_probe);
971 return -ENOMEM; 986 return -ENOMEM;
diff --git a/drivers/edac/mv64x60_edac.c b/drivers/edac/mv64x60_edac.c
index 7e5ff367705c..b0bb5a3d2527 100644
--- a/drivers/edac/mv64x60_edac.c
+++ b/drivers/edac/mv64x60_edac.c
@@ -611,12 +611,17 @@ static void mv64x60_mc_check(struct mem_ctl_info *mci)
611 611
612 /* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */ 612 /* first bit clear in ECC Err Reg, 1 bit error, correctable by HW */
613 if (!(reg & 0x1)) 613 if (!(reg & 0x1))
614 edac_mc_handle_ce(mci, err_addr >> PAGE_SHIFT, 614 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
615 err_addr & PAGE_MASK, syndrome, 0, 0, 615 err_addr >> PAGE_SHIFT,
616 mci->ctl_name); 616 err_addr & PAGE_MASK, syndrome,
617 0, 0, -1,
618 mci->ctl_name, "", NULL);
617 else /* 2 bit error, UE */ 619 else /* 2 bit error, UE */
618 edac_mc_handle_ue(mci, err_addr >> PAGE_SHIFT, 620 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
619 err_addr & PAGE_MASK, 0, mci->ctl_name); 621 err_addr >> PAGE_SHIFT,
622 err_addr & PAGE_MASK, 0,
623 0, 0, -1,
624 mci->ctl_name, "", NULL);
620 625
621 /* clear the error */ 626 /* clear the error */
622 out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0); 627 out_le32(pdata->mc_vbase + MV64X60_SDRAM_ERR_ADDR, 0);
@@ -656,6 +661,8 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
656 struct mv64x60_mc_pdata *pdata) 661 struct mv64x60_mc_pdata *pdata)
657{ 662{
658 struct csrow_info *csrow; 663 struct csrow_info *csrow;
664 struct dimm_info *dimm;
665
659 u32 devtype; 666 u32 devtype;
660 u32 ctl; 667 u32 ctl;
661 668
@@ -664,35 +671,36 @@ static void mv64x60_init_csrows(struct mem_ctl_info *mci,
664 ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG); 671 ctl = in_le32(pdata->mc_vbase + MV64X60_SDRAM_CONFIG);
665 672
666 csrow = &mci->csrows[0]; 673 csrow = &mci->csrows[0];
667 csrow->first_page = 0; 674 dimm = csrow->channels[0].dimm;
668 csrow->nr_pages = pdata->total_mem >> PAGE_SHIFT; 675
669 csrow->last_page = csrow->first_page + csrow->nr_pages - 1; 676 dimm->nr_pages = pdata->total_mem >> PAGE_SHIFT;
670 csrow->grain = 8; 677 dimm->grain = 8;
671 678
672 csrow->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR; 679 dimm->mtype = (ctl & MV64X60_SDRAM_REGISTERED) ? MEM_RDDR : MEM_DDR;
673 680
674 devtype = (ctl >> 20) & 0x3; 681 devtype = (ctl >> 20) & 0x3;
675 switch (devtype) { 682 switch (devtype) {
676 case 0x0: 683 case 0x0:
677 csrow->dtype = DEV_X32; 684 dimm->dtype = DEV_X32;
678 break; 685 break;
679 case 0x2: /* could be X8 too, but no way to tell */ 686 case 0x2: /* could be X8 too, but no way to tell */
680 csrow->dtype = DEV_X16; 687 dimm->dtype = DEV_X16;
681 break; 688 break;
682 case 0x3: 689 case 0x3:
683 csrow->dtype = DEV_X4; 690 dimm->dtype = DEV_X4;
684 break; 691 break;
685 default: 692 default:
686 csrow->dtype = DEV_UNKNOWN; 693 dimm->dtype = DEV_UNKNOWN;
687 break; 694 break;
688 } 695 }
689 696
690 csrow->edac_mode = EDAC_SECDED; 697 dimm->edac_mode = EDAC_SECDED;
691} 698}
692 699
693static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev) 700static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
694{ 701{
695 struct mem_ctl_info *mci; 702 struct mem_ctl_info *mci;
703 struct edac_mc_layer layers[2];
696 struct mv64x60_mc_pdata *pdata; 704 struct mv64x60_mc_pdata *pdata;
697 struct resource *r; 705 struct resource *r;
698 u32 ctl; 706 u32 ctl;
@@ -701,7 +709,14 @@ static int __devinit mv64x60_mc_err_probe(struct platform_device *pdev)
701 if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL)) 709 if (!devres_open_group(&pdev->dev, mv64x60_mc_err_probe, GFP_KERNEL))
702 return -ENOMEM; 710 return -ENOMEM;
703 711
704 mci = edac_mc_alloc(sizeof(struct mv64x60_mc_pdata), 1, 1, edac_mc_idx); 712 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
713 layers[0].size = 1;
714 layers[0].is_virt_csrow = true;
715 layers[1].type = EDAC_MC_LAYER_CHANNEL;
716 layers[1].size = 1;
717 layers[1].is_virt_csrow = false;
718 mci = edac_mc_alloc(edac_mc_idx, ARRAY_SIZE(layers), layers,
719 sizeof(struct mv64x60_mc_pdata));
705 if (!mci) { 720 if (!mci) {
706 printk(KERN_ERR "%s: No memory for CPU err\n", __func__); 721 printk(KERN_ERR "%s: No memory for CPU err\n", __func__);
707 devres_release_group(&pdev->dev, mv64x60_mc_err_probe); 722 devres_release_group(&pdev->dev, mv64x60_mc_err_probe);
diff --git a/drivers/edac/pasemi_edac.c b/drivers/edac/pasemi_edac.c
index 7f71ee436744..b095a906a994 100644
--- a/drivers/edac/pasemi_edac.c
+++ b/drivers/edac/pasemi_edac.c
@@ -110,15 +110,16 @@ static void pasemi_edac_process_error_info(struct mem_ctl_info *mci, u32 errsta)
110 /* uncorrectable/multi-bit errors */ 110 /* uncorrectable/multi-bit errors */
111 if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS | 111 if (errsta & (MCDEBUG_ERRSTA_MBE_STATUS |
112 MCDEBUG_ERRSTA_RFL_STATUS)) { 112 MCDEBUG_ERRSTA_RFL_STATUS)) {
113 edac_mc_handle_ue(mci, mci->csrows[cs].first_page, 0, 113 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
114 cs, mci->ctl_name); 114 mci->csrows[cs].first_page, 0, 0,
115 cs, 0, -1, mci->ctl_name, "", NULL);
115 } 116 }
116 117
117 /* correctable/single-bit errors */ 118 /* correctable/single-bit errors */
118 if (errsta & MCDEBUG_ERRSTA_SBE_STATUS) { 119 if (errsta & MCDEBUG_ERRSTA_SBE_STATUS)
119 edac_mc_handle_ce(mci, mci->csrows[cs].first_page, 0, 120 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
120 0, cs, 0, mci->ctl_name); 121 mci->csrows[cs].first_page, 0, 0,
121 } 122 cs, 0, -1, mci->ctl_name, "", NULL);
122} 123}
123 124
124static void pasemi_edac_check(struct mem_ctl_info *mci) 125static void pasemi_edac_check(struct mem_ctl_info *mci)
@@ -135,11 +136,13 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
135 enum edac_type edac_mode) 136 enum edac_type edac_mode)
136{ 137{
137 struct csrow_info *csrow; 138 struct csrow_info *csrow;
139 struct dimm_info *dimm;
138 u32 rankcfg; 140 u32 rankcfg;
139 int index; 141 int index;
140 142
141 for (index = 0; index < mci->nr_csrows; index++) { 143 for (index = 0; index < mci->nr_csrows; index++) {
142 csrow = &mci->csrows[index]; 144 csrow = &mci->csrows[index];
145 dimm = csrow->channels[0].dimm;
143 146
144 pci_read_config_dword(pdev, 147 pci_read_config_dword(pdev,
145 MCDRAM_RANKCFG + (index * 12), 148 MCDRAM_RANKCFG + (index * 12),
@@ -151,20 +154,20 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
151 switch ((rankcfg & MCDRAM_RANKCFG_TYPE_SIZE_M) >> 154 switch ((rankcfg & MCDRAM_RANKCFG_TYPE_SIZE_M) >>
152 MCDRAM_RANKCFG_TYPE_SIZE_S) { 155 MCDRAM_RANKCFG_TYPE_SIZE_S) {
153 case 0: 156 case 0:
154 csrow->nr_pages = 128 << (20 - PAGE_SHIFT); 157 dimm->nr_pages = 128 << (20 - PAGE_SHIFT);
155 break; 158 break;
156 case 1: 159 case 1:
157 csrow->nr_pages = 256 << (20 - PAGE_SHIFT); 160 dimm->nr_pages = 256 << (20 - PAGE_SHIFT);
158 break; 161 break;
159 case 2: 162 case 2:
160 case 3: 163 case 3:
161 csrow->nr_pages = 512 << (20 - PAGE_SHIFT); 164 dimm->nr_pages = 512 << (20 - PAGE_SHIFT);
162 break; 165 break;
163 case 4: 166 case 4:
164 csrow->nr_pages = 1024 << (20 - PAGE_SHIFT); 167 dimm->nr_pages = 1024 << (20 - PAGE_SHIFT);
165 break; 168 break;
166 case 5: 169 case 5:
167 csrow->nr_pages = 2048 << (20 - PAGE_SHIFT); 170 dimm->nr_pages = 2048 << (20 - PAGE_SHIFT);
168 break; 171 break;
169 default: 172 default:
170 edac_mc_printk(mci, KERN_ERR, 173 edac_mc_printk(mci, KERN_ERR,
@@ -174,13 +177,13 @@ static int pasemi_edac_init_csrows(struct mem_ctl_info *mci,
174 } 177 }
175 178
176 csrow->first_page = last_page_in_mmc; 179 csrow->first_page = last_page_in_mmc;
177 csrow->last_page = csrow->first_page + csrow->nr_pages - 1; 180 csrow->last_page = csrow->first_page + dimm->nr_pages - 1;
178 last_page_in_mmc += csrow->nr_pages; 181 last_page_in_mmc += dimm->nr_pages;
179 csrow->page_mask = 0; 182 csrow->page_mask = 0;
180 csrow->grain = PASEMI_EDAC_ERROR_GRAIN; 183 dimm->grain = PASEMI_EDAC_ERROR_GRAIN;
181 csrow->mtype = MEM_DDR; 184 dimm->mtype = MEM_DDR;
182 csrow->dtype = DEV_UNKNOWN; 185 dimm->dtype = DEV_UNKNOWN;
183 csrow->edac_mode = edac_mode; 186 dimm->edac_mode = edac_mode;
184 } 187 }
185 return 0; 188 return 0;
186} 189}
@@ -189,6 +192,7 @@ static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
189 const struct pci_device_id *ent) 192 const struct pci_device_id *ent)
190{ 193{
191 struct mem_ctl_info *mci = NULL; 194 struct mem_ctl_info *mci = NULL;
195 struct edac_mc_layer layers[2];
192 u32 errctl1, errcor, scrub, mcen; 196 u32 errctl1, errcor, scrub, mcen;
193 197
194 pci_read_config_dword(pdev, MCCFG_MCEN, &mcen); 198 pci_read_config_dword(pdev, MCCFG_MCEN, &mcen);
@@ -205,9 +209,14 @@ static int __devinit pasemi_edac_probe(struct pci_dev *pdev,
205 MCDEBUG_ERRCTL1_RFL_LOG_EN; 209 MCDEBUG_ERRCTL1_RFL_LOG_EN;
206 pci_write_config_dword(pdev, MCDEBUG_ERRCTL1, errctl1); 210 pci_write_config_dword(pdev, MCDEBUG_ERRCTL1, errctl1);
207 211
208 mci = edac_mc_alloc(0, PASEMI_EDAC_NR_CSROWS, PASEMI_EDAC_NR_CHANS, 212 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
209 system_mmc_id++); 213 layers[0].size = PASEMI_EDAC_NR_CSROWS;
210 214 layers[0].is_virt_csrow = true;
215 layers[1].type = EDAC_MC_LAYER_CHANNEL;
216 layers[1].size = PASEMI_EDAC_NR_CHANS;
217 layers[1].is_virt_csrow = false;
218 mci = edac_mc_alloc(system_mmc_id++, ARRAY_SIZE(layers), layers,
219 0);
211 if (mci == NULL) 220 if (mci == NULL)
212 return -ENOMEM; 221 return -ENOMEM;
213 222
diff --git a/drivers/edac/ppc4xx_edac.c b/drivers/edac/ppc4xx_edac.c
index d427c69bb8b1..f3f9fed06ad7 100644
--- a/drivers/edac/ppc4xx_edac.c
+++ b/drivers/edac/ppc4xx_edac.c
@@ -727,7 +727,10 @@ ppc4xx_edac_handle_ce(struct mem_ctl_info *mci,
727 727
728 for (row = 0; row < mci->nr_csrows; row++) 728 for (row = 0; row < mci->nr_csrows; row++)
729 if (ppc4xx_edac_check_bank_error(status, row)) 729 if (ppc4xx_edac_check_bank_error(status, row))
730 edac_mc_handle_ce_no_info(mci, message); 730 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
731 0, 0, 0,
732 row, 0, -1,
733 message, "", NULL);
731} 734}
732 735
733/** 736/**
@@ -755,7 +758,10 @@ ppc4xx_edac_handle_ue(struct mem_ctl_info *mci,
755 758
756 for (row = 0; row < mci->nr_csrows; row++) 759 for (row = 0; row < mci->nr_csrows; row++)
757 if (ppc4xx_edac_check_bank_error(status, row)) 760 if (ppc4xx_edac_check_bank_error(status, row))
758 edac_mc_handle_ue(mci, page, offset, row, message); 761 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
762 page, offset, 0,
763 row, 0, -1,
764 message, "", NULL);
759} 765}
760 766
761/** 767/**
@@ -895,9 +901,8 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
895 enum mem_type mtype; 901 enum mem_type mtype;
896 enum dev_type dtype; 902 enum dev_type dtype;
897 enum edac_type edac_mode; 903 enum edac_type edac_mode;
898 int row; 904 int row, j;
899 u32 mbxcf, size; 905 u32 mbxcf, size, nr_pages;
900 static u32 ppc4xx_last_page;
901 906
902 /* Establish the memory type and width */ 907 /* Establish the memory type and width */
903 908
@@ -948,7 +953,7 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
948 case SDRAM_MBCF_SZ_2GB: 953 case SDRAM_MBCF_SZ_2GB:
949 case SDRAM_MBCF_SZ_4GB: 954 case SDRAM_MBCF_SZ_4GB:
950 case SDRAM_MBCF_SZ_8GB: 955 case SDRAM_MBCF_SZ_8GB:
951 csi->nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size); 956 nr_pages = SDRAM_MBCF_SZ_TO_PAGES(size);
952 break; 957 break;
953 default: 958 default:
954 ppc4xx_edac_mc_printk(KERN_ERR, mci, 959 ppc4xx_edac_mc_printk(KERN_ERR, mci,
@@ -959,10 +964,6 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
959 goto done; 964 goto done;
960 } 965 }
961 966
962 csi->first_page = ppc4xx_last_page;
963 csi->last_page = csi->first_page + csi->nr_pages - 1;
964 csi->page_mask = 0;
965
966 /* 967 /*
967 * It's unclear exactly what grain should be set to 968 * It's unclear exactly what grain should be set to
968 * here. The SDRAM_ECCES register allows resolution of 969 * here. The SDRAM_ECCES register allows resolution of
@@ -975,15 +976,17 @@ ppc4xx_edac_init_csrows(struct mem_ctl_info *mci, u32 mcopt1)
975 * possible values would be the PLB width (16), the 976 * possible values would be the PLB width (16), the
976 * page size (PAGE_SIZE) or the memory width (2 or 4). 977 * page size (PAGE_SIZE) or the memory width (2 or 4).
977 */ 978 */
979 for (j = 0; j < csi->nr_channels; j++) {
980 struct dimm_info *dimm = csi->channels[j].dimm;
978 981
979 csi->grain = 1; 982 dimm->nr_pages = nr_pages / csi->nr_channels;
980 983 dimm->grain = 1;
981 csi->mtype = mtype;
982 csi->dtype = dtype;
983 984
984 csi->edac_mode = edac_mode; 985 dimm->mtype = mtype;
986 dimm->dtype = dtype;
985 987
986 ppc4xx_last_page += csi->nr_pages; 988 dimm->edac_mode = edac_mode;
989 }
987 } 990 }
988 991
989 done: 992 done:
@@ -1236,6 +1239,7 @@ static int __devinit ppc4xx_edac_probe(struct platform_device *op)
1236 dcr_host_t dcr_host; 1239 dcr_host_t dcr_host;
1237 const struct device_node *np = op->dev.of_node; 1240 const struct device_node *np = op->dev.of_node;
1238 struct mem_ctl_info *mci = NULL; 1241 struct mem_ctl_info *mci = NULL;
1242 struct edac_mc_layer layers[2];
1239 static int ppc4xx_edac_instance; 1243 static int ppc4xx_edac_instance;
1240 1244
1241 /* 1245 /*
@@ -1281,12 +1285,14 @@ static int __devinit ppc4xx_edac_probe(struct platform_device *op)
1281 * controller instance and perform the appropriate 1285 * controller instance and perform the appropriate
1282 * initialization. 1286 * initialization.
1283 */ 1287 */
1284 1288 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
1285 mci = edac_mc_alloc(sizeof(struct ppc4xx_edac_pdata), 1289 layers[0].size = ppc4xx_edac_nr_csrows;
1286 ppc4xx_edac_nr_csrows, 1290 layers[0].is_virt_csrow = true;
1287 ppc4xx_edac_nr_chans, 1291 layers[1].type = EDAC_MC_LAYER_CHANNEL;
1288 ppc4xx_edac_instance); 1292 layers[1].size = ppc4xx_edac_nr_chans;
1289 1293 layers[1].is_virt_csrow = false;
1294 mci = edac_mc_alloc(ppc4xx_edac_instance, ARRAY_SIZE(layers), layers,
1295 sizeof(struct ppc4xx_edac_pdata));
1290 if (mci == NULL) { 1296 if (mci == NULL) {
1291 ppc4xx_edac_printk(KERN_ERR, "%s: " 1297 ppc4xx_edac_printk(KERN_ERR, "%s: "
1292 "Failed to allocate EDAC MC instance!\n", 1298 "Failed to allocate EDAC MC instance!\n",
diff --git a/drivers/edac/r82600_edac.c b/drivers/edac/r82600_edac.c
index 6d908ad72d64..e1cacd164f31 100644
--- a/drivers/edac/r82600_edac.c
+++ b/drivers/edac/r82600_edac.c
@@ -179,10 +179,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci,
179 error_found = 1; 179 error_found = 1;
180 180
181 if (handle_errors) 181 if (handle_errors)
182 edac_mc_handle_ce(mci, page, 0, /* not avail */ 182 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
183 syndrome, 183 page, 0, syndrome,
184 edac_mc_find_csrow_by_page(mci, page), 184 edac_mc_find_csrow_by_page(mci, page),
185 0, mci->ctl_name); 185 0, -1,
186 mci->ctl_name, "", NULL);
186 } 187 }
187 188
188 if (info->eapr & BIT(1)) { /* UE? */ 189 if (info->eapr & BIT(1)) { /* UE? */
@@ -190,9 +191,11 @@ static int r82600_process_error_info(struct mem_ctl_info *mci,
190 191
191 if (handle_errors) 192 if (handle_errors)
192 /* 82600 doesn't give enough info */ 193 /* 82600 doesn't give enough info */
193 edac_mc_handle_ue(mci, page, 0, 194 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
194 edac_mc_find_csrow_by_page(mci, page), 195 page, 0, 0,
195 mci->ctl_name); 196 edac_mc_find_csrow_by_page(mci, page),
197 0, -1,
198 mci->ctl_name, "", NULL);
196 } 199 }
197 200
198 return error_found; 201 return error_found;
@@ -216,6 +219,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
216 u8 dramcr) 219 u8 dramcr)
217{ 220{
218 struct csrow_info *csrow; 221 struct csrow_info *csrow;
222 struct dimm_info *dimm;
219 int index; 223 int index;
220 u8 drbar; /* SDRAM Row Boundary Address Register */ 224 u8 drbar; /* SDRAM Row Boundary Address Register */
221 u32 row_high_limit, row_high_limit_last; 225 u32 row_high_limit, row_high_limit_last;
@@ -227,6 +231,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
227 231
228 for (index = 0; index < mci->nr_csrows; index++) { 232 for (index = 0; index < mci->nr_csrows; index++) {
229 csrow = &mci->csrows[index]; 233 csrow = &mci->csrows[index];
234 dimm = csrow->channels[0].dimm;
230 235
231 /* find the DRAM Chip Select Base address and mask */ 236 /* find the DRAM Chip Select Base address and mask */
232 pci_read_config_byte(pdev, R82600_DRBA + index, &drbar); 237 pci_read_config_byte(pdev, R82600_DRBA + index, &drbar);
@@ -247,16 +252,17 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
247 252
248 csrow->first_page = row_base >> PAGE_SHIFT; 253 csrow->first_page = row_base >> PAGE_SHIFT;
249 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1; 254 csrow->last_page = (row_high_limit >> PAGE_SHIFT) - 1;
250 csrow->nr_pages = csrow->last_page - csrow->first_page + 1; 255
256 dimm->nr_pages = csrow->last_page - csrow->first_page + 1;
251 /* Error address is top 19 bits - so granularity is * 257 /* Error address is top 19 bits - so granularity is *
252 * 14 bits */ 258 * 14 bits */
253 csrow->grain = 1 << 14; 259 dimm->grain = 1 << 14;
254 csrow->mtype = reg_sdram ? MEM_RDDR : MEM_DDR; 260 dimm->mtype = reg_sdram ? MEM_RDDR : MEM_DDR;
255 /* FIXME - check that this is unknowable with this chipset */ 261 /* FIXME - check that this is unknowable with this chipset */
256 csrow->dtype = DEV_UNKNOWN; 262 dimm->dtype = DEV_UNKNOWN;
257 263
258 /* Mode is global on 82600 */ 264 /* Mode is global on 82600 */
259 csrow->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE; 265 dimm->edac_mode = ecc_on ? EDAC_SECDED : EDAC_NONE;
260 row_high_limit_last = row_high_limit; 266 row_high_limit_last = row_high_limit;
261 } 267 }
262} 268}
@@ -264,6 +270,7 @@ static void r82600_init_csrows(struct mem_ctl_info *mci, struct pci_dev *pdev,
264static int r82600_probe1(struct pci_dev *pdev, int dev_idx) 270static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
265{ 271{
266 struct mem_ctl_info *mci; 272 struct mem_ctl_info *mci;
273 struct edac_mc_layer layers[2];
267 u8 dramcr; 274 u8 dramcr;
268 u32 eapr; 275 u32 eapr;
269 u32 scrub_disabled; 276 u32 scrub_disabled;
@@ -278,8 +285,13 @@ static int r82600_probe1(struct pci_dev *pdev, int dev_idx)
278 debugf2("%s(): sdram refresh rate = %#0x\n", __func__, 285 debugf2("%s(): sdram refresh rate = %#0x\n", __func__,
279 sdram_refresh_rate); 286 sdram_refresh_rate);
280 debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr); 287 debugf2("%s(): DRAMC register = %#0x\n", __func__, dramcr);
281 mci = edac_mc_alloc(0, R82600_NR_CSROWS, R82600_NR_CHANS, 0); 288 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
282 289 layers[0].size = R82600_NR_CSROWS;
290 layers[0].is_virt_csrow = true;
291 layers[1].type = EDAC_MC_LAYER_CHANNEL;
292 layers[1].size = R82600_NR_CHANS;
293 layers[1].is_virt_csrow = false;
294 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
283 if (mci == NULL) 295 if (mci == NULL)
284 return -ENOMEM; 296 return -ENOMEM;
285 297
diff --git a/drivers/edac/sb_edac.c b/drivers/edac/sb_edac.c
index 123204f8e23b..4adaf4b7da99 100644
--- a/drivers/edac/sb_edac.c
+++ b/drivers/edac/sb_edac.c
@@ -314,8 +314,6 @@ struct sbridge_pvt {
314 struct sbridge_info info; 314 struct sbridge_info info;
315 struct sbridge_channel channel[NUM_CHANNELS]; 315 struct sbridge_channel channel[NUM_CHANNELS];
316 316
317 int csrow_map[NUM_CHANNELS][MAX_DIMMS];
318
319 /* Memory type detection */ 317 /* Memory type detection */
320 bool is_mirrored, is_lockstep, is_close_pg; 318 bool is_mirrored, is_lockstep, is_close_pg;
321 319
@@ -487,29 +485,14 @@ static struct pci_dev *get_pdev_slot_func(u8 bus, unsigned slot,
487} 485}
488 486
489/** 487/**
490 * sbridge_get_active_channels() - gets the number of channels and csrows 488 * check_if_ecc_is_active() - Checks if ECC is active
491 * bus: Device bus 489 * bus: Device bus
492 * @channels: Number of channels that will be returned
493 * @csrows: Number of csrows found
494 *
495 * Since EDAC core needs to know in advance the number of available channels
496 * and csrows, in order to allocate memory for csrows/channels, it is needed
497 * to run two similar steps. At the first step, implemented on this function,
498 * it checks the number of csrows/channels present at one socket, identified
499 * by the associated PCI bus.
500 * this is used in order to properly allocate the size of mci components.
501 * Note: one csrow is one dimm.
502 */ 490 */
503static int sbridge_get_active_channels(const u8 bus, unsigned *channels, 491static int check_if_ecc_is_active(const u8 bus)
504 unsigned *csrows)
505{ 492{
506 struct pci_dev *pdev = NULL; 493 struct pci_dev *pdev = NULL;
507 int i, j;
508 u32 mcmtr; 494 u32 mcmtr;
509 495
510 *channels = 0;
511 *csrows = 0;
512
513 pdev = get_pdev_slot_func(bus, 15, 0); 496 pdev = get_pdev_slot_func(bus, 15, 0);
514 if (!pdev) { 497 if (!pdev) {
515 sbridge_printk(KERN_ERR, "Couldn't find PCI device " 498 sbridge_printk(KERN_ERR, "Couldn't find PCI device "
@@ -523,41 +506,14 @@ static int sbridge_get_active_channels(const u8 bus, unsigned *channels,
523 sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n"); 506 sbridge_printk(KERN_ERR, "ECC is disabled. Aborting\n");
524 return -ENODEV; 507 return -ENODEV;
525 } 508 }
526
527 for (i = 0; i < NUM_CHANNELS; i++) {
528 u32 mtr;
529
530 /* Device 15 functions 2 - 5 */
531 pdev = get_pdev_slot_func(bus, 15, 2 + i);
532 if (!pdev) {
533 sbridge_printk(KERN_ERR, "Couldn't find PCI device "
534 "%2x.%02d.%d!!!\n",
535 bus, 15, 2 + i);
536 return -ENODEV;
537 }
538 (*channels)++;
539
540 for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
541 pci_read_config_dword(pdev, mtr_regs[j], &mtr);
542 debugf1("Bus#%02x channel #%d MTR%d = %x\n", bus, i, j, mtr);
543 if (IS_DIMM_PRESENT(mtr))
544 (*csrows)++;
545 }
546 }
547
548 debugf0("Number of active channels: %d, number of active dimms: %d\n",
549 *channels, *csrows);
550
551 return 0; 509 return 0;
552} 510}
553 511
554static int get_dimm_config(const struct mem_ctl_info *mci) 512static int get_dimm_config(struct mem_ctl_info *mci)
555{ 513{
556 struct sbridge_pvt *pvt = mci->pvt_info; 514 struct sbridge_pvt *pvt = mci->pvt_info;
557 struct csrow_info *csr; 515 struct dimm_info *dimm;
558 int i, j, banks, ranks, rows, cols, size, npages; 516 int i, j, banks, ranks, rows, cols, size, npages;
559 int csrow = 0;
560 unsigned long last_page = 0;
561 u32 reg; 517 u32 reg;
562 enum edac_type mode; 518 enum edac_type mode;
563 enum mem_type mtype; 519 enum mem_type mtype;
@@ -616,6 +572,8 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
616 u32 mtr; 572 u32 mtr;
617 573
618 for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) { 574 for (j = 0; j < ARRAY_SIZE(mtr_regs); j++) {
575 dimm = EDAC_DIMM_PTR(mci->layers, mci->dimms, mci->n_layers,
576 i, j, 0);
619 pci_read_config_dword(pvt->pci_tad[i], 577 pci_read_config_dword(pvt->pci_tad[i],
620 mtr_regs[j], &mtr); 578 mtr_regs[j], &mtr);
621 debugf4("Channel #%d MTR%d = %x\n", i, j, mtr); 579 debugf4("Channel #%d MTR%d = %x\n", i, j, mtr);
@@ -634,29 +592,15 @@ static int get_dimm_config(const struct mem_ctl_info *mci)
634 pvt->sbridge_dev->mc, i, j, 592 pvt->sbridge_dev->mc, i, j,
635 size, npages, 593 size, npages,
636 banks, ranks, rows, cols); 594 banks, ranks, rows, cols);
637 csr = &mci->csrows[csrow]; 595
638 596 dimm->nr_pages = npages;
639 csr->first_page = last_page; 597 dimm->grain = 32;
640 csr->last_page = last_page + npages - 1; 598 dimm->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
641 csr->page_mask = 0UL; /* Unused */ 599 dimm->mtype = mtype;
642 csr->nr_pages = npages; 600 dimm->edac_mode = mode;
643 csr->grain = 32; 601 snprintf(dimm->label, sizeof(dimm->label),
644 csr->csrow_idx = csrow;
645 csr->dtype = (banks == 8) ? DEV_X8 : DEV_X4;
646 csr->ce_count = 0;
647 csr->ue_count = 0;
648 csr->mtype = mtype;
649 csr->edac_mode = mode;
650 csr->nr_channels = 1;
651 csr->channels[0].chan_idx = i;
652 csr->channels[0].ce_count = 0;
653 pvt->csrow_map[i][j] = csrow;
654 snprintf(csr->channels[0].label,
655 sizeof(csr->channels[0].label),
656 "CPU_SrcID#%u_Channel#%u_DIMM#%u", 602 "CPU_SrcID#%u_Channel#%u_DIMM#%u",
657 pvt->sbridge_dev->source_id, i, j); 603 pvt->sbridge_dev->source_id, i, j);
658 last_page += npages;
659 csrow++;
660 } 604 }
661 } 605 }
662 } 606 }
@@ -844,11 +788,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
844 u8 *socket, 788 u8 *socket,
845 long *channel_mask, 789 long *channel_mask,
846 u8 *rank, 790 u8 *rank,
847 char *area_type) 791 char **area_type, char *msg)
848{ 792{
849 struct mem_ctl_info *new_mci; 793 struct mem_ctl_info *new_mci;
850 struct sbridge_pvt *pvt = mci->pvt_info; 794 struct sbridge_pvt *pvt = mci->pvt_info;
851 char msg[256];
852 int n_rir, n_sads, n_tads, sad_way, sck_xch; 795 int n_rir, n_sads, n_tads, sad_way, sck_xch;
853 int sad_interl, idx, base_ch; 796 int sad_interl, idx, base_ch;
854 int interleave_mode; 797 int interleave_mode;
@@ -870,12 +813,10 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
870 */ 813 */
871 if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) { 814 if ((addr > (u64) pvt->tolm) && (addr < (1LL << 32))) {
872 sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr); 815 sprintf(msg, "Error at TOLM area, on addr 0x%08Lx", addr);
873 edac_mc_handle_ce_no_info(mci, msg);
874 return -EINVAL; 816 return -EINVAL;
875 } 817 }
876 if (addr >= (u64)pvt->tohm) { 818 if (addr >= (u64)pvt->tohm) {
877 sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr); 819 sprintf(msg, "Error at MMIOH area, on addr 0x%016Lx", addr);
878 edac_mc_handle_ce_no_info(mci, msg);
879 return -EINVAL; 820 return -EINVAL;
880 } 821 }
881 822
@@ -892,7 +833,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
892 limit = SAD_LIMIT(reg); 833 limit = SAD_LIMIT(reg);
893 if (limit <= prv) { 834 if (limit <= prv) {
894 sprintf(msg, "Can't discover the memory socket"); 835 sprintf(msg, "Can't discover the memory socket");
895 edac_mc_handle_ce_no_info(mci, msg);
896 return -EINVAL; 836 return -EINVAL;
897 } 837 }
898 if (addr <= limit) 838 if (addr <= limit)
@@ -901,10 +841,9 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
901 } 841 }
902 if (n_sads == MAX_SAD) { 842 if (n_sads == MAX_SAD) {
903 sprintf(msg, "Can't discover the memory socket"); 843 sprintf(msg, "Can't discover the memory socket");
904 edac_mc_handle_ce_no_info(mci, msg);
905 return -EINVAL; 844 return -EINVAL;
906 } 845 }
907 area_type = get_dram_attr(reg); 846 *area_type = get_dram_attr(reg);
908 interleave_mode = INTERLEAVE_MODE(reg); 847 interleave_mode = INTERLEAVE_MODE(reg);
909 848
910 pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads], 849 pci_read_config_dword(pvt->pci_sad0, interleave_list[n_sads],
@@ -942,7 +881,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
942 break; 881 break;
943 default: 882 default:
944 sprintf(msg, "Can't discover socket interleave"); 883 sprintf(msg, "Can't discover socket interleave");
945 edac_mc_handle_ce_no_info(mci, msg);
946 return -EINVAL; 884 return -EINVAL;
947 } 885 }
948 *socket = sad_interleave[idx]; 886 *socket = sad_interleave[idx];
@@ -957,7 +895,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
957 if (!new_mci) { 895 if (!new_mci) {
958 sprintf(msg, "Struct for socket #%u wasn't initialized", 896 sprintf(msg, "Struct for socket #%u wasn't initialized",
959 *socket); 897 *socket);
960 edac_mc_handle_ce_no_info(mci, msg);
961 return -EINVAL; 898 return -EINVAL;
962 } 899 }
963 mci = new_mci; 900 mci = new_mci;
@@ -973,7 +910,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
973 limit = TAD_LIMIT(reg); 910 limit = TAD_LIMIT(reg);
974 if (limit <= prv) { 911 if (limit <= prv) {
975 sprintf(msg, "Can't discover the memory channel"); 912 sprintf(msg, "Can't discover the memory channel");
976 edac_mc_handle_ce_no_info(mci, msg);
977 return -EINVAL; 913 return -EINVAL;
978 } 914 }
979 if (addr <= limit) 915 if (addr <= limit)
@@ -1013,7 +949,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1013 break; 949 break;
1014 default: 950 default:
1015 sprintf(msg, "Can't discover the TAD target"); 951 sprintf(msg, "Can't discover the TAD target");
1016 edac_mc_handle_ce_no_info(mci, msg);
1017 return -EINVAL; 952 return -EINVAL;
1018 } 953 }
1019 *channel_mask = 1 << base_ch; 954 *channel_mask = 1 << base_ch;
@@ -1027,7 +962,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1027 break; 962 break;
1028 default: 963 default:
1029 sprintf(msg, "Invalid mirror set. Can't decode addr"); 964 sprintf(msg, "Invalid mirror set. Can't decode addr");
1030 edac_mc_handle_ce_no_info(mci, msg);
1031 return -EINVAL; 965 return -EINVAL;
1032 } 966 }
1033 } else 967 } else
@@ -1055,7 +989,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1055 if (offset > addr) { 989 if (offset > addr) {
1056 sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!", 990 sprintf(msg, "Can't calculate ch addr: TAD offset 0x%08Lx is too high for addr 0x%08Lx!",
1057 offset, addr); 991 offset, addr);
1058 edac_mc_handle_ce_no_info(mci, msg);
1059 return -EINVAL; 992 return -EINVAL;
1060 } 993 }
1061 addr -= offset; 994 addr -= offset;
@@ -1095,7 +1028,6 @@ static int get_memory_error_data(struct mem_ctl_info *mci,
1095 if (n_rir == MAX_RIR_RANGES) { 1028 if (n_rir == MAX_RIR_RANGES) {
1096 sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx", 1029 sprintf(msg, "Can't discover the memory rank for ch addr 0x%08Lx",
1097 ch_addr); 1030 ch_addr);
1098 edac_mc_handle_ce_no_info(mci, msg);
1099 return -EINVAL; 1031 return -EINVAL;
1100 } 1032 }
1101 rir_way = RIR_WAY(reg); 1033 rir_way = RIR_WAY(reg);
@@ -1409,7 +1341,8 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
1409{ 1341{
1410 struct mem_ctl_info *new_mci; 1342 struct mem_ctl_info *new_mci;
1411 struct sbridge_pvt *pvt = mci->pvt_info; 1343 struct sbridge_pvt *pvt = mci->pvt_info;
1412 char *type, *optype, *msg, *recoverable_msg; 1344 enum hw_event_mc_err_type tp_event;
1345 char *type, *optype, msg[256];
1413 bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0); 1346 bool ripv = GET_BITFIELD(m->mcgstatus, 0, 0);
1414 bool overflow = GET_BITFIELD(m->status, 62, 62); 1347 bool overflow = GET_BITFIELD(m->status, 62, 62);
1415 bool uncorrected_error = GET_BITFIELD(m->status, 61, 61); 1348 bool uncorrected_error = GET_BITFIELD(m->status, 61, 61);
@@ -1421,13 +1354,21 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
1421 u32 optypenum = GET_BITFIELD(m->status, 4, 6); 1354 u32 optypenum = GET_BITFIELD(m->status, 4, 6);
1422 long channel_mask, first_channel; 1355 long channel_mask, first_channel;
1423 u8 rank, socket; 1356 u8 rank, socket;
1424 int csrow, rc, dimm; 1357 int rc, dimm;
1425 char *area_type = "Unknown"; 1358 char *area_type = NULL;
1426 1359
1427 if (ripv) 1360 if (uncorrected_error) {
1428 type = "NON_FATAL"; 1361 if (ripv) {
1429 else 1362 type = "FATAL";
1430 type = "FATAL"; 1363 tp_event = HW_EVENT_ERR_FATAL;
1364 } else {
1365 type = "NON_FATAL";
1366 tp_event = HW_EVENT_ERR_UNCORRECTED;
1367 }
1368 } else {
1369 type = "CORRECTED";
1370 tp_event = HW_EVENT_ERR_CORRECTED;
1371 }
1431 1372
1432 /* 1373 /*
1433 * According with Table 15-9 of the Intel Architecture spec vol 3A, 1374 * According with Table 15-9 of the Intel Architecture spec vol 3A,
@@ -1445,19 +1386,19 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
1445 } else { 1386 } else {
1446 switch (optypenum) { 1387 switch (optypenum) {
1447 case 0: 1388 case 0:
1448 optype = "generic undef request"; 1389 optype = "generic undef request error";
1449 break; 1390 break;
1450 case 1: 1391 case 1:
1451 optype = "memory read"; 1392 optype = "memory read error";
1452 break; 1393 break;
1453 case 2: 1394 case 2:
1454 optype = "memory write"; 1395 optype = "memory write error";
1455 break; 1396 break;
1456 case 3: 1397 case 3:
1457 optype = "addr/cmd"; 1398 optype = "addr/cmd error";
1458 break; 1399 break;
1459 case 4: 1400 case 4:
1460 optype = "memory scrubbing"; 1401 optype = "memory scrubbing error";
1461 break; 1402 break;
1462 default: 1403 default:
1463 optype = "reserved"; 1404 optype = "reserved";
@@ -1466,13 +1407,13 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
1466 } 1407 }
1467 1408
1468 rc = get_memory_error_data(mci, m->addr, &socket, 1409 rc = get_memory_error_data(mci, m->addr, &socket,
1469 &channel_mask, &rank, area_type); 1410 &channel_mask, &rank, &area_type, msg);
1470 if (rc < 0) 1411 if (rc < 0)
1471 return; 1412 goto err_parsing;
1472 new_mci = get_mci_for_node_id(socket); 1413 new_mci = get_mci_for_node_id(socket);
1473 if (!new_mci) { 1414 if (!new_mci) {
1474 edac_mc_handle_ce_no_info(mci, "Error: socket got corrupted!"); 1415 strcpy(msg, "Error: socket got corrupted!");
1475 return; 1416 goto err_parsing;
1476 } 1417 }
1477 mci = new_mci; 1418 mci = new_mci;
1478 pvt = mci->pvt_info; 1419 pvt = mci->pvt_info;
@@ -1486,45 +1427,39 @@ static void sbridge_mce_output_error(struct mem_ctl_info *mci,
1486 else 1427 else
1487 dimm = 2; 1428 dimm = 2;
1488 1429
1489 csrow = pvt->csrow_map[first_channel][dimm];
1490
1491 if (uncorrected_error && recoverable)
1492 recoverable_msg = " recoverable";
1493 else
1494 recoverable_msg = "";
1495 1430
1496 /* 1431 /*
1497 * FIXME: What should we do with "channel" information on mcelog? 1432 * FIXME: On some memory configurations (mirror, lockstep), the
1498 * Probably, we can just discard it, as the channel information 1433 * Memory Controller can't point the error to a single DIMM. The
1499 * comes from the get_memory_error_data() address decoding 1434 * EDAC core should be handling the channel mask, in order to point
1435 * to the group of dimm's where the error may be happening.
1500 */ 1436 */
1501 msg = kasprintf(GFP_ATOMIC, 1437 snprintf(msg, sizeof(msg),
1502 "%d %s error(s): %s on %s area %s%s: cpu=%d Err=%04x:%04x (ch=%d), " 1438 "count:%d%s%s area:%s err_code:%04x:%04x socket:%d channel_mask:%ld rank:%d",
1503 "addr = 0x%08llx => socket=%d, Channel=%ld(mask=%ld), rank=%d\n", 1439 core_err_cnt,
1504 core_err_cnt, 1440 overflow ? " OVERFLOW" : "",
1505 area_type, 1441 (uncorrected_error && recoverable) ? " recoverable" : "",
1506 optype, 1442 area_type,
1507 type, 1443 mscod, errcode,
1508 recoverable_msg, 1444 socket,
1509 overflow ? "OVERFLOW" : "", 1445 channel_mask,
1510 m->cpu, 1446 rank);
1511 mscod, errcode,
1512 channel, /* 1111b means not specified */
1513 (long long) m->addr,
1514 socket,
1515 first_channel, /* This is the real channel on SB */
1516 channel_mask,
1517 rank);
1518 1447
1519 debugf0("%s", msg); 1448 debugf0("%s", msg);
1520 1449
1450 /* FIXME: need support for channel mask */
1451
1521 /* Call the helper to output message */ 1452 /* Call the helper to output message */
1522 if (uncorrected_error) 1453 edac_mc_handle_error(tp_event, mci,
1523 edac_mc_handle_fbd_ue(mci, csrow, 0, 0, msg); 1454 m->addr >> PAGE_SHIFT, m->addr & ~PAGE_MASK, 0,
1524 else 1455 channel, dimm, -1,
1525 edac_mc_handle_fbd_ce(mci, csrow, 0, msg); 1456 optype, msg, m);
1457 return;
1458err_parsing:
1459 edac_mc_handle_error(tp_event, mci, 0, 0, 0,
1460 -1, -1, -1,
1461 msg, "", m);
1526 1462
1527 kfree(msg);
1528} 1463}
1529 1464
1530/* 1465/*
@@ -1683,16 +1618,25 @@ static void sbridge_unregister_mci(struct sbridge_dev *sbridge_dev)
1683static int sbridge_register_mci(struct sbridge_dev *sbridge_dev) 1618static int sbridge_register_mci(struct sbridge_dev *sbridge_dev)
1684{ 1619{
1685 struct mem_ctl_info *mci; 1620 struct mem_ctl_info *mci;
1621 struct edac_mc_layer layers[2];
1686 struct sbridge_pvt *pvt; 1622 struct sbridge_pvt *pvt;
1687 int rc, channels, csrows; 1623 int rc;
1688 1624
1689 /* Check the number of active and not disabled channels */ 1625 /* Check the number of active and not disabled channels */
1690 rc = sbridge_get_active_channels(sbridge_dev->bus, &channels, &csrows); 1626 rc = check_if_ecc_is_active(sbridge_dev->bus);
1691 if (unlikely(rc < 0)) 1627 if (unlikely(rc < 0))
1692 return rc; 1628 return rc;
1693 1629
1694 /* allocate a new MC control structure */ 1630 /* allocate a new MC control structure */
1695 mci = edac_mc_alloc(sizeof(*pvt), csrows, channels, sbridge_dev->mc); 1631 layers[0].type = EDAC_MC_LAYER_CHANNEL;
1632 layers[0].size = NUM_CHANNELS;
1633 layers[0].is_virt_csrow = false;
1634 layers[1].type = EDAC_MC_LAYER_SLOT;
1635 layers[1].size = MAX_DIMMS;
1636 layers[1].is_virt_csrow = true;
1637 mci = edac_mc_alloc(sbridge_dev->mc, ARRAY_SIZE(layers), layers,
1638 sizeof(*pvt));
1639
1696 if (unlikely(!mci)) 1640 if (unlikely(!mci))
1697 return -ENOMEM; 1641 return -ENOMEM;
1698 1642
diff --git a/drivers/edac/tile_edac.c b/drivers/edac/tile_edac.c
index e99d00976189..7bb4614730db 100644
--- a/drivers/edac/tile_edac.c
+++ b/drivers/edac/tile_edac.c
@@ -71,7 +71,10 @@ static void tile_edac_check(struct mem_ctl_info *mci)
71 if (mem_error.sbe_count != priv->ce_count) { 71 if (mem_error.sbe_count != priv->ce_count) {
72 dev_dbg(mci->dev, "ECC CE err on node %d\n", priv->node); 72 dev_dbg(mci->dev, "ECC CE err on node %d\n", priv->node);
73 priv->ce_count = mem_error.sbe_count; 73 priv->ce_count = mem_error.sbe_count;
74 edac_mc_handle_ce(mci, 0, 0, 0, 0, 0, mci->ctl_name); 74 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
75 0, 0, 0,
76 0, 0, -1,
77 mci->ctl_name, "", NULL);
75 } 78 }
76} 79}
77 80
@@ -84,6 +87,7 @@ static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
84 struct csrow_info *csrow = &mci->csrows[0]; 87 struct csrow_info *csrow = &mci->csrows[0];
85 struct tile_edac_priv *priv = mci->pvt_info; 88 struct tile_edac_priv *priv = mci->pvt_info;
86 struct mshim_mem_info mem_info; 89 struct mshim_mem_info mem_info;
90 struct dimm_info *dimm = csrow->channels[0].dimm;
87 91
88 if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info, 92 if (hv_dev_pread(priv->hv_devhdl, 0, (HV_VirtAddr)&mem_info,
89 sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) != 93 sizeof(struct mshim_mem_info), MSHIM_MEM_INFO_OFF) !=
@@ -93,27 +97,25 @@ static int __devinit tile_edac_init_csrows(struct mem_ctl_info *mci)
93 } 97 }
94 98
95 if (mem_info.mem_ecc) 99 if (mem_info.mem_ecc)
96 csrow->edac_mode = EDAC_SECDED; 100 dimm->edac_mode = EDAC_SECDED;
97 else 101 else
98 csrow->edac_mode = EDAC_NONE; 102 dimm->edac_mode = EDAC_NONE;
99 switch (mem_info.mem_type) { 103 switch (mem_info.mem_type) {
100 case DDR2: 104 case DDR2:
101 csrow->mtype = MEM_DDR2; 105 dimm->mtype = MEM_DDR2;
102 break; 106 break;
103 107
104 case DDR3: 108 case DDR3:
105 csrow->mtype = MEM_DDR3; 109 dimm->mtype = MEM_DDR3;
106 break; 110 break;
107 111
108 default: 112 default:
109 return -1; 113 return -1;
110 } 114 }
111 115
112 csrow->first_page = 0; 116 dimm->nr_pages = mem_info.mem_size >> PAGE_SHIFT;
113 csrow->nr_pages = mem_info.mem_size >> PAGE_SHIFT; 117 dimm->grain = TILE_EDAC_ERROR_GRAIN;
114 csrow->last_page = csrow->first_page + csrow->nr_pages - 1; 118 dimm->dtype = DEV_UNKNOWN;
115 csrow->grain = TILE_EDAC_ERROR_GRAIN;
116 csrow->dtype = DEV_UNKNOWN;
117 119
118 return 0; 120 return 0;
119} 121}
@@ -123,6 +125,7 @@ static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
123 char hv_file[32]; 125 char hv_file[32];
124 int hv_devhdl; 126 int hv_devhdl;
125 struct mem_ctl_info *mci; 127 struct mem_ctl_info *mci;
128 struct edac_mc_layer layers[2];
126 struct tile_edac_priv *priv; 129 struct tile_edac_priv *priv;
127 int rc; 130 int rc;
128 131
@@ -132,8 +135,14 @@ static int __devinit tile_edac_mc_probe(struct platform_device *pdev)
132 return -EINVAL; 135 return -EINVAL;
133 136
134 /* A TILE MC has a single channel and one chip-select row. */ 137 /* A TILE MC has a single channel and one chip-select row. */
135 mci = edac_mc_alloc(sizeof(struct tile_edac_priv), 138 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
136 TILE_EDAC_NR_CSROWS, TILE_EDAC_NR_CHANS, pdev->id); 139 layers[0].size = TILE_EDAC_NR_CSROWS;
140 layers[0].is_virt_csrow = true;
141 layers[1].type = EDAC_MC_LAYER_CHANNEL;
142 layers[1].size = TILE_EDAC_NR_CHANS;
143 layers[1].is_virt_csrow = false;
144 mci = edac_mc_alloc(pdev->id, ARRAY_SIZE(layers), layers,
145 sizeof(struct tile_edac_priv));
137 if (mci == NULL) 146 if (mci == NULL)
138 return -ENOMEM; 147 return -ENOMEM;
139 priv = mci->pvt_info; 148 priv = mci->pvt_info;
diff --git a/drivers/edac/x38_edac.c b/drivers/edac/x38_edac.c
index a438297389e5..1ac7962d63ea 100644
--- a/drivers/edac/x38_edac.c
+++ b/drivers/edac/x38_edac.c
@@ -215,19 +215,26 @@ static void x38_process_error_info(struct mem_ctl_info *mci,
215 return; 215 return;
216 216
217 if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) { 217 if ((info->errsts ^ info->errsts2) & X38_ERRSTS_BITS) {
218 edac_mc_handle_ce_no_info(mci, "UE overwrote CE"); 218 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci, 0, 0, 0,
219 -1, -1, -1,
220 "UE overwrote CE", "", NULL);
219 info->errsts = info->errsts2; 221 info->errsts = info->errsts2;
220 } 222 }
221 223
222 for (channel = 0; channel < x38_channel_num; channel++) { 224 for (channel = 0; channel < x38_channel_num; channel++) {
223 log = info->eccerrlog[channel]; 225 log = info->eccerrlog[channel];
224 if (log & X38_ECCERRLOG_UE) { 226 if (log & X38_ECCERRLOG_UE) {
225 edac_mc_handle_ue(mci, 0, 0, 227 edac_mc_handle_error(HW_EVENT_ERR_UNCORRECTED, mci,
226 eccerrlog_row(channel, log), "x38 UE"); 228 0, 0, 0,
229 eccerrlog_row(channel, log),
230 -1, -1,
231 "x38 UE", "", NULL);
227 } else if (log & X38_ECCERRLOG_CE) { 232 } else if (log & X38_ECCERRLOG_CE) {
228 edac_mc_handle_ce(mci, 0, 0, 233 edac_mc_handle_error(HW_EVENT_ERR_CORRECTED, mci,
229 eccerrlog_syndrome(log), 234 0, 0, eccerrlog_syndrome(log),
230 eccerrlog_row(channel, log), 0, "x38 CE"); 235 eccerrlog_row(channel, log),
236 -1, -1,
237 "x38 CE", "", NULL);
231 } 238 }
232 } 239 }
233} 240}
@@ -317,9 +324,9 @@ static unsigned long drb_to_nr_pages(
317static int x38_probe1(struct pci_dev *pdev, int dev_idx) 324static int x38_probe1(struct pci_dev *pdev, int dev_idx)
318{ 325{
319 int rc; 326 int rc;
320 int i; 327 int i, j;
321 struct mem_ctl_info *mci = NULL; 328 struct mem_ctl_info *mci = NULL;
322 unsigned long last_page; 329 struct edac_mc_layer layers[2];
323 u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL]; 330 u16 drbs[X38_CHANNELS][X38_RANKS_PER_CHANNEL];
324 bool stacked; 331 bool stacked;
325 void __iomem *window; 332 void __iomem *window;
@@ -335,7 +342,13 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
335 how_many_channel(pdev); 342 how_many_channel(pdev);
336 343
337 /* FIXME: unconventional pvt_info usage */ 344 /* FIXME: unconventional pvt_info usage */
338 mci = edac_mc_alloc(0, X38_RANKS, x38_channel_num, 0); 345 layers[0].type = EDAC_MC_LAYER_CHIP_SELECT;
346 layers[0].size = X38_RANKS;
347 layers[0].is_virt_csrow = true;
348 layers[1].type = EDAC_MC_LAYER_CHANNEL;
349 layers[1].size = x38_channel_num;
350 layers[1].is_virt_csrow = false;
351 mci = edac_mc_alloc(0, ARRAY_SIZE(layers), layers, 0);
339 if (!mci) 352 if (!mci)
340 return -ENOMEM; 353 return -ENOMEM;
341 354
@@ -363,7 +376,6 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
363 * cumulative; the last one will contain the total memory 376 * cumulative; the last one will contain the total memory
364 * contained in all ranks. 377 * contained in all ranks.
365 */ 378 */
366 last_page = -1UL;
367 for (i = 0; i < mci->nr_csrows; i++) { 379 for (i = 0; i < mci->nr_csrows; i++) {
368 unsigned long nr_pages; 380 unsigned long nr_pages;
369 struct csrow_info *csrow = &mci->csrows[i]; 381 struct csrow_info *csrow = &mci->csrows[i];
@@ -372,20 +384,18 @@ static int x38_probe1(struct pci_dev *pdev, int dev_idx)
372 i / X38_RANKS_PER_CHANNEL, 384 i / X38_RANKS_PER_CHANNEL,
373 i % X38_RANKS_PER_CHANNEL); 385 i % X38_RANKS_PER_CHANNEL);
374 386
375 if (nr_pages == 0) { 387 if (nr_pages == 0)
376 csrow->mtype = MEM_EMPTY;
377 continue; 388 continue;
378 }
379 389
380 csrow->first_page = last_page + 1; 390 for (j = 0; j < x38_channel_num; j++) {
381 last_page += nr_pages; 391 struct dimm_info *dimm = csrow->channels[j].dimm;
382 csrow->last_page = last_page;
383 csrow->nr_pages = nr_pages;
384 392
385 csrow->grain = nr_pages << PAGE_SHIFT; 393 dimm->nr_pages = nr_pages / x38_channel_num;
386 csrow->mtype = MEM_DDR2; 394 dimm->grain = nr_pages << PAGE_SHIFT;
387 csrow->dtype = DEV_UNKNOWN; 395 dimm->mtype = MEM_DDR2;
388 csrow->edac_mode = EDAC_UNKNOWN; 396 dimm->dtype = DEV_UNKNOWN;
397 dimm->edac_mode = EDAC_UNKNOWN;
398 }
389 } 399 }
390 400
391 x38_clear_error_info(mci); 401 x38_clear_error_info(mci);
diff --git a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig
index aa3642cb8209..c4067d0141f7 100644
--- a/drivers/gpio/Kconfig
+++ b/drivers/gpio/Kconfig
@@ -114,6 +114,14 @@ config GPIO_EP93XX
114 depends on ARCH_EP93XX 114 depends on ARCH_EP93XX
115 select GPIO_GENERIC 115 select GPIO_GENERIC
116 116
117config GPIO_MM_LANTIQ
118 bool "Lantiq Memory mapped GPIOs"
119 depends on LANTIQ && SOC_XWAY
120 help
121 This enables support for memory mapped GPIOs on the External Bus Unit
122 (EBU) found on Lantiq SoCs. The gpios are output only as they are
123 created by attaching a 16bit latch to the bus.
124
117config GPIO_MPC5200 125config GPIO_MPC5200
118 def_bool y 126 def_bool y
119 depends on PPC_MPC52xx 127 depends on PPC_MPC52xx
@@ -167,6 +175,14 @@ config GPIO_PXA
167 help 175 help
168 Say yes here to support the PXA GPIO device 176 Say yes here to support the PXA GPIO device
169 177
178config GPIO_STA2X11
179 bool "STA2x11/ConneXt GPIO support"
180 depends on MFD_STA2X11
181 select GENERIC_IRQ_CHIP
182 help
183 Say yes here to support the STA2x11/ConneXt GPIO device.
184 The GPIO module has 128 GPIO pins with alternate functions.
185
170config GPIO_XILINX 186config GPIO_XILINX
171 bool "Xilinx GPIO support" 187 bool "Xilinx GPIO support"
172 depends on PPC_OF || MICROBLAZE 188 depends on PPC_OF || MICROBLAZE
@@ -180,13 +196,13 @@ config GPIO_VR41XX
180 Say yes here to support the NEC VR4100 series General-purpose I/O Uint 196 Say yes here to support the NEC VR4100 series General-purpose I/O Uint
181 197
182config GPIO_SCH 198config GPIO_SCH
183 tristate "Intel SCH/TunnelCreek GPIO" 199 tristate "Intel SCH/TunnelCreek/Centerton GPIO"
184 depends on PCI && X86 200 depends on PCI && X86
185 select MFD_CORE 201 select MFD_CORE
186 select LPC_SCH 202 select LPC_SCH
187 help 203 help
188 Say yes here to support GPIO interface on Intel Poulsbo SCH 204 Say yes here to support GPIO interface on Intel Poulsbo SCH,
189 or Intel Tunnel Creek processor. 205 Intel Tunnel Creek processor or Intel Centerton processor.
190 The Intel SCH contains a total of 14 GPIO pins. Ten GPIOs are 206 The Intel SCH contains a total of 14 GPIO pins. Ten GPIOs are
191 powered by the core power rail and are turned off during sleep 207 powered by the core power rail and are turned off during sleep
192 modes (S3 and higher). The remaining four GPIOs are powered by 208 modes (S3 and higher). The remaining four GPIOs are powered by
@@ -195,6 +211,22 @@ config GPIO_SCH
195 system from the Suspend-to-RAM state. 211 system from the Suspend-to-RAM state.
196 The Intel Tunnel Creek processor has 5 GPIOs powered by the 212 The Intel Tunnel Creek processor has 5 GPIOs powered by the
197 core power rail and 9 from suspend power supply. 213 core power rail and 9 from suspend power supply.
214 The Intel Centerton processor has a total of 30 GPIO pins.
215 Twenty-one are powered by the core power rail and 9 from the
216 suspend power supply.
217
218config GPIO_ICH
219 tristate "Intel ICH GPIO"
220 depends on PCI && X86
221 select MFD_CORE
222 select LPC_ICH
223 help
224 Say yes here to support the GPIO functionality of a number of Intel
225 ICH-based chipsets. Currently supported devices: ICH6, ICH7, ICH8
226 ICH9, ICH10, Series 5/3400 (eg Ibex Peak), Series 6/C200 (eg
227 Cougar Point), NM10 (Tiger Point), and 3100 (Whitmore Lake).
228
229 If unsure, say N.
198 230
199config GPIO_VX855 231config GPIO_VX855
200 tristate "VIA VX855/VX875 GPIO" 232 tristate "VIA VX855/VX875 GPIO"
@@ -334,6 +366,16 @@ config GPIO_STMPE
334 This enables support for the GPIOs found on the STMPE I/O 366 This enables support for the GPIOs found on the STMPE I/O
335 Expanders. 367 Expanders.
336 368
369config GPIO_STP_XWAY
370 bool "XWAY STP GPIOs"
371 depends on SOC_XWAY
372 help
373 This enables support for the Serial To Parallel (STP) unit found on
374 XWAY SoC. The STP allows the SoC to drive a shift registers cascade,
375 that can be up to 24 bit. This peripheral is aimed at driving leds.
376 Some of the gpios/leds can be auto updated by the soc with dsl and
377 phy status.
378
337config GPIO_TC3589X 379config GPIO_TC3589X
338 bool "TC3589X GPIOs" 380 bool "TC3589X GPIOs"
339 depends on MFD_TC3589X 381 depends on MFD_TC3589X
diff --git a/drivers/gpio/Makefile b/drivers/gpio/Makefile
index 07a79e245407..0f55662002c3 100644
--- a/drivers/gpio/Makefile
+++ b/drivers/gpio/Makefile
@@ -19,6 +19,7 @@ obj-$(CONFIG_ARCH_DAVINCI) += gpio-davinci.o
19obj-$(CONFIG_GPIO_EM) += gpio-em.o 19obj-$(CONFIG_GPIO_EM) += gpio-em.o
20obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o 20obj-$(CONFIG_GPIO_EP93XX) += gpio-ep93xx.o
21obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o 21obj-$(CONFIG_GPIO_GE_FPGA) += gpio-ge.o
22obj-$(CONFIG_GPIO_ICH) += gpio-ich.o
22obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o 23obj-$(CONFIG_GPIO_IT8761E) += gpio-it8761e.o
23obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o 24obj-$(CONFIG_GPIO_JANZ_TTL) += gpio-janz-ttl.o
24obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o 25obj-$(CONFIG_ARCH_KS8695) += gpio-ks8695.o
@@ -32,6 +33,7 @@ obj-$(CONFIG_GPIO_MC33880) += gpio-mc33880.o
32obj-$(CONFIG_GPIO_MC9S08DZ60) += gpio-mc9s08dz60.o 33obj-$(CONFIG_GPIO_MC9S08DZ60) += gpio-mc9s08dz60.o
33obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o 34obj-$(CONFIG_GPIO_MCP23S08) += gpio-mcp23s08.o
34obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o 35obj-$(CONFIG_GPIO_ML_IOH) += gpio-ml-ioh.o
36obj-$(CONFIG_GPIO_MM_LANTIQ) += gpio-mm-lantiq.o
35obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o 37obj-$(CONFIG_GPIO_MPC5200) += gpio-mpc5200.o
36obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o 38obj-$(CONFIG_GPIO_MPC8XXX) += gpio-mpc8xxx.o
37obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o 39obj-$(CONFIG_GPIO_MSIC) += gpio-msic.o
@@ -51,7 +53,9 @@ obj-$(CONFIG_PLAT_SAMSUNG) += gpio-samsung.o
51obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o 53obj-$(CONFIG_ARCH_SA1100) += gpio-sa1100.o
52obj-$(CONFIG_GPIO_SCH) += gpio-sch.o 54obj-$(CONFIG_GPIO_SCH) += gpio-sch.o
53obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o 55obj-$(CONFIG_GPIO_SODAVILLE) += gpio-sodaville.o
56obj-$(CONFIG_GPIO_STA2X11) += gpio-sta2x11.o
54obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o 57obj-$(CONFIG_GPIO_STMPE) += gpio-stmpe.o
58obj-$(CONFIG_GPIO_STP_XWAY) += gpio-stp-xway.o
55obj-$(CONFIG_GPIO_SX150X) += gpio-sx150x.o 59obj-$(CONFIG_GPIO_SX150X) += gpio-sx150x.o
56obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o 60obj-$(CONFIG_GPIO_TC3589X) += gpio-tc3589x.o
57obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o 61obj-$(CONFIG_ARCH_TEGRA) += gpio-tegra.o
diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
new file mode 100644
index 000000000000..b7c06517403d
--- /dev/null
+++ b/drivers/gpio/gpio-ich.c
@@ -0,0 +1,419 @@
1/*
2 * Intel ICH6-10, Series 5 and 6 GPIO driver
3 *
4 * Copyright (C) 2010 Extreme Engineering Solutions.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
23#include <linux/module.h>
24#include <linux/pci.h>
25#include <linux/gpio.h>
26#include <linux/platform_device.h>
27#include <linux/mfd/lpc_ich.h>
28
29#define DRV_NAME "gpio_ich"
30
31/*
32 * GPIO register offsets in GPIO I/O space.
33 * Each chunk of 32 GPIOs is manipulated via its own USE_SELx, IO_SELx, and
34 * LVLx registers. Logic in the read/write functions takes a register and
35 * an absolute bit number and determines the proper register offset and bit
36 * number in that register. For example, to read the value of GPIO bit 50
37 * the code would access offset ichx_regs[2(=GPIO_LVL)][1(=50/32)],
38 * bit 18 (50%32).
39 */
40enum GPIO_REG {
41 GPIO_USE_SEL = 0,
42 GPIO_IO_SEL,
43 GPIO_LVL,
44};
45
46static const u8 ichx_regs[3][3] = {
47 {0x00, 0x30, 0x40}, /* USE_SEL[1-3] offsets */
48 {0x04, 0x34, 0x44}, /* IO_SEL[1-3] offsets */
49 {0x0c, 0x38, 0x48}, /* LVL[1-3] offsets */
50};
51
52#define ICHX_WRITE(val, reg, base_res) outl(val, (reg) + (base_res)->start)
53#define ICHX_READ(reg, base_res) inl((reg) + (base_res)->start)
54
55struct ichx_desc {
56 /* Max GPIO pins the chipset can have */
57 uint ngpio;
58
59 /* Whether the chipset has GPIO in GPE0_STS in the PM IO region */
60 bool uses_gpe0;
61
62 /* USE_SEL is bogus on some chipsets, eg 3100 */
63 u32 use_sel_ignore[3];
64
65 /* Some chipsets have quirks, let these use their own request/get */
66 int (*request)(struct gpio_chip *chip, unsigned offset);
67 int (*get)(struct gpio_chip *chip, unsigned offset);
68};
69
70static struct {
71 spinlock_t lock;
72 struct platform_device *dev;
73 struct gpio_chip chip;
74 struct resource *gpio_base; /* GPIO IO base */
75 struct resource *pm_base; /* Power Mangagment IO base */
76 struct ichx_desc *desc; /* Pointer to chipset-specific description */
77 u32 orig_gpio_ctrl; /* Orig CTRL value, used to restore on exit */
78} ichx_priv;
79
80static int modparam_gpiobase = -1; /* dynamic */
81module_param_named(gpiobase, modparam_gpiobase, int, 0444);
82MODULE_PARM_DESC(gpiobase, "The GPIO number base. -1 means dynamic, "
83 "which is the default.");
84
85static int ichx_write_bit(int reg, unsigned nr, int val, int verify)
86{
87 unsigned long flags;
88 u32 data, tmp;
89 int reg_nr = nr / 32;
90 int bit = nr & 0x1f;
91 int ret = 0;
92
93 spin_lock_irqsave(&ichx_priv.lock, flags);
94
95 data = ICHX_READ(ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
96 if (val)
97 data |= 1 << bit;
98 else
99 data &= ~(1 << bit);
100 ICHX_WRITE(data, ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
101 tmp = ICHX_READ(ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
102 if (verify && data != tmp)
103 ret = -EPERM;
104
105 spin_unlock_irqrestore(&ichx_priv.lock, flags);
106
107 return ret;
108}
109
110static int ichx_read_bit(int reg, unsigned nr)
111{
112 unsigned long flags;
113 u32 data;
114 int reg_nr = nr / 32;
115 int bit = nr & 0x1f;
116
117 spin_lock_irqsave(&ichx_priv.lock, flags);
118
119 data = ICHX_READ(ichx_regs[reg][reg_nr], ichx_priv.gpio_base);
120
121 spin_unlock_irqrestore(&ichx_priv.lock, flags);
122
123 return data & (1 << bit) ? 1 : 0;
124}
125
126static int ichx_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
127{
128 /*
129 * Try setting pin as an input and verify it worked since many pins
130 * are output-only.
131 */
132 if (ichx_write_bit(GPIO_IO_SEL, nr, 1, 1))
133 return -EINVAL;
134
135 return 0;
136}
137
138static int ichx_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
139 int val)
140{
141 /* Set GPIO output value. */
142 ichx_write_bit(GPIO_LVL, nr, val, 0);
143
144 /*
145 * Try setting pin as an output and verify it worked since many pins
146 * are input-only.
147 */
148 if (ichx_write_bit(GPIO_IO_SEL, nr, 0, 1))
149 return -EINVAL;
150
151 return 0;
152}
153
154static int ichx_gpio_get(struct gpio_chip *chip, unsigned nr)
155{
156 return ichx_read_bit(GPIO_LVL, nr);
157}
158
159static int ich6_gpio_get(struct gpio_chip *chip, unsigned nr)
160{
161 unsigned long flags;
162 u32 data;
163
164 /*
165 * GPI 0 - 15 need to be read from the power management registers on
166 * a ICH6/3100 bridge.
167 */
168 if (nr < 16) {
169 if (!ichx_priv.pm_base)
170 return -ENXIO;
171
172 spin_lock_irqsave(&ichx_priv.lock, flags);
173
174 /* GPI 0 - 15 are latched, write 1 to clear*/
175 ICHX_WRITE(1 << (16 + nr), 0, ichx_priv.pm_base);
176 data = ICHX_READ(0, ichx_priv.pm_base);
177
178 spin_unlock_irqrestore(&ichx_priv.lock, flags);
179
180 return (data >> 16) & (1 << nr) ? 1 : 0;
181 } else {
182 return ichx_gpio_get(chip, nr);
183 }
184}
185
186static int ichx_gpio_request(struct gpio_chip *chip, unsigned nr)
187{
188 /*
189 * Note we assume the BIOS properly set a bridge's USE value. Some
190 * chips (eg Intel 3100) have bogus USE values though, so first see if
191 * the chipset's USE value can be trusted for this specific bit.
192 * If it can't be trusted, assume that the pin can be used as a GPIO.
193 */
194 if (ichx_priv.desc->use_sel_ignore[nr / 32] & (1 << (nr & 0x1f)))
195 return 1;
196
197 return ichx_read_bit(GPIO_USE_SEL, nr) ? 0 : -ENODEV;
198}
199
200static int ich6_gpio_request(struct gpio_chip *chip, unsigned nr)
201{
202 /*
203 * Fixups for bits 16 and 17 are necessary on the Intel ICH6/3100
204 * bridge as they are controlled by USE register bits 0 and 1. See
205 * "Table 704 GPIO_USE_SEL1 register" in the i3100 datasheet for
206 * additional info.
207 */
208 if (nr == 16 || nr == 17)
209 nr -= 16;
210
211 return ichx_gpio_request(chip, nr);
212}
213
214static void ichx_gpio_set(struct gpio_chip *chip, unsigned nr, int val)
215{
216 ichx_write_bit(GPIO_LVL, nr, val, 0);
217}
218
219static void __devinit ichx_gpiolib_setup(struct gpio_chip *chip)
220{
221 chip->owner = THIS_MODULE;
222 chip->label = DRV_NAME;
223 chip->dev = &ichx_priv.dev->dev;
224
225 /* Allow chip-specific overrides of request()/get() */
226 chip->request = ichx_priv.desc->request ?
227 ichx_priv.desc->request : ichx_gpio_request;
228 chip->get = ichx_priv.desc->get ?
229 ichx_priv.desc->get : ichx_gpio_get;
230
231 chip->set = ichx_gpio_set;
232 chip->direction_input = ichx_gpio_direction_input;
233 chip->direction_output = ichx_gpio_direction_output;
234 chip->base = modparam_gpiobase;
235 chip->ngpio = ichx_priv.desc->ngpio;
236 chip->can_sleep = 0;
237 chip->dbg_show = NULL;
238}
239
240/* ICH6-based, 631xesb-based */
241static struct ichx_desc ich6_desc = {
242 /* Bridges using the ICH6 controller need fixups for GPIO 0 - 17 */
243 .request = ich6_gpio_request,
244 .get = ich6_gpio_get,
245
246 /* GPIO 0-15 are read in the GPE0_STS PM register */
247 .uses_gpe0 = true,
248
249 .ngpio = 50,
250};
251
252/* Intel 3100 */
253static struct ichx_desc i3100_desc = {
254 /*
255 * Bits 16,17, 20 of USE_SEL and bit 16 of USE_SEL2 always read 0 on
256 * the Intel 3100. See "Table 712. GPIO Summary Table" of 3100
257 * Datasheet for more info.
258 */
259 .use_sel_ignore = {0x00130000, 0x00010000, 0x0},
260
261 /* The 3100 needs fixups for GPIO 0 - 17 */
262 .request = ich6_gpio_request,
263 .get = ich6_gpio_get,
264
265 /* GPIO 0-15 are read in the GPE0_STS PM register */
266 .uses_gpe0 = true,
267
268 .ngpio = 50,
269};
270
271/* ICH7 and ICH8-based */
272static struct ichx_desc ich7_desc = {
273 .ngpio = 50,
274};
275
276/* ICH9-based */
277static struct ichx_desc ich9_desc = {
278 .ngpio = 61,
279};
280
281/* ICH10-based - Consumer/corporate versions have different amount of GPIO */
282static struct ichx_desc ich10_cons_desc = {
283 .ngpio = 61,
284};
285static struct ichx_desc ich10_corp_desc = {
286 .ngpio = 72,
287};
288
289/* Intel 5 series, 6 series, 3400 series, and C200 series */
290static struct ichx_desc intel5_desc = {
291 .ngpio = 76,
292};
293
294static int __devinit ichx_gpio_probe(struct platform_device *pdev)
295{
296 struct resource *res_base, *res_pm;
297 int err;
298 struct lpc_ich_info *ich_info = pdev->dev.platform_data;
299
300 if (!ich_info)
301 return -ENODEV;
302
303 ichx_priv.dev = pdev;
304
305 switch (ich_info->gpio_version) {
306 case ICH_I3100_GPIO:
307 ichx_priv.desc = &i3100_desc;
308 break;
309 case ICH_V5_GPIO:
310 ichx_priv.desc = &intel5_desc;
311 break;
312 case ICH_V6_GPIO:
313 ichx_priv.desc = &ich6_desc;
314 break;
315 case ICH_V7_GPIO:
316 ichx_priv.desc = &ich7_desc;
317 break;
318 case ICH_V9_GPIO:
319 ichx_priv.desc = &ich9_desc;
320 break;
321 case ICH_V10CORP_GPIO:
322 ichx_priv.desc = &ich10_corp_desc;
323 break;
324 case ICH_V10CONS_GPIO:
325 ichx_priv.desc = &ich10_cons_desc;
326 break;
327 default:
328 return -ENODEV;
329 }
330
331 res_base = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_GPIO);
332 if (!res_base || !res_base->start || !res_base->end)
333 return -ENODEV;
334
335 if (!request_region(res_base->start, resource_size(res_base),
336 pdev->name))
337 return -EBUSY;
338
339 ichx_priv.gpio_base = res_base;
340
341 /*
342 * If necessary, determine the I/O address of ACPI/power management
343 * registers which are needed to read the the GPE0 register for GPI pins
344 * 0 - 15 on some chipsets.
345 */
346 if (!ichx_priv.desc->uses_gpe0)
347 goto init;
348
349 res_pm = platform_get_resource(pdev, IORESOURCE_IO, ICH_RES_GPE0);
350 if (!res_pm) {
351 pr_warn("ACPI BAR is unavailable, GPI 0 - 15 unavailable\n");
352 goto init;
353 }
354
355 if (!request_region(res_pm->start, resource_size(res_pm),
356 pdev->name)) {
357 pr_warn("ACPI BAR is busy, GPI 0 - 15 unavailable\n");
358 goto init;
359 }
360
361 ichx_priv.pm_base = res_pm;
362
363init:
364 ichx_gpiolib_setup(&ichx_priv.chip);
365 err = gpiochip_add(&ichx_priv.chip);
366 if (err) {
367 pr_err("Failed to register GPIOs\n");
368 goto add_err;
369 }
370
371 pr_info("GPIO from %d to %d on %s\n", ichx_priv.chip.base,
372 ichx_priv.chip.base + ichx_priv.chip.ngpio - 1, DRV_NAME);
373
374 return 0;
375
376add_err:
377 release_region(ichx_priv.gpio_base->start,
378 resource_size(ichx_priv.gpio_base));
379 if (ichx_priv.pm_base)
380 release_region(ichx_priv.pm_base->start,
381 resource_size(ichx_priv.pm_base));
382 return err;
383}
384
385static int __devexit ichx_gpio_remove(struct platform_device *pdev)
386{
387 int err;
388
389 err = gpiochip_remove(&ichx_priv.chip);
390 if (err) {
391 dev_err(&pdev->dev, "%s failed, %d\n",
392 "gpiochip_remove()", err);
393 return err;
394 }
395
396 release_region(ichx_priv.gpio_base->start,
397 resource_size(ichx_priv.gpio_base));
398 if (ichx_priv.pm_base)
399 release_region(ichx_priv.pm_base->start,
400 resource_size(ichx_priv.pm_base));
401
402 return 0;
403}
404
405static struct platform_driver ichx_gpio_driver = {
406 .driver = {
407 .owner = THIS_MODULE,
408 .name = DRV_NAME,
409 },
410 .probe = ichx_gpio_probe,
411 .remove = __devexit_p(ichx_gpio_remove),
412};
413
414module_platform_driver(ichx_gpio_driver);
415
416MODULE_AUTHOR("Peter Tyser <ptyser@xes-inc.com>");
417MODULE_DESCRIPTION("GPIO interface for Intel ICH series");
418MODULE_LICENSE("GPL");
419MODULE_ALIAS("platform:"DRV_NAME);
diff --git a/drivers/gpio/gpio-mm-lantiq.c b/drivers/gpio/gpio-mm-lantiq.c
new file mode 100644
index 000000000000..2983dfbd0668
--- /dev/null
+++ b/drivers/gpio/gpio-mm-lantiq.c
@@ -0,0 +1,158 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
7 */
8
9#include <linux/init.h>
10#include <linux/module.h>
11#include <linux/types.h>
12#include <linux/platform_device.h>
13#include <linux/mutex.h>
14#include <linux/gpio.h>
15#include <linux/of.h>
16#include <linux/of_gpio.h>
17#include <linux/io.h>
18#include <linux/slab.h>
19
20#include <lantiq_soc.h>
21
22/*
23 * By attaching hardware latches to the EBU it is possible to create output
24 * only gpios. This driver configures a special memory address, which when
25 * written to outputs 16 bit to the latches.
26 */
27
28#define LTQ_EBU_BUSCON 0x1e7ff /* 16 bit access, slowest timing */
29#define LTQ_EBU_WP 0x80000000 /* write protect bit */
30
31struct ltq_mm {
32 struct of_mm_gpio_chip mmchip;
33 u16 shadow; /* shadow the latches state */
34};
35
36/**
37 * ltq_mm_apply() - write the shadow value to the ebu address.
38 * @chip: Pointer to our private data structure.
39 *
40 * Write the shadow value to the EBU to set the gpios. We need to set the
41 * global EBU lock to make sure that PCI/MTD dont break.
42 */
43static void ltq_mm_apply(struct ltq_mm *chip)
44{
45 unsigned long flags;
46
47 spin_lock_irqsave(&ebu_lock, flags);
48 ltq_ebu_w32(LTQ_EBU_BUSCON, LTQ_EBU_BUSCON1);
49 __raw_writew(chip->shadow, chip->mmchip.regs);
50 ltq_ebu_w32(LTQ_EBU_BUSCON | LTQ_EBU_WP, LTQ_EBU_BUSCON1);
51 spin_unlock_irqrestore(&ebu_lock, flags);
52}
53
54/**
55 * ltq_mm_set() - gpio_chip->set - set gpios.
56 * @gc: Pointer to gpio_chip device structure.
57 * @gpio: GPIO signal number.
58 * @val: Value to be written to specified signal.
59 *
60 * Set the shadow value and call ltq_mm_apply.
61 */
62static void ltq_mm_set(struct gpio_chip *gc, unsigned offset, int value)
63{
64 struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc);
65 struct ltq_mm *chip =
66 container_of(mm_gc, struct ltq_mm, mmchip);
67
68 if (value)
69 chip->shadow |= (1 << offset);
70 else
71 chip->shadow &= ~(1 << offset);
72 ltq_mm_apply(chip);
73}
74
75/**
76 * ltq_mm_dir_out() - gpio_chip->dir_out - set gpio direction.
77 * @gc: Pointer to gpio_chip device structure.
78 * @gpio: GPIO signal number.
79 * @val: Value to be written to specified signal.
80 *
81 * Same as ltq_mm_set, always returns 0.
82 */
83static int ltq_mm_dir_out(struct gpio_chip *gc, unsigned offset, int value)
84{
85 ltq_mm_set(gc, offset, value);
86
87 return 0;
88}
89
90/**
91 * ltq_mm_save_regs() - Set initial values of GPIO pins
92 * @mm_gc: pointer to memory mapped GPIO chip structure
93 */
94static void ltq_mm_save_regs(struct of_mm_gpio_chip *mm_gc)
95{
96 struct ltq_mm *chip =
97 container_of(mm_gc, struct ltq_mm, mmchip);
98
99 /* tell the ebu controller which memory address we will be using */
100 ltq_ebu_w32(CPHYSADDR(chip->mmchip.regs) | 0x1, LTQ_EBU_ADDRSEL1);
101
102 ltq_mm_apply(chip);
103}
104
105static int ltq_mm_probe(struct platform_device *pdev)
106{
107 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
108 struct ltq_mm *chip;
109 const __be32 *shadow;
110 int ret = 0;
111
112 if (!res) {
113 dev_err(&pdev->dev, "failed to get memory resource\n");
114 return -ENOENT;
115 }
116
117 chip = kzalloc(sizeof(*chip), GFP_KERNEL);
118 if (!chip)
119 return -ENOMEM;
120
121 chip->mmchip.gc.ngpio = 16;
122 chip->mmchip.gc.label = "gpio-mm-ltq";
123 chip->mmchip.gc.direction_output = ltq_mm_dir_out;
124 chip->mmchip.gc.set = ltq_mm_set;
125 chip->mmchip.save_regs = ltq_mm_save_regs;
126
127 /* store the shadow value if one was passed by the devicetree */
128 shadow = of_get_property(pdev->dev.of_node, "lantiq,shadow", NULL);
129 if (shadow)
130 chip->shadow = be32_to_cpu(*shadow);
131
132 ret = of_mm_gpiochip_add(pdev->dev.of_node, &chip->mmchip);
133 if (ret)
134 kfree(chip);
135 return ret;
136}
137
138static const struct of_device_id ltq_mm_match[] = {
139 { .compatible = "lantiq,gpio-mm" },
140 {},
141};
142MODULE_DEVICE_TABLE(of, ltq_mm_match);
143
144static struct platform_driver ltq_mm_driver = {
145 .probe = ltq_mm_probe,
146 .driver = {
147 .name = "gpio-mm-ltq",
148 .owner = THIS_MODULE,
149 .of_match_table = ltq_mm_match,
150 },
151};
152
153static int __init ltq_mm_init(void)
154{
155 return platform_driver_register(&ltq_mm_driver);
156}
157
158subsys_initcall(ltq_mm_init);
diff --git a/drivers/gpio/gpio-mxs.c b/drivers/gpio/gpio-mxs.c
index b4136501abd8..39e495669961 100644
--- a/drivers/gpio/gpio-mxs.c
+++ b/drivers/gpio/gpio-mxs.c
@@ -25,23 +25,25 @@
25#include <linux/io.h> 25#include <linux/io.h>
26#include <linux/irq.h> 26#include <linux/irq.h>
27#include <linux/gpio.h> 27#include <linux/gpio.h>
28#include <linux/of.h>
29#include <linux/of_address.h>
30#include <linux/of_device.h>
28#include <linux/platform_device.h> 31#include <linux/platform_device.h>
29#include <linux/slab.h> 32#include <linux/slab.h>
30#include <linux/basic_mmio_gpio.h> 33#include <linux/basic_mmio_gpio.h>
31#include <linux/module.h> 34#include <linux/module.h>
32#include <mach/mxs.h>
33 35
34#define MXS_SET 0x4 36#define MXS_SET 0x4
35#define MXS_CLR 0x8 37#define MXS_CLR 0x8
36 38
37#define PINCTRL_DOUT(n) ((cpu_is_mx23() ? 0x0500 : 0x0700) + (n) * 0x10) 39#define PINCTRL_DOUT(p) ((is_imx23_gpio(p) ? 0x0500 : 0x0700) + (p->id) * 0x10)
38#define PINCTRL_DIN(n) ((cpu_is_mx23() ? 0x0600 : 0x0900) + (n) * 0x10) 40#define PINCTRL_DIN(p) ((is_imx23_gpio(p) ? 0x0600 : 0x0900) + (p->id) * 0x10)
39#define PINCTRL_DOE(n) ((cpu_is_mx23() ? 0x0700 : 0x0b00) + (n) * 0x10) 41#define PINCTRL_DOE(p) ((is_imx23_gpio(p) ? 0x0700 : 0x0b00) + (p->id) * 0x10)
40#define PINCTRL_PIN2IRQ(n) ((cpu_is_mx23() ? 0x0800 : 0x1000) + (n) * 0x10) 42#define PINCTRL_PIN2IRQ(p) ((is_imx23_gpio(p) ? 0x0800 : 0x1000) + (p->id) * 0x10)
41#define PINCTRL_IRQEN(n) ((cpu_is_mx23() ? 0x0900 : 0x1100) + (n) * 0x10) 43#define PINCTRL_IRQEN(p) ((is_imx23_gpio(p) ? 0x0900 : 0x1100) + (p->id) * 0x10)
42#define PINCTRL_IRQLEV(n) ((cpu_is_mx23() ? 0x0a00 : 0x1200) + (n) * 0x10) 44#define PINCTRL_IRQLEV(p) ((is_imx23_gpio(p) ? 0x0a00 : 0x1200) + (p->id) * 0x10)
43#define PINCTRL_IRQPOL(n) ((cpu_is_mx23() ? 0x0b00 : 0x1300) + (n) * 0x10) 45#define PINCTRL_IRQPOL(p) ((is_imx23_gpio(p) ? 0x0b00 : 0x1300) + (p->id) * 0x10)
44#define PINCTRL_IRQSTAT(n) ((cpu_is_mx23() ? 0x0c00 : 0x1400) + (n) * 0x10) 46#define PINCTRL_IRQSTAT(p) ((is_imx23_gpio(p) ? 0x0c00 : 0x1400) + (p->id) * 0x10)
45 47
46#define GPIO_INT_FALL_EDGE 0x0 48#define GPIO_INT_FALL_EDGE 0x0
47#define GPIO_INT_LOW_LEV 0x1 49#define GPIO_INT_LOW_LEV 0x1
@@ -52,14 +54,30 @@
52 54
53#define irq_to_gpio(irq) ((irq) - MXS_GPIO_IRQ_START) 55#define irq_to_gpio(irq) ((irq) - MXS_GPIO_IRQ_START)
54 56
57enum mxs_gpio_id {
58 IMX23_GPIO,
59 IMX28_GPIO,
60};
61
55struct mxs_gpio_port { 62struct mxs_gpio_port {
56 void __iomem *base; 63 void __iomem *base;
57 int id; 64 int id;
58 int irq; 65 int irq;
59 int virtual_irq_start; 66 int virtual_irq_start;
60 struct bgpio_chip bgc; 67 struct bgpio_chip bgc;
68 enum mxs_gpio_id devid;
61}; 69};
62 70
71static inline int is_imx23_gpio(struct mxs_gpio_port *port)
72{
73 return port->devid == IMX23_GPIO;
74}
75
76static inline int is_imx28_gpio(struct mxs_gpio_port *port)
77{
78 return port->devid == IMX28_GPIO;
79}
80
63/* Note: This driver assumes 32 GPIOs are handled in one register */ 81/* Note: This driver assumes 32 GPIOs are handled in one register */
64 82
65static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type) 83static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
@@ -89,21 +107,21 @@ static int mxs_gpio_set_irq_type(struct irq_data *d, unsigned int type)
89 } 107 }
90 108
91 /* set level or edge */ 109 /* set level or edge */
92 pin_addr = port->base + PINCTRL_IRQLEV(port->id); 110 pin_addr = port->base + PINCTRL_IRQLEV(port);
93 if (edge & GPIO_INT_LEV_MASK) 111 if (edge & GPIO_INT_LEV_MASK)
94 writel(pin_mask, pin_addr + MXS_SET); 112 writel(pin_mask, pin_addr + MXS_SET);
95 else 113 else
96 writel(pin_mask, pin_addr + MXS_CLR); 114 writel(pin_mask, pin_addr + MXS_CLR);
97 115
98 /* set polarity */ 116 /* set polarity */
99 pin_addr = port->base + PINCTRL_IRQPOL(port->id); 117 pin_addr = port->base + PINCTRL_IRQPOL(port);
100 if (edge & GPIO_INT_POL_MASK) 118 if (edge & GPIO_INT_POL_MASK)
101 writel(pin_mask, pin_addr + MXS_SET); 119 writel(pin_mask, pin_addr + MXS_SET);
102 else 120 else
103 writel(pin_mask, pin_addr + MXS_CLR); 121 writel(pin_mask, pin_addr + MXS_CLR);
104 122
105 writel(1 << (gpio & 0x1f), 123 writel(1 << (gpio & 0x1f),
106 port->base + PINCTRL_IRQSTAT(port->id) + MXS_CLR); 124 port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
107 125
108 return 0; 126 return 0;
109} 127}
@@ -117,8 +135,8 @@ static void mxs_gpio_irq_handler(u32 irq, struct irq_desc *desc)
117 135
118 desc->irq_data.chip->irq_ack(&desc->irq_data); 136 desc->irq_data.chip->irq_ack(&desc->irq_data);
119 137
120 irq_stat = readl(port->base + PINCTRL_IRQSTAT(port->id)) & 138 irq_stat = readl(port->base + PINCTRL_IRQSTAT(port)) &
121 readl(port->base + PINCTRL_IRQEN(port->id)); 139 readl(port->base + PINCTRL_IRQEN(port));
122 140
123 while (irq_stat != 0) { 141 while (irq_stat != 0) {
124 int irqoffset = fls(irq_stat) - 1; 142 int irqoffset = fls(irq_stat) - 1;
@@ -164,8 +182,8 @@ static void __init mxs_gpio_init_gc(struct mxs_gpio_port *port)
164 ct->chip.irq_unmask = irq_gc_mask_set_bit; 182 ct->chip.irq_unmask = irq_gc_mask_set_bit;
165 ct->chip.irq_set_type = mxs_gpio_set_irq_type; 183 ct->chip.irq_set_type = mxs_gpio_set_irq_type;
166 ct->chip.irq_set_wake = mxs_gpio_set_wake_irq; 184 ct->chip.irq_set_wake = mxs_gpio_set_wake_irq;
167 ct->regs.ack = PINCTRL_IRQSTAT(port->id) + MXS_CLR; 185 ct->regs.ack = PINCTRL_IRQSTAT(port) + MXS_CLR;
168 ct->regs.mask = PINCTRL_IRQEN(port->id); 186 ct->regs.mask = PINCTRL_IRQEN(port);
169 187
170 irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0); 188 irq_setup_generic_chip(gc, IRQ_MSK(32), 0, IRQ_NOREQUEST, 0);
171} 189}
@@ -179,60 +197,83 @@ static int mxs_gpio_to_irq(struct gpio_chip *gc, unsigned offset)
179 return port->virtual_irq_start + offset; 197 return port->virtual_irq_start + offset;
180} 198}
181 199
200static struct platform_device_id mxs_gpio_ids[] = {
201 {
202 .name = "imx23-gpio",
203 .driver_data = IMX23_GPIO,
204 }, {
205 .name = "imx28-gpio",
206 .driver_data = IMX28_GPIO,
207 }, {
208 /* sentinel */
209 }
210};
211MODULE_DEVICE_TABLE(platform, mxs_gpio_ids);
212
213static const struct of_device_id mxs_gpio_dt_ids[] = {
214 { .compatible = "fsl,imx23-gpio", .data = (void *) IMX23_GPIO, },
215 { .compatible = "fsl,imx28-gpio", .data = (void *) IMX28_GPIO, },
216 { /* sentinel */ }
217};
218MODULE_DEVICE_TABLE(of, mxs_gpio_dt_ids);
219
182static int __devinit mxs_gpio_probe(struct platform_device *pdev) 220static int __devinit mxs_gpio_probe(struct platform_device *pdev)
183{ 221{
222 const struct of_device_id *of_id =
223 of_match_device(mxs_gpio_dt_ids, &pdev->dev);
224 struct device_node *np = pdev->dev.of_node;
225 struct device_node *parent;
184 static void __iomem *base; 226 static void __iomem *base;
185 struct mxs_gpio_port *port; 227 struct mxs_gpio_port *port;
186 struct resource *iores = NULL; 228 struct resource *iores = NULL;
187 int err; 229 int err;
188 230
189 port = kzalloc(sizeof(struct mxs_gpio_port), GFP_KERNEL); 231 port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
190 if (!port) 232 if (!port)
191 return -ENOMEM; 233 return -ENOMEM;
192 234
193 port->id = pdev->id; 235 if (np) {
236 port->id = of_alias_get_id(np, "gpio");
237 if (port->id < 0)
238 return port->id;
239 port->devid = (enum mxs_gpio_id) of_id->data;
240 } else {
241 port->id = pdev->id;
242 port->devid = pdev->id_entry->driver_data;
243 }
194 port->virtual_irq_start = MXS_GPIO_IRQ_START + port->id * 32; 244 port->virtual_irq_start = MXS_GPIO_IRQ_START + port->id * 32;
195 245
246 port->irq = platform_get_irq(pdev, 0);
247 if (port->irq < 0)
248 return port->irq;
249
196 /* 250 /*
197 * map memory region only once, as all the gpio ports 251 * map memory region only once, as all the gpio ports
198 * share the same one 252 * share the same one
199 */ 253 */
200 if (!base) { 254 if (!base) {
201 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0); 255 if (np) {
202 if (!iores) { 256 parent = of_get_parent(np);
203 err = -ENODEV; 257 base = of_iomap(parent, 0);
204 goto out_kfree; 258 of_node_put(parent);
205 } 259 } else {
206 260 iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
207 if (!request_mem_region(iores->start, resource_size(iores), 261 base = devm_request_and_ioremap(&pdev->dev, iores);
208 pdev->name)) {
209 err = -EBUSY;
210 goto out_kfree;
211 }
212
213 base = ioremap(iores->start, resource_size(iores));
214 if (!base) {
215 err = -ENOMEM;
216 goto out_release_mem;
217 } 262 }
263 if (!base)
264 return -EADDRNOTAVAIL;
218 } 265 }
219 port->base = base; 266 port->base = base;
220 267
221 port->irq = platform_get_irq(pdev, 0);
222 if (port->irq < 0) {
223 err = -EINVAL;
224 goto out_iounmap;
225 }
226
227 /* 268 /*
228 * select the pin interrupt functionality but initially 269 * select the pin interrupt functionality but initially
229 * disable the interrupts 270 * disable the interrupts
230 */ 271 */
231 writel(~0U, port->base + PINCTRL_PIN2IRQ(port->id)); 272 writel(~0U, port->base + PINCTRL_PIN2IRQ(port));
232 writel(0, port->base + PINCTRL_IRQEN(port->id)); 273 writel(0, port->base + PINCTRL_IRQEN(port));
233 274
234 /* clear address has to be used to clear IRQSTAT bits */ 275 /* clear address has to be used to clear IRQSTAT bits */
235 writel(~0U, port->base + PINCTRL_IRQSTAT(port->id) + MXS_CLR); 276 writel(~0U, port->base + PINCTRL_IRQSTAT(port) + MXS_CLR);
236 277
237 /* gpio-mxs can be a generic irq chip */ 278 /* gpio-mxs can be a generic irq chip */
238 mxs_gpio_init_gc(port); 279 mxs_gpio_init_gc(port);
@@ -242,41 +283,32 @@ static int __devinit mxs_gpio_probe(struct platform_device *pdev)
242 irq_set_handler_data(port->irq, port); 283 irq_set_handler_data(port->irq, port);
243 284
244 err = bgpio_init(&port->bgc, &pdev->dev, 4, 285 err = bgpio_init(&port->bgc, &pdev->dev, 4,
245 port->base + PINCTRL_DIN(port->id), 286 port->base + PINCTRL_DIN(port),
246 port->base + PINCTRL_DOUT(port->id), NULL, 287 port->base + PINCTRL_DOUT(port), NULL,
247 port->base + PINCTRL_DOE(port->id), NULL, 0); 288 port->base + PINCTRL_DOE(port), NULL, 0);
248 if (err) 289 if (err)
249 goto out_iounmap; 290 return err;
250 291
251 port->bgc.gc.to_irq = mxs_gpio_to_irq; 292 port->bgc.gc.to_irq = mxs_gpio_to_irq;
252 port->bgc.gc.base = port->id * 32; 293 port->bgc.gc.base = port->id * 32;
253 294
254 err = gpiochip_add(&port->bgc.gc); 295 err = gpiochip_add(&port->bgc.gc);
255 if (err) 296 if (err) {
256 goto out_bgpio_remove; 297 bgpio_remove(&port->bgc);
298 return err;
299 }
257 300
258 return 0; 301 return 0;
259
260out_bgpio_remove:
261 bgpio_remove(&port->bgc);
262out_iounmap:
263 if (iores)
264 iounmap(port->base);
265out_release_mem:
266 if (iores)
267 release_mem_region(iores->start, resource_size(iores));
268out_kfree:
269 kfree(port);
270 dev_info(&pdev->dev, "%s failed with errno %d\n", __func__, err);
271 return err;
272} 302}
273 303
274static struct platform_driver mxs_gpio_driver = { 304static struct platform_driver mxs_gpio_driver = {
275 .driver = { 305 .driver = {
276 .name = "gpio-mxs", 306 .name = "gpio-mxs",
277 .owner = THIS_MODULE, 307 .owner = THIS_MODULE,
308 .of_match_table = mxs_gpio_dt_ids,
278 }, 309 },
279 .probe = mxs_gpio_probe, 310 .probe = mxs_gpio_probe,
311 .id_table = mxs_gpio_ids,
280}; 312};
281 313
282static int __init mxs_gpio_init(void) 314static int __init mxs_gpio_init(void)
diff --git a/drivers/gpio/gpio-samsung.c b/drivers/gpio/gpio-samsung.c
index 421f6af0f995..7bb00448e13d 100644
--- a/drivers/gpio/gpio-samsung.c
+++ b/drivers/gpio/gpio-samsung.c
@@ -2454,6 +2454,12 @@ static struct samsung_gpio_chip exynos5_gpios_1[] = {
2454 }, 2454 },
2455 }, { 2455 }, {
2456 .chip = { 2456 .chip = {
2457 .base = EXYNOS5_GPC4(0),
2458 .ngpio = EXYNOS5_GPIO_C4_NR,
2459 .label = "GPC4",
2460 },
2461 }, {
2462 .chip = {
2457 .base = EXYNOS5_GPD0(0), 2463 .base = EXYNOS5_GPD0(0),
2458 .ngpio = EXYNOS5_GPIO_D0_NR, 2464 .ngpio = EXYNOS5_GPIO_D0_NR,
2459 .label = "GPD0", 2465 .label = "GPD0",
@@ -2826,8 +2832,11 @@ static __init void exynos5_gpiolib_init(void)
2826 goto err_ioremap1; 2832 goto err_ioremap1;
2827 } 2833 }
2828 2834
2835 /* need to set base address for gpc4 */
2836 exonys5_gpios_1[11].base = gpio_base1 + 0x2E0;
2837
2829 /* need to set base address for gpx */ 2838 /* need to set base address for gpx */
2830 chip = &exynos5_gpios_1[20]; 2839 chip = &exynos5_gpios_1[21];
2831 gpx_base = gpio_base1 + 0xC00; 2840 gpx_base = gpio_base1 + 0xC00;
2832 for (i = 0; i < 4; i++, chip++, gpx_base += 0x20) 2841 for (i = 0; i < 4; i++, chip++, gpx_base += 0x20)
2833 chip->base = gpx_base; 2842 chip->base = gpx_base;
diff --git a/drivers/gpio/gpio-sch.c b/drivers/gpio/gpio-sch.c
index 8cadf4d683a8..424dce8e3f30 100644
--- a/drivers/gpio/gpio-sch.c
+++ b/drivers/gpio/gpio-sch.c
@@ -232,6 +232,14 @@ static int __devinit sch_gpio_probe(struct platform_device *pdev)
232 sch_gpio_resume.ngpio = 9; 232 sch_gpio_resume.ngpio = 9;
233 break; 233 break;
234 234
235 case PCI_DEVICE_ID_INTEL_CENTERTON_ILB:
236 sch_gpio_core.base = 0;
237 sch_gpio_core.ngpio = 21;
238
239 sch_gpio_resume.base = 21;
240 sch_gpio_resume.ngpio = 9;
241 break;
242
235 default: 243 default:
236 return -ENODEV; 244 return -ENODEV;
237 } 245 }
diff --git a/drivers/gpio/gpio-sta2x11.c b/drivers/gpio/gpio-sta2x11.c
new file mode 100644
index 000000000000..38416be8ba11
--- /dev/null
+++ b/drivers/gpio/gpio-sta2x11.c
@@ -0,0 +1,435 @@
1/*
2 * STMicroelectronics ConneXt (STA2X11) GPIO driver
3 *
4 * Copyright 2012 ST Microelectronics (Alessandro Rubini)
5 * Based on gpio-ml-ioh.c, Copyright 2010 OKI Semiconductors Ltd.
6 * Also based on previous sta2x11 work, Copyright 2011 Wind River Systems, Inc.
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License version 2 as
10 * published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
15 * See the GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
20 *
21 */
22
23#include <linux/module.h>
24#include <linux/kernel.h>
25#include <linux/slab.h>
26#include <linux/gpio.h>
27#include <linux/interrupt.h>
28#include <linux/irq.h>
29#include <linux/pci.h>
30#include <linux/platform_device.h>
31#include <linux/mfd/sta2x11-mfd.h>
32
33struct gsta_regs {
34 u32 dat; /* 0x00 */
35 u32 dats;
36 u32 datc;
37 u32 pdis;
38 u32 dir; /* 0x10 */
39 u32 dirs;
40 u32 dirc;
41 u32 unused_1c;
42 u32 afsela; /* 0x20 */
43 u32 unused_24[7];
44 u32 rimsc; /* 0x40 */
45 u32 fimsc;
46 u32 is;
47 u32 ic;
48};
49
50struct gsta_gpio {
51 spinlock_t lock;
52 struct device *dev;
53 void __iomem *reg_base;
54 struct gsta_regs __iomem *regs[GSTA_NR_BLOCKS];
55 struct gpio_chip gpio;
56 int irq_base;
57 /* FIXME: save the whole config here (AF, ...) */
58 unsigned irq_type[GSTA_NR_GPIO];
59};
60
61static inline struct gsta_regs __iomem *__regs(struct gsta_gpio *chip, int nr)
62{
63 return chip->regs[nr / GSTA_GPIO_PER_BLOCK];
64}
65
66static inline u32 __bit(int nr)
67{
68 return 1U << (nr % GSTA_GPIO_PER_BLOCK);
69}
70
71/*
72 * gpio methods
73 */
74
75static void gsta_gpio_set(struct gpio_chip *gpio, unsigned nr, int val)
76{
77 struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
78 struct gsta_regs __iomem *regs = __regs(chip, nr);
79 u32 bit = __bit(nr);
80
81 if (val)
82 writel(bit, &regs->dats);
83 else
84 writel(bit, &regs->datc);
85}
86
87static int gsta_gpio_get(struct gpio_chip *gpio, unsigned nr)
88{
89 struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
90 struct gsta_regs __iomem *regs = __regs(chip, nr);
91 u32 bit = __bit(nr);
92
93 return readl(&regs->dat) & bit;
94}
95
96static int gsta_gpio_direction_output(struct gpio_chip *gpio, unsigned nr,
97 int val)
98{
99 struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
100 struct gsta_regs __iomem *regs = __regs(chip, nr);
101 u32 bit = __bit(nr);
102
103 writel(bit, &regs->dirs);
104 /* Data register after direction, otherwise pullup/down is selected */
105 if (val)
106 writel(bit, &regs->dats);
107 else
108 writel(bit, &regs->datc);
109 return 0;
110}
111
112static int gsta_gpio_direction_input(struct gpio_chip *gpio, unsigned nr)
113{
114 struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
115 struct gsta_regs __iomem *regs = __regs(chip, nr);
116 u32 bit = __bit(nr);
117
118 writel(bit, &regs->dirc);
119 return 0;
120}
121
122static int gsta_gpio_to_irq(struct gpio_chip *gpio, unsigned offset)
123{
124 struct gsta_gpio *chip = container_of(gpio, struct gsta_gpio, gpio);
125 return chip->irq_base + offset;
126}
127
128static void gsta_gpio_setup(struct gsta_gpio *chip) /* called from probe */
129{
130 struct gpio_chip *gpio = &chip->gpio;
131
132 /*
133 * ARCH_NR_GPIOS is currently 256 and dynamic allocation starts
134 * from the end. However, for compatibility, we need the first
135 * ConneXt device to start from gpio 0: it's the main chipset
136 * on most boards so documents and drivers assume gpio0..gpio127
137 */
138 static int gpio_base;
139
140 gpio->label = dev_name(chip->dev);
141 gpio->owner = THIS_MODULE;
142 gpio->direction_input = gsta_gpio_direction_input;
143 gpio->get = gsta_gpio_get;
144 gpio->direction_output = gsta_gpio_direction_output;
145 gpio->set = gsta_gpio_set;
146 gpio->dbg_show = NULL;
147 gpio->base = gpio_base;
148 gpio->ngpio = GSTA_NR_GPIO;
149 gpio->can_sleep = 0;
150 gpio->to_irq = gsta_gpio_to_irq;
151
152 /*
153 * After the first device, turn to dynamic gpio numbers.
154 * For example, with ARCH_NR_GPIOS = 256 we can fit two cards
155 */
156 if (!gpio_base)
157 gpio_base = -1;
158}
159
160/*
161 * Special method: alternate functions and pullup/pulldown. This is only
162 * invoked on startup to configure gpio's according to platform data.
163 * FIXME : this functionality shall be managed (and exported to other drivers)
164 * via the pin control subsystem.
165 */
166static void gsta_set_config(struct gsta_gpio *chip, int nr, unsigned cfg)
167{
168 struct gsta_regs __iomem *regs = __regs(chip, nr);
169 unsigned long flags;
170 u32 bit = __bit(nr);
171 u32 val;
172 int err = 0;
173
174 pr_info("%s: %p %i %i\n", __func__, chip, nr, cfg);
175
176 if (cfg == PINMUX_TYPE_NONE)
177 return;
178
179 /* Alternate function or not? */
180 spin_lock_irqsave(&chip->lock, flags);
181 val = readl(&regs->afsela);
182 if (cfg == PINMUX_TYPE_FUNCTION)
183 val |= bit;
184 else
185 val &= ~bit;
 186 writel(val, &regs->afsela);
187 if (cfg == PINMUX_TYPE_FUNCTION) {
188 spin_unlock_irqrestore(&chip->lock, flags);
189 return;
190 }
191
192 /* not alternate function: set details */
193 switch (cfg) {
194 case PINMUX_TYPE_OUTPUT_LOW:
195 writel(bit, &regs->dirs);
196 writel(bit, &regs->datc);
197 break;
198 case PINMUX_TYPE_OUTPUT_HIGH:
199 writel(bit, &regs->dirs);
200 writel(bit, &regs->dats);
201 break;
202 case PINMUX_TYPE_INPUT:
203 writel(bit, &regs->dirc);
204 val = readl(&regs->pdis) | bit;
205 writel(val, &regs->pdis);
206 break;
207 case PINMUX_TYPE_INPUT_PULLUP:
208 writel(bit, &regs->dirc);
209 val = readl(&regs->pdis) & ~bit;
210 writel(val, &regs->pdis);
211 writel(bit, &regs->dats);
212 break;
213 case PINMUX_TYPE_INPUT_PULLDOWN:
214 writel(bit, &regs->dirc);
215 val = readl(&regs->pdis) & ~bit;
216 writel(val, &regs->pdis);
217 writel(bit, &regs->datc);
218 break;
219 default:
220 err = 1;
221 }
222 spin_unlock_irqrestore(&chip->lock, flags);
223 if (err)
224 pr_err("%s: chip %p, pin %i, cfg %i is invalid\n",
225 __func__, chip, nr, cfg);
226}
227
228/*
229 * Irq methods
230 */
231
232static void gsta_irq_disable(struct irq_data *data)
233{
234 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
235 struct gsta_gpio *chip = gc->private;
236 int nr = data->irq - chip->irq_base;
237 struct gsta_regs __iomem *regs = __regs(chip, nr);
238 u32 bit = __bit(nr);
239 u32 val;
240 unsigned long flags;
241
242 spin_lock_irqsave(&chip->lock, flags);
243 if (chip->irq_type[nr] & IRQ_TYPE_EDGE_RISING) {
244 val = readl(&regs->rimsc) & ~bit;
245 writel(val, &regs->rimsc);
246 }
247 if (chip->irq_type[nr] & IRQ_TYPE_EDGE_FALLING) {
248 val = readl(&regs->fimsc) & ~bit;
249 writel(val, &regs->fimsc);
250 }
251 spin_unlock_irqrestore(&chip->lock, flags);
252 return;
253}
254
255static void gsta_irq_enable(struct irq_data *data)
256{
257 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
258 struct gsta_gpio *chip = gc->private;
259 int nr = data->irq - chip->irq_base;
260 struct gsta_regs __iomem *regs = __regs(chip, nr);
261 u32 bit = __bit(nr);
262 u32 val;
263 int type;
264 unsigned long flags;
265
266 type = chip->irq_type[nr];
267
268 spin_lock_irqsave(&chip->lock, flags);
269 val = readl(&regs->rimsc);
270 if (type & IRQ_TYPE_EDGE_RISING)
271 writel(val | bit, &regs->rimsc);
272 else
273 writel(val & ~bit, &regs->rimsc);
 274 val = readl(&regs->fimsc);
275 if (type & IRQ_TYPE_EDGE_FALLING)
276 writel(val | bit, &regs->fimsc);
277 else
278 writel(val & ~bit, &regs->fimsc);
279 spin_unlock_irqrestore(&chip->lock, flags);
280 return;
281}
282
283static int gsta_irq_type(struct irq_data *d, unsigned int type)
284{
285 struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
286 struct gsta_gpio *chip = gc->private;
287 int nr = d->irq - chip->irq_base;
288
289 /* We only support edge interrupts */
290 if (!(type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_EDGE_FALLING))) {
291 pr_debug("%s: unsupported type 0x%x\n", __func__, type);
292 return -EINVAL;
293 }
294
295 chip->irq_type[nr] = type; /* used for enable/disable */
296
297 gsta_irq_enable(d);
298 return 0;
299}
300
301static irqreturn_t gsta_gpio_handler(int irq, void *dev_id)
302{
303 struct gsta_gpio *chip = dev_id;
304 struct gsta_regs __iomem *regs;
305 u32 is;
306 int i, nr, base;
307 irqreturn_t ret = IRQ_NONE;
308
309 for (i = 0; i < GSTA_NR_BLOCKS; i++) {
310 regs = chip->regs[i];
311 base = chip->irq_base + i * GSTA_GPIO_PER_BLOCK;
312 while ((is = readl(&regs->is))) {
313 nr = __ffs(is);
314 irq = base + nr;
315 generic_handle_irq(irq);
316 writel(1 << nr, &regs->ic);
317 ret = IRQ_HANDLED;
318 }
319 }
320 return ret;
321}
322
323static __devinit void gsta_alloc_irq_chip(struct gsta_gpio *chip)
324{
325 struct irq_chip_generic *gc;
326 struct irq_chip_type *ct;
327
328 gc = irq_alloc_generic_chip(KBUILD_MODNAME, 1, chip->irq_base,
329 chip->reg_base, handle_simple_irq);
330 gc->private = chip;
331 ct = gc->chip_types;
332
333 ct->chip.irq_set_type = gsta_irq_type;
334 ct->chip.irq_disable = gsta_irq_disable;
335 ct->chip.irq_enable = gsta_irq_enable;
336
337 /* FIXME: this makes at most 32 interrupts. Request 0 by now */
338 irq_setup_generic_chip(gc, 0 /* IRQ_MSK(GSTA_GPIO_PER_BLOCK) */, 0,
339 IRQ_NOREQUEST | IRQ_NOPROBE, 0);
340
 341 /* Set up all 128 interrupts: code from setup_generic_chip */
342 {
343 struct irq_chip_type *ct = gc->chip_types;
344 int i, j;
345 for (j = 0; j < GSTA_NR_GPIO; j++) {
346 i = chip->irq_base + j;
347 irq_set_chip_and_handler(i, &ct->chip, ct->handler);
348 irq_set_chip_data(i, gc);
349 irq_modify_status(i, IRQ_NOREQUEST | IRQ_NOPROBE, 0);
350 }
351 gc->irq_cnt = i - gc->irq_base;
352 }
353}
354
355/* The platform device used here is instantiated by the MFD device */
356static int __devinit gsta_probe(struct platform_device *dev)
357{
358 int i, err;
359 struct pci_dev *pdev;
360 struct sta2x11_gpio_pdata *gpio_pdata;
361 struct gsta_gpio *chip;
362 struct resource *res;
363
364 pdev = *(struct pci_dev **)(dev->dev.platform_data);
365 gpio_pdata = dev_get_platdata(&pdev->dev);
366
367 if (gpio_pdata == NULL)
368 dev_err(&dev->dev, "no gpio config\n");
369 pr_debug("gpio config: %p\n", gpio_pdata);
370
371 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
372
373 chip = devm_kzalloc(&dev->dev, sizeof(*chip), GFP_KERNEL);
374 chip->dev = &dev->dev;
375 chip->reg_base = devm_request_and_ioremap(&dev->dev, res);
376
377 for (i = 0; i < GSTA_NR_BLOCKS; i++) {
378 chip->regs[i] = chip->reg_base + i * 4096;
379 /* disable all irqs */
380 writel(0, &chip->regs[i]->rimsc);
381 writel(0, &chip->regs[i]->fimsc);
382 writel(~0, &chip->regs[i]->ic);
383 }
384 spin_lock_init(&chip->lock);
385 gsta_gpio_setup(chip);
386 for (i = 0; i < GSTA_NR_GPIO; i++)
387 gsta_set_config(chip, i, gpio_pdata->pinconfig[i]);
388
389 /* 384 was used in previous code: be compatible for other drivers */
390 err = irq_alloc_descs(-1, 384, GSTA_NR_GPIO, NUMA_NO_NODE);
391 if (err < 0) {
392 dev_warn(&dev->dev, "sta2x11 gpio: Can't get irq base (%i)\n",
393 -err);
394 return err;
395 }
396 chip->irq_base = err;
397 gsta_alloc_irq_chip(chip);
398
399 err = request_irq(pdev->irq, gsta_gpio_handler,
400 IRQF_SHARED, KBUILD_MODNAME, chip);
401 if (err < 0) {
402 dev_err(&dev->dev, "sta2x11 gpio: Can't request irq (%i)\n",
403 -err);
404 goto err_free_descs;
405 }
406
407 err = gpiochip_add(&chip->gpio);
408 if (err < 0) {
409 dev_err(&dev->dev, "sta2x11 gpio: Can't register (%i)\n",
410 -err);
411 goto err_free_irq;
412 }
413
414 platform_set_drvdata(dev, chip);
415 return 0;
416
417err_free_irq:
418 free_irq(pdev->irq, chip);
419err_free_descs:
420 irq_free_descs(chip->irq_base, GSTA_NR_GPIO);
421 return err;
422}
423
424static struct platform_driver sta2x11_gpio_platform_driver = {
425 .driver = {
426 .name = "sta2x11-gpio",
427 .owner = THIS_MODULE,
428 },
429 .probe = gsta_probe,
430};
431
432module_platform_driver(sta2x11_gpio_platform_driver);
433
434MODULE_LICENSE("GPL v2");
435MODULE_DESCRIPTION("sta2x11_gpio GPIO driver");
diff --git a/drivers/gpio/gpio-stp-xway.c b/drivers/gpio/gpio-stp-xway.c
new file mode 100644
index 000000000000..e35096bf3cfb
--- /dev/null
+++ b/drivers/gpio/gpio-stp-xway.c
@@ -0,0 +1,301 @@
1/*
2 * This program is free software; you can redistribute it and/or modify it
3 * under the terms of the GNU General Public License version 2 as published
4 * by the Free Software Foundation.
5 *
6 * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
7 *
8 */
9
10#include <linux/slab.h>
11#include <linux/init.h>
12#include <linux/module.h>
13#include <linux/types.h>
14#include <linux/of_platform.h>
15#include <linux/mutex.h>
16#include <linux/gpio.h>
17#include <linux/io.h>
18#include <linux/of_gpio.h>
19#include <linux/clk.h>
20#include <linux/err.h>
21
22#include <lantiq_soc.h>
23
24/*
25 * The Serial To Parallel (STP) is found on MIPS based Lantiq socs. It is a
26 * peripheral controller used to drive external shift register cascades. At most
27 * 3 groups of 8 bits can be driven. The hardware is able to allow the DSL modem
28 * to drive the 2 LSBs of the cascade automatically.
29 */
30
31/* control register 0 */
32#define XWAY_STP_CON0 0x00
33/* control register 1 */
34#define XWAY_STP_CON1 0x04
35/* data register 0 */
36#define XWAY_STP_CPU0 0x08
37/* data register 1 */
38#define XWAY_STP_CPU1 0x0C
39/* access register */
40#define XWAY_STP_AR 0x10
41
42/* software or hardware update select bit */
43#define XWAY_STP_CON_SWU BIT(31)
44
45/* automatic update rates */
46#define XWAY_STP_2HZ 0
47#define XWAY_STP_4HZ BIT(23)
48#define XWAY_STP_8HZ BIT(24)
49#define XWAY_STP_10HZ (BIT(24) | BIT(23))
50#define XWAY_STP_SPEED_MASK (0xf << 23)
51
52/* clock source for automatic update */
53#define XWAY_STP_UPD_FPI BIT(31)
54#define XWAY_STP_UPD_MASK (BIT(31) | BIT(30))
55
56/* let the adsl core drive the 2 LSBs */
57#define XWAY_STP_ADSL_SHIFT 24
58#define XWAY_STP_ADSL_MASK 0x3
59
60/* 2 groups of 3 bits can be driven by the phys */
61#define XWAY_STP_PHY_MASK 0x3
62#define XWAY_STP_PHY1_SHIFT 27
63#define XWAY_STP_PHY2_SHIFT 15
64
65/* STP has 3 groups of 8 bits */
66#define XWAY_STP_GROUP0 BIT(0)
67#define XWAY_STP_GROUP1 BIT(1)
68#define XWAY_STP_GROUP2 BIT(2)
69#define XWAY_STP_GROUP_MASK (0x7)
70
71/* Edge configuration bits */
72#define XWAY_STP_FALLING BIT(26)
73#define XWAY_STP_EDGE_MASK BIT(26)
74
75#define xway_stp_r32(m, reg) __raw_readl(m + reg)
76#define xway_stp_w32(m, val, reg) __raw_writel(val, m + reg)
77#define xway_stp_w32_mask(m, clear, set, reg) \
78 ltq_w32((ltq_r32(m + reg) & ~(clear)) | (set), \
79 m + reg)
80
81struct xway_stp {
82 struct gpio_chip gc;
83 void __iomem *virt;
84 u32 edge; /* rising or falling edge triggered shift register */
85 u16 shadow; /* shadow the shift registers state */
86 u8 groups; /* we can drive 1-3 groups of 8bit each */
87 u8 dsl; /* the 2 LSBs can be driven by the dsl core */
88 u8 phy1; /* 3 bits can be driven by phy1 */
89 u8 phy2; /* 3 bits can be driven by phy2 */
90 u8 reserved; /* mask out the hw driven bits in gpio_request */
91};
92
93/**
94 * xway_stp_set() - gpio_chip->set - set gpios.
95 * @gc: Pointer to gpio_chip device structure.
96 * @gpio: GPIO signal number.
97 * @val: Value to be written to specified signal.
98 *
99 * Set the shadow value and call ltq_ebu_apply.
100 */
101static void xway_stp_set(struct gpio_chip *gc, unsigned gpio, int val)
102{
103 struct xway_stp *chip =
104 container_of(gc, struct xway_stp, gc);
105
106 if (val)
107 chip->shadow |= BIT(gpio);
108 else
109 chip->shadow &= ~BIT(gpio);
110 xway_stp_w32(chip->virt, chip->shadow, XWAY_STP_CPU0);
111 xway_stp_w32_mask(chip->virt, 0, XWAY_STP_CON_SWU, XWAY_STP_CON0);
112}
113
114/**
115 * xway_stp_dir_out() - gpio_chip->dir_out - set gpio direction.
116 * @gc: Pointer to gpio_chip device structure.
117 * @gpio: GPIO signal number.
118 * @val: Value to be written to specified signal.
119 *
120 * Same as xway_stp_set, always returns 0.
121 */
122static int xway_stp_dir_out(struct gpio_chip *gc, unsigned gpio, int val)
123{
124 xway_stp_set(gc, gpio, val);
125
126 return 0;
127}
128
129/**
130 * xway_stp_request() - gpio_chip->request
131 * @gc: Pointer to gpio_chip device structure.
132 * @gpio: GPIO signal number.
133 *
134 * We mask out the HW driven pins
135 */
136static int xway_stp_request(struct gpio_chip *gc, unsigned gpio)
137{
138 struct xway_stp *chip =
139 container_of(gc, struct xway_stp, gc);
140
141 if ((gpio < 8) && (chip->reserved & BIT(gpio))) {
142 dev_err(gc->dev, "GPIO %d is driven by hardware\n", gpio);
143 return -ENODEV;
144 }
145
146 return 0;
147}
148
149/**
150 * xway_stp_hw_init() - Configure the STP unit and enable the clock gate
151 * @virt: pointer to the remapped register range
152 */
153static int xway_stp_hw_init(struct xway_stp *chip)
154{
155 /* sane defaults */
156 xway_stp_w32(chip->virt, 0, XWAY_STP_AR);
157 xway_stp_w32(chip->virt, 0, XWAY_STP_CPU0);
158 xway_stp_w32(chip->virt, 0, XWAY_STP_CPU1);
159 xway_stp_w32(chip->virt, XWAY_STP_CON_SWU, XWAY_STP_CON0);
160 xway_stp_w32(chip->virt, 0, XWAY_STP_CON1);
161
162 /* apply edge trigger settings for the shift register */
163 xway_stp_w32_mask(chip->virt, XWAY_STP_EDGE_MASK,
164 chip->edge, XWAY_STP_CON0);
165
166 /* apply led group settings */
167 xway_stp_w32_mask(chip->virt, XWAY_STP_GROUP_MASK,
168 chip->groups, XWAY_STP_CON1);
169
170 /* tell the hardware which pins are controlled by the dsl modem */
171 xway_stp_w32_mask(chip->virt,
172 XWAY_STP_ADSL_MASK << XWAY_STP_ADSL_SHIFT,
173 chip->dsl << XWAY_STP_ADSL_SHIFT,
174 XWAY_STP_CON0);
175
176 /* tell the hardware which pins are controlled by the phys */
177 xway_stp_w32_mask(chip->virt,
178 XWAY_STP_PHY_MASK << XWAY_STP_PHY1_SHIFT,
179 chip->phy1 << XWAY_STP_PHY1_SHIFT,
180 XWAY_STP_CON0);
181 xway_stp_w32_mask(chip->virt,
182 XWAY_STP_PHY_MASK << XWAY_STP_PHY2_SHIFT,
183 chip->phy2 << XWAY_STP_PHY2_SHIFT,
184 XWAY_STP_CON1);
185
186 /* mask out the hw driven bits in gpio_request */
187 chip->reserved = (chip->phy2 << 5) | (chip->phy1 << 2) | chip->dsl;
188
189 /*
190 * if we have pins that are driven by hw, we need to tell the stp what
191 * clock to use as a timer.
192 */
193 if (chip->reserved)
194 xway_stp_w32_mask(chip->virt, XWAY_STP_UPD_MASK,
195 XWAY_STP_UPD_FPI, XWAY_STP_CON1);
196
197 return 0;
198}
199
200static int __devinit xway_stp_probe(struct platform_device *pdev)
201{
202 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
203 const __be32 *shadow, *groups, *dsl, *phy;
204 struct xway_stp *chip;
205 struct clk *clk;
206 int ret = 0;
207
208 if (!res) {
209 dev_err(&pdev->dev, "failed to request STP resource\n");
210 return -ENOENT;
211 }
212
213 chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
214 if (!chip)
215 return -ENOMEM;
216
217 chip->virt = devm_request_and_ioremap(&pdev->dev, res);
218 if (!chip->virt) {
219 dev_err(&pdev->dev, "failed to remap STP memory\n");
220 return -ENOMEM;
221 }
222 chip->gc.dev = &pdev->dev;
223 chip->gc.label = "stp-xway";
224 chip->gc.direction_output = xway_stp_dir_out;
225 chip->gc.set = xway_stp_set;
226 chip->gc.request = xway_stp_request;
227 chip->gc.base = -1;
228 chip->gc.owner = THIS_MODULE;
229
230 /* store the shadow value if one was passed by the devicetree */
231 shadow = of_get_property(pdev->dev.of_node, "lantiq,shadow", NULL);
232 if (shadow)
233 chip->shadow = be32_to_cpu(*shadow);
234
235 /* find out which gpio groups should be enabled */
236 groups = of_get_property(pdev->dev.of_node, "lantiq,groups", NULL);
237 if (groups)
238 chip->groups = be32_to_cpu(*groups) & XWAY_STP_GROUP_MASK;
239 else
240 chip->groups = XWAY_STP_GROUP0;
241 chip->gc.ngpio = fls(chip->groups) * 8;
242
243 /* find out which gpios are controlled by the dsl core */
244 dsl = of_get_property(pdev->dev.of_node, "lantiq,dsl", NULL);
245 if (dsl)
246 chip->dsl = be32_to_cpu(*dsl) & XWAY_STP_ADSL_MASK;
247
248 /* find out which gpios are controlled by the phys */
249 if (of_machine_is_compatible("lantiq,ar9") ||
250 of_machine_is_compatible("lantiq,gr9") ||
251 of_machine_is_compatible("lantiq,vr9")) {
252 phy = of_get_property(pdev->dev.of_node, "lantiq,phy1", NULL);
253 if (phy)
254 chip->phy1 = be32_to_cpu(*phy) & XWAY_STP_PHY_MASK;
255 phy = of_get_property(pdev->dev.of_node, "lantiq,phy2", NULL);
256 if (phy)
257 chip->phy2 = be32_to_cpu(*phy) & XWAY_STP_PHY_MASK;
258 }
259
260 /* check which edge trigger we should use, default to a falling edge */
261 if (!of_find_property(pdev->dev.of_node, "lantiq,rising", NULL))
262 chip->edge = XWAY_STP_FALLING;
263
264 clk = clk_get(&pdev->dev, NULL);
265 if (IS_ERR(clk)) {
266 dev_err(&pdev->dev, "Failed to get clock\n");
267 return PTR_ERR(clk);
268 }
269 clk_enable(clk);
270
271 ret = xway_stp_hw_init(chip);
272 if (!ret)
273 ret = gpiochip_add(&chip->gc);
274
275 if (!ret)
276 dev_info(&pdev->dev, "Init done\n");
277
278 return ret;
279}
280
281static const struct of_device_id xway_stp_match[] = {
282 { .compatible = "lantiq,gpio-stp-xway" },
283 {},
284};
285MODULE_DEVICE_TABLE(of, xway_stp_match);
286
287static struct platform_driver xway_stp_driver = {
288 .probe = xway_stp_probe,
289 .driver = {
290 .name = "gpio-stp-xway",
291 .owner = THIS_MODULE,
292 .of_match_table = xway_stp_match,
293 },
294};
295
296int __init xway_stp_init(void)
297{
298 return platform_driver_register(&xway_stp_driver);
299}
300
301subsys_initcall(xway_stp_init);
diff --git a/drivers/gpio/gpio-tps65910.c b/drivers/gpio/gpio-tps65910.c
index 7eef648a3351..c1ad2884f2ed 100644
--- a/drivers/gpio/gpio-tps65910.c
+++ b/drivers/gpio/gpio-tps65910.c
@@ -18,14 +18,27 @@
18#include <linux/errno.h> 18#include <linux/errno.h>
19#include <linux/gpio.h> 19#include <linux/gpio.h>
20#include <linux/i2c.h> 20#include <linux/i2c.h>
21#include <linux/platform_device.h>
21#include <linux/mfd/tps65910.h> 22#include <linux/mfd/tps65910.h>
23#include <linux/of_device.h>
24
25struct tps65910_gpio {
26 struct gpio_chip gpio_chip;
27 struct tps65910 *tps65910;
28};
29
30static inline struct tps65910_gpio *to_tps65910_gpio(struct gpio_chip *chip)
31{
32 return container_of(chip, struct tps65910_gpio, gpio_chip);
33}
22 34
23static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset) 35static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset)
24{ 36{
25 struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio); 37 struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
26 uint8_t val; 38 struct tps65910 *tps65910 = tps65910_gpio->tps65910;
39 unsigned int val;
27 40
28 tps65910->read(tps65910, TPS65910_GPIO0 + offset, 1, &val); 41 tps65910_reg_read(tps65910, TPS65910_GPIO0 + offset, &val);
29 42
30 if (val & GPIO_STS_MASK) 43 if (val & GPIO_STS_MASK)
31 return 1; 44 return 1;
@@ -36,83 +49,170 @@ static int tps65910_gpio_get(struct gpio_chip *gc, unsigned offset)
36static void tps65910_gpio_set(struct gpio_chip *gc, unsigned offset, 49static void tps65910_gpio_set(struct gpio_chip *gc, unsigned offset,
37 int value) 50 int value)
38{ 51{
39 struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio); 52 struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
53 struct tps65910 *tps65910 = tps65910_gpio->tps65910;
40 54
41 if (value) 55 if (value)
42 tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset, 56 tps65910_reg_set_bits(tps65910, TPS65910_GPIO0 + offset,
43 GPIO_SET_MASK); 57 GPIO_SET_MASK);
44 else 58 else
45 tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset, 59 tps65910_reg_clear_bits(tps65910, TPS65910_GPIO0 + offset,
46 GPIO_SET_MASK); 60 GPIO_SET_MASK);
47} 61}
48 62
49static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset, 63static int tps65910_gpio_output(struct gpio_chip *gc, unsigned offset,
50 int value) 64 int value)
51{ 65{
52 struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio); 66 struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
67 struct tps65910 *tps65910 = tps65910_gpio->tps65910;
53 68
54 /* Set the initial value */ 69 /* Set the initial value */
55 tps65910_gpio_set(gc, offset, value); 70 tps65910_gpio_set(gc, offset, value);
56 71
57 return tps65910_set_bits(tps65910, TPS65910_GPIO0 + offset, 72 return tps65910_reg_set_bits(tps65910, TPS65910_GPIO0 + offset,
58 GPIO_CFG_MASK); 73 GPIO_CFG_MASK);
59} 74}
60 75
61static int tps65910_gpio_input(struct gpio_chip *gc, unsigned offset) 76static int tps65910_gpio_input(struct gpio_chip *gc, unsigned offset)
62{ 77{
63 struct tps65910 *tps65910 = container_of(gc, struct tps65910, gpio); 78 struct tps65910_gpio *tps65910_gpio = to_tps65910_gpio(gc);
79 struct tps65910 *tps65910 = tps65910_gpio->tps65910;
64 80
65 return tps65910_clear_bits(tps65910, TPS65910_GPIO0 + offset, 81 return tps65910_reg_clear_bits(tps65910, TPS65910_GPIO0 + offset,
66 GPIO_CFG_MASK); 82 GPIO_CFG_MASK);
67} 83}
68 84
69void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base) 85#ifdef CONFIG_OF
86static struct tps65910_board *tps65910_parse_dt_for_gpio(struct device *dev,
87 struct tps65910 *tps65910, int chip_ngpio)
70{ 88{
89 struct tps65910_board *tps65910_board = tps65910->of_plat_data;
90 unsigned int prop_array[TPS6591X_MAX_NUM_GPIO];
91 int ngpio = min(chip_ngpio, TPS6591X_MAX_NUM_GPIO);
71 int ret; 92 int ret;
72 struct tps65910_board *board_data; 93 int idx;
94
95 tps65910_board->gpio_base = -1;
96 ret = of_property_read_u32_array(tps65910->dev->of_node,
97 "ti,en-gpio-sleep", prop_array, ngpio);
98 if (ret < 0) {
99 dev_dbg(dev, "ti,en-gpio-sleep not specified\n");
100 return tps65910_board;
101 }
73 102
74 if (!gpio_base) 103 for (idx = 0; idx < ngpio; idx++)
75 return; 104 tps65910_board->en_gpio_sleep[idx] = (prop_array[idx] != 0);
76 105
77 tps65910->gpio.owner = THIS_MODULE; 106 return tps65910_board;
78 tps65910->gpio.label = tps65910->i2c_client->name; 107}
79 tps65910->gpio.dev = tps65910->dev; 108#else
80 tps65910->gpio.base = gpio_base; 109static struct tps65910_board *tps65910_parse_dt_for_gpio(struct device *dev,
110 struct tps65910 *tps65910, int chip_ngpio)
111{
112 return NULL;
113}
114#endif
115
116static int __devinit tps65910_gpio_probe(struct platform_device *pdev)
117{
118 struct tps65910 *tps65910 = dev_get_drvdata(pdev->dev.parent);
119 struct tps65910_board *pdata = dev_get_platdata(tps65910->dev);
120 struct tps65910_gpio *tps65910_gpio;
121 int ret;
122 int i;
123
124 tps65910_gpio = devm_kzalloc(&pdev->dev,
125 sizeof(*tps65910_gpio), GFP_KERNEL);
126 if (!tps65910_gpio) {
127 dev_err(&pdev->dev, "Could not allocate tps65910_gpio\n");
128 return -ENOMEM;
129 }
130
131 tps65910_gpio->tps65910 = tps65910;
132
133 tps65910_gpio->gpio_chip.owner = THIS_MODULE;
134 tps65910_gpio->gpio_chip.label = tps65910->i2c_client->name;
81 135
82 switch(tps65910_chip_id(tps65910)) { 136 switch(tps65910_chip_id(tps65910)) {
83 case TPS65910: 137 case TPS65910:
84 tps65910->gpio.ngpio = TPS65910_NUM_GPIO; 138 tps65910_gpio->gpio_chip.ngpio = TPS65910_NUM_GPIO;
85 break; 139 break;
86 case TPS65911: 140 case TPS65911:
87 tps65910->gpio.ngpio = TPS65911_NUM_GPIO; 141 tps65910_gpio->gpio_chip.ngpio = TPS65911_NUM_GPIO;
88 break; 142 break;
89 default: 143 default:
90 return; 144 return -EINVAL;
145 }
146 tps65910_gpio->gpio_chip.can_sleep = 1;
147 tps65910_gpio->gpio_chip.direction_input = tps65910_gpio_input;
148 tps65910_gpio->gpio_chip.direction_output = tps65910_gpio_output;
149 tps65910_gpio->gpio_chip.set = tps65910_gpio_set;
150 tps65910_gpio->gpio_chip.get = tps65910_gpio_get;
151 tps65910_gpio->gpio_chip.dev = &pdev->dev;
152 if (pdata && pdata->gpio_base)
153 tps65910_gpio->gpio_chip.base = pdata->gpio_base;
154 else
155 tps65910_gpio->gpio_chip.base = -1;
156
157 if (!pdata && tps65910->dev->of_node)
158 pdata = tps65910_parse_dt_for_gpio(&pdev->dev, tps65910,
159 tps65910_gpio->gpio_chip.ngpio);
160
161 if (!pdata)
162 goto skip_init;
163
164 /* Configure sleep control for gpios if provided */
165 for (i = 0; i < tps65910_gpio->gpio_chip.ngpio; ++i) {
166 if (!pdata->en_gpio_sleep[i])
167 continue;
168
169 ret = tps65910_reg_set_bits(tps65910,
170 TPS65910_GPIO0 + i, GPIO_SLEEP_MASK);
171 if (ret < 0)
172 dev_warn(tps65910->dev,
173 "GPIO Sleep setting failed with err %d\n", ret);
91 } 174 }
92 tps65910->gpio.can_sleep = 1; 175
93 176skip_init:
94 tps65910->gpio.direction_input = tps65910_gpio_input; 177 ret = gpiochip_add(&tps65910_gpio->gpio_chip);
95 tps65910->gpio.direction_output = tps65910_gpio_output; 178 if (ret < 0) {
96 tps65910->gpio.set = tps65910_gpio_set; 179 dev_err(&pdev->dev, "Could not register gpiochip, %d\n", ret);
97 tps65910->gpio.get = tps65910_gpio_get; 180 return ret;
98
99 /* Configure sleep control for gpios */
100 board_data = dev_get_platdata(tps65910->dev);
101 if (board_data) {
102 int i;
103 for (i = 0; i < tps65910->gpio.ngpio; ++i) {
104 if (board_data->en_gpio_sleep[i]) {
105 ret = tps65910_set_bits(tps65910,
106 TPS65910_GPIO0 + i, GPIO_SLEEP_MASK);
107 if (ret < 0)
108 dev_warn(tps65910->dev,
109 "GPIO Sleep setting failed\n");
110 }
111 }
112 } 181 }
113 182
114 ret = gpiochip_add(&tps65910->gpio); 183 platform_set_drvdata(pdev, tps65910_gpio);
184
185 return ret;
186}
187
188static int __devexit tps65910_gpio_remove(struct platform_device *pdev)
189{
190 struct tps65910_gpio *tps65910_gpio = platform_get_drvdata(pdev);
115 191
116 if (ret) 192 return gpiochip_remove(&tps65910_gpio->gpio_chip);
117 dev_warn(tps65910->dev, "GPIO registration failed: %d\n", ret);
118} 193}
194
195static struct platform_driver tps65910_gpio_driver = {
196 .driver.name = "tps65910-gpio",
197 .driver.owner = THIS_MODULE,
198 .probe = tps65910_gpio_probe,
199 .remove = __devexit_p(tps65910_gpio_remove),
200};
201
202static int __init tps65910_gpio_init(void)
203{
204 return platform_driver_register(&tps65910_gpio_driver);
205}
206subsys_initcall(tps65910_gpio_init);
207
208static void __exit tps65910_gpio_exit(void)
209{
210 platform_driver_unregister(&tps65910_gpio_driver);
211}
212module_exit(tps65910_gpio_exit);
213
214MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
 215MODULE_AUTHOR("Jorge Eduardo Candelaria <jedu@slimlogic.co.uk>");
 216MODULE_DESCRIPTION("GPIO interface for TPS65910/TPS65911 PMICs");
217MODULE_LICENSE("GPL v2");
218MODULE_ALIAS("platform:tps65910-gpio");
diff --git a/drivers/gpio/gpio-wm831x.c b/drivers/gpio/gpio-wm831x.c
index deb949e75ec1..e56a2165641c 100644
--- a/drivers/gpio/gpio-wm831x.c
+++ b/drivers/gpio/gpio-wm831x.c
@@ -102,10 +102,8 @@ static int wm831x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
102 struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip); 102 struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
103 struct wm831x *wm831x = wm831x_gpio->wm831x; 103 struct wm831x *wm831x = wm831x_gpio->wm831x;
104 104
105 if (!wm831x->irq_base) 105 return irq_create_mapping(wm831x->irq_domain,
106 return -EINVAL; 106 WM831X_IRQ_GPIO_1 + offset);
107
108 return wm831x->irq_base + WM831X_IRQ_GPIO_1 + offset;
109} 107}
110 108
111static int wm831x_gpio_set_debounce(struct gpio_chip *chip, unsigned offset, 109static int wm831x_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
diff --git a/drivers/i2c/Kconfig b/drivers/i2c/Kconfig
index 5f13c62e64b4..5a3bb3d738d8 100644
--- a/drivers/i2c/Kconfig
+++ b/drivers/i2c/Kconfig
@@ -49,7 +49,6 @@ config I2C_CHARDEV
49 49
50config I2C_MUX 50config I2C_MUX
51 tristate "I2C bus multiplexing support" 51 tristate "I2C bus multiplexing support"
52 depends on EXPERIMENTAL
53 help 52 help
54 Say Y here if you want the I2C core to support the ability to 53 Say Y here if you want the I2C core to support the ability to
55 handle multiplexed I2C bus topologies, by presenting each 54 handle multiplexed I2C bus topologies, by presenting each
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 94468a64ce3a..7244c8be6063 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -445,20 +445,6 @@ config I2C_IOP3XX
445 This driver can also be built as a module. If so, the module 445 This driver can also be built as a module. If so, the module
446 will be called i2c-iop3xx. 446 will be called i2c-iop3xx.
447 447
448config I2C_IXP2000
449 tristate "IXP2000 GPIO-Based I2C Interface (DEPRECATED)"
450 depends on ARCH_IXP2000
451 select I2C_ALGOBIT
452 help
453 Say Y here if you have an Intel IXP2000 (2400, 2800, 2850) based
454 system and are using GPIO lines for an I2C bus.
455
456 This support is also available as a module. If so, the module
457 will be called i2c-ixp2000.
458
459 This driver is deprecated and will be dropped soon. Use i2c-gpio
460 instead.
461
462config I2C_MPC 448config I2C_MPC
463 tristate "MPC107/824x/85xx/512x/52xx/83xx/86xx" 449 tristate "MPC107/824x/85xx/512x/52xx/83xx/86xx"
464 depends on PPC 450 depends on PPC
@@ -483,6 +469,7 @@ config I2C_MV64XXX
483config I2C_MXS 469config I2C_MXS
484 tristate "Freescale i.MX28 I2C interface" 470 tristate "Freescale i.MX28 I2C interface"
485 depends on SOC_IMX28 471 depends on SOC_IMX28
472 select STMP_DEVICE
486 help 473 help
487 Say Y here if you want to use the I2C bus controller on 474 Say Y here if you want to use the I2C bus controller on
488 the Freescale i.MX28 processors. 475 the Freescale i.MX28 processors.
diff --git a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile
index 569567b0d027..ce3c2be7fb40 100644
--- a/drivers/i2c/busses/Makefile
+++ b/drivers/i2c/busses/Makefile
@@ -44,7 +44,6 @@ obj-$(CONFIG_I2C_IBM_IIC) += i2c-ibm_iic.o
44obj-$(CONFIG_I2C_IMX) += i2c-imx.o 44obj-$(CONFIG_I2C_IMX) += i2c-imx.o
45obj-$(CONFIG_I2C_INTEL_MID) += i2c-intel-mid.o 45obj-$(CONFIG_I2C_INTEL_MID) += i2c-intel-mid.o
46obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o 46obj-$(CONFIG_I2C_IOP3XX) += i2c-iop3xx.o
47obj-$(CONFIG_I2C_IXP2000) += i2c-ixp2000.o
48obj-$(CONFIG_I2C_MPC) += i2c-mpc.o 47obj-$(CONFIG_I2C_MPC) += i2c-mpc.o
49obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o 48obj-$(CONFIG_I2C_MV64XXX) += i2c-mv64xxx.o
50obj-$(CONFIG_I2C_MXS) += i2c-mxs.o 49obj-$(CONFIG_I2C_MXS) += i2c-mxs.o
diff --git a/drivers/i2c/busses/i2c-davinci.c b/drivers/i2c/busses/i2c-davinci.c
index a76d85fa3ad7..79b4bcb3b85c 100644
--- a/drivers/i2c/busses/i2c-davinci.c
+++ b/drivers/i2c/busses/i2c-davinci.c
@@ -755,7 +755,7 @@ static int davinci_i2c_remove(struct platform_device *pdev)
755 dev->clk = NULL; 755 dev->clk = NULL;
756 756
757 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0); 757 davinci_i2c_write_reg(dev, DAVINCI_I2C_MDR_REG, 0);
758 free_irq(IRQ_I2C, dev); 758 free_irq(dev->irq, dev);
759 iounmap(dev->base); 759 iounmap(dev->base);
760 kfree(dev); 760 kfree(dev);
761 761
diff --git a/drivers/i2c/busses/i2c-designware-core.c b/drivers/i2c/busses/i2c-designware-core.c
index df8799241009..1e48bec80edf 100644
--- a/drivers/i2c/busses/i2c-designware-core.c
+++ b/drivers/i2c/busses/i2c-designware-core.c
@@ -164,9 +164,15 @@ static char *abort_sources[] = {
164 164
165u32 dw_readl(struct dw_i2c_dev *dev, int offset) 165u32 dw_readl(struct dw_i2c_dev *dev, int offset)
166{ 166{
167 u32 value = readl(dev->base + offset); 167 u32 value;
168 168
169 if (dev->swab) 169 if (dev->accessor_flags & ACCESS_16BIT)
170 value = readw(dev->base + offset) |
171 (readw(dev->base + offset + 2) << 16);
172 else
173 value = readl(dev->base + offset);
174
175 if (dev->accessor_flags & ACCESS_SWAP)
170 return swab32(value); 176 return swab32(value);
171 else 177 else
172 return value; 178 return value;
@@ -174,10 +180,15 @@ u32 dw_readl(struct dw_i2c_dev *dev, int offset)
174 180
175void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset) 181void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset)
176{ 182{
177 if (dev->swab) 183 if (dev->accessor_flags & ACCESS_SWAP)
178 b = swab32(b); 184 b = swab32(b);
179 185
180 writel(b, dev->base + offset); 186 if (dev->accessor_flags & ACCESS_16BIT) {
187 writew((u16)b, dev->base + offset);
188 writew((u16)(b >> 16), dev->base + offset + 2);
189 } else {
190 writel(b, dev->base + offset);
191 }
181} 192}
182 193
183static u32 194static u32
@@ -251,14 +262,14 @@ int i2c_dw_init(struct dw_i2c_dev *dev)
251 262
252 input_clock_khz = dev->get_clk_rate_khz(dev); 263 input_clock_khz = dev->get_clk_rate_khz(dev);
253 264
254 /* Configure register endianess access */
255 reg = dw_readl(dev, DW_IC_COMP_TYPE); 265 reg = dw_readl(dev, DW_IC_COMP_TYPE);
256 if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) { 266 if (reg == ___constant_swab32(DW_IC_COMP_TYPE_VALUE)) {
257 dev->swab = 1; 267 /* Configure register endianess access */
258 reg = DW_IC_COMP_TYPE_VALUE; 268 dev->accessor_flags |= ACCESS_SWAP;
259 } 269 } else if (reg == (DW_IC_COMP_TYPE_VALUE & 0x0000ffff)) {
260 270 /* Configure register access mode 16bit */
261 if (reg != DW_IC_COMP_TYPE_VALUE) { 271 dev->accessor_flags |= ACCESS_16BIT;
272 } else if (reg != DW_IC_COMP_TYPE_VALUE) {
262 dev_err(dev->dev, "Unknown Synopsys component type: " 273 dev_err(dev->dev, "Unknown Synopsys component type: "
263 "0x%08x\n", reg); 274 "0x%08x\n", reg);
264 return -ENODEV; 275 return -ENODEV;
diff --git a/drivers/i2c/busses/i2c-designware-core.h b/drivers/i2c/busses/i2c-designware-core.h
index 02d1a2ddd853..9c1840ee09c7 100644
--- a/drivers/i2c/busses/i2c-designware-core.h
+++ b/drivers/i2c/busses/i2c-designware-core.h
@@ -82,7 +82,7 @@ struct dw_i2c_dev {
82 unsigned int status; 82 unsigned int status;
83 u32 abort_source; 83 u32 abort_source;
84 int irq; 84 int irq;
85 int swab; 85 u32 accessor_flags;
86 struct i2c_adapter adapter; 86 struct i2c_adapter adapter;
87 u32 functionality; 87 u32 functionality;
88 u32 master_cfg; 88 u32 master_cfg;
@@ -90,6 +90,9 @@ struct dw_i2c_dev {
90 unsigned int rx_fifo_depth; 90 unsigned int rx_fifo_depth;
91}; 91};
92 92
93#define ACCESS_SWAP 0x00000001
94#define ACCESS_16BIT 0x00000002
95
93extern u32 dw_readl(struct dw_i2c_dev *dev, int offset); 96extern u32 dw_readl(struct dw_i2c_dev *dev, int offset);
94extern void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset); 97extern void dw_writel(struct dw_i2c_dev *dev, u32 b, int offset);
95extern int i2c_dw_init(struct dw_i2c_dev *dev); 98extern int i2c_dw_init(struct dw_i2c_dev *dev);
diff --git a/drivers/i2c/busses/i2c-designware-platdrv.c b/drivers/i2c/busses/i2c-designware-platdrv.c
index 4ba589ab8614..0506fef8dc00 100644
--- a/drivers/i2c/busses/i2c-designware-platdrv.c
+++ b/drivers/i2c/busses/i2c-designware-platdrv.c
@@ -36,6 +36,7 @@
36#include <linux/interrupt.h> 36#include <linux/interrupt.h>
37#include <linux/of_i2c.h> 37#include <linux/of_i2c.h>
38#include <linux/platform_device.h> 38#include <linux/platform_device.h>
39#include <linux/pm.h>
39#include <linux/io.h> 40#include <linux/io.h>
40#include <linux/slab.h> 41#include <linux/slab.h>
41#include "i2c-designware-core.h" 42#include "i2c-designware-core.h"
@@ -95,7 +96,7 @@ static int __devinit dw_i2c_probe(struct platform_device *pdev)
95 r = -ENODEV; 96 r = -ENODEV;
96 goto err_free_mem; 97 goto err_free_mem;
97 } 98 }
98 clk_enable(dev->clk); 99 clk_prepare_enable(dev->clk);
99 100
100 dev->functionality = 101 dev->functionality =
101 I2C_FUNC_I2C | 102 I2C_FUNC_I2C |
@@ -155,7 +156,7 @@ err_free_irq:
155err_iounmap: 156err_iounmap:
156 iounmap(dev->base); 157 iounmap(dev->base);
157err_unuse_clocks: 158err_unuse_clocks:
158 clk_disable(dev->clk); 159 clk_disable_unprepare(dev->clk);
159 clk_put(dev->clk); 160 clk_put(dev->clk);
160 dev->clk = NULL; 161 dev->clk = NULL;
161err_free_mem: 162err_free_mem:
@@ -177,7 +178,7 @@ static int __devexit dw_i2c_remove(struct platform_device *pdev)
177 i2c_del_adapter(&dev->adapter); 178 i2c_del_adapter(&dev->adapter);
178 put_device(&pdev->dev); 179 put_device(&pdev->dev);
179 180
180 clk_disable(dev->clk); 181 clk_disable_unprepare(dev->clk);
181 clk_put(dev->clk); 182 clk_put(dev->clk);
182 dev->clk = NULL; 183 dev->clk = NULL;
183 184
@@ -198,6 +199,31 @@ static const struct of_device_id dw_i2c_of_match[] = {
198MODULE_DEVICE_TABLE(of, dw_i2c_of_match); 199MODULE_DEVICE_TABLE(of, dw_i2c_of_match);
199#endif 200#endif
200 201
202#ifdef CONFIG_PM
203static int dw_i2c_suspend(struct device *dev)
204{
205 struct platform_device *pdev = to_platform_device(dev);
206 struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
207
208 clk_disable_unprepare(i_dev->clk);
209
210 return 0;
211}
212
213static int dw_i2c_resume(struct device *dev)
214{
215 struct platform_device *pdev = to_platform_device(dev);
216 struct dw_i2c_dev *i_dev = platform_get_drvdata(pdev);
217
218 clk_prepare_enable(i_dev->clk);
219 i2c_dw_init(i_dev);
220
221 return 0;
222}
223#endif
224
225static SIMPLE_DEV_PM_OPS(dw_i2c_dev_pm_ops, dw_i2c_suspend, dw_i2c_resume);
226
201/* work with hotplug and coldplug */ 227/* work with hotplug and coldplug */
202MODULE_ALIAS("platform:i2c_designware"); 228MODULE_ALIAS("platform:i2c_designware");
203 229
@@ -207,6 +233,7 @@ static struct platform_driver dw_i2c_driver = {
207 .name = "i2c_designware", 233 .name = "i2c_designware",
208 .owner = THIS_MODULE, 234 .owner = THIS_MODULE,
209 .of_match_table = of_match_ptr(dw_i2c_of_match), 235 .of_match_table = of_match_ptr(dw_i2c_of_match),
236 .pm = &dw_i2c_dev_pm_ops,
210 }, 237 },
211}; 238};
212 239
diff --git a/drivers/i2c/busses/i2c-eg20t.c b/drivers/i2c/busses/i2c-eg20t.c
index c811289b61e2..2f74ae872e1e 100644
--- a/drivers/i2c/busses/i2c-eg20t.c
+++ b/drivers/i2c/busses/i2c-eg20t.c
@@ -263,11 +263,6 @@ static void pch_i2c_init(struct i2c_algo_pch_data *adap)
263 init_waitqueue_head(&pch_event); 263 init_waitqueue_head(&pch_event);
264} 264}
265 265
266static inline bool ktime_lt(const ktime_t cmp1, const ktime_t cmp2)
267{
268 return cmp1.tv64 < cmp2.tv64;
269}
270
271/** 266/**
272 * pch_i2c_wait_for_bus_idle() - check the status of bus. 267 * pch_i2c_wait_for_bus_idle() - check the status of bus.
273 * @adap: Pointer to struct i2c_algo_pch_data. 268 * @adap: Pointer to struct i2c_algo_pch_data.
@@ -317,33 +312,6 @@ static void pch_i2c_start(struct i2c_algo_pch_data *adap)
317} 312}
318 313
319/** 314/**
320 * pch_i2c_wait_for_xfer_complete() - initiates a wait for the tx complete event
321 * @adap: Pointer to struct i2c_algo_pch_data.
322 */
323static s32 pch_i2c_wait_for_xfer_complete(struct i2c_algo_pch_data *adap)
324{
325 long ret;
326 ret = wait_event_timeout(pch_event,
327 (adap->pch_event_flag != 0), msecs_to_jiffies(1000));
328
329 if (ret == 0) {
330 pch_err(adap, "timeout: %x\n", adap->pch_event_flag);
331 adap->pch_event_flag = 0;
332 return -ETIMEDOUT;
333 }
334
335 if (adap->pch_event_flag & I2C_ERROR_MASK) {
336 pch_err(adap, "error bits set: %x\n", adap->pch_event_flag);
337 adap->pch_event_flag = 0;
338 return -EIO;
339 }
340
341 adap->pch_event_flag = 0;
342
343 return 0;
344}
345
346/**
347 * pch_i2c_getack() - to confirm ACK/NACK 315 * pch_i2c_getack() - to confirm ACK/NACK
348 * @adap: Pointer to struct i2c_algo_pch_data. 316 * @adap: Pointer to struct i2c_algo_pch_data.
349 */ 317 */
@@ -373,6 +341,40 @@ static void pch_i2c_stop(struct i2c_algo_pch_data *adap)
373 pch_clrbit(adap->pch_base_address, PCH_I2CCTL, PCH_START); 341 pch_clrbit(adap->pch_base_address, PCH_I2CCTL, PCH_START);
374} 342}
375 343
344static int pch_i2c_wait_for_check_xfer(struct i2c_algo_pch_data *adap)
345{
346 long ret;
347
348 ret = wait_event_timeout(pch_event,
349 (adap->pch_event_flag != 0), msecs_to_jiffies(1000));
350 if (!ret) {
351 pch_err(adap, "%s:wait-event timeout\n", __func__);
352 adap->pch_event_flag = 0;
353 pch_i2c_stop(adap);
354 pch_i2c_init(adap);
355 return -ETIMEDOUT;
356 }
357
358 if (adap->pch_event_flag & I2C_ERROR_MASK) {
359 pch_err(adap, "Lost Arbitration\n");
360 adap->pch_event_flag = 0;
361 pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT);
362 pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
363 pch_i2c_init(adap);
364 return -EAGAIN;
365 }
366
367 adap->pch_event_flag = 0;
368
369 if (pch_i2c_getack(adap)) {
370 pch_dbg(adap, "Receive NACK for slave address"
371 "setting\n");
372 return -EIO;
373 }
374
375 return 0;
376}
377
376/** 378/**
377 * pch_i2c_repstart() - generate repeated start condition in normal mode 379 * pch_i2c_repstart() - generate repeated start condition in normal mode
378 * @adap: Pointer to struct i2c_algo_pch_data. 380 * @adap: Pointer to struct i2c_algo_pch_data.
@@ -427,27 +429,12 @@ static s32 pch_i2c_writebytes(struct i2c_adapter *i2c_adap,
427 if (first) 429 if (first)
428 pch_i2c_start(adap); 430 pch_i2c_start(adap);
429 431
430 rtn = pch_i2c_wait_for_xfer_complete(adap); 432 rtn = pch_i2c_wait_for_check_xfer(adap);
431 if (rtn == 0) { 433 if (rtn)
432 if (pch_i2c_getack(adap)) { 434 return rtn;
433 pch_dbg(adap, "Receive NACK for slave address" 435
434 "setting\n"); 436 addr_8_lsb = (addr & I2C_ADDR_MSK);
435 return -EIO; 437 iowrite32(addr_8_lsb, p + PCH_I2CDR);
436 }
437 addr_8_lsb = (addr & I2C_ADDR_MSK);
438 iowrite32(addr_8_lsb, p + PCH_I2CDR);
439 } else if (rtn == -EIO) { /* Arbitration Lost */
440 pch_err(adap, "Lost Arbitration\n");
441 pch_clrbit(adap->pch_base_address, PCH_I2CSR,
442 I2CMAL_BIT);
443 pch_clrbit(adap->pch_base_address, PCH_I2CSR,
444 I2CMIF_BIT);
445 pch_i2c_init(adap);
446 return -EAGAIN;
447 } else { /* wait-event timeout */
448 pch_i2c_stop(adap);
449 return -ETIME;
450 }
451 } else { 438 } else {
452 /* set 7 bit slave address and R/W bit as 0 */ 439 /* set 7 bit slave address and R/W bit as 0 */
453 iowrite32(addr << 1, p + PCH_I2CDR); 440 iowrite32(addr << 1, p + PCH_I2CDR);
@@ -455,44 +442,21 @@ static s32 pch_i2c_writebytes(struct i2c_adapter *i2c_adap,
455 pch_i2c_start(adap); 442 pch_i2c_start(adap);
456 } 443 }
457 444
458 rtn = pch_i2c_wait_for_xfer_complete(adap); 445 rtn = pch_i2c_wait_for_check_xfer(adap);
459 if (rtn == 0) { 446 if (rtn)
460 if (pch_i2c_getack(adap)) { 447 return rtn;
461 pch_dbg(adap, "Receive NACK for slave address"
462 "setting\n");
463 return -EIO;
464 }
465 } else if (rtn == -EIO) { /* Arbitration Lost */
466 pch_err(adap, "Lost Arbitration\n");
467 pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT);
468 pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
469 pch_i2c_init(adap);
470 return -EAGAIN;
471 } else { /* wait-event timeout */
472 pch_i2c_stop(adap);
473 return -ETIME;
474 }
475 448
476 for (wrcount = 0; wrcount < length; ++wrcount) { 449 for (wrcount = 0; wrcount < length; ++wrcount) {
477 /* write buffer value to I2C data register */ 450 /* write buffer value to I2C data register */
478 iowrite32(buf[wrcount], p + PCH_I2CDR); 451 iowrite32(buf[wrcount], p + PCH_I2CDR);
479 pch_dbg(adap, "writing %x to Data register\n", buf[wrcount]); 452 pch_dbg(adap, "writing %x to Data register\n", buf[wrcount]);
480 453
481 rtn = pch_i2c_wait_for_xfer_complete(adap); 454 rtn = pch_i2c_wait_for_check_xfer(adap);
482 if (rtn == 0) { 455 if (rtn)
483 if (pch_i2c_getack(adap)) { 456 return rtn;
484 pch_dbg(adap, "Receive NACK for slave address" 457
485 "setting\n"); 458 pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMCF_BIT);
486 return -EIO; 459 pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
487 }
488 pch_clrbit(adap->pch_base_address, PCH_I2CSR,
489 I2CMCF_BIT);
490 pch_clrbit(adap->pch_base_address, PCH_I2CSR,
491 I2CMIF_BIT);
492 } else { /* wait-event timeout */
493 pch_i2c_stop(adap);
494 return -ETIME;
495 }
496 } 460 }
497 461
498 /* check if this is the last message */ 462 /* check if this is the last message */
@@ -580,50 +544,21 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
580 if (first) 544 if (first)
581 pch_i2c_start(adap); 545 pch_i2c_start(adap);
582 546
583 rtn = pch_i2c_wait_for_xfer_complete(adap); 547 rtn = pch_i2c_wait_for_check_xfer(adap);
584 if (rtn == 0) { 548 if (rtn)
585 if (pch_i2c_getack(adap)) { 549 return rtn;
586 pch_dbg(adap, "Receive NACK for slave address" 550
587 "setting\n"); 551 addr_8_lsb = (addr & I2C_ADDR_MSK);
588 return -EIO; 552 iowrite32(addr_8_lsb, p + PCH_I2CDR);
589 } 553
590 addr_8_lsb = (addr & I2C_ADDR_MSK);
591 iowrite32(addr_8_lsb, p + PCH_I2CDR);
592 } else if (rtn == -EIO) { /* Arbitration Lost */
593 pch_err(adap, "Lost Arbitration\n");
594 pch_clrbit(adap->pch_base_address, PCH_I2CSR,
595 I2CMAL_BIT);
596 pch_clrbit(adap->pch_base_address, PCH_I2CSR,
597 I2CMIF_BIT);
598 pch_i2c_init(adap);
599 return -EAGAIN;
600 } else { /* wait-event timeout */
601 pch_i2c_stop(adap);
602 return -ETIME;
603 }
604 pch_i2c_restart(adap); 554 pch_i2c_restart(adap);
605 rtn = pch_i2c_wait_for_xfer_complete(adap); 555
606 if (rtn == 0) { 556 rtn = pch_i2c_wait_for_check_xfer(adap);
607 if (pch_i2c_getack(adap)) { 557 if (rtn)
608 pch_dbg(adap, "Receive NACK for slave address" 558 return rtn;
609 "setting\n"); 559
610 return -EIO; 560 addr_2_msb |= I2C_RD;
611 } 561 iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK, p + PCH_I2CDR);
612 addr_2_msb |= I2C_RD;
613 iowrite32(addr_2_msb | TEN_BIT_ADDR_MASK,
614 p + PCH_I2CDR);
615 } else if (rtn == -EIO) { /* Arbitration Lost */
616 pch_err(adap, "Lost Arbitration\n");
617 pch_clrbit(adap->pch_base_address, PCH_I2CSR,
618 I2CMAL_BIT);
619 pch_clrbit(adap->pch_base_address, PCH_I2CSR,
620 I2CMIF_BIT);
621 pch_i2c_init(adap);
622 return -EAGAIN;
623 } else { /* wait-event timeout */
624 pch_i2c_stop(adap);
625 return -ETIME;
626 }
627 } else { 562 } else {
628 /* 7 address bits + R/W bit */ 563 /* 7 address bits + R/W bit */
629 addr = (((addr) << 1) | (I2C_RD)); 564 addr = (((addr) << 1) | (I2C_RD));
@@ -634,23 +569,9 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
634 if (first) 569 if (first)
635 pch_i2c_start(adap); 570 pch_i2c_start(adap);
636 571
637 rtn = pch_i2c_wait_for_xfer_complete(adap); 572 rtn = pch_i2c_wait_for_check_xfer(adap);
638 if (rtn == 0) { 573 if (rtn)
639 if (pch_i2c_getack(adap)) { 574 return rtn;
640 pch_dbg(adap, "Receive NACK for slave address"
641 "setting\n");
642 return -EIO;
643 }
644 } else if (rtn == -EIO) { /* Arbitration Lost */
645 pch_err(adap, "Lost Arbitration\n");
646 pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMAL_BIT);
647 pch_clrbit(adap->pch_base_address, PCH_I2CSR, I2CMIF_BIT);
648 pch_i2c_init(adap);
649 return -EAGAIN;
650 } else { /* wait-event timeout */
651 pch_i2c_stop(adap);
652 return -ETIME;
653 }
654 575
655 if (length == 0) { 576 if (length == 0) {
656 pch_i2c_stop(adap); 577 pch_i2c_stop(adap);
@@ -669,18 +590,9 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
669 if (loop != 1) 590 if (loop != 1)
670 read_index++; 591 read_index++;
671 592
672 rtn = pch_i2c_wait_for_xfer_complete(adap); 593 rtn = pch_i2c_wait_for_check_xfer(adap);
673 if (rtn == 0) { 594 if (rtn)
674 if (pch_i2c_getack(adap)) { 595 return rtn;
675 pch_dbg(adap, "Receive NACK for slave"
676 "address setting\n");
677 return -EIO;
678 }
679 } else { /* wait-event timeout */
680 pch_i2c_stop(adap);
681 return -ETIME;
682 }
683
684 } /* end for */ 596 } /* end for */
685 597
686 pch_i2c_sendnack(adap); 598 pch_i2c_sendnack(adap);
@@ -690,17 +602,9 @@ static s32 pch_i2c_readbytes(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs,
690 if (length != 1) 602 if (length != 1)
691 read_index++; 603 read_index++;
692 604
693 rtn = pch_i2c_wait_for_xfer_complete(adap); 605 rtn = pch_i2c_wait_for_check_xfer(adap);
694 if (rtn == 0) { 606 if (rtn)
695 if (pch_i2c_getack(adap)) { 607 return rtn;
696 pch_dbg(adap, "Receive NACK for slave"
697 "address setting\n");
698 return -EIO;
699 }
700 } else { /* wait-event timeout */
701 pch_i2c_stop(adap);
702 return -ETIME;
703 }
704 608
705 if (last) 609 if (last)
706 pch_i2c_stop(adap); 610 pch_i2c_stop(adap);
@@ -790,7 +694,7 @@ static s32 pch_i2c_xfer(struct i2c_adapter *i2c_adap,
790 694
791 ret = mutex_lock_interruptible(&pch_mutex); 695 ret = mutex_lock_interruptible(&pch_mutex);
792 if (ret) 696 if (ret)
793 return -ERESTARTSYS; 697 return ret;
794 698
795 if (adap->p_adapter_info->pch_i2c_suspended) { 699 if (adap->p_adapter_info->pch_i2c_suspended) {
796 mutex_unlock(&pch_mutex); 700 mutex_unlock(&pch_mutex);
@@ -909,7 +813,7 @@ static int __devinit pch_i2c_probe(struct pci_dev *pdev,
909 813
910 pch_adap->owner = THIS_MODULE; 814 pch_adap->owner = THIS_MODULE;
911 pch_adap->class = I2C_CLASS_HWMON; 815 pch_adap->class = I2C_CLASS_HWMON;
912 strcpy(pch_adap->name, KBUILD_MODNAME); 816 strlcpy(pch_adap->name, KBUILD_MODNAME, sizeof(pch_adap->name));
913 pch_adap->algo = &pch_algorithm; 817 pch_adap->algo = &pch_algorithm;
914 pch_adap->algo_data = &adap_info->pch_data[i]; 818 pch_adap->algo_data = &adap_info->pch_data[i];
915 819
@@ -963,7 +867,7 @@ static void __devexit pch_i2c_remove(struct pci_dev *pdev)
963 pci_iounmap(pdev, adap_info->pch_data[0].pch_base_address); 867 pci_iounmap(pdev, adap_info->pch_data[0].pch_base_address);
964 868
965 for (i = 0; i < adap_info->ch_num; i++) 869 for (i = 0; i < adap_info->ch_num; i++)
966 adap_info->pch_data[i].pch_base_address = 0; 870 adap_info->pch_data[i].pch_base_address = NULL;
967 871
968 pci_set_drvdata(pdev, NULL); 872 pci_set_drvdata(pdev, NULL);
969 873
diff --git a/drivers/i2c/busses/i2c-gpio.c b/drivers/i2c/busses/i2c-gpio.c
index c0330a41db03..e62d2d938628 100644
--- a/drivers/i2c/busses/i2c-gpio.c
+++ b/drivers/i2c/busses/i2c-gpio.c
@@ -190,12 +190,7 @@ static int __devinit i2c_gpio_probe(struct platform_device *pdev)
190 adap->dev.parent = &pdev->dev; 190 adap->dev.parent = &pdev->dev;
191 adap->dev.of_node = pdev->dev.of_node; 191 adap->dev.of_node = pdev->dev.of_node;
192 192
193 /* 193 adap->nr = pdev->id;
194 * If "dev->id" is negative we consider it as zero.
195 * The reason to do so is to avoid sysfs names that only make
196 * sense when there are multiple adapters.
197 */
198 adap->nr = (pdev->id != -1) ? pdev->id : 0;
199 ret = i2c_bit_add_numbered_bus(adap); 194 ret = i2c_bit_add_numbered_bus(adap);
200 if (ret) 195 if (ret)
201 goto err_add_bus; 196 goto err_add_bus;
diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c
index 56bce9a8bcbb..8d6b504d65c4 100644
--- a/drivers/i2c/busses/i2c-imx.c
+++ b/drivers/i2c/busses/i2c-imx.c
@@ -512,7 +512,7 @@ static int __init i2c_imx_probe(struct platform_device *pdev)
512 } 512 }
513 513
514 /* Setup i2c_imx driver structure */ 514 /* Setup i2c_imx driver structure */
515 strcpy(i2c_imx->adapter.name, pdev->name); 515 strlcpy(i2c_imx->adapter.name, pdev->name, sizeof(i2c_imx->adapter.name));
516 i2c_imx->adapter.owner = THIS_MODULE; 516 i2c_imx->adapter.owner = THIS_MODULE;
517 i2c_imx->adapter.algo = &i2c_imx_algo; 517 i2c_imx->adapter.algo = &i2c_imx_algo;
518 i2c_imx->adapter.dev.parent = &pdev->dev; 518 i2c_imx->adapter.dev.parent = &pdev->dev;
diff --git a/drivers/i2c/busses/i2c-ixp2000.c b/drivers/i2c/busses/i2c-ixp2000.c
deleted file mode 100644
index 5d263f9014d6..000000000000
--- a/drivers/i2c/busses/i2c-ixp2000.c
+++ /dev/null
@@ -1,157 +0,0 @@
1/*
2 * drivers/i2c/busses/i2c-ixp2000.c
3 *
4 * I2C adapter for IXP2000 systems using GPIOs for I2C bus
5 *
6 * Author: Deepak Saxena <dsaxena@plexity.net>
7 * Based on IXDP2400 code by: Naeem M. Afzal <naeem.m.afzal@intel.com>
8 * Made generic by: Jeff Daly <jeffrey.daly@intel.com>
9 *
10 * Copyright (c) 2003-2004 MontaVista Software Inc.
11 *
12 * This file is licensed under the terms of the GNU General Public
13 * License version 2. This program is licensed "as is" without any
14 * warranty of any kind, whether express or implied.
15 *
16 * From Jeff Daly:
17 *
18 * I2C adapter driver for Intel IXDP2xxx platforms. This should work for any
19 * IXP2000 platform if it uses the HW GPIO in the same manner. Basically,
20 * SDA and SCL GPIOs have external pullups. Setting the respective GPIO to
21 * an input will make the signal a '1' via the pullup. Setting them to
22 * outputs will pull them down.
23 *
24 * The GPIOs are open drain signals and are used as configuration strap inputs
25 * during power-up so there's generally a buffer on the board that needs to be
26 * 'enabled' to drive the GPIOs.
27 */
28
29#include <linux/kernel.h>
30#include <linux/init.h>
31#include <linux/platform_device.h>
32#include <linux/module.h>
33#include <linux/i2c.h>
34#include <linux/i2c-algo-bit.h>
35#include <linux/slab.h>
36
37#include <mach/hardware.h> /* Pick up IXP2000-specific bits */
38#include <mach/gpio-ixp2000.h>
39
40static inline int ixp2000_scl_pin(void *data)
41{
42 return ((struct ixp2000_i2c_pins*)data)->scl_pin;
43}
44
45static inline int ixp2000_sda_pin(void *data)
46{
47 return ((struct ixp2000_i2c_pins*)data)->sda_pin;
48}
49
50
51static void ixp2000_bit_setscl(void *data, int val)
52{
53 int i = 5000;
54
55 if (val) {
56 gpio_line_config(ixp2000_scl_pin(data), GPIO_IN);
57 while(!gpio_line_get(ixp2000_scl_pin(data)) && i--);
58 } else {
59 gpio_line_config(ixp2000_scl_pin(data), GPIO_OUT);
60 }
61}
62
63static void ixp2000_bit_setsda(void *data, int val)
64{
65 if (val) {
66 gpio_line_config(ixp2000_sda_pin(data), GPIO_IN);
67 } else {
68 gpio_line_config(ixp2000_sda_pin(data), GPIO_OUT);
69 }
70}
71
72static int ixp2000_bit_getscl(void *data)
73{
74 return gpio_line_get(ixp2000_scl_pin(data));
75}
76
77static int ixp2000_bit_getsda(void *data)
78{
79 return gpio_line_get(ixp2000_sda_pin(data));
80}
81
82struct ixp2000_i2c_data {
83 struct ixp2000_i2c_pins *gpio_pins;
84 struct i2c_adapter adapter;
85 struct i2c_algo_bit_data algo_data;
86};
87
88static int ixp2000_i2c_remove(struct platform_device *plat_dev)
89{
90 struct ixp2000_i2c_data *drv_data = platform_get_drvdata(plat_dev);
91
92 platform_set_drvdata(plat_dev, NULL);
93
94 i2c_del_adapter(&drv_data->adapter);
95
96 kfree(drv_data);
97
98 return 0;
99}
100
101static int ixp2000_i2c_probe(struct platform_device *plat_dev)
102{
103 int err;
104 struct ixp2000_i2c_pins *gpio = plat_dev->dev.platform_data;
105 struct ixp2000_i2c_data *drv_data =
106 kzalloc(sizeof(struct ixp2000_i2c_data), GFP_KERNEL);
107
108 if (!drv_data)
109 return -ENOMEM;
110 drv_data->gpio_pins = gpio;
111
112 drv_data->algo_data.data = gpio;
113 drv_data->algo_data.setsda = ixp2000_bit_setsda;
114 drv_data->algo_data.setscl = ixp2000_bit_setscl;
115 drv_data->algo_data.getsda = ixp2000_bit_getsda;
116 drv_data->algo_data.getscl = ixp2000_bit_getscl;
117 drv_data->algo_data.udelay = 6;
118 drv_data->algo_data.timeout = HZ;
119
120 strlcpy(drv_data->adapter.name, plat_dev->dev.driver->name,
121 sizeof(drv_data->adapter.name));
122 drv_data->adapter.algo_data = &drv_data->algo_data,
123
124 drv_data->adapter.dev.parent = &plat_dev->dev;
125
126 gpio_line_config(gpio->sda_pin, GPIO_IN);
127 gpio_line_config(gpio->scl_pin, GPIO_IN);
128 gpio_line_set(gpio->scl_pin, 0);
129 gpio_line_set(gpio->sda_pin, 0);
130
131 if ((err = i2c_bit_add_bus(&drv_data->adapter)) != 0) {
132 dev_err(&plat_dev->dev, "Could not install, error %d\n", err);
133 kfree(drv_data);
134 return err;
135 }
136
137 platform_set_drvdata(plat_dev, drv_data);
138
139 return 0;
140}
141
142static struct platform_driver ixp2000_i2c_driver = {
143 .probe = ixp2000_i2c_probe,
144 .remove = ixp2000_i2c_remove,
145 .driver = {
146 .name = "IXP2000-I2C",
147 .owner = THIS_MODULE,
148 },
149};
150
151module_platform_driver(ixp2000_i2c_driver);
152
153MODULE_AUTHOR ("Deepak Saxena <dsaxena@plexity.net>");
154MODULE_DESCRIPTION("IXP2000 GPIO-based I2C bus driver");
155MODULE_LICENSE("GPL");
156MODULE_ALIAS("platform:IXP2000-I2C");
157
diff --git a/drivers/i2c/busses/i2c-mpc.c b/drivers/i2c/busses/i2c-mpc.c
index 206caacd30d7..b76731edbf10 100644
--- a/drivers/i2c/busses/i2c-mpc.c
+++ b/drivers/i2c/busses/i2c-mpc.c
@@ -64,6 +64,9 @@ struct mpc_i2c {
64 struct i2c_adapter adap; 64 struct i2c_adapter adap;
65 int irq; 65 int irq;
66 u32 real_clk; 66 u32 real_clk;
67#ifdef CONFIG_PM
68 u8 fdr, dfsrr;
69#endif
67}; 70};
68 71
69struct mpc_i2c_divider { 72struct mpc_i2c_divider {
@@ -703,6 +706,30 @@ static int __devexit fsl_i2c_remove(struct platform_device *op)
703 return 0; 706 return 0;
704}; 707};
705 708
709#ifdef CONFIG_PM
710static int mpc_i2c_suspend(struct device *dev)
711{
712 struct mpc_i2c *i2c = dev_get_drvdata(dev);
713
714 i2c->fdr = readb(i2c->base + MPC_I2C_FDR);
715 i2c->dfsrr = readb(i2c->base + MPC_I2C_DFSRR);
716
717 return 0;
718}
719
720static int mpc_i2c_resume(struct device *dev)
721{
722 struct mpc_i2c *i2c = dev_get_drvdata(dev);
723
724 writeb(i2c->fdr, i2c->base + MPC_I2C_FDR);
725 writeb(i2c->dfsrr, i2c->base + MPC_I2C_DFSRR);
726
727 return 0;
728}
729
730SIMPLE_DEV_PM_OPS(mpc_i2c_pm_ops, mpc_i2c_suspend, mpc_i2c_resume);
731#endif
732
706static struct mpc_i2c_data mpc_i2c_data_512x __devinitdata = { 733static struct mpc_i2c_data mpc_i2c_data_512x __devinitdata = {
707 .setup = mpc_i2c_setup_512x, 734 .setup = mpc_i2c_setup_512x,
708}; 735};
@@ -747,6 +774,9 @@ static struct platform_driver mpc_i2c_driver = {
747 .owner = THIS_MODULE, 774 .owner = THIS_MODULE,
748 .name = DRV_NAME, 775 .name = DRV_NAME,
749 .of_match_table = mpc_i2c_of_match, 776 .of_match_table = mpc_i2c_of_match,
777#ifdef CONFIG_PM
778 .pm = &mpc_i2c_pm_ops,
779#endif
750 }, 780 },
751}; 781};
752 782
diff --git a/drivers/i2c/busses/i2c-mxs.c b/drivers/i2c/busses/i2c-mxs.c
index 7fa73eed84a7..04eb441b6ce1 100644
--- a/drivers/i2c/busses/i2c-mxs.c
+++ b/drivers/i2c/busses/i2c-mxs.c
@@ -27,8 +27,10 @@
27#include <linux/jiffies.h> 27#include <linux/jiffies.h>
28#include <linux/io.h> 28#include <linux/io.h>
29#include <linux/pinctrl/consumer.h> 29#include <linux/pinctrl/consumer.h>
30 30#include <linux/stmp_device.h>
31#include <mach/common.h> 31#include <linux/of.h>
32#include <linux/of_device.h>
33#include <linux/of_i2c.h>
32 34
33#define DRIVER_NAME "mxs-i2c" 35#define DRIVER_NAME "mxs-i2c"
34 36
@@ -112,13 +114,9 @@ struct mxs_i2c_dev {
112 struct i2c_adapter adapter; 114 struct i2c_adapter adapter;
113}; 115};
114 116
115/*
116 * TODO: check if calls to here are really needed. If not, we could get rid of
117 * mxs_reset_block and the mach-dependency. Needs an I2C analyzer, probably.
118 */
119static void mxs_i2c_reset(struct mxs_i2c_dev *i2c) 117static void mxs_i2c_reset(struct mxs_i2c_dev *i2c)
120{ 118{
121 mxs_reset_block(i2c->regs); 119 stmp_reset_block(i2c->regs);
122 writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET); 120 writel(MXS_I2C_IRQ_MASK << 8, i2c->regs + MXS_I2C_CTRL1_SET);
123 writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE, 121 writel(MXS_I2C_QUEUECTRL_PIO_QUEUE_MODE,
124 i2c->regs + MXS_I2C_QUEUECTRL_SET); 122 i2c->regs + MXS_I2C_QUEUECTRL_SET);
@@ -371,6 +369,7 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
371 adap->algo = &mxs_i2c_algo; 369 adap->algo = &mxs_i2c_algo;
372 adap->dev.parent = dev; 370 adap->dev.parent = dev;
373 adap->nr = pdev->id; 371 adap->nr = pdev->id;
372 adap->dev.of_node = pdev->dev.of_node;
374 i2c_set_adapdata(adap, i2c); 373 i2c_set_adapdata(adap, i2c);
375 err = i2c_add_numbered_adapter(adap); 374 err = i2c_add_numbered_adapter(adap);
376 if (err) { 375 if (err) {
@@ -380,6 +379,8 @@ static int __devinit mxs_i2c_probe(struct platform_device *pdev)
380 return err; 379 return err;
381 } 380 }
382 381
382 of_i2c_register_devices(adap);
383
383 return 0; 384 return 0;
384} 385}
385 386
@@ -399,10 +400,17 @@ static int __devexit mxs_i2c_remove(struct platform_device *pdev)
399 return 0; 400 return 0;
400} 401}
401 402
403static const struct of_device_id mxs_i2c_dt_ids[] = {
404 { .compatible = "fsl,imx28-i2c", },
405 { /* sentinel */ }
406};
407MODULE_DEVICE_TABLE(of, mxs_i2c_dt_ids);
408
402static struct platform_driver mxs_i2c_driver = { 409static struct platform_driver mxs_i2c_driver = {
403 .driver = { 410 .driver = {
404 .name = DRIVER_NAME, 411 .name = DRIVER_NAME,
405 .owner = THIS_MODULE, 412 .owner = THIS_MODULE,
413 .of_match_table = mxs_i2c_dt_ids,
406 }, 414 },
407 .remove = __devexit_p(mxs_i2c_remove), 415 .remove = __devexit_p(mxs_i2c_remove),
408}; 416};
diff --git a/drivers/i2c/busses/i2c-ocores.c b/drivers/i2c/busses/i2c-ocores.c
index 18068dee48f1..75194c579b6d 100644
--- a/drivers/i2c/busses/i2c-ocores.c
+++ b/drivers/i2c/busses/i2c-ocores.c
@@ -55,6 +55,7 @@
55#include <linux/i2c-ocores.h> 55#include <linux/i2c-ocores.h>
56#include <linux/slab.h> 56#include <linux/slab.h>
57#include <linux/io.h> 57#include <linux/io.h>
58#include <linux/of_i2c.h>
58 59
59struct ocores_i2c { 60struct ocores_i2c {
60 void __iomem *base; 61 void __iomem *base;
@@ -343,6 +344,8 @@ static int __devinit ocores_i2c_probe(struct platform_device *pdev)
343 if (pdata) { 344 if (pdata) {
344 for (i = 0; i < pdata->num_devices; i++) 345 for (i = 0; i < pdata->num_devices; i++)
345 i2c_new_device(&i2c->adap, pdata->devices + i); 346 i2c_new_device(&i2c->adap, pdata->devices + i);
347 } else {
348 of_i2c_register_devices(&i2c->adap);
346 } 349 }
347 350
348 return 0; 351 return 0;
diff --git a/drivers/i2c/busses/i2c-pca-platform.c b/drivers/i2c/busses/i2c-pca-platform.c
index 2adbf1a8fdea..675878f49f76 100644
--- a/drivers/i2c/busses/i2c-pca-platform.c
+++ b/drivers/i2c/busses/i2c-pca-platform.c
@@ -171,7 +171,7 @@ static int __devinit i2c_pca_pf_probe(struct platform_device *pdev)
171 i2c->io_size = resource_size(res); 171 i2c->io_size = resource_size(res);
172 i2c->irq = irq; 172 i2c->irq = irq;
173 173
174 i2c->adap.nr = pdev->id >= 0 ? pdev->id : 0; 174 i2c->adap.nr = pdev->id;
175 i2c->adap.owner = THIS_MODULE; 175 i2c->adap.owner = THIS_MODULE;
176 snprintf(i2c->adap.name, sizeof(i2c->adap.name), 176 snprintf(i2c->adap.name, sizeof(i2c->adap.name),
177 "PCA9564/PCA9665 at 0x%08lx", 177 "PCA9564/PCA9665 at 0x%08lx",
diff --git a/drivers/i2c/busses/i2c-pxa.c b/drivers/i2c/busses/i2c-pxa.c
index f6733267fa9c..a997c7d3f95d 100644
--- a/drivers/i2c/busses/i2c-pxa.c
+++ b/drivers/i2c/busses/i2c-pxa.c
@@ -1131,11 +1131,6 @@ static int i2c_pxa_probe(struct platform_device *dev)
1131 spin_lock_init(&i2c->lock); 1131 spin_lock_init(&i2c->lock);
1132 init_waitqueue_head(&i2c->wait); 1132 init_waitqueue_head(&i2c->wait);
1133 1133
1134 /*
1135 * If "dev->id" is negative we consider it as zero.
1136 * The reason to do so is to avoid sysfs names that only make
1137 * sense when there are multiple adapters.
1138 */
1139 i2c->adap.nr = dev->id; 1134 i2c->adap.nr = dev->id;
1140 snprintf(i2c->adap.name, sizeof(i2c->adap.name), "pxa_i2c-i2c.%u", 1135 snprintf(i2c->adap.name, sizeof(i2c->adap.name), "pxa_i2c-i2c.%u",
1141 i2c->adap.nr); 1136 i2c->adap.nr);
diff --git a/drivers/i2c/busses/i2c-s3c2410.c b/drivers/i2c/busses/i2c-s3c2410.c
index 737f7218a32c..fa0b13490873 100644
--- a/drivers/i2c/busses/i2c-s3c2410.c
+++ b/drivers/i2c/busses/i2c-s3c2410.c
@@ -44,8 +44,12 @@
44#include <plat/regs-iic.h> 44#include <plat/regs-iic.h>
45#include <plat/iic.h> 45#include <plat/iic.h>
46 46
47/* i2c controller state */ 47/* Treat S3C2410 as baseline hardware, anything else is supported via quirks */
48#define QUIRK_S3C2440 (1 << 0)
49#define QUIRK_HDMIPHY (1 << 1)
50#define QUIRK_NO_GPIO (1 << 2)
48 51
52/* i2c controller state */
49enum s3c24xx_i2c_state { 53enum s3c24xx_i2c_state {
50 STATE_IDLE, 54 STATE_IDLE,
51 STATE_START, 55 STATE_START,
@@ -54,14 +58,10 @@ enum s3c24xx_i2c_state {
54 STATE_STOP 58 STATE_STOP
55}; 59};
56 60
57enum s3c24xx_i2c_type {
58 TYPE_S3C2410,
59 TYPE_S3C2440,
60};
61
62struct s3c24xx_i2c { 61struct s3c24xx_i2c {
63 spinlock_t lock; 62 spinlock_t lock;
64 wait_queue_head_t wait; 63 wait_queue_head_t wait;
64 unsigned int quirks;
65 unsigned int suspended:1; 65 unsigned int suspended:1;
66 66
67 struct i2c_msg *msg; 67 struct i2c_msg *msg;
@@ -88,26 +88,45 @@ struct s3c24xx_i2c {
88#endif 88#endif
89}; 89};
90 90
91/* default platform data removed, dev should always carry data. */ 91static struct platform_device_id s3c24xx_driver_ids[] = {
92 {
93 .name = "s3c2410-i2c",
94 .driver_data = 0,
95 }, {
96 .name = "s3c2440-i2c",
97 .driver_data = QUIRK_S3C2440,
98 }, {
99 .name = "s3c2440-hdmiphy-i2c",
100 .driver_data = QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO,
101 }, { },
102};
103MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
104
105#ifdef CONFIG_OF
106static const struct of_device_id s3c24xx_i2c_match[] = {
107 { .compatible = "samsung,s3c2410-i2c", .data = (void *)0 },
108 { .compatible = "samsung,s3c2440-i2c", .data = (void *)QUIRK_S3C2440 },
109 { .compatible = "samsung,s3c2440-hdmiphy-i2c",
110 .data = (void *)(QUIRK_S3C2440 | QUIRK_HDMIPHY | QUIRK_NO_GPIO) },
111 {},
112};
113MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
114#endif
92 115
93/* s3c24xx_i2c_is2440() 116/* s3c24xx_get_device_quirks
94 * 117 *
95 * return true is this is an s3c2440 118 * Get controller type either from device tree or platform device variant.
96*/ 119*/
97 120
98static inline int s3c24xx_i2c_is2440(struct s3c24xx_i2c *i2c) 121static inline unsigned int s3c24xx_get_device_quirks(struct platform_device *pdev)
99{ 122{
100 struct platform_device *pdev = to_platform_device(i2c->dev); 123 if (pdev->dev.of_node) {
101 enum s3c24xx_i2c_type type; 124 const struct of_device_id *match;
102 125 match = of_match_node(&s3c24xx_i2c_match, pdev->dev.of_node);
103#ifdef CONFIG_OF 126 return (unsigned int)match->data;
104 if (i2c->dev->of_node) 127 }
105 return of_device_is_compatible(i2c->dev->of_node,
106 "samsung,s3c2440-i2c");
107#endif
108 128
109 type = platform_get_device_id(pdev)->driver_data; 129 return platform_get_device_id(pdev)->driver_data;
110 return type == TYPE_S3C2440;
111} 130}
112 131
113/* s3c24xx_i2c_master_complete 132/* s3c24xx_i2c_master_complete
@@ -471,6 +490,13 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
471 unsigned long iicstat; 490 unsigned long iicstat;
472 int timeout = 400; 491 int timeout = 400;
473 492
493 /* the timeout for HDMIPHY is reduced to 10 ms because
494 * the hangup is expected to happen, so waiting 400 ms
495 * causes only unnecessary system hangup
496 */
497 if (i2c->quirks & QUIRK_HDMIPHY)
498 timeout = 10;
499
474 while (timeout-- > 0) { 500 while (timeout-- > 0) {
475 iicstat = readl(i2c->regs + S3C2410_IICSTAT); 501 iicstat = readl(i2c->regs + S3C2410_IICSTAT);
476 502
@@ -480,6 +506,15 @@ static int s3c24xx_i2c_set_master(struct s3c24xx_i2c *i2c)
480 msleep(1); 506 msleep(1);
481 } 507 }
482 508
509 /* hang-up of bus dedicated for HDMIPHY occurred, resetting */
510 if (i2c->quirks & QUIRK_HDMIPHY) {
511 writel(0, i2c->regs + S3C2410_IICCON);
512 writel(0, i2c->regs + S3C2410_IICSTAT);
513 writel(0, i2c->regs + S3C2410_IICDS);
514
515 return 0;
516 }
517
483 return -ETIMEDOUT; 518 return -ETIMEDOUT;
484} 519}
485 520
@@ -676,7 +711,7 @@ static int s3c24xx_i2c_clockrate(struct s3c24xx_i2c *i2c, unsigned int *got)
676 711
677 writel(iiccon, i2c->regs + S3C2410_IICCON); 712 writel(iiccon, i2c->regs + S3C2410_IICCON);
678 713
679 if (s3c24xx_i2c_is2440(i2c)) { 714 if (i2c->quirks & QUIRK_S3C2440) {
680 unsigned long sda_delay; 715 unsigned long sda_delay;
681 716
682 if (pdata->sda_delay) { 717 if (pdata->sda_delay) {
@@ -761,6 +796,9 @@ static int s3c24xx_i2c_parse_dt_gpio(struct s3c24xx_i2c *i2c)
761{ 796{
762 int idx, gpio, ret; 797 int idx, gpio, ret;
763 798
799 if (i2c->quirks & QUIRK_NO_GPIO)
800 return 0;
801
764 for (idx = 0; idx < 2; idx++) { 802 for (idx = 0; idx < 2; idx++) {
765 gpio = of_get_gpio(i2c->dev->of_node, idx); 803 gpio = of_get_gpio(i2c->dev->of_node, idx);
766 if (!gpio_is_valid(gpio)) { 804 if (!gpio_is_valid(gpio)) {
@@ -785,6 +823,10 @@ free_gpio:
785static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c) 823static void s3c24xx_i2c_dt_gpio_free(struct s3c24xx_i2c *i2c)
786{ 824{
787 unsigned int idx; 825 unsigned int idx;
826
827 if (i2c->quirks & QUIRK_NO_GPIO)
828 return;
829
788 for (idx = 0; idx < 2; idx++) 830 for (idx = 0; idx < 2; idx++)
789 gpio_free(i2c->gpios[idx]); 831 gpio_free(i2c->gpios[idx]);
790} 832}
@@ -906,6 +948,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
906 goto err_noclk; 948 goto err_noclk;
907 } 949 }
908 950
951 i2c->quirks = s3c24xx_get_device_quirks(pdev);
909 if (pdata) 952 if (pdata)
910 memcpy(i2c->pdata, pdata, sizeof(*pdata)); 953 memcpy(i2c->pdata, pdata, sizeof(*pdata));
911 else 954 else
@@ -1110,28 +1153,6 @@ static const struct dev_pm_ops s3c24xx_i2c_dev_pm_ops = {
1110 1153
1111/* device driver for platform bus bits */ 1154/* device driver for platform bus bits */
1112 1155
1113static struct platform_device_id s3c24xx_driver_ids[] = {
1114 {
1115 .name = "s3c2410-i2c",
1116 .driver_data = TYPE_S3C2410,
1117 }, {
1118 .name = "s3c2440-i2c",
1119 .driver_data = TYPE_S3C2440,
1120 }, { },
1121};
1122MODULE_DEVICE_TABLE(platform, s3c24xx_driver_ids);
1123
1124#ifdef CONFIG_OF
1125static const struct of_device_id s3c24xx_i2c_match[] = {
1126 { .compatible = "samsung,s3c2410-i2c" },
1127 { .compatible = "samsung,s3c2440-i2c" },
1128 {},
1129};
1130MODULE_DEVICE_TABLE(of, s3c24xx_i2c_match);
1131#else
1132#define s3c24xx_i2c_match NULL
1133#endif
1134
1135static struct platform_driver s3c24xx_i2c_driver = { 1156static struct platform_driver s3c24xx_i2c_driver = {
1136 .probe = s3c24xx_i2c_probe, 1157 .probe = s3c24xx_i2c_probe,
1137 .remove = s3c24xx_i2c_remove, 1158 .remove = s3c24xx_i2c_remove,
@@ -1140,7 +1161,7 @@ static struct platform_driver s3c24xx_i2c_driver = {
1140 .owner = THIS_MODULE, 1161 .owner = THIS_MODULE,
1141 .name = "s3c-i2c", 1162 .name = "s3c-i2c",
1142 .pm = S3C24XX_DEV_PM_OPS, 1163 .pm = S3C24XX_DEV_PM_OPS,
1143 .of_match_table = s3c24xx_i2c_match, 1164 .of_match_table = of_match_ptr(s3c24xx_i2c_match),
1144 }, 1165 },
1145}; 1166};
1146 1167
diff --git a/drivers/i2c/busses/i2c-sh_mobile.c b/drivers/i2c/busses/i2c-sh_mobile.c
index 675c9692d148..8110ca45f342 100644
--- a/drivers/i2c/busses/i2c-sh_mobile.c
+++ b/drivers/i2c/busses/i2c-sh_mobile.c
@@ -27,6 +27,7 @@
27#include <linux/platform_device.h> 27#include <linux/platform_device.h>
28#include <linux/interrupt.h> 28#include <linux/interrupt.h>
29#include <linux/i2c.h> 29#include <linux/i2c.h>
30#include <linux/of_i2c.h>
30#include <linux/err.h> 31#include <linux/err.h>
31#include <linux/pm_runtime.h> 32#include <linux/pm_runtime.h>
32#include <linux/clk.h> 33#include <linux/clk.h>
@@ -653,6 +654,7 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
653 adap->dev.parent = &dev->dev; 654 adap->dev.parent = &dev->dev;
654 adap->retries = 5; 655 adap->retries = 5;
655 adap->nr = dev->id; 656 adap->nr = dev->id;
657 adap->dev.of_node = dev->dev.of_node;
656 658
657 strlcpy(adap->name, dev->name, sizeof(adap->name)); 659 strlcpy(adap->name, dev->name, sizeof(adap->name));
658 660
@@ -667,6 +669,8 @@ static int sh_mobile_i2c_probe(struct platform_device *dev)
667 669
668 dev_info(&dev->dev, "I2C adapter %d with bus speed %lu Hz\n", 670 dev_info(&dev->dev, "I2C adapter %d with bus speed %lu Hz\n",
669 adap->nr, pd->bus_speed); 671 adap->nr, pd->bus_speed);
672
673 of_i2c_register_devices(adap);
670 return 0; 674 return 0;
671 675
672 err_all: 676 err_all:
@@ -710,11 +714,18 @@ static const struct dev_pm_ops sh_mobile_i2c_dev_pm_ops = {
710 .runtime_resume = sh_mobile_i2c_runtime_nop, 714 .runtime_resume = sh_mobile_i2c_runtime_nop,
711}; 715};
712 716
717static const struct of_device_id sh_mobile_i2c_dt_ids[] __devinitconst = {
718 { .compatible = "renesas,rmobile-iic", },
719 {},
720};
721MODULE_DEVICE_TABLE(of, sh_mobile_i2c_dt_ids);
722
713static struct platform_driver sh_mobile_i2c_driver = { 723static struct platform_driver sh_mobile_i2c_driver = {
714 .driver = { 724 .driver = {
715 .name = "i2c-sh_mobile", 725 .name = "i2c-sh_mobile",
716 .owner = THIS_MODULE, 726 .owner = THIS_MODULE,
717 .pm = &sh_mobile_i2c_dev_pm_ops, 727 .pm = &sh_mobile_i2c_dev_pm_ops,
728 .of_match_table = sh_mobile_i2c_dt_ids,
718 }, 729 },
719 .probe = sh_mobile_i2c_probe, 730 .probe = sh_mobile_i2c_probe,
720 .remove = sh_mobile_i2c_remove, 731 .remove = sh_mobile_i2c_remove,
diff --git a/drivers/i2c/busses/i2c-tegra.c b/drivers/i2c/busses/i2c-tegra.c
index 55e5ea62ccee..8b2e555a9563 100644
--- a/drivers/i2c/busses/i2c-tegra.c
+++ b/drivers/i2c/busses/i2c-tegra.c
@@ -401,8 +401,6 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
401 disable_irq_nosync(i2c_dev->irq); 401 disable_irq_nosync(i2c_dev->irq);
402 i2c_dev->irq_disabled = 1; 402 i2c_dev->irq_disabled = 1;
403 } 403 }
404
405 complete(&i2c_dev->msg_complete);
406 goto err; 404 goto err;
407 } 405 }
408 406
@@ -411,7 +409,6 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
411 i2c_dev->msg_err |= I2C_ERR_NO_ACK; 409 i2c_dev->msg_err |= I2C_ERR_NO_ACK;
412 if (status & I2C_INT_ARBITRATION_LOST) 410 if (status & I2C_INT_ARBITRATION_LOST)
413 i2c_dev->msg_err |= I2C_ERR_ARBITRATION_LOST; 411 i2c_dev->msg_err |= I2C_ERR_ARBITRATION_LOST;
414 complete(&i2c_dev->msg_complete);
415 goto err; 412 goto err;
416 } 413 }
417 414
@@ -429,14 +426,14 @@ static irqreturn_t tegra_i2c_isr(int irq, void *dev_id)
429 tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ); 426 tegra_i2c_mask_irq(i2c_dev, I2C_INT_TX_FIFO_DATA_REQ);
430 } 427 }
431 428
429 i2c_writel(i2c_dev, status, I2C_INT_STATUS);
430 if (i2c_dev->is_dvc)
431 dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
432
432 if (status & I2C_INT_PACKET_XFER_COMPLETE) { 433 if (status & I2C_INT_PACKET_XFER_COMPLETE) {
433 BUG_ON(i2c_dev->msg_buf_remaining); 434 BUG_ON(i2c_dev->msg_buf_remaining);
434 complete(&i2c_dev->msg_complete); 435 complete(&i2c_dev->msg_complete);
435 } 436 }
436
437 i2c_writel(i2c_dev, status, I2C_INT_STATUS);
438 if (i2c_dev->is_dvc)
439 dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
440 return IRQ_HANDLED; 437 return IRQ_HANDLED;
441err: 438err:
442 /* An error occurred, mask all interrupts */ 439 /* An error occurred, mask all interrupts */
@@ -446,6 +443,8 @@ err:
446 i2c_writel(i2c_dev, status, I2C_INT_STATUS); 443 i2c_writel(i2c_dev, status, I2C_INT_STATUS);
447 if (i2c_dev->is_dvc) 444 if (i2c_dev->is_dvc)
448 dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS); 445 dvc_writel(i2c_dev, DVC_STATUS_I2C_DONE_INTR, DVC_STATUS);
446
447 complete(&i2c_dev->msg_complete);
449 return IRQ_HANDLED; 448 return IRQ_HANDLED;
450} 449}
451 450
@@ -476,12 +475,15 @@ static int tegra_i2c_xfer_msg(struct tegra_i2c_dev *i2c_dev,
476 packet_header = msg->len - 1; 475 packet_header = msg->len - 1;
477 i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO); 476 i2c_writel(i2c_dev, packet_header, I2C_TX_FIFO);
478 477
479 packet_header = msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT; 478 packet_header = I2C_HEADER_IE_ENABLE;
480 packet_header |= I2C_HEADER_IE_ENABLE;
481 if (!stop) 479 if (!stop)
482 packet_header |= I2C_HEADER_REPEAT_START; 480 packet_header |= I2C_HEADER_REPEAT_START;
483 if (msg->flags & I2C_M_TEN) 481 if (msg->flags & I2C_M_TEN) {
482 packet_header |= msg->addr;
484 packet_header |= I2C_HEADER_10BIT_ADDR; 483 packet_header |= I2C_HEADER_10BIT_ADDR;
484 } else {
485 packet_header |= msg->addr << I2C_HEADER_SLAVE_ADDR_SHIFT;
486 }
485 if (msg->flags & I2C_M_IGNORE_NAK) 487 if (msg->flags & I2C_M_IGNORE_NAK)
486 packet_header |= I2C_HEADER_CONT_ON_NAK; 488 packet_header |= I2C_HEADER_CONT_ON_NAK;
487 if (msg->flags & I2C_M_RD) 489 if (msg->flags & I2C_M_RD)
@@ -557,7 +559,7 @@ static int tegra_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg msgs[],
557 559
558static u32 tegra_i2c_func(struct i2c_adapter *adap) 560static u32 tegra_i2c_func(struct i2c_adapter *adap)
559{ 561{
560 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; 562 return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL | I2C_FUNC_10BIT_ADDR;
561} 563}
562 564
563static const struct i2c_algorithm tegra_i2c_algo = { 565static const struct i2c_algorithm tegra_i2c_algo = {
diff --git a/drivers/i2c/busses/i2c-versatile.c b/drivers/i2c/busses/i2c-versatile.c
index f585aead50cc..eec20db6246f 100644
--- a/drivers/i2c/busses/i2c-versatile.c
+++ b/drivers/i2c/busses/i2c-versatile.c
@@ -104,13 +104,8 @@ static int i2c_versatile_probe(struct platform_device *dev)
104 i2c->algo = i2c_versatile_algo; 104 i2c->algo = i2c_versatile_algo;
105 i2c->algo.data = i2c; 105 i2c->algo.data = i2c;
106 106
107 if (dev->id >= 0) { 107 i2c->adap.nr = dev->id;
108 /* static bus numbering */ 108 ret = i2c_bit_add_numbered_bus(&i2c->adap);
109 i2c->adap.nr = dev->id;
110 ret = i2c_bit_add_numbered_bus(&i2c->adap);
111 } else
112 /* dynamic bus numbering */
113 ret = i2c_bit_add_bus(&i2c->adap);
114 if (ret >= 0) { 109 if (ret >= 0) {
115 platform_set_drvdata(dev, i2c); 110 platform_set_drvdata(dev, i2c);
116 of_i2c_register_devices(&i2c->adap); 111 of_i2c_register_devices(&i2c->adap);
diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c
index 2bded7647ef2..641d0e5e3303 100644
--- a/drivers/i2c/busses/i2c-xiic.c
+++ b/drivers/i2c/busses/i2c-xiic.c
@@ -40,6 +40,7 @@
40#include <linux/i2c-xiic.h> 40#include <linux/i2c-xiic.h>
41#include <linux/io.h> 41#include <linux/io.h>
42#include <linux/slab.h> 42#include <linux/slab.h>
43#include <linux/of_i2c.h>
43 44
44#define DRIVER_NAME "xiic-i2c" 45#define DRIVER_NAME "xiic-i2c"
45 46
@@ -705,8 +706,6 @@ static int __devinit xiic_i2c_probe(struct platform_device *pdev)
705 goto resource_missing; 706 goto resource_missing;
706 707
707 pdata = (struct xiic_i2c_platform_data *) pdev->dev.platform_data; 708 pdata = (struct xiic_i2c_platform_data *) pdev->dev.platform_data;
708 if (!pdata)
709 return -EINVAL;
710 709
711 i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); 710 i2c = kzalloc(sizeof(*i2c), GFP_KERNEL);
712 if (!i2c) 711 if (!i2c)
@@ -730,6 +729,7 @@ static int __devinit xiic_i2c_probe(struct platform_device *pdev)
730 i2c->adap = xiic_adapter; 729 i2c->adap = xiic_adapter;
731 i2c_set_adapdata(&i2c->adap, i2c); 730 i2c_set_adapdata(&i2c->adap, i2c);
732 i2c->adap.dev.parent = &pdev->dev; 731 i2c->adap.dev.parent = &pdev->dev;
732 i2c->adap.dev.of_node = pdev->dev.of_node;
733 733
734 xiic_reinit(i2c); 734 xiic_reinit(i2c);
735 735
@@ -748,9 +748,13 @@ static int __devinit xiic_i2c_probe(struct platform_device *pdev)
748 goto add_adapter_failed; 748 goto add_adapter_failed;
749 } 749 }
750 750
751 /* add in known devices to the bus */ 751 if (pdata) {
752 for (i = 0; i < pdata->num_devices; i++) 752 /* add in known devices to the bus */
753 i2c_new_device(&i2c->adap, pdata->devices + i); 753 for (i = 0; i < pdata->num_devices; i++)
754 i2c_new_device(&i2c->adap, pdata->devices + i);
755 }
756
757 of_i2c_register_devices(&i2c->adap);
754 758
755 return 0; 759 return 0;
756 760
@@ -795,12 +799,21 @@ static int __devexit xiic_i2c_remove(struct platform_device* pdev)
795 return 0; 799 return 0;
796} 800}
797 801
802#if defined(CONFIG_OF)
803static const struct of_device_id xiic_of_match[] __devinitconst = {
804 { .compatible = "xlnx,xps-iic-2.00.a", },
805 {},
806};
807MODULE_DEVICE_TABLE(of, xiic_of_match);
808#endif
809
798static struct platform_driver xiic_i2c_driver = { 810static struct platform_driver xiic_i2c_driver = {
799 .probe = xiic_i2c_probe, 811 .probe = xiic_i2c_probe,
800 .remove = __devexit_p(xiic_i2c_remove), 812 .remove = __devexit_p(xiic_i2c_remove),
801 .driver = { 813 .driver = {
802 .owner = THIS_MODULE, 814 .owner = THIS_MODULE,
803 .name = DRIVER_NAME, 815 .name = DRIVER_NAME,
816 .of_match_table = of_match_ptr(xiic_of_match),
804 }, 817 },
805}; 818};
806 819
diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c
index feb7dc359186..a6ad32bc0a96 100644
--- a/drivers/i2c/i2c-core.c
+++ b/drivers/i2c/i2c-core.c
@@ -772,6 +772,23 @@ struct device_type i2c_adapter_type = {
772}; 772};
773EXPORT_SYMBOL_GPL(i2c_adapter_type); 773EXPORT_SYMBOL_GPL(i2c_adapter_type);
774 774
775/**
776 * i2c_verify_adapter - return parameter as i2c_adapter or NULL
777 * @dev: device, probably from some driver model iterator
778 *
779 * When traversing the driver model tree, perhaps using driver model
780 * iterators like @device_for_each_child(), you can't assume very much
781 * about the nodes you find. Use this function to avoid oopses caused
782 * by wrongly treating some non-I2C device as an i2c_adapter.
783 */
784struct i2c_adapter *i2c_verify_adapter(struct device *dev)
785{
786 return (dev->type == &i2c_adapter_type)
787 ? to_i2c_adapter(dev)
788 : NULL;
789}
790EXPORT_SYMBOL(i2c_verify_adapter);
791
775#ifdef CONFIG_I2C_COMPAT 792#ifdef CONFIG_I2C_COMPAT
776static struct class_compat *i2c_adapter_compat_class; 793static struct class_compat *i2c_adapter_compat_class;
777#endif 794#endif
diff --git a/drivers/i2c/i2c-mux.c b/drivers/i2c/i2c-mux.c
index d7a4833be416..1038c381aea5 100644
--- a/drivers/i2c/i2c-mux.c
+++ b/drivers/i2c/i2c-mux.c
@@ -24,6 +24,8 @@
24#include <linux/slab.h> 24#include <linux/slab.h>
25#include <linux/i2c.h> 25#include <linux/i2c.h>
26#include <linux/i2c-mux.h> 26#include <linux/i2c-mux.h>
27#include <linux/of.h>
28#include <linux/of_i2c.h>
27 29
28/* multiplexer per channel data */ 30/* multiplexer per channel data */
29struct i2c_mux_priv { 31struct i2c_mux_priv {
@@ -31,11 +33,11 @@ struct i2c_mux_priv {
31 struct i2c_algorithm algo; 33 struct i2c_algorithm algo;
32 34
33 struct i2c_adapter *parent; 35 struct i2c_adapter *parent;
34 void *mux_dev; /* the mux chip/device */ 36 void *mux_priv; /* the mux chip/device */
35 u32 chan_id; /* the channel id */ 37 u32 chan_id; /* the channel id */
36 38
37 int (*select)(struct i2c_adapter *, void *mux_dev, u32 chan_id); 39 int (*select)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
38 int (*deselect)(struct i2c_adapter *, void *mux_dev, u32 chan_id); 40 int (*deselect)(struct i2c_adapter *, void *mux_priv, u32 chan_id);
39}; 41};
40 42
41static int i2c_mux_master_xfer(struct i2c_adapter *adap, 43static int i2c_mux_master_xfer(struct i2c_adapter *adap,
@@ -47,11 +49,11 @@ static int i2c_mux_master_xfer(struct i2c_adapter *adap,
47 49
48 /* Switch to the right mux port and perform the transfer. */ 50 /* Switch to the right mux port and perform the transfer. */
49 51
50 ret = priv->select(parent, priv->mux_dev, priv->chan_id); 52 ret = priv->select(parent, priv->mux_priv, priv->chan_id);
51 if (ret >= 0) 53 if (ret >= 0)
52 ret = parent->algo->master_xfer(parent, msgs, num); 54 ret = parent->algo->master_xfer(parent, msgs, num);
53 if (priv->deselect) 55 if (priv->deselect)
54 priv->deselect(parent, priv->mux_dev, priv->chan_id); 56 priv->deselect(parent, priv->mux_priv, priv->chan_id);
55 57
56 return ret; 58 return ret;
57} 59}
@@ -67,12 +69,12 @@ static int i2c_mux_smbus_xfer(struct i2c_adapter *adap,
67 69
68 /* Select the right mux port and perform the transfer. */ 70 /* Select the right mux port and perform the transfer. */
69 71
70 ret = priv->select(parent, priv->mux_dev, priv->chan_id); 72 ret = priv->select(parent, priv->mux_priv, priv->chan_id);
71 if (ret >= 0) 73 if (ret >= 0)
72 ret = parent->algo->smbus_xfer(parent, addr, flags, 74 ret = parent->algo->smbus_xfer(parent, addr, flags,
73 read_write, command, size, data); 75 read_write, command, size, data);
74 if (priv->deselect) 76 if (priv->deselect)
75 priv->deselect(parent, priv->mux_dev, priv->chan_id); 77 priv->deselect(parent, priv->mux_priv, priv->chan_id);
76 78
77 return ret; 79 return ret;
78} 80}
@@ -87,7 +89,8 @@ static u32 i2c_mux_functionality(struct i2c_adapter *adap)
87} 89}
88 90
89struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent, 91struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
90 void *mux_dev, u32 force_nr, u32 chan_id, 92 struct device *mux_dev,
93 void *mux_priv, u32 force_nr, u32 chan_id,
91 int (*select) (struct i2c_adapter *, 94 int (*select) (struct i2c_adapter *,
92 void *, u32), 95 void *, u32),
93 int (*deselect) (struct i2c_adapter *, 96 int (*deselect) (struct i2c_adapter *,
@@ -102,7 +105,7 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
102 105
103 /* Set up private adapter data */ 106 /* Set up private adapter data */
104 priv->parent = parent; 107 priv->parent = parent;
105 priv->mux_dev = mux_dev; 108 priv->mux_priv = mux_priv;
106 priv->chan_id = chan_id; 109 priv->chan_id = chan_id;
107 priv->select = select; 110 priv->select = select;
108 priv->deselect = deselect; 111 priv->deselect = deselect;
@@ -124,6 +127,25 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
124 priv->adap.algo_data = priv; 127 priv->adap.algo_data = priv;
125 priv->adap.dev.parent = &parent->dev; 128 priv->adap.dev.parent = &parent->dev;
126 129
130 /*
131 * Try to populate the mux adapter's of_node, expands to
132 * nothing if !CONFIG_OF.
133 */
134 if (mux_dev->of_node) {
135 struct device_node *child;
136 u32 reg;
137
138 for_each_child_of_node(mux_dev->of_node, child) {
139 ret = of_property_read_u32(child, "reg", &reg);
140 if (ret)
141 continue;
142 if (chan_id == reg) {
143 priv->adap.dev.of_node = child;
144 break;
145 }
146 }
147 }
148
127 if (force_nr) { 149 if (force_nr) {
128 priv->adap.nr = force_nr; 150 priv->adap.nr = force_nr;
129 ret = i2c_add_numbered_adapter(&priv->adap); 151 ret = i2c_add_numbered_adapter(&priv->adap);
@@ -141,6 +163,8 @@ struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
141 dev_info(&parent->dev, "Added multiplexed i2c bus %d\n", 163 dev_info(&parent->dev, "Added multiplexed i2c bus %d\n",
142 i2c_adapter_id(&priv->adap)); 164 i2c_adapter_id(&priv->adap));
143 165
166 of_i2c_register_devices(&priv->adap);
167
144 return &priv->adap; 168 return &priv->adap;
145} 169}
146EXPORT_SYMBOL_GPL(i2c_add_mux_adapter); 170EXPORT_SYMBOL_GPL(i2c_add_mux_adapter);
diff --git a/drivers/i2c/muxes/Kconfig b/drivers/i2c/muxes/Kconfig
index 90b7a0163899..beb2491db274 100644
--- a/drivers/i2c/muxes/Kconfig
+++ b/drivers/i2c/muxes/Kconfig
@@ -15,7 +15,7 @@ config I2C_MUX_GPIO
15 through GPIO pins. 15 through GPIO pins.
16 16
17 This driver can also be built as a module. If so, the module 17 This driver can also be built as a module. If so, the module
18 will be called gpio-i2cmux. 18 will be called i2c-mux-gpio.
19 19
20config I2C_MUX_PCA9541 20config I2C_MUX_PCA9541
21 tristate "NXP PCA9541 I2C Master Selector" 21 tristate "NXP PCA9541 I2C Master Selector"
@@ -25,7 +25,7 @@ config I2C_MUX_PCA9541
25 I2C Master Selector. 25 I2C Master Selector.
26 26
27 This driver can also be built as a module. If so, the module 27 This driver can also be built as a module. If so, the module
28 will be called pca9541. 28 will be called i2c-mux-pca9541.
29 29
30config I2C_MUX_PCA954x 30config I2C_MUX_PCA954x
31 tristate "Philips PCA954x I2C Mux/switches" 31 tristate "Philips PCA954x I2C Mux/switches"
@@ -35,6 +35,6 @@ config I2C_MUX_PCA954x
35 I2C mux/switch devices. 35 I2C mux/switch devices.
36 36
37 This driver can also be built as a module. If so, the module 37 This driver can also be built as a module. If so, the module
38 will be called pca954x. 38 will be called i2c-mux-pca954x.
39 39
40endmenu 40endmenu
diff --git a/drivers/i2c/muxes/Makefile b/drivers/i2c/muxes/Makefile
index 4640436ea61f..5826249b29ca 100644
--- a/drivers/i2c/muxes/Makefile
+++ b/drivers/i2c/muxes/Makefile
@@ -1,8 +1,8 @@
1# 1#
2# Makefile for multiplexer I2C chip drivers. 2# Makefile for multiplexer I2C chip drivers.
3 3
4obj-$(CONFIG_I2C_MUX_GPIO) += gpio-i2cmux.o 4obj-$(CONFIG_I2C_MUX_GPIO) += i2c-mux-gpio.o
5obj-$(CONFIG_I2C_MUX_PCA9541) += pca9541.o 5obj-$(CONFIG_I2C_MUX_PCA9541) += i2c-mux-pca9541.o
6obj-$(CONFIG_I2C_MUX_PCA954x) += pca954x.o 6obj-$(CONFIG_I2C_MUX_PCA954x) += i2c-mux-pca954x.o
7 7
8ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG 8ccflags-$(CONFIG_I2C_DEBUG_BUS) := -DDEBUG
diff --git a/drivers/i2c/muxes/gpio-i2cmux.c b/drivers/i2c/muxes/i2c-mux-gpio.c
index e5fa695eb0fa..68b1f8ec3436 100644
--- a/drivers/i2c/muxes/gpio-i2cmux.c
+++ b/drivers/i2c/muxes/i2c-mux-gpio.c
@@ -10,7 +10,7 @@
10 10
11#include <linux/i2c.h> 11#include <linux/i2c.h>
12#include <linux/i2c-mux.h> 12#include <linux/i2c-mux.h>
13#include <linux/gpio-i2cmux.h> 13#include <linux/i2c-mux-gpio.h>
14#include <linux/platform_device.h> 14#include <linux/platform_device.h>
15#include <linux/init.h> 15#include <linux/init.h>
16#include <linux/module.h> 16#include <linux/module.h>
@@ -20,10 +20,10 @@
20struct gpiomux { 20struct gpiomux {
21 struct i2c_adapter *parent; 21 struct i2c_adapter *parent;
22 struct i2c_adapter **adap; /* child busses */ 22 struct i2c_adapter **adap; /* child busses */
23 struct gpio_i2cmux_platform_data data; 23 struct i2c_mux_gpio_platform_data data;
24}; 24};
25 25
26static void gpiomux_set(const struct gpiomux *mux, unsigned val) 26static void i2c_mux_gpio_set(const struct gpiomux *mux, unsigned val)
27{ 27{
28 int i; 28 int i;
29 29
@@ -31,28 +31,28 @@ static void gpiomux_set(const struct gpiomux *mux, unsigned val)
31 gpio_set_value(mux->data.gpios[i], val & (1 << i)); 31 gpio_set_value(mux->data.gpios[i], val & (1 << i));
32} 32}
33 33
34static int gpiomux_select(struct i2c_adapter *adap, void *data, u32 chan) 34static int i2c_mux_gpio_select(struct i2c_adapter *adap, void *data, u32 chan)
35{ 35{
36 struct gpiomux *mux = data; 36 struct gpiomux *mux = data;
37 37
38 gpiomux_set(mux, mux->data.values[chan]); 38 i2c_mux_gpio_set(mux, mux->data.values[chan]);
39 39
40 return 0; 40 return 0;
41} 41}
42 42
43static int gpiomux_deselect(struct i2c_adapter *adap, void *data, u32 chan) 43static int i2c_mux_gpio_deselect(struct i2c_adapter *adap, void *data, u32 chan)
44{ 44{
45 struct gpiomux *mux = data; 45 struct gpiomux *mux = data;
46 46
47 gpiomux_set(mux, mux->data.idle); 47 i2c_mux_gpio_set(mux, mux->data.idle);
48 48
49 return 0; 49 return 0;
50} 50}
51 51
52static int __devinit gpiomux_probe(struct platform_device *pdev) 52static int __devinit i2c_mux_gpio_probe(struct platform_device *pdev)
53{ 53{
54 struct gpiomux *mux; 54 struct gpiomux *mux;
55 struct gpio_i2cmux_platform_data *pdata; 55 struct i2c_mux_gpio_platform_data *pdata;
56 struct i2c_adapter *parent; 56 struct i2c_adapter *parent;
57 int (*deselect) (struct i2c_adapter *, void *, u32); 57 int (*deselect) (struct i2c_adapter *, void *, u32);
58 unsigned initial_state; 58 unsigned initial_state;
@@ -86,16 +86,16 @@ static int __devinit gpiomux_probe(struct platform_device *pdev)
86 goto alloc_failed2; 86 goto alloc_failed2;
87 } 87 }
88 88
89 if (pdata->idle != GPIO_I2CMUX_NO_IDLE) { 89 if (pdata->idle != I2C_MUX_GPIO_NO_IDLE) {
90 initial_state = pdata->idle; 90 initial_state = pdata->idle;
91 deselect = gpiomux_deselect; 91 deselect = i2c_mux_gpio_deselect;
92 } else { 92 } else {
93 initial_state = pdata->values[0]; 93 initial_state = pdata->values[0];
94 deselect = NULL; 94 deselect = NULL;
95 } 95 }
96 96
97 for (i = 0; i < pdata->n_gpios; i++) { 97 for (i = 0; i < pdata->n_gpios; i++) {
98 ret = gpio_request(pdata->gpios[i], "gpio-i2cmux"); 98 ret = gpio_request(pdata->gpios[i], "i2c-mux-gpio");
99 if (ret) 99 if (ret)
100 goto err_request_gpio; 100 goto err_request_gpio;
101 gpio_direction_output(pdata->gpios[i], 101 gpio_direction_output(pdata->gpios[i],
@@ -105,8 +105,8 @@ static int __devinit gpiomux_probe(struct platform_device *pdev)
105 for (i = 0; i < pdata->n_values; i++) { 105 for (i = 0; i < pdata->n_values; i++) {
106 u32 nr = pdata->base_nr ? (pdata->base_nr + i) : 0; 106 u32 nr = pdata->base_nr ? (pdata->base_nr + i) : 0;
107 107
108 mux->adap[i] = i2c_add_mux_adapter(parent, mux, nr, i, 108 mux->adap[i] = i2c_add_mux_adapter(parent, &pdev->dev, mux, nr, i,
109 gpiomux_select, deselect); 109 i2c_mux_gpio_select, deselect);
110 if (!mux->adap[i]) { 110 if (!mux->adap[i]) {
111 ret = -ENODEV; 111 ret = -ENODEV;
112 dev_err(&pdev->dev, "Failed to add adapter %d\n", i); 112 dev_err(&pdev->dev, "Failed to add adapter %d\n", i);
@@ -137,7 +137,7 @@ alloc_failed:
137 return ret; 137 return ret;
138} 138}
139 139
140static int __devexit gpiomux_remove(struct platform_device *pdev) 140static int __devexit i2c_mux_gpio_remove(struct platform_device *pdev)
141{ 141{
142 struct gpiomux *mux = platform_get_drvdata(pdev); 142 struct gpiomux *mux = platform_get_drvdata(pdev);
143 int i; 143 int i;
@@ -156,18 +156,18 @@ static int __devexit gpiomux_remove(struct platform_device *pdev)
156 return 0; 156 return 0;
157} 157}
158 158
159static struct platform_driver gpiomux_driver = { 159static struct platform_driver i2c_mux_gpio_driver = {
160 .probe = gpiomux_probe, 160 .probe = i2c_mux_gpio_probe,
161 .remove = __devexit_p(gpiomux_remove), 161 .remove = __devexit_p(i2c_mux_gpio_remove),
162 .driver = { 162 .driver = {
163 .owner = THIS_MODULE, 163 .owner = THIS_MODULE,
164 .name = "gpio-i2cmux", 164 .name = "i2c-mux-gpio",
165 }, 165 },
166}; 166};
167 167
168module_platform_driver(gpiomux_driver); 168module_platform_driver(i2c_mux_gpio_driver);
169 169
170MODULE_DESCRIPTION("GPIO-based I2C multiplexer driver"); 170MODULE_DESCRIPTION("GPIO-based I2C multiplexer driver");
171MODULE_AUTHOR("Peter Korsgaard <peter.korsgaard@barco.com>"); 171MODULE_AUTHOR("Peter Korsgaard <peter.korsgaard@barco.com>");
172MODULE_LICENSE("GPL"); 172MODULE_LICENSE("GPL");
173MODULE_ALIAS("platform:gpio-i2cmux"); 173MODULE_ALIAS("platform:i2c-mux-gpio");
diff --git a/drivers/i2c/muxes/pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c
index e0df9b6c66b3..8aacde1516ac 100644
--- a/drivers/i2c/muxes/pca9541.c
+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c
@@ -353,7 +353,8 @@ static int pca9541_probe(struct i2c_client *client,
353 force = 0; 353 force = 0;
354 if (pdata) 354 if (pdata)
355 force = pdata->modes[0].adap_id; 355 force = pdata->modes[0].adap_id;
356 data->mux_adap = i2c_add_mux_adapter(adap, client, force, 0, 356 data->mux_adap = i2c_add_mux_adapter(adap, &client->dev, client,
357 force, 0,
357 pca9541_select_chan, 358 pca9541_select_chan,
358 pca9541_release_chan); 359 pca9541_release_chan);
359 360
diff --git a/drivers/i2c/muxes/pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c
index 0e37ef27aa12..f2dfe0d8fcce 100644
--- a/drivers/i2c/muxes/pca954x.c
+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c
@@ -226,7 +226,7 @@ static int pca954x_probe(struct i2c_client *client,
226 } 226 }
227 227
228 data->virt_adaps[num] = 228 data->virt_adaps[num] =
229 i2c_add_mux_adapter(adap, client, 229 i2c_add_mux_adapter(adap, &client->dev, client,
230 force, num, pca954x_select_chan, 230 force, num, pca954x_select_chan,
231 (pdata && pdata->modes[num].deselect_on_exit) 231 (pdata && pdata->modes[num].deselect_on_exit)
232 ? pca954x_deselect_mux : NULL); 232 ? pca954x_deselect_mux : NULL);
diff --git a/drivers/input/keyboard/pxa27x_keypad.c b/drivers/input/keyboard/pxa27x_keypad.c
index 29fe1b2be1c1..7f7b72464a37 100644
--- a/drivers/input/keyboard/pxa27x_keypad.c
+++ b/drivers/input/keyboard/pxa27x_keypad.c
@@ -311,7 +311,15 @@ static void pxa27x_keypad_scan_direct(struct pxa27x_keypad *keypad)
311 if (pdata->enable_rotary0 || pdata->enable_rotary1) 311 if (pdata->enable_rotary0 || pdata->enable_rotary1)
312 pxa27x_keypad_scan_rotary(keypad); 312 pxa27x_keypad_scan_rotary(keypad);
313 313
314 new_state = KPDK_DK(kpdk) & keypad->direct_key_mask; 314 /*
315 * The KPDR_DK only output the key pin level, so it relates to board,
316 * and low level may be active.
317 */
318 if (pdata->direct_key_low_active)
319 new_state = ~KPDK_DK(kpdk) & keypad->direct_key_mask;
320 else
321 new_state = KPDK_DK(kpdk) & keypad->direct_key_mask;
322
315 bits_changed = keypad->direct_key_state ^ new_state; 323 bits_changed = keypad->direct_key_state ^ new_state;
316 324
317 if (bits_changed == 0) 325 if (bits_changed == 0)
@@ -383,7 +391,14 @@ static void pxa27x_keypad_config(struct pxa27x_keypad *keypad)
383 if (pdata->direct_key_num > direct_key_num) 391 if (pdata->direct_key_num > direct_key_num)
384 direct_key_num = pdata->direct_key_num; 392 direct_key_num = pdata->direct_key_num;
385 393
386 keypad->direct_key_mask = ((2 << direct_key_num) - 1) & ~mask; 394 /*
395 * Direct keys usage may not start from KP_DKIN0, check the platfrom
396 * mask data to config the specific.
397 */
398 if (pdata->direct_key_mask)
399 keypad->direct_key_mask = pdata->direct_key_mask;
400 else
401 keypad->direct_key_mask = ((1 << direct_key_num) - 1) & ~mask;
387 402
388 /* enable direct key */ 403 /* enable direct key */
389 if (direct_key_num) 404 if (direct_key_num)
@@ -399,7 +414,7 @@ static int pxa27x_keypad_open(struct input_dev *dev)
399 struct pxa27x_keypad *keypad = input_get_drvdata(dev); 414 struct pxa27x_keypad *keypad = input_get_drvdata(dev);
400 415
401 /* Enable unit clock */ 416 /* Enable unit clock */
402 clk_enable(keypad->clk); 417 clk_prepare_enable(keypad->clk);
403 pxa27x_keypad_config(keypad); 418 pxa27x_keypad_config(keypad);
404 419
405 return 0; 420 return 0;
@@ -410,7 +425,7 @@ static void pxa27x_keypad_close(struct input_dev *dev)
410 struct pxa27x_keypad *keypad = input_get_drvdata(dev); 425 struct pxa27x_keypad *keypad = input_get_drvdata(dev);
411 426
412 /* Disable clock unit */ 427 /* Disable clock unit */
413 clk_disable(keypad->clk); 428 clk_disable_unprepare(keypad->clk);
414} 429}
415 430
416#ifdef CONFIG_PM 431#ifdef CONFIG_PM
@@ -419,10 +434,14 @@ static int pxa27x_keypad_suspend(struct device *dev)
419 struct platform_device *pdev = to_platform_device(dev); 434 struct platform_device *pdev = to_platform_device(dev);
420 struct pxa27x_keypad *keypad = platform_get_drvdata(pdev); 435 struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
421 436
422 clk_disable(keypad->clk); 437 /*
423 438 * If the keypad is used a wake up source, clock can not be disabled.
439 * Or it can not detect the key pressing.
440 */
424 if (device_may_wakeup(&pdev->dev)) 441 if (device_may_wakeup(&pdev->dev))
425 enable_irq_wake(keypad->irq); 442 enable_irq_wake(keypad->irq);
443 else
444 clk_disable_unprepare(keypad->clk);
426 445
427 return 0; 446 return 0;
428} 447}
@@ -433,19 +452,24 @@ static int pxa27x_keypad_resume(struct device *dev)
433 struct pxa27x_keypad *keypad = platform_get_drvdata(pdev); 452 struct pxa27x_keypad *keypad = platform_get_drvdata(pdev);
434 struct input_dev *input_dev = keypad->input_dev; 453 struct input_dev *input_dev = keypad->input_dev;
435 454
436 if (device_may_wakeup(&pdev->dev)) 455 /*
456 * If the keypad is used as wake up source, the clock is not turned
457 * off. So do not need configure it again.
458 */
459 if (device_may_wakeup(&pdev->dev)) {
437 disable_irq_wake(keypad->irq); 460 disable_irq_wake(keypad->irq);
461 } else {
462 mutex_lock(&input_dev->mutex);
438 463
439 mutex_lock(&input_dev->mutex); 464 if (input_dev->users) {
465 /* Enable unit clock */
466 clk_prepare_enable(keypad->clk);
467 pxa27x_keypad_config(keypad);
468 }
440 469
441 if (input_dev->users) { 470 mutex_unlock(&input_dev->mutex);
442 /* Enable unit clock */
443 clk_enable(keypad->clk);
444 pxa27x_keypad_config(keypad);
445 } 471 }
446 472
447 mutex_unlock(&input_dev->mutex);
448
449 return 0; 473 return 0;
450} 474}
451 475
diff --git a/drivers/input/misc/wm831x-on.c b/drivers/input/misc/wm831x-on.c
index 47f18d6bce46..6790a812a1db 100644
--- a/drivers/input/misc/wm831x-on.c
+++ b/drivers/input/misc/wm831x-on.c
@@ -73,7 +73,7 @@ static int __devinit wm831x_on_probe(struct platform_device *pdev)
73{ 73{
74 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); 74 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
75 struct wm831x_on *wm831x_on; 75 struct wm831x_on *wm831x_on;
76 int irq = platform_get_irq(pdev, 0); 76 int irq = wm831x_irq(wm831x, platform_get_irq(pdev, 0));
77 int ret; 77 int ret;
78 78
79 wm831x_on = kzalloc(sizeof(struct wm831x_on), GFP_KERNEL); 79 wm831x_on = kzalloc(sizeof(struct wm831x_on), GFP_KERNEL);
diff --git a/drivers/input/touchscreen/wm831x-ts.c b/drivers/input/touchscreen/wm831x-ts.c
index 4bc851a9dc3d..e83410721e38 100644
--- a/drivers/input/touchscreen/wm831x-ts.c
+++ b/drivers/input/touchscreen/wm831x-ts.c
@@ -260,15 +260,16 @@ static __devinit int wm831x_ts_probe(struct platform_device *pdev)
260 * If we have a direct IRQ use it, otherwise use the interrupt 260 * If we have a direct IRQ use it, otherwise use the interrupt
261 * from the WM831x IRQ controller. 261 * from the WM831x IRQ controller.
262 */ 262 */
263 wm831x_ts->data_irq = wm831x_irq(wm831x,
264 platform_get_irq_byname(pdev,
265 "TCHDATA"));
263 if (pdata && pdata->data_irq) 266 if (pdata && pdata->data_irq)
264 wm831x_ts->data_irq = pdata->data_irq; 267 wm831x_ts->data_irq = pdata->data_irq;
265 else
266 wm831x_ts->data_irq = platform_get_irq_byname(pdev, "TCHDATA");
267 268
269 wm831x_ts->pd_irq = wm831x_irq(wm831x,
270 platform_get_irq_byname(pdev, "TCHPD"));
268 if (pdata && pdata->pd_irq) 271 if (pdata && pdata->pd_irq)
269 wm831x_ts->pd_irq = pdata->pd_irq; 272 wm831x_ts->pd_irq = pdata->pd_irq;
270 else
271 wm831x_ts->pd_irq = platform_get_irq_byname(pdev, "TCHPD");
272 273
273 if (pdata) 274 if (pdata)
274 wm831x_ts->pressure = pdata->pressure; 275 wm831x_ts->pressure = pdata->pressure;
diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig
index c69843742bb0..340893727538 100644
--- a/drivers/iommu/Kconfig
+++ b/drivers/iommu/Kconfig
@@ -162,4 +162,25 @@ config TEGRA_IOMMU_SMMU
162 space through the SMMU (System Memory Management Unit) 162 space through the SMMU (System Memory Management Unit)
163 hardware included on Tegra SoCs. 163 hardware included on Tegra SoCs.
164 164
165config EXYNOS_IOMMU
166 bool "Exynos IOMMU Support"
167 depends on ARCH_EXYNOS && EXYNOS_DEV_SYSMMU
168 select IOMMU_API
169 help
170 Support for the IOMMU(System MMU) of Samsung Exynos application
171 processor family. This enables H/W multimedia accellerators to see
172 non-linear physical memory chunks as a linear memory in their
173 address spaces
174
175 If unsure, say N here.
176
177config EXYNOS_IOMMU_DEBUG
178 bool "Debugging log for Exynos IOMMU"
179 depends on EXYNOS_IOMMU
180 help
181 Select this to see the detailed log message that shows what
182 happens in the IOMMU driver
183
184 Say N unless you need kernel log message for IOMMU debugging
185
165endif # IOMMU_SUPPORT 186endif # IOMMU_SUPPORT
diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile
index 3e5e82ae9f0d..76e54ef796de 100644
--- a/drivers/iommu/Makefile
+++ b/drivers/iommu/Makefile
@@ -10,3 +10,4 @@ obj-$(CONFIG_OMAP_IOVMM) += omap-iovmm.o
10obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o 10obj-$(CONFIG_OMAP_IOMMU_DEBUG) += omap-iommu-debug.o
11obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o 11obj-$(CONFIG_TEGRA_IOMMU_GART) += tegra-gart.o
12obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o 12obj-$(CONFIG_TEGRA_IOMMU_SMMU) += tegra-smmu.o
13obj-$(CONFIG_EXYNOS_IOMMU) += exynos-iommu.o
diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c
new file mode 100644
index 000000000000..9a114b9ff170
--- /dev/null
+++ b/drivers/iommu/exynos-iommu.c
@@ -0,0 +1,1076 @@
1/* linux/drivers/iommu/exynos_iommu.c
2 *
3 * Copyright (c) 2011 Samsung Electronics Co., Ltd.
4 * http://www.samsung.com
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#ifdef CONFIG_EXYNOS_IOMMU_DEBUG
12#define DEBUG
13#endif
14
15#include <linux/io.h>
16#include <linux/interrupt.h>
17#include <linux/platform_device.h>
18#include <linux/slab.h>
19#include <linux/pm_runtime.h>
20#include <linux/clk.h>
21#include <linux/err.h>
22#include <linux/mm.h>
23#include <linux/iommu.h>
24#include <linux/errno.h>
25#include <linux/list.h>
26#include <linux/memblock.h>
27#include <linux/export.h>
28
29#include <asm/cacheflush.h>
30#include <asm/pgtable.h>
31
32#include <mach/sysmmu.h>
33
34/* We does not consider super section mapping (16MB) */
35#define SECT_ORDER 20
36#define LPAGE_ORDER 16
37#define SPAGE_ORDER 12
38
39#define SECT_SIZE (1 << SECT_ORDER)
40#define LPAGE_SIZE (1 << LPAGE_ORDER)
41#define SPAGE_SIZE (1 << SPAGE_ORDER)
42
43#define SECT_MASK (~(SECT_SIZE - 1))
44#define LPAGE_MASK (~(LPAGE_SIZE - 1))
45#define SPAGE_MASK (~(SPAGE_SIZE - 1))
46
47#define lv1ent_fault(sent) (((*(sent) & 3) == 0) || ((*(sent) & 3) == 3))
48#define lv1ent_page(sent) ((*(sent) & 3) == 1)
49#define lv1ent_section(sent) ((*(sent) & 3) == 2)
50
51#define lv2ent_fault(pent) ((*(pent) & 3) == 0)
52#define lv2ent_small(pent) ((*(pent) & 2) == 2)
53#define lv2ent_large(pent) ((*(pent) & 3) == 1)
54
55#define section_phys(sent) (*(sent) & SECT_MASK)
56#define section_offs(iova) ((iova) & 0xFFFFF)
57#define lpage_phys(pent) (*(pent) & LPAGE_MASK)
58#define lpage_offs(iova) ((iova) & 0xFFFF)
59#define spage_phys(pent) (*(pent) & SPAGE_MASK)
60#define spage_offs(iova) ((iova) & 0xFFF)
61
62#define lv1ent_offset(iova) ((iova) >> SECT_ORDER)
63#define lv2ent_offset(iova) (((iova) & 0xFF000) >> SPAGE_ORDER)
64
65#define NUM_LV1ENTRIES 4096
66#define NUM_LV2ENTRIES 256
67
68#define LV2TABLE_SIZE (NUM_LV2ENTRIES * sizeof(long))
69
70#define SPAGES_PER_LPAGE (LPAGE_SIZE / SPAGE_SIZE)
71
72#define lv2table_base(sent) (*(sent) & 0xFFFFFC00)
73
74#define mk_lv1ent_sect(pa) ((pa) | 2)
75#define mk_lv1ent_page(pa) ((pa) | 1)
76#define mk_lv2ent_lpage(pa) ((pa) | 1)
77#define mk_lv2ent_spage(pa) ((pa) | 2)
78
79#define CTRL_ENABLE 0x5
80#define CTRL_BLOCK 0x7
81#define CTRL_DISABLE 0x0
82
83#define REG_MMU_CTRL 0x000
84#define REG_MMU_CFG 0x004
85#define REG_MMU_STATUS 0x008
86#define REG_MMU_FLUSH 0x00C
87#define REG_MMU_FLUSH_ENTRY 0x010
88#define REG_PT_BASE_ADDR 0x014
89#define REG_INT_STATUS 0x018
90#define REG_INT_CLEAR 0x01C
91
92#define REG_PAGE_FAULT_ADDR 0x024
93#define REG_AW_FAULT_ADDR 0x028
94#define REG_AR_FAULT_ADDR 0x02C
95#define REG_DEFAULT_SLAVE_ADDR 0x030
96
97#define REG_MMU_VERSION 0x034
98
99#define REG_PB0_SADDR 0x04C
100#define REG_PB0_EADDR 0x050
101#define REG_PB1_SADDR 0x054
102#define REG_PB1_EADDR 0x058
103
104static unsigned long *section_entry(unsigned long *pgtable, unsigned long iova)
105{
106 return pgtable + lv1ent_offset(iova);
107}
108
109static unsigned long *page_entry(unsigned long *sent, unsigned long iova)
110{
111 return (unsigned long *)__va(lv2table_base(sent)) + lv2ent_offset(iova);
112}
113
114enum exynos_sysmmu_inttype {
115 SYSMMU_PAGEFAULT,
116 SYSMMU_AR_MULTIHIT,
117 SYSMMU_AW_MULTIHIT,
118 SYSMMU_BUSERROR,
119 SYSMMU_AR_SECURITY,
120 SYSMMU_AR_ACCESS,
121 SYSMMU_AW_SECURITY,
122 SYSMMU_AW_PROTECTION, /* 7 */
123 SYSMMU_FAULT_UNKNOWN,
124 SYSMMU_FAULTS_NUM
125};
126
127/*
128 * @itype: type of fault.
129 * @pgtable_base: the physical address of page table base. This is 0 if @itype
130 * is SYSMMU_BUSERROR.
131 * @fault_addr: the device (virtual) address that the System MMU tried to
132 * translated. This is 0 if @itype is SYSMMU_BUSERROR.
133 */
134typedef int (*sysmmu_fault_handler_t)(enum exynos_sysmmu_inttype itype,
135 unsigned long pgtable_base, unsigned long fault_addr);
136
137static unsigned short fault_reg_offset[SYSMMU_FAULTS_NUM] = {
138 REG_PAGE_FAULT_ADDR,
139 REG_AR_FAULT_ADDR,
140 REG_AW_FAULT_ADDR,
141 REG_DEFAULT_SLAVE_ADDR,
142 REG_AR_FAULT_ADDR,
143 REG_AR_FAULT_ADDR,
144 REG_AW_FAULT_ADDR,
145 REG_AW_FAULT_ADDR
146};
147
148static char *sysmmu_fault_name[SYSMMU_FAULTS_NUM] = {
149 "PAGE FAULT",
150 "AR MULTI-HIT FAULT",
151 "AW MULTI-HIT FAULT",
152 "BUS ERROR",
153 "AR SECURITY PROTECTION FAULT",
154 "AR ACCESS PROTECTION FAULT",
155 "AW SECURITY PROTECTION FAULT",
156 "AW ACCESS PROTECTION FAULT",
157 "UNKNOWN FAULT"
158};
159
160struct exynos_iommu_domain {
161 struct list_head clients; /* list of sysmmu_drvdata.node */
162 unsigned long *pgtable; /* lv1 page table, 16KB */
163 short *lv2entcnt; /* free lv2 entry counter for each section */
164 spinlock_t lock; /* lock for this structure */
165 spinlock_t pgtablelock; /* lock for modifying page table @ pgtable */
166};
167
168struct sysmmu_drvdata {
169 struct list_head node; /* entry of exynos_iommu_domain.clients */
170 struct device *sysmmu; /* System MMU's device descriptor */
171 struct device *dev; /* Owner of system MMU */
172 char *dbgname;
173 int nsfrs;
174 void __iomem **sfrbases;
175 struct clk *clk[2];
176 int activations;
177 rwlock_t lock;
178 struct iommu_domain *domain;
179 sysmmu_fault_handler_t fault_handler;
180 unsigned long pgtable;
181};
182
183static bool set_sysmmu_active(struct sysmmu_drvdata *data)
184{
185 /* return true if the System MMU was not active previously
186 and it needs to be initialized */
187 return ++data->activations == 1;
188}
189
190static bool set_sysmmu_inactive(struct sysmmu_drvdata *data)
191{
192 /* return true if the System MMU is needed to be disabled */
193 BUG_ON(data->activations < 1);
194 return --data->activations == 0;
195}
196
197static bool is_sysmmu_active(struct sysmmu_drvdata *data)
198{
199 return data->activations > 0;
200}
201
202static void sysmmu_unblock(void __iomem *sfrbase)
203{
204 __raw_writel(CTRL_ENABLE, sfrbase + REG_MMU_CTRL);
205}
206
207static bool sysmmu_block(void __iomem *sfrbase)
208{
209 int i = 120;
210
211 __raw_writel(CTRL_BLOCK, sfrbase + REG_MMU_CTRL);
212 while ((i > 0) && !(__raw_readl(sfrbase + REG_MMU_STATUS) & 1))
213 --i;
214
215 if (!(__raw_readl(sfrbase + REG_MMU_STATUS) & 1)) {
216 sysmmu_unblock(sfrbase);
217 return false;
218 }
219
220 return true;
221}
222
223static void __sysmmu_tlb_invalidate(void __iomem *sfrbase)
224{
225 __raw_writel(0x1, sfrbase + REG_MMU_FLUSH);
226}
227
228static void __sysmmu_tlb_invalidate_entry(void __iomem *sfrbase,
229 unsigned long iova)
230{
231 __raw_writel((iova & SPAGE_MASK) | 1, sfrbase + REG_MMU_FLUSH_ENTRY);
232}
233
234static void __sysmmu_set_ptbase(void __iomem *sfrbase,
235 unsigned long pgd)
236{
237 __raw_writel(0x1, sfrbase + REG_MMU_CFG); /* 16KB LV1, LRU */
238 __raw_writel(pgd, sfrbase + REG_PT_BASE_ADDR);
239
240 __sysmmu_tlb_invalidate(sfrbase);
241}
242
243static void __sysmmu_set_prefbuf(void __iomem *sfrbase, unsigned long base,
244 unsigned long size, int idx)
245{
246 __raw_writel(base, sfrbase + REG_PB0_SADDR + idx * 8);
247 __raw_writel(size - 1 + base, sfrbase + REG_PB0_EADDR + idx * 8);
248}
249
250void exynos_sysmmu_set_prefbuf(struct device *dev,
251 unsigned long base0, unsigned long size0,
252 unsigned long base1, unsigned long size1)
253{
254 struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
255 unsigned long flags;
256 int i;
257
258 BUG_ON((base0 + size0) <= base0);
259 BUG_ON((size1 > 0) && ((base1 + size1) <= base1));
260
261 read_lock_irqsave(&data->lock, flags);
262 if (!is_sysmmu_active(data))
263 goto finish;
264
265 for (i = 0; i < data->nsfrs; i++) {
266 if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
267 if (!sysmmu_block(data->sfrbases[i]))
268 continue;
269
270 if (size1 == 0) {
271 if (size0 <= SZ_128K) {
272 base1 = base0;
273 size1 = size0;
274 } else {
275 size1 = size0 -
276 ALIGN(size0 / 2, SZ_64K);
277 size0 = size0 - size1;
278 base1 = base0 + size0;
279 }
280 }
281
282 __sysmmu_set_prefbuf(
283 data->sfrbases[i], base0, size0, 0);
284 __sysmmu_set_prefbuf(
285 data->sfrbases[i], base1, size1, 1);
286
287 sysmmu_unblock(data->sfrbases[i]);
288 }
289 }
290finish:
291 read_unlock_irqrestore(&data->lock, flags);
292}
293
294static void __set_fault_handler(struct sysmmu_drvdata *data,
295 sysmmu_fault_handler_t handler)
296{
297 unsigned long flags;
298
299 write_lock_irqsave(&data->lock, flags);
300 data->fault_handler = handler;
301 write_unlock_irqrestore(&data->lock, flags);
302}
303
304void exynos_sysmmu_set_fault_handler(struct device *dev,
305 sysmmu_fault_handler_t handler)
306{
307 struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
308
309 __set_fault_handler(data, handler);
310}
311
312static int default_fault_handler(enum exynos_sysmmu_inttype itype,
313 unsigned long pgtable_base, unsigned long fault_addr)
314{
315 unsigned long *ent;
316
317 if ((itype >= SYSMMU_FAULTS_NUM) || (itype < SYSMMU_PAGEFAULT))
318 itype = SYSMMU_FAULT_UNKNOWN;
319
320 pr_err("%s occured at 0x%lx(Page table base: 0x%lx)\n",
321 sysmmu_fault_name[itype], fault_addr, pgtable_base);
322
323 ent = section_entry(__va(pgtable_base), fault_addr);
324 pr_err("\tLv1 entry: 0x%lx\n", *ent);
325
326 if (lv1ent_page(ent)) {
327 ent = page_entry(ent, fault_addr);
328 pr_err("\t Lv2 entry: 0x%lx\n", *ent);
329 }
330
331 pr_err("Generating Kernel OOPS... because it is unrecoverable.\n");
332
333 BUG();
334
335 return 0;
336}
337
338static irqreturn_t exynos_sysmmu_irq(int irq, void *dev_id)
339{
340 /* SYSMMU is in blocked when interrupt occurred. */
341 struct sysmmu_drvdata *data = dev_id;
342 struct resource *irqres;
343 struct platform_device *pdev;
344 enum exynos_sysmmu_inttype itype;
345 unsigned long addr = -1;
346
347 int i, ret = -ENOSYS;
348
349 read_lock(&data->lock);
350
351 WARN_ON(!is_sysmmu_active(data));
352
353 pdev = to_platform_device(data->sysmmu);
354 for (i = 0; i < (pdev->num_resources / 2); i++) {
355 irqres = platform_get_resource(pdev, IORESOURCE_IRQ, i);
356 if (irqres && ((int)irqres->start == irq))
357 break;
358 }
359
360 if (i == pdev->num_resources) {
361 itype = SYSMMU_FAULT_UNKNOWN;
362 } else {
363 itype = (enum exynos_sysmmu_inttype)
364 __ffs(__raw_readl(data->sfrbases[i] + REG_INT_STATUS));
365 if (WARN_ON(!((itype >= 0) && (itype < SYSMMU_FAULT_UNKNOWN))))
366 itype = SYSMMU_FAULT_UNKNOWN;
367 else
368 addr = __raw_readl(
369 data->sfrbases[i] + fault_reg_offset[itype]);
370 }
371
372 if (data->domain)
373 ret = report_iommu_fault(data->domain, data->dev,
374 addr, itype);
375
376 if ((ret == -ENOSYS) && data->fault_handler) {
377 unsigned long base = data->pgtable;
378 if (itype != SYSMMU_FAULT_UNKNOWN)
379 base = __raw_readl(
380 data->sfrbases[i] + REG_PT_BASE_ADDR);
381 ret = data->fault_handler(itype, base, addr);
382 }
383
384 if (!ret && (itype != SYSMMU_FAULT_UNKNOWN))
385 __raw_writel(1 << itype, data->sfrbases[i] + REG_INT_CLEAR);
386 else
387 dev_dbg(data->sysmmu, "(%s) %s is not handled.\n",
388 data->dbgname, sysmmu_fault_name[itype]);
389
390 if (itype != SYSMMU_FAULT_UNKNOWN)
391 sysmmu_unblock(data->sfrbases[i]);
392
393 read_unlock(&data->lock);
394
395 return IRQ_HANDLED;
396}
397
398static bool __exynos_sysmmu_disable(struct sysmmu_drvdata *data)
399{
400 unsigned long flags;
401 bool disabled = false;
402 int i;
403
404 write_lock_irqsave(&data->lock, flags);
405
406 if (!set_sysmmu_inactive(data))
407 goto finish;
408
409 for (i = 0; i < data->nsfrs; i++)
410 __raw_writel(CTRL_DISABLE, data->sfrbases[i] + REG_MMU_CTRL);
411
412 if (data->clk[1])
413 clk_disable(data->clk[1]);
414 if (data->clk[0])
415 clk_disable(data->clk[0]);
416
417 disabled = true;
418 data->pgtable = 0;
419 data->domain = NULL;
420finish:
421 write_unlock_irqrestore(&data->lock, flags);
422
423 if (disabled)
424 dev_dbg(data->sysmmu, "(%s) Disabled\n", data->dbgname);
425 else
426 dev_dbg(data->sysmmu, "(%s) %d times left to be disabled\n",
427 data->dbgname, data->activations);
428
429 return disabled;
430}
431
432/* __exynos_sysmmu_enable: Enables System MMU
433 *
434 * returns -error if an error occurred and System MMU is not enabled,
435 * 0 if the System MMU has been just enabled and 1 if System MMU was already
436 * enabled before.
437 */
438static int __exynos_sysmmu_enable(struct sysmmu_drvdata *data,
439 unsigned long pgtable, struct iommu_domain *domain)
440{
441 int i, ret = 0;
442 unsigned long flags;
443
444 write_lock_irqsave(&data->lock, flags);
445
446 if (!set_sysmmu_active(data)) {
447 if (WARN_ON(pgtable != data->pgtable)) {
448 ret = -EBUSY;
449 set_sysmmu_inactive(data);
450 } else {
451 ret = 1;
452 }
453
454 dev_dbg(data->sysmmu, "(%s) Already enabled\n", data->dbgname);
455 goto finish;
456 }
457
458 if (data->clk[0])
459 clk_enable(data->clk[0]);
460 if (data->clk[1])
461 clk_enable(data->clk[1]);
462
463 data->pgtable = pgtable;
464
465 for (i = 0; i < data->nsfrs; i++) {
466 __sysmmu_set_ptbase(data->sfrbases[i], pgtable);
467
468 if ((readl(data->sfrbases[i] + REG_MMU_VERSION) >> 28) == 3) {
469 /* System MMU version is 3.x */
470 __raw_writel((1 << 12) | (2 << 28),
471 data->sfrbases[i] + REG_MMU_CFG);
472 __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 0);
473 __sysmmu_set_prefbuf(data->sfrbases[i], 0, -1, 1);
474 }
475
476 __raw_writel(CTRL_ENABLE, data->sfrbases[i] + REG_MMU_CTRL);
477 }
478
479 data->domain = domain;
480
481 dev_dbg(data->sysmmu, "(%s) Enabled\n", data->dbgname);
482finish:
483 write_unlock_irqrestore(&data->lock, flags);
484
485 return ret;
486}
487
488int exynos_sysmmu_enable(struct device *dev, unsigned long pgtable)
489{
490 struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
491 int ret;
492
493 BUG_ON(!memblock_is_memory(pgtable));
494
495 ret = pm_runtime_get_sync(data->sysmmu);
496 if (ret < 0) {
497 dev_dbg(data->sysmmu, "(%s) Failed to enable\n", data->dbgname);
498 return ret;
499 }
500
501 ret = __exynos_sysmmu_enable(data, pgtable, NULL);
502 if (WARN_ON(ret < 0)) {
503 pm_runtime_put(data->sysmmu);
504 dev_err(data->sysmmu,
505 "(%s) Already enabled with page table %#lx\n",
506 data->dbgname, data->pgtable);
507 } else {
508 data->dev = dev;
509 }
510
511 return ret;
512}
513
514bool exynos_sysmmu_disable(struct device *dev)
515{
516 struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
517 bool disabled;
518
519 disabled = __exynos_sysmmu_disable(data);
520 pm_runtime_put(data->sysmmu);
521
522 return disabled;
523}
524
525static void sysmmu_tlb_invalidate_entry(struct device *dev, unsigned long iova)
526{
527 unsigned long flags;
528 struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
529
530 read_lock_irqsave(&data->lock, flags);
531
532 if (is_sysmmu_active(data)) {
533 int i;
534 for (i = 0; i < data->nsfrs; i++) {
535 if (sysmmu_block(data->sfrbases[i])) {
536 __sysmmu_tlb_invalidate_entry(
537 data->sfrbases[i], iova);
538 sysmmu_unblock(data->sfrbases[i]);
539 }
540 }
541 } else {
542 dev_dbg(data->sysmmu,
543 "(%s) Disabled. Skipping invalidating TLB.\n",
544 data->dbgname);
545 }
546
547 read_unlock_irqrestore(&data->lock, flags);
548}
549
550void exynos_sysmmu_tlb_invalidate(struct device *dev)
551{
552 unsigned long flags;
553 struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
554
555 read_lock_irqsave(&data->lock, flags);
556
557 if (is_sysmmu_active(data)) {
558 int i;
559 for (i = 0; i < data->nsfrs; i++) {
560 if (sysmmu_block(data->sfrbases[i])) {
561 __sysmmu_tlb_invalidate(data->sfrbases[i]);
562 sysmmu_unblock(data->sfrbases[i]);
563 }
564 }
565 } else {
566 dev_dbg(data->sysmmu,
567 "(%s) Disabled. Skipping invalidating TLB.\n",
568 data->dbgname);
569 }
570
571 read_unlock_irqrestore(&data->lock, flags);
572}
573
574static int exynos_sysmmu_probe(struct platform_device *pdev)
575{
576 int i, ret;
577 struct device *dev;
578 struct sysmmu_drvdata *data;
579
580 dev = &pdev->dev;
581
582 data = kzalloc(sizeof(*data), GFP_KERNEL);
583 if (!data) {
584 dev_dbg(dev, "Not enough memory\n");
585 ret = -ENOMEM;
586 goto err_alloc;
587 }
588
589 ret = dev_set_drvdata(dev, data);
590 if (ret) {
591 dev_dbg(dev, "Unabled to initialize driver data\n");
592 goto err_init;
593 }
594
595 data->nsfrs = pdev->num_resources / 2;
596 data->sfrbases = kmalloc(sizeof(*data->sfrbases) * data->nsfrs,
597 GFP_KERNEL);
598 if (data->sfrbases == NULL) {
599 dev_dbg(dev, "Not enough memory\n");
600 ret = -ENOMEM;
601 goto err_init;
602 }
603
604 for (i = 0; i < data->nsfrs; i++) {
605 struct resource *res;
606 res = platform_get_resource(pdev, IORESOURCE_MEM, i);
607 if (!res) {
608 dev_dbg(dev, "Unable to find IOMEM region\n");
609 ret = -ENOENT;
610 goto err_res;
611 }
612
613 data->sfrbases[i] = ioremap(res->start, resource_size(res));
614 if (!data->sfrbases[i]) {
615 dev_dbg(dev, "Unable to map IOMEM @ PA:%#x\n",
616 res->start);
617 ret = -ENOENT;
618 goto err_res;
619 }
620 }
621
622 for (i = 0; i < data->nsfrs; i++) {
623 ret = platform_get_irq(pdev, i);
624 if (ret <= 0) {
625 dev_dbg(dev, "Unable to find IRQ resource\n");
626 goto err_irq;
627 }
628
629 ret = request_irq(ret, exynos_sysmmu_irq, 0,
630 dev_name(dev), data);
631 if (ret) {
632 dev_dbg(dev, "Unabled to register interrupt handler\n");
633 goto err_irq;
634 }
635 }
636
637 if (dev_get_platdata(dev)) {
638 char *deli, *beg;
639 struct sysmmu_platform_data *platdata = dev_get_platdata(dev);
640
641 beg = platdata->clockname;
642
643 for (deli = beg; (*deli != '\0') && (*deli != ','); deli++)
644 /* NOTHING */;
645
646 if (*deli == '\0')
647 deli = NULL;
648 else
649 *deli = '\0';
650
651 data->clk[0] = clk_get(dev, beg);
652 if (IS_ERR(data->clk[0])) {
653 data->clk[0] = NULL;
654 dev_dbg(dev, "No clock descriptor registered\n");
655 }
656
657 if (data->clk[0] && deli) {
658 *deli = ',';
659 data->clk[1] = clk_get(dev, deli + 1);
660 if (IS_ERR(data->clk[1]))
661 data->clk[1] = NULL;
662 }
663
664 data->dbgname = platdata->dbgname;
665 }
666
667 data->sysmmu = dev;
668 rwlock_init(&data->lock);
669 INIT_LIST_HEAD(&data->node);
670
671 __set_fault_handler(data, &default_fault_handler);
672
673 if (dev->parent)
674 pm_runtime_enable(dev);
675
676 dev_dbg(dev, "(%s) Initialized\n", data->dbgname);
677 return 0;
678err_irq:
679 while (i-- > 0) {
680 int irq;
681
682 irq = platform_get_irq(pdev, i);
683 free_irq(irq, data);
684 }
685err_res:
686 while (data->nsfrs-- > 0)
687 iounmap(data->sfrbases[data->nsfrs]);
688 kfree(data->sfrbases);
689err_init:
690 kfree(data);
691err_alloc:
692 dev_err(dev, "Failed to initialize\n");
693 return ret;
694}
695
696static struct platform_driver exynos_sysmmu_driver = {
697 .probe = exynos_sysmmu_probe,
698 .driver = {
699 .owner = THIS_MODULE,
700 .name = "exynos-sysmmu",
701 }
702};
703
704static inline void pgtable_flush(void *vastart, void *vaend)
705{
706 dmac_flush_range(vastart, vaend);
707 outer_flush_range(virt_to_phys(vastart),
708 virt_to_phys(vaend));
709}
710
711static int exynos_iommu_domain_init(struct iommu_domain *domain)
712{
713 struct exynos_iommu_domain *priv;
714
715 priv = kzalloc(sizeof(*priv), GFP_KERNEL);
716 if (!priv)
717 return -ENOMEM;
718
719 priv->pgtable = (unsigned long *)__get_free_pages(
720 GFP_KERNEL | __GFP_ZERO, 2);
721 if (!priv->pgtable)
722 goto err_pgtable;
723
724 priv->lv2entcnt = (short *)__get_free_pages(
725 GFP_KERNEL | __GFP_ZERO, 1);
726 if (!priv->lv2entcnt)
727 goto err_counter;
728
729 pgtable_flush(priv->pgtable, priv->pgtable + NUM_LV1ENTRIES);
730
731 spin_lock_init(&priv->lock);
732 spin_lock_init(&priv->pgtablelock);
733 INIT_LIST_HEAD(&priv->clients);
734
735 domain->priv = priv;
736 return 0;
737
738err_counter:
739 free_pages((unsigned long)priv->pgtable, 2);
740err_pgtable:
741 kfree(priv);
742 return -ENOMEM;
743}
744
745static void exynos_iommu_domain_destroy(struct iommu_domain *domain)
746{
747 struct exynos_iommu_domain *priv = domain->priv;
748 struct sysmmu_drvdata *data;
749 unsigned long flags;
750 int i;
751
752 WARN_ON(!list_empty(&priv->clients));
753
754 spin_lock_irqsave(&priv->lock, flags);
755
756 list_for_each_entry(data, &priv->clients, node) {
757 while (!exynos_sysmmu_disable(data->dev))
758 ; /* until System MMU is actually disabled */
759 }
760
761 spin_unlock_irqrestore(&priv->lock, flags);
762
763 for (i = 0; i < NUM_LV1ENTRIES; i++)
764 if (lv1ent_page(priv->pgtable + i))
765 kfree(__va(lv2table_base(priv->pgtable + i)));
766
767 free_pages((unsigned long)priv->pgtable, 2);
768 free_pages((unsigned long)priv->lv2entcnt, 1);
769 kfree(domain->priv);
770 domain->priv = NULL;
771}
772
773static int exynos_iommu_attach_device(struct iommu_domain *domain,
774 struct device *dev)
775{
776 struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
777 struct exynos_iommu_domain *priv = domain->priv;
778 unsigned long flags;
779 int ret;
780
781 ret = pm_runtime_get_sync(data->sysmmu);
782 if (ret < 0)
783 return ret;
784
785 ret = 0;
786
787 spin_lock_irqsave(&priv->lock, flags);
788
789 ret = __exynos_sysmmu_enable(data, __pa(priv->pgtable), domain);
790
791 if (ret == 0) {
792 /* 'data->node' must not be appeared in priv->clients */
793 BUG_ON(!list_empty(&data->node));
794 data->dev = dev;
795 list_add_tail(&data->node, &priv->clients);
796 }
797
798 spin_unlock_irqrestore(&priv->lock, flags);
799
800 if (ret < 0) {
801 dev_err(dev, "%s: Failed to attach IOMMU with pgtable %#lx\n",
802 __func__, __pa(priv->pgtable));
803 pm_runtime_put(data->sysmmu);
804 } else if (ret > 0) {
805 dev_dbg(dev, "%s: IOMMU with pgtable 0x%lx already attached\n",
806 __func__, __pa(priv->pgtable));
807 } else {
808 dev_dbg(dev, "%s: Attached new IOMMU with pgtable 0x%lx\n",
809 __func__, __pa(priv->pgtable));
810 }
811
812 return ret;
813}
814
815static void exynos_iommu_detach_device(struct iommu_domain *domain,
816 struct device *dev)
817{
818 struct sysmmu_drvdata *data = dev_get_drvdata(dev->archdata.iommu);
819 struct exynos_iommu_domain *priv = domain->priv;
820 struct list_head *pos;
821 unsigned long flags;
822 bool found = false;
823
824 spin_lock_irqsave(&priv->lock, flags);
825
826 list_for_each(pos, &priv->clients) {
827 if (list_entry(pos, struct sysmmu_drvdata, node) == data) {
828 found = true;
829 break;
830 }
831 }
832
833 if (!found)
834 goto finish;
835
836 if (__exynos_sysmmu_disable(data)) {
837 dev_dbg(dev, "%s: Detached IOMMU with pgtable %#lx\n",
838 __func__, __pa(priv->pgtable));
839 list_del(&data->node);
840 INIT_LIST_HEAD(&data->node);
841
842 } else {
843 dev_dbg(dev, "%s: Detaching IOMMU with pgtable %#lx delayed",
844 __func__, __pa(priv->pgtable));
845 }
846
847finish:
848 spin_unlock_irqrestore(&priv->lock, flags);
849
850 if (found)
851 pm_runtime_put(data->sysmmu);
852}
853
854static unsigned long *alloc_lv2entry(unsigned long *sent, unsigned long iova,
855 short *pgcounter)
856{
857 if (lv1ent_fault(sent)) {
858 unsigned long *pent;
859
860 pent = kzalloc(LV2TABLE_SIZE, GFP_ATOMIC);
861 BUG_ON((unsigned long)pent & (LV2TABLE_SIZE - 1));
862 if (!pent)
863 return NULL;
864
865 *sent = mk_lv1ent_page(__pa(pent));
866 *pgcounter = NUM_LV2ENTRIES;
867 pgtable_flush(pent, pent + NUM_LV2ENTRIES);
868 pgtable_flush(sent, sent + 1);
869 }
870
871 return page_entry(sent, iova);
872}
873
874static int lv1set_section(unsigned long *sent, phys_addr_t paddr, short *pgcnt)
875{
876 if (lv1ent_section(sent))
877 return -EADDRINUSE;
878
879 if (lv1ent_page(sent)) {
880 if (*pgcnt != NUM_LV2ENTRIES)
881 return -EADDRINUSE;
882
883 kfree(page_entry(sent, 0));
884
885 *pgcnt = 0;
886 }
887
888 *sent = mk_lv1ent_sect(paddr);
889
890 pgtable_flush(sent, sent + 1);
891
892 return 0;
893}
894
895static int lv2set_page(unsigned long *pent, phys_addr_t paddr, size_t size,
896 short *pgcnt)
897{
898 if (size == SPAGE_SIZE) {
899 if (!lv2ent_fault(pent))
900 return -EADDRINUSE;
901
902 *pent = mk_lv2ent_spage(paddr);
903 pgtable_flush(pent, pent + 1);
904 *pgcnt -= 1;
905 } else { /* size == LPAGE_SIZE */
906 int i;
907 for (i = 0; i < SPAGES_PER_LPAGE; i++, pent++) {
908 if (!lv2ent_fault(pent)) {
909 memset(pent, 0, sizeof(*pent) * i);
910 return -EADDRINUSE;
911 }
912
913 *pent = mk_lv2ent_lpage(paddr);
914 }
915 pgtable_flush(pent - SPAGES_PER_LPAGE, pent);
916 *pgcnt -= SPAGES_PER_LPAGE;
917 }
918
919 return 0;
920}
921
922static int exynos_iommu_map(struct iommu_domain *domain, unsigned long iova,
923 phys_addr_t paddr, size_t size, int prot)
924{
925 struct exynos_iommu_domain *priv = domain->priv;
926 unsigned long *entry;
927 unsigned long flags;
928 int ret = -ENOMEM;
929
930 BUG_ON(priv->pgtable == NULL);
931
932 spin_lock_irqsave(&priv->pgtablelock, flags);
933
934 entry = section_entry(priv->pgtable, iova);
935
936 if (size == SECT_SIZE) {
937 ret = lv1set_section(entry, paddr,
938 &priv->lv2entcnt[lv1ent_offset(iova)]);
939 } else {
940 unsigned long *pent;
941
942 pent = alloc_lv2entry(entry, iova,
943 &priv->lv2entcnt[lv1ent_offset(iova)]);
944
945 if (!pent)
946 ret = -ENOMEM;
947 else
948 ret = lv2set_page(pent, paddr, size,
949 &priv->lv2entcnt[lv1ent_offset(iova)]);
950 }
951
952 if (ret) {
953 pr_debug("%s: Failed to map iova 0x%lx/0x%x bytes\n",
954 __func__, iova, size);
955 }
956
957 spin_unlock_irqrestore(&priv->pgtablelock, flags);
958
959 return ret;
960}
961
962static size_t exynos_iommu_unmap(struct iommu_domain *domain,
963 unsigned long iova, size_t size)
964{
965 struct exynos_iommu_domain *priv = domain->priv;
966 struct sysmmu_drvdata *data;
967 unsigned long flags;
968 unsigned long *ent;
969
970 BUG_ON(priv->pgtable == NULL);
971
972 spin_lock_irqsave(&priv->pgtablelock, flags);
973
974 ent = section_entry(priv->pgtable, iova);
975
976 if (lv1ent_section(ent)) {
977 BUG_ON(size < SECT_SIZE);
978
979 *ent = 0;
980 pgtable_flush(ent, ent + 1);
981 size = SECT_SIZE;
982 goto done;
983 }
984
985 if (unlikely(lv1ent_fault(ent))) {
986 if (size > SECT_SIZE)
987 size = SECT_SIZE;
988 goto done;
989 }
990
991 /* lv1ent_page(sent) == true here */
992
993 ent = page_entry(ent, iova);
994
995 if (unlikely(lv2ent_fault(ent))) {
996 size = SPAGE_SIZE;
997 goto done;
998 }
999
1000 if (lv2ent_small(ent)) {
1001 *ent = 0;
1002 size = SPAGE_SIZE;
1003 priv->lv2entcnt[lv1ent_offset(iova)] += 1;
1004 goto done;
1005 }
1006
1007 /* lv1ent_large(ent) == true here */
1008 BUG_ON(size < LPAGE_SIZE);
1009
1010 memset(ent, 0, sizeof(*ent) * SPAGES_PER_LPAGE);
1011
1012 size = LPAGE_SIZE;
1013 priv->lv2entcnt[lv1ent_offset(iova)] += SPAGES_PER_LPAGE;
1014done:
1015 spin_unlock_irqrestore(&priv->pgtablelock, flags);
1016
1017 spin_lock_irqsave(&priv->lock, flags);
1018 list_for_each_entry(data, &priv->clients, node)
1019 sysmmu_tlb_invalidate_entry(data->dev, iova);
1020 spin_unlock_irqrestore(&priv->lock, flags);
1021
1022
1023 return size;
1024}
1025
1026static phys_addr_t exynos_iommu_iova_to_phys(struct iommu_domain *domain,
1027 unsigned long iova)
1028{
1029 struct exynos_iommu_domain *priv = domain->priv;
1030 unsigned long *entry;
1031 unsigned long flags;
1032 phys_addr_t phys = 0;
1033
1034 spin_lock_irqsave(&priv->pgtablelock, flags);
1035
1036 entry = section_entry(priv->pgtable, iova);
1037
1038 if (lv1ent_section(entry)) {
1039 phys = section_phys(entry) + section_offs(iova);
1040 } else if (lv1ent_page(entry)) {
1041 entry = page_entry(entry, iova);
1042
1043 if (lv2ent_large(entry))
1044 phys = lpage_phys(entry) + lpage_offs(iova);
1045 else if (lv2ent_small(entry))
1046 phys = spage_phys(entry) + spage_offs(iova);
1047 }
1048
1049 spin_unlock_irqrestore(&priv->pgtablelock, flags);
1050
1051 return phys;
1052}
1053
1054static struct iommu_ops exynos_iommu_ops = {
1055 .domain_init = &exynos_iommu_domain_init,
1056 .domain_destroy = &exynos_iommu_domain_destroy,
1057 .attach_dev = &exynos_iommu_attach_device,
1058 .detach_dev = &exynos_iommu_detach_device,
1059 .map = &exynos_iommu_map,
1060 .unmap = &exynos_iommu_unmap,
1061 .iova_to_phys = &exynos_iommu_iova_to_phys,
1062 .pgsize_bitmap = SECT_SIZE | LPAGE_SIZE | SPAGE_SIZE,
1063};
1064
1065static int __init exynos_iommu_init(void)
1066{
1067 int ret;
1068
1069 ret = platform_driver_register(&exynos_sysmmu_driver);
1070
1071 if (ret == 0)
1072 bus_set_iommu(&platform_bus_type, &exynos_iommu_ops);
1073
1074 return ret;
1075}
1076subsys_initcall(exynos_iommu_init);
diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c
index bf2fbaad5e22..b12af2ff8c54 100644
--- a/drivers/iommu/intel-iommu.c
+++ b/drivers/iommu/intel-iommu.c
@@ -1907,6 +1907,15 @@ static void iommu_detach_dev(struct intel_iommu *iommu, u8 bus, u8 devfn)
1907 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH); 1907 iommu->flush.flush_iotlb(iommu, 0, 0, 0, DMA_TLB_GLOBAL_FLUSH);
1908} 1908}
1909 1909
1910static inline void unlink_domain_info(struct device_domain_info *info)
1911{
1912 assert_spin_locked(&device_domain_lock);
1913 list_del(&info->link);
1914 list_del(&info->global);
1915 if (info->dev)
1916 info->dev->dev.archdata.iommu = NULL;
1917}
1918
1910static void domain_remove_dev_info(struct dmar_domain *domain) 1919static void domain_remove_dev_info(struct dmar_domain *domain)
1911{ 1920{
1912 struct device_domain_info *info; 1921 struct device_domain_info *info;
@@ -1917,10 +1926,7 @@ static void domain_remove_dev_info(struct dmar_domain *domain)
1917 while (!list_empty(&domain->devices)) { 1926 while (!list_empty(&domain->devices)) {
1918 info = list_entry(domain->devices.next, 1927 info = list_entry(domain->devices.next,
1919 struct device_domain_info, link); 1928 struct device_domain_info, link);
1920 list_del(&info->link); 1929 unlink_domain_info(info);
1921 list_del(&info->global);
1922 if (info->dev)
1923 info->dev->dev.archdata.iommu = NULL;
1924 spin_unlock_irqrestore(&device_domain_lock, flags); 1930 spin_unlock_irqrestore(&device_domain_lock, flags);
1925 1931
1926 iommu_disable_dev_iotlb(info); 1932 iommu_disable_dev_iotlb(info);
@@ -2287,12 +2293,6 @@ static int domain_add_dev_info(struct dmar_domain *domain,
2287 if (!info) 2293 if (!info)
2288 return -ENOMEM; 2294 return -ENOMEM;
2289 2295
2290 ret = domain_context_mapping(domain, pdev, translation);
2291 if (ret) {
2292 free_devinfo_mem(info);
2293 return ret;
2294 }
2295
2296 info->segment = pci_domain_nr(pdev->bus); 2296 info->segment = pci_domain_nr(pdev->bus);
2297 info->bus = pdev->bus->number; 2297 info->bus = pdev->bus->number;
2298 info->devfn = pdev->devfn; 2298 info->devfn = pdev->devfn;
@@ -2305,6 +2305,15 @@ static int domain_add_dev_info(struct dmar_domain *domain,
2305 pdev->dev.archdata.iommu = info; 2305 pdev->dev.archdata.iommu = info;
2306 spin_unlock_irqrestore(&device_domain_lock, flags); 2306 spin_unlock_irqrestore(&device_domain_lock, flags);
2307 2307
2308 ret = domain_context_mapping(domain, pdev, translation);
2309 if (ret) {
2310 spin_lock_irqsave(&device_domain_lock, flags);
2311 unlink_domain_info(info);
2312 spin_unlock_irqrestore(&device_domain_lock, flags);
2313 free_devinfo_mem(info);
2314 return ret;
2315 }
2316
2308 return 0; 2317 return 0;
2309} 2318}
2310 2319
@@ -3728,10 +3737,7 @@ static void domain_remove_one_dev_info(struct dmar_domain *domain,
3728 if (info->segment == pci_domain_nr(pdev->bus) && 3737 if (info->segment == pci_domain_nr(pdev->bus) &&
3729 info->bus == pdev->bus->number && 3738 info->bus == pdev->bus->number &&
3730 info->devfn == pdev->devfn) { 3739 info->devfn == pdev->devfn) {
3731 list_del(&info->link); 3740 unlink_domain_info(info);
3732 list_del(&info->global);
3733 if (info->dev)
3734 info->dev->dev.archdata.iommu = NULL;
3735 spin_unlock_irqrestore(&device_domain_lock, flags); 3741 spin_unlock_irqrestore(&device_domain_lock, flags);
3736 3742
3737 iommu_disable_dev_iotlb(info); 3743 iommu_disable_dev_iotlb(info);
@@ -3786,11 +3792,7 @@ static void vm_domain_remove_all_dev_info(struct dmar_domain *domain)
3786 while (!list_empty(&domain->devices)) { 3792 while (!list_empty(&domain->devices)) {
3787 info = list_entry(domain->devices.next, 3793 info = list_entry(domain->devices.next,
3788 struct device_domain_info, link); 3794 struct device_domain_info, link);
3789 list_del(&info->link); 3795 unlink_domain_info(info);
3790 list_del(&info->global);
3791 if (info->dev)
3792 info->dev->dev.archdata.iommu = NULL;
3793
3794 spin_unlock_irqrestore(&device_domain_lock, flags1); 3796 spin_unlock_irqrestore(&device_domain_lock, flags1);
3795 3797
3796 iommu_disable_dev_iotlb(info); 3798 iommu_disable_dev_iotlb(info);
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index ff4b8cfda585..04cb8c88d74b 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -50,6 +50,19 @@ config LEDS_LM3530
50 controlled manually or using PWM input or using ambient 50 controlled manually or using PWM input or using ambient
51 light automatically. 51 light automatically.
52 52
53config LEDS_LM3533
54 tristate "LED support for LM3533"
55 depends on LEDS_CLASS
56 depends on MFD_LM3533
57 help
58 This option enables support for the LEDs on National Semiconductor /
59 TI LM3533 Lighting Power chips.
60
61 The LEDs can be controlled directly, through PWM input, or by the
62 ambient-light-sensor interface. The chip supports
63 hardware-accelerated blinking with maximum on and off periods of 9.8
64 and 77 seconds respectively.
65
53config LEDS_LOCOMO 66config LEDS_LOCOMO
54 tristate "LED Support for Locomo device" 67 tristate "LED Support for Locomo device"
55 depends on LEDS_CLASS 68 depends on LEDS_CLASS
@@ -259,6 +272,14 @@ config LEDS_DA903X
259 This option enables support for on-chip LED drivers found 272 This option enables support for on-chip LED drivers found
260 on Dialog Semiconductor DA9030/DA9034 PMICs. 273 on Dialog Semiconductor DA9030/DA9034 PMICs.
261 274
275config LEDS_DA9052
276 tristate "Dialog DA9052/DA9053 LEDS"
277 depends on LEDS_CLASS
278 depends on PMIC_DA9052
279 help
280 This option enables support for on-chip LED drivers found
281 on Dialog Semiconductor DA9052-BC and DA9053-AA/Bx PMICs.
282
262config LEDS_DAC124S085 283config LEDS_DAC124S085
263 tristate "LED Support for DAC124S085 SPI DAC" 284 tristate "LED Support for DAC124S085 SPI DAC"
264 depends on LEDS_CLASS 285 depends on LEDS_CLASS
@@ -471,4 +492,12 @@ config LEDS_TRIGGER_DEFAULT_ON
471comment "iptables trigger is under Netfilter config (LED target)" 492comment "iptables trigger is under Netfilter config (LED target)"
472 depends on LEDS_TRIGGERS 493 depends on LEDS_TRIGGERS
473 494
495config LEDS_TRIGGER_TRANSIENT
496 tristate "LED Transient Trigger"
497 depends on LEDS_TRIGGERS
498 help
499 This allows one time activation of a transient state on
500 GPIO/PWM based hadrware.
501 If unsure, say Y.
502
474endif # NEW_LEDS 503endif # NEW_LEDS
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index 890481cb09f6..f8958cd6cf6e 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -10,6 +10,7 @@ obj-$(CONFIG_LEDS_ATMEL_PWM) += leds-atmel-pwm.o
10obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o 10obj-$(CONFIG_LEDS_BD2802) += leds-bd2802.o
11obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o 11obj-$(CONFIG_LEDS_LOCOMO) += leds-locomo.o
12obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o 12obj-$(CONFIG_LEDS_LM3530) += leds-lm3530.o
13obj-$(CONFIG_LEDS_LM3533) += leds-lm3533.o
13obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o 14obj-$(CONFIG_LEDS_MIKROTIK_RB532) += leds-rb532.o
14obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o 15obj-$(CONFIG_LEDS_S3C24XX) += leds-s3c24xx.o
15obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o 16obj-$(CONFIG_LEDS_NET48XX) += leds-net48xx.o
@@ -31,6 +32,7 @@ obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
31obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o 32obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
32obj-$(CONFIG_LEDS_PCA9633) += leds-pca9633.o 33obj-$(CONFIG_LEDS_PCA9633) += leds-pca9633.o
33obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o 34obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
35obj-$(CONFIG_LEDS_DA9052) += leds-da9052.o
34obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o 36obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
35obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o 37obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o
36obj-$(CONFIG_LEDS_PWM) += leds-pwm.o 38obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
@@ -56,3 +58,4 @@ obj-$(CONFIG_LEDS_TRIGGER_HEARTBEAT) += ledtrig-heartbeat.o
56obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o 58obj-$(CONFIG_LEDS_TRIGGER_BACKLIGHT) += ledtrig-backlight.o
57obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledtrig-gpio.o 59obj-$(CONFIG_LEDS_TRIGGER_GPIO) += ledtrig-gpio.o
58obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o 60obj-$(CONFIG_LEDS_TRIGGER_DEFAULT_ON) += ledtrig-default-on.o
61obj-$(CONFIG_LEDS_TRIGGER_TRANSIENT) += ledtrig-transient.o
diff --git a/drivers/leds/led-class.c b/drivers/leds/led-class.c
index 5bff8439dc68..8ee92c81aec2 100644
--- a/drivers/leds/led-class.c
+++ b/drivers/leds/led-class.c
@@ -44,23 +44,18 @@ static ssize_t led_brightness_store(struct device *dev,
44 struct device_attribute *attr, const char *buf, size_t size) 44 struct device_attribute *attr, const char *buf, size_t size)
45{ 45{
46 struct led_classdev *led_cdev = dev_get_drvdata(dev); 46 struct led_classdev *led_cdev = dev_get_drvdata(dev);
47 unsigned long state;
47 ssize_t ret = -EINVAL; 48 ssize_t ret = -EINVAL;
48 char *after;
49 unsigned long state = simple_strtoul(buf, &after, 10);
50 size_t count = after - buf;
51 49
52 if (isspace(*after)) 50 ret = kstrtoul(buf, 10, &state);
53 count++; 51 if (ret)
52 return ret;
54 53
55 if (count == size) { 54 if (state == LED_OFF)
56 ret = count; 55 led_trigger_remove(led_cdev);
56 led_set_brightness(led_cdev, state);
57 57
58 if (state == LED_OFF) 58 return size;
59 led_trigger_remove(led_cdev);
60 led_set_brightness(led_cdev, state);
61 }
62
63 return ret;
64} 59}
65 60
66static ssize_t led_max_brightness_show(struct device *dev, 61static ssize_t led_max_brightness_show(struct device *dev,
diff --git a/drivers/leds/leds-da9052.c b/drivers/leds/leds-da9052.c
new file mode 100644
index 000000000000..58a5244c437e
--- /dev/null
+++ b/drivers/leds/leds-da9052.c
@@ -0,0 +1,214 @@
1/*
2 * LED Driver for Dialog DA9052 PMICs.
3 *
4 * Copyright(c) 2012 Dialog Semiconductor Ltd.
5 *
6 * Author: David Dajun Chen <dchen@diasemi.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 *
13 */
14
15#include <linux/module.h>
16#include <linux/kernel.h>
17#include <linux/init.h>
18#include <linux/platform_device.h>
19#include <linux/leds.h>
20#include <linux/workqueue.h>
21#include <linux/slab.h>
22
23#include <linux/mfd/da9052/reg.h>
24#include <linux/mfd/da9052/da9052.h>
25#include <linux/mfd/da9052/pdata.h>
26
27#define DA9052_OPENDRAIN_OUTPUT 2
28#define DA9052_SET_HIGH_LVL_OUTPUT (1 << 3)
29#define DA9052_MASK_UPPER_NIBBLE 0xF0
30#define DA9052_MASK_LOWER_NIBBLE 0x0F
31#define DA9052_NIBBLE_SHIFT 4
32#define DA9052_MAX_BRIGHTNESS 0x5f
33
34struct da9052_led {
35 struct led_classdev cdev;
36 struct work_struct work;
37 struct da9052 *da9052;
38 unsigned char led_index;
39 unsigned char id;
40 int brightness;
41};
42
43static unsigned char led_reg[] = {
44 DA9052_LED_CONT_4_REG,
45 DA9052_LED_CONT_5_REG,
46};
47
48static int da9052_set_led_brightness(struct da9052_led *led)
49{
50 u8 val;
51 int error;
52
53 val = (led->brightness & 0x7f) | DA9052_LED_CONT_DIM;
54
55 error = da9052_reg_write(led->da9052, led_reg[led->led_index], val);
56 if (error < 0)
57 dev_err(led->da9052->dev, "Failed to set led brightness, %d\n",
58 error);
59 return error;
60}
61
62static void da9052_led_work(struct work_struct *work)
63{
64 struct da9052_led *led = container_of(work, struct da9052_led, work);
65
66 da9052_set_led_brightness(led);
67}
68
69static void da9052_led_set(struct led_classdev *led_cdev,
70 enum led_brightness value)
71{
72 struct da9052_led *led;
73
74 led = container_of(led_cdev, struct da9052_led, cdev);
75 led->brightness = value;
76 schedule_work(&led->work);
77}
78
79static int da9052_configure_leds(struct da9052 *da9052)
80{
81 int error;
82 unsigned char register_value = DA9052_OPENDRAIN_OUTPUT
83 | DA9052_SET_HIGH_LVL_OUTPUT;
84
85 error = da9052_reg_update(da9052, DA9052_GPIO_14_15_REG,
86 DA9052_MASK_LOWER_NIBBLE,
87 register_value);
88
89 if (error < 0) {
90 dev_err(da9052->dev, "Failed to write GPIO 14-15 reg, %d\n",
91 error);
92 return error;
93 }
94
95 error = da9052_reg_update(da9052, DA9052_GPIO_14_15_REG,
96 DA9052_MASK_UPPER_NIBBLE,
97 register_value << DA9052_NIBBLE_SHIFT);
98 if (error < 0)
99 dev_err(da9052->dev, "Failed to write GPIO 14-15 reg, %d\n",
100 error);
101
102 return error;
103}
104
105static int __devinit da9052_led_probe(struct platform_device *pdev)
106{
107 struct da9052_pdata *pdata;
108 struct da9052 *da9052;
109 struct led_platform_data *pled;
110 struct da9052_led *led = NULL;
111 int error = -ENODEV;
112 int i;
113
114 da9052 = dev_get_drvdata(pdev->dev.parent);
115 pdata = da9052->dev->platform_data;
116 if (pdata == NULL) {
117 dev_err(&pdev->dev, "No platform data\n");
118 goto err;
119 }
120
121 pled = pdata->pled;
122 if (pled == NULL) {
123 dev_err(&pdev->dev, "No platform data for LED\n");
124 goto err;
125 }
126
127 led = devm_kzalloc(&pdev->dev,
128 sizeof(struct da9052_led) * pled->num_leds,
129 GFP_KERNEL);
130 if (led == NULL) {
131 dev_err(&pdev->dev, "Failed to alloc memory\n");
132 error = -ENOMEM;
133 goto err;
134 }
135
136 for (i = 0; i < pled->num_leds; i++) {
137 led[i].cdev.name = pled->leds[i].name;
138 led[i].cdev.brightness_set = da9052_led_set;
139 led[i].cdev.brightness = LED_OFF;
140 led[i].cdev.max_brightness = DA9052_MAX_BRIGHTNESS;
141 led[i].brightness = LED_OFF;
142 led[i].led_index = pled->leds[i].flags;
143 led[i].da9052 = dev_get_drvdata(pdev->dev.parent);
144 INIT_WORK(&led[i].work, da9052_led_work);
145
146 error = led_classdev_register(pdev->dev.parent, &led[i].cdev);
147 if (error) {
148 dev_err(&pdev->dev, "Failed to register led %d\n",
149 led[i].led_index);
150 goto err_register;
151 }
152
153 error = da9052_set_led_brightness(&led[i]);
154 if (error) {
155 dev_err(&pdev->dev, "Unable to init led %d\n",
156 led[i].led_index);
157 continue;
158 }
159 }
160 error = da9052_configure_leds(led->da9052);
161 if (error) {
162 dev_err(&pdev->dev, "Failed to configure GPIO LED%d\n", error);
163 goto err_register;
164 }
165
166 platform_set_drvdata(pdev, led);
167
168 return 0;
169
170err_register:
171 for (i = i - 1; i >= 0; i--) {
172 led_classdev_unregister(&led[i].cdev);
173 cancel_work_sync(&led[i].work);
174 }
175err:
176 return error;
177}
178
179static int __devexit da9052_led_remove(struct platform_device *pdev)
180{
181 struct da9052_led *led = platform_get_drvdata(pdev);
182 struct da9052_pdata *pdata;
183 struct da9052 *da9052;
184 struct led_platform_data *pled;
185 int i;
186
187 da9052 = dev_get_drvdata(pdev->dev.parent);
188 pdata = da9052->dev->platform_data;
189 pled = pdata->pled;
190
191 for (i = 0; i < pled->num_leds; i++) {
192 led[i].brightness = 0;
193 da9052_set_led_brightness(&led[i]);
194 led_classdev_unregister(&led[i].cdev);
195 cancel_work_sync(&led[i].work);
196 }
197
198 return 0;
199}
200
201static struct platform_driver da9052_led_driver = {
202 .driver = {
203 .name = "da9052-leds",
204 .owner = THIS_MODULE,
205 },
206 .probe = da9052_led_probe,
207 .remove = __devexit_p(da9052_led_remove),
208};
209
210module_platform_driver(da9052_led_driver);
211
212MODULE_AUTHOR("Dialog Semiconductor Ltd <dchen@diasemi.com>");
213MODULE_DESCRIPTION("LED driver for Dialog DA9052 PMIC");
214MODULE_LICENSE("GPL");
diff --git a/drivers/leds/leds-lm3530.c b/drivers/leds/leds-lm3530.c
index 968fd5fef4fc..84ba6de8039c 100644
--- a/drivers/leds/leds-lm3530.c
+++ b/drivers/leds/leds-lm3530.c
@@ -113,6 +113,18 @@ struct lm3530_data {
113 bool enable; 113 bool enable;
114}; 114};
115 115
116/*
117 * struct lm3530_als_data
118 * @config : value of ALS configuration register
119 * @imp_sel : value of ALS resistor select register
120 * @zone : values of ALS ZB(Zone Boundary) registers
121 */
122struct lm3530_als_data {
123 u8 config;
124 u8 imp_sel;
125 u8 zones[LM3530_ALS_ZB_MAX];
126};
127
116static const u8 lm3530_reg[LM3530_REG_MAX] = { 128static const u8 lm3530_reg[LM3530_REG_MAX] = {
117 LM3530_GEN_CONFIG, 129 LM3530_GEN_CONFIG,
118 LM3530_ALS_CONFIG, 130 LM3530_ALS_CONFIG,
@@ -141,29 +153,65 @@ static int lm3530_get_mode_from_str(const char *str)
141 return -1; 153 return -1;
142} 154}
143 155
156static void lm3530_als_configure(struct lm3530_platform_data *pdata,
157 struct lm3530_als_data *als)
158{
159 int i;
160 u32 als_vmin, als_vmax, als_vstep;
161
162 if (pdata->als_vmax == 0) {
163 pdata->als_vmin = 0;
164 pdata->als_vmax = LM3530_ALS_WINDOW_mV;
165 }
166
167 als_vmin = pdata->als_vmin;
168 als_vmax = pdata->als_vmax;
169
170 if ((als_vmax - als_vmin) > LM3530_ALS_WINDOW_mV)
171 pdata->als_vmax = als_vmax = als_vmin + LM3530_ALS_WINDOW_mV;
172
173 /* n zone boundary makes n+1 zones */
174 als_vstep = (als_vmax - als_vmin) / (LM3530_ALS_ZB_MAX + 1);
175
176 for (i = 0; i < LM3530_ALS_ZB_MAX; i++)
177 als->zones[i] = (((als_vmin + LM3530_ALS_OFFSET_mV) +
178 als_vstep + (i * als_vstep)) * LED_FULL) / 1000;
179
180 als->config =
181 (pdata->als_avrg_time << LM3530_ALS_AVG_TIME_SHIFT) |
182 (LM3530_ENABLE_ALS) |
183 (pdata->als_input_mode << LM3530_ALS_SEL_SHIFT);
184
185 als->imp_sel =
186 (pdata->als1_resistor_sel << LM3530_ALS1_IMP_SHIFT) |
187 (pdata->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
188}
189
144static int lm3530_init_registers(struct lm3530_data *drvdata) 190static int lm3530_init_registers(struct lm3530_data *drvdata)
145{ 191{
146 int ret = 0; 192 int ret = 0;
147 int i; 193 int i;
148 u8 gen_config; 194 u8 gen_config;
149 u8 als_config = 0;
150 u8 brt_ramp; 195 u8 brt_ramp;
151 u8 als_imp_sel = 0;
152 u8 brightness; 196 u8 brightness;
153 u8 reg_val[LM3530_REG_MAX]; 197 u8 reg_val[LM3530_REG_MAX];
154 u8 zones[LM3530_ALS_ZB_MAX];
155 u32 als_vmin, als_vmax, als_vstep;
156 struct lm3530_platform_data *pdata = drvdata->pdata; 198 struct lm3530_platform_data *pdata = drvdata->pdata;
157 struct i2c_client *client = drvdata->client; 199 struct i2c_client *client = drvdata->client;
158 struct lm3530_pwm_data *pwm = &pdata->pwm_data; 200 struct lm3530_pwm_data *pwm = &pdata->pwm_data;
201 struct lm3530_als_data als;
202
203 memset(&als, 0, sizeof(struct lm3530_als_data));
159 204
160 gen_config = (pdata->brt_ramp_law << LM3530_RAMP_LAW_SHIFT) | 205 gen_config = (pdata->brt_ramp_law << LM3530_RAMP_LAW_SHIFT) |
161 ((pdata->max_current & 7) << LM3530_MAX_CURR_SHIFT); 206 ((pdata->max_current & 7) << LM3530_MAX_CURR_SHIFT);
162 207
163 switch (drvdata->mode) { 208 switch (drvdata->mode) {
164 case LM3530_BL_MODE_MANUAL: 209 case LM3530_BL_MODE_MANUAL:
210 gen_config |= LM3530_ENABLE_I2C;
211 break;
165 case LM3530_BL_MODE_ALS: 212 case LM3530_BL_MODE_ALS:
166 gen_config |= LM3530_ENABLE_I2C; 213 gen_config |= LM3530_ENABLE_I2C;
214 lm3530_als_configure(pdata, &als);
167 break; 215 break;
168 case LM3530_BL_MODE_PWM: 216 case LM3530_BL_MODE_PWM:
169 gen_config |= LM3530_ENABLE_PWM | LM3530_ENABLE_PWM_SIMPLE | 217 gen_config |= LM3530_ENABLE_PWM | LM3530_ENABLE_PWM_SIMPLE |
@@ -171,38 +219,6 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
171 break; 219 break;
172 } 220 }
173 221
174 if (drvdata->mode == LM3530_BL_MODE_ALS) {
175 if (pdata->als_vmax == 0) {
176 pdata->als_vmin = 0;
177 pdata->als_vmax = LM3530_ALS_WINDOW_mV;
178 }
179
180 als_vmin = pdata->als_vmin;
181 als_vmax = pdata->als_vmax;
182
183 if ((als_vmax - als_vmin) > LM3530_ALS_WINDOW_mV)
184 pdata->als_vmax = als_vmax =
185 als_vmin + LM3530_ALS_WINDOW_mV;
186
187 /* n zone boundary makes n+1 zones */
188 als_vstep = (als_vmax - als_vmin) / (LM3530_ALS_ZB_MAX + 1);
189
190 for (i = 0; i < LM3530_ALS_ZB_MAX; i++)
191 zones[i] = (((als_vmin + LM3530_ALS_OFFSET_mV) +
192 als_vstep + (i * als_vstep)) * LED_FULL)
193 / 1000;
194
195 als_config =
196 (pdata->als_avrg_time << LM3530_ALS_AVG_TIME_SHIFT) |
197 (LM3530_ENABLE_ALS) |
198 (pdata->als_input_mode << LM3530_ALS_SEL_SHIFT);
199
200 als_imp_sel =
201 (pdata->als1_resistor_sel << LM3530_ALS1_IMP_SHIFT) |
202 (pdata->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT);
203
204 }
205
206 brt_ramp = (pdata->brt_ramp_fall << LM3530_BRT_RAMP_FALL_SHIFT) | 222 brt_ramp = (pdata->brt_ramp_fall << LM3530_BRT_RAMP_FALL_SHIFT) |
207 (pdata->brt_ramp_rise << LM3530_BRT_RAMP_RISE_SHIFT); 223 (pdata->brt_ramp_rise << LM3530_BRT_RAMP_RISE_SHIFT);
208 224
@@ -215,14 +231,14 @@ static int lm3530_init_registers(struct lm3530_data *drvdata)
215 brightness = drvdata->led_dev.max_brightness; 231 brightness = drvdata->led_dev.max_brightness;
216 232
217 reg_val[0] = gen_config; /* LM3530_GEN_CONFIG */ 233 reg_val[0] = gen_config; /* LM3530_GEN_CONFIG */
218 reg_val[1] = als_config; /* LM3530_ALS_CONFIG */ 234 reg_val[1] = als.config; /* LM3530_ALS_CONFIG */
219 reg_val[2] = brt_ramp; /* LM3530_BRT_RAMP_RATE */ 235 reg_val[2] = brt_ramp; /* LM3530_BRT_RAMP_RATE */
220 reg_val[3] = als_imp_sel; /* LM3530_ALS_IMP_SELECT */ 236 reg_val[3] = als.imp_sel; /* LM3530_ALS_IMP_SELECT */
221 reg_val[4] = brightness; /* LM3530_BRT_CTRL_REG */ 237 reg_val[4] = brightness; /* LM3530_BRT_CTRL_REG */
222 reg_val[5] = zones[0]; /* LM3530_ALS_ZB0_REG */ 238 reg_val[5] = als.zones[0]; /* LM3530_ALS_ZB0_REG */
223 reg_val[6] = zones[1]; /* LM3530_ALS_ZB1_REG */ 239 reg_val[6] = als.zones[1]; /* LM3530_ALS_ZB1_REG */
224 reg_val[7] = zones[2]; /* LM3530_ALS_ZB2_REG */ 240 reg_val[7] = als.zones[2]; /* LM3530_ALS_ZB2_REG */
225 reg_val[8] = zones[3]; /* LM3530_ALS_ZB3_REG */ 241 reg_val[8] = als.zones[3]; /* LM3530_ALS_ZB3_REG */
226 reg_val[9] = LM3530_DEF_ZT_0; /* LM3530_ALS_Z0T_REG */ 242 reg_val[9] = LM3530_DEF_ZT_0; /* LM3530_ALS_Z0T_REG */
227 reg_val[10] = LM3530_DEF_ZT_1; /* LM3530_ALS_Z1T_REG */ 243 reg_val[10] = LM3530_DEF_ZT_1; /* LM3530_ALS_Z1T_REG */
228 reg_val[11] = LM3530_DEF_ZT_2; /* LM3530_ALS_Z2T_REG */ 244 reg_val[11] = LM3530_DEF_ZT_2; /* LM3530_ALS_Z2T_REG */
diff --git a/drivers/leds/leds-lm3533.c b/drivers/leds/leds-lm3533.c
new file mode 100644
index 000000000000..f56b6e7ffdac
--- /dev/null
+++ b/drivers/leds/leds-lm3533.c
@@ -0,0 +1,785 @@
1/*
2 * leds-lm3533.c -- LM3533 LED driver
3 *
4 * Copyright (C) 2011-2012 Texas Instruments
5 *
6 * Author: Johan Hovold <jhovold@gmail.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/leds.h>
17#include <linux/mfd/core.h>
18#include <linux/mutex.h>
19#include <linux/platform_device.h>
20#include <linux/slab.h>
21#include <linux/workqueue.h>
22
23#include <linux/mfd/lm3533.h>
24
25
/* The LM3533 LEDs sit on the low-voltage control banks 2..5. */
#define LM3533_LVCTRLBANK_MIN		2
#define LM3533_LVCTRLBANK_MAX		5
#define LM3533_LVCTRLBANK_COUNT		4
/* Highest valid pattern-generator rise/fall time code (3-bit field). */
#define LM3533_RISEFALLTIME_MAX		7
/* Low-voltage banks may map to ALS channel 1 or 2. */
#define LM3533_ALS_CHANNEL_LV_MIN	1
#define LM3533_ALS_CHANNEL_LV_MAX	2

/* Register addresses; *_BASE registers are indexed by LED/pattern id. */
#define LM3533_REG_CTRLBANK_BCONF_BASE		0x1b
#define LM3533_REG_PATTERN_ENABLE		0x28
#define LM3533_REG_PATTERN_LOW_TIME_BASE	0x71
#define LM3533_REG_PATTERN_HIGH_TIME_BASE	0x72
#define LM3533_REG_PATTERN_RISETIME_BASE	0x74
#define LM3533_REG_PATTERN_FALLTIME_BASE	0x75

/* Address stride between consecutive pattern-generator register banks. */
#define LM3533_REG_PATTERN_STEP			0x10

/* Bit masks within a bank-configuration (BCONF) register. */
#define LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK		0x04
#define LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK		0x02
#define LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK	0x01

/* Bit in lm3533_led.flags caching the pattern-generator enable state. */
#define LM3533_LED_FLAG_PATTERN_ENABLE		1
48
/* Per-LED driver state for one low-voltage control bank. */
struct lm3533_led {
	struct lm3533 *lm3533;		/* parent MFD chip */
	struct lm3533_ctrlbank cb;	/* control bank backing this LED */
	struct led_classdev cdev;	/* LED class device */
	int id;				/* LED index: 0..LM3533_LVCTRLBANK_COUNT-1 */

	struct mutex mutex;		/* serialises flags check-and-update */
	unsigned long flags;		/* LM3533_LED_FLAG_* bits */

	struct work_struct work;	/* deferred brightness update */
	u8 new_brightness;		/* brightness to apply from work */
};
61
62
/* Map a LED class device back to its containing lm3533_led. */
static inline struct lm3533_led *to_lm3533_led(struct led_classdev *cdev)
{
	return container_of(cdev, struct lm3533_led, cdev);
}
67
68static inline int lm3533_led_get_ctrlbank_id(struct lm3533_led *led)
69{
70 return led->id + 2;
71}
72
/* Per-LED register address: base register plus the LED index. */
static inline u8 lm3533_led_get_lv_reg(struct lm3533_led *led, u8 base)
{
	return base + led->id;
}
77
/* Pattern generator index; one generator per LED, indexed by LED id. */
static inline u8 lm3533_led_get_pattern(struct lm3533_led *led)
{
	return led->id;
}
82
/*
 * Pattern-generator register address: each generator's register bank is
 * LM3533_REG_PATTERN_STEP bytes after the previous one.
 */
static inline u8 lm3533_led_get_pattern_reg(struct lm3533_led *led,
							u8 base)
{
	return base + lm3533_led_get_pattern(led) * LM3533_REG_PATTERN_STEP;
}
88
/*
 * Enable or disable the pattern generator associated with this LED.
 *
 * The LM3533_LED_FLAG_PATTERN_ENABLE bit in led->flags caches the current
 * hardware state so redundant register updates are skipped; led->mutex
 * serialises the check-and-update of that flag.
 *
 * Returns 0 on success (or if already in the requested state), or a
 * negative error code if the register update fails.
 */
static int lm3533_led_pattern_enable(struct lm3533_led *led, int enable)
{
	u8 mask;
	u8 val;
	int pattern;
	int state;
	int ret = 0;

	dev_dbg(led->cdev.dev, "%s - %d\n", __func__, enable);

	mutex_lock(&led->mutex);

	/* Nothing to do if we are already in the requested state. */
	state = test_bit(LM3533_LED_FLAG_PATTERN_ENABLE, &led->flags);
	if ((enable && state) || (!enable && !state))
		goto out;

	/* Each pattern's enable bit sits at every other bit position. */
	pattern = lm3533_led_get_pattern(led);
	mask = 1 << (2 * pattern);

	if (enable)
		val = mask;
	else
		val = 0;

	ret = lm3533_update(led->lm3533, LM3533_REG_PATTERN_ENABLE, val, mask);
	if (ret) {
		dev_err(led->cdev.dev, "failed to enable pattern %d (%d)\n",
							pattern, enable);
		goto out;
	}

	/* Only reached on an actual state change, so toggling is correct. */
	__change_bit(LM3533_LED_FLAG_PATTERN_ENABLE, &led->flags);
out:
	mutex_unlock(&led->mutex);

	return ret;
}
126
/*
 * Deferred brightness update (I2C access must happen in process
 * context). Turning the LED fully off also stops any active blink
 * pattern.
 */
static void lm3533_led_work(struct work_struct *work)
{
	struct lm3533_led *led = container_of(work, struct lm3533_led, work);

	dev_dbg(led->cdev.dev, "%s - %u\n", __func__, led->new_brightness);

	if (led->new_brightness == 0)
		lm3533_led_pattern_enable(led, 0);	/* disable blink */

	lm3533_ctrlbank_set_brightness(&led->cb, led->new_brightness);
}
138
139static void lm3533_led_set(struct led_classdev *cdev,
140 enum led_brightness value)
141{
142 struct lm3533_led *led = to_lm3533_led(cdev);
143
144 dev_dbg(led->cdev.dev, "%s - %d\n", __func__, value);
145
146 led->new_brightness = value;
147 schedule_work(&led->work);
148}
149
/*
 * LED class brightness_get callback: read back the current brightness
 * from the control bank. On read failure the negative error code is
 * returned through the enum return type (LED-core convention here;
 * NOTE(review): callers treat negative values as errors — confirm).
 */
static enum led_brightness lm3533_led_get(struct led_classdev *cdev)
{
	struct lm3533_led *led = to_lm3533_led(cdev);
	u8 val;
	int ret;

	ret = lm3533_ctrlbank_get_brightness(&led->cb, &val);
	if (ret)
		return ret;

	dev_dbg(led->cdev.dev, "%s - %u\n", __func__, val);

	return val;
}
164
/* Pattern generator defines (delays in us). */
/* Register-value boundaries of the three delay groups. */
#define LM3533_LED_DELAY1_VMIN	0x00
#define LM3533_LED_DELAY2_VMIN	0x3d
#define LM3533_LED_DELAY3_VMIN	0x80

#define LM3533_LED_DELAY1_VMAX	(LM3533_LED_DELAY2_VMIN - 1)
#define LM3533_LED_DELAY2_VMAX	(LM3533_LED_DELAY3_VMIN - 1)
#define LM3533_LED_DELAY3_VMAX	0xff

/* Time boundaries (us) corresponding to the VMIN/VMAX codes above. */
#define LM3533_LED_DELAY1_TMIN	16384U
#define LM3533_LED_DELAY2_TMIN	1130496U
#define LM3533_LED_DELAY3_TMIN	10305536U

#define LM3533_LED_DELAY1_TMAX	999424U
#define LM3533_LED_DELAY2_TMAX	9781248U
#define LM3533_LED_DELAY3_TMAX	76890112U

/* t_step = (t_max - t_min) / (v_max - v_min) */
#define LM3533_LED_DELAY1_TSTEP	16384
#define LM3533_LED_DELAY2_TSTEP	131072
#define LM3533_LED_DELAY3_TSTEP	524288

/* Delay limits for hardware accelerated blinking (in ms). */
#define LM3533_LED_DELAY_ON_MAX \
	((LM3533_LED_DELAY2_TMAX + LM3533_LED_DELAY2_TSTEP / 2) / 1000)
#define LM3533_LED_DELAY_OFF_MAX \
	((LM3533_LED_DELAY3_TMAX + LM3533_LED_DELAY3_TSTEP / 2) / 1000)
192
193/*
194 * Returns linear map of *t from [t_min,t_max] to [v_min,v_max] with a step
195 * size of t_step, where
196 *
197 * t_step = (t_max - t_min) / (v_max - v_min)
198 *
199 * and updates *t to reflect the mapped value.
200 */
201static u8 time_to_val(unsigned *t, unsigned t_min, unsigned t_step,
202 u8 v_min, u8 v_max)
203{
204 unsigned val;
205
206 val = (*t + t_step / 2 - t_min) / t_step + v_min;
207
208 *t = t_step * (val - v_min) + t_min;
209
210 return (u8)val;
211}
212
/*
 * Returns time code corresponding to *delay (in ms) and updates *delay to
 * reflect actual hardware delay.
 *
 * Hardware supports 256 discrete delay times, divided into three groups with
 * the following ranges and step-sizes:
 *
 *	[   16,   999]	[0x00, 0x3e]	step  16 ms
 *	[ 1130,  9781]	[0x3d, 0x7f]	step 131 ms
 *	[10306, 76890]	[0x80, 0xff]	step 524 ms
 *
 * Note that delay group 3 is only available for delay_off.
 */
static u8 lm3533_led_get_hw_delay(unsigned *delay)
{
	unsigned t;
	u8 val;

	/* Work in microseconds to match the group boundary constants. */
	t = *delay * 1000;

	/* Pick the group whose range midpoint boundary t falls beyond. */
	if (t >= (LM3533_LED_DELAY2_TMAX + LM3533_LED_DELAY3_TMIN) / 2) {
		t = clamp(t, LM3533_LED_DELAY3_TMIN, LM3533_LED_DELAY3_TMAX);
		val = time_to_val(&t,	LM3533_LED_DELAY3_TMIN,
					LM3533_LED_DELAY3_TSTEP,
					LM3533_LED_DELAY3_VMIN,
					LM3533_LED_DELAY3_VMAX);
	} else if (t >= (LM3533_LED_DELAY1_TMAX + LM3533_LED_DELAY2_TMIN) / 2) {
		t = clamp(t, LM3533_LED_DELAY2_TMIN, LM3533_LED_DELAY2_TMAX);
		val = time_to_val(&t,	LM3533_LED_DELAY2_TMIN,
					LM3533_LED_DELAY2_TSTEP,
					LM3533_LED_DELAY2_VMIN,
					LM3533_LED_DELAY2_VMAX);
	} else {
		t = clamp(t, LM3533_LED_DELAY1_TMIN, LM3533_LED_DELAY1_TMAX);
		val = time_to_val(&t,	LM3533_LED_DELAY1_TMIN,
					LM3533_LED_DELAY1_TSTEP,
					LM3533_LED_DELAY1_VMIN,
					LM3533_LED_DELAY1_VMAX);
	}

	/* Convert the quantized delay back to ms, rounding to nearest. */
	*delay = (t + 500) / 1000;

	return val;
}
257
258/*
259 * Set delay register base to *delay (in ms) and update *delay to reflect
260 * actual hardware delay used.
261 */
262static u8 lm3533_led_delay_set(struct lm3533_led *led, u8 base,
263 unsigned long *delay)
264{
265 unsigned t;
266 u8 val;
267 u8 reg;
268 int ret;
269
270 t = (unsigned)*delay;
271
272 /* Delay group 3 is only available for low time (delay off). */
273 if (base != LM3533_REG_PATTERN_LOW_TIME_BASE)
274 t = min(t, LM3533_LED_DELAY2_TMAX / 1000);
275
276 val = lm3533_led_get_hw_delay(&t);
277
278 dev_dbg(led->cdev.dev, "%s - %lu: %u (0x%02x)\n", __func__,
279 *delay, t, val);
280 reg = lm3533_led_get_pattern_reg(led, base);
281 ret = lm3533_write(led->lm3533, reg, val);
282 if (ret)
283 dev_err(led->cdev.dev, "failed to set delay (%02x)\n", reg);
284
285 *delay = t;
286
287 return ret;
288}
289
/* Program the blink on-time (pattern high time); *t updated in place. */
static int lm3533_led_delay_on_set(struct lm3533_led *led, unsigned long *t)
{
	return lm3533_led_delay_set(led, LM3533_REG_PATTERN_HIGH_TIME_BASE, t);
}
294
/* Program the blink off-time (pattern low time); *t updated in place. */
static int lm3533_led_delay_off_set(struct lm3533_led *led, unsigned long *t)
{
	return lm3533_led_delay_set(led, LM3533_REG_PATTERN_LOW_TIME_BASE, t);
}
299
/*
 * LED class blink_set callback: configure hardware-accelerated blinking.
 *
 * A (0, 0) request means "pick a default" per the LED-trigger convention
 * and is mapped to 500 ms on / 500 ms off. Delays beyond what the
 * pattern generator supports are rejected with -EINVAL; the delays
 * actually programmed are written back through the pointers.
 */
static int lm3533_led_blink_set(struct led_classdev *cdev,
				unsigned long *delay_on,
				unsigned long *delay_off)
{
	struct lm3533_led *led = to_lm3533_led(cdev);
	int ret;

	dev_dbg(led->cdev.dev, "%s - on = %lu, off = %lu\n", __func__,
							*delay_on, *delay_off);

	if (*delay_on > LM3533_LED_DELAY_ON_MAX ||
					*delay_off > LM3533_LED_DELAY_OFF_MAX)
		return -EINVAL;

	if (*delay_on == 0 && *delay_off == 0) {
		*delay_on = 500;
		*delay_off = 500;
	}

	ret = lm3533_led_delay_on_set(led, delay_on);
	if (ret)
		return ret;

	ret = lm3533_led_delay_off_set(led, delay_off);
	if (ret)
		return ret;

	return lm3533_led_pattern_enable(led, 1);
}
329
330static ssize_t show_id(struct device *dev,
331 struct device_attribute *attr, char *buf)
332{
333 struct led_classdev *led_cdev = dev_get_drvdata(dev);
334 struct lm3533_led *led = to_lm3533_led(led_cdev);
335
336 return scnprintf(buf, PAGE_SIZE, "%d\n", led->id);
337}
338
/*
 * Pattern generator rise/fall times:
 *
 *   0 - 2048 us (default)
 *   1 - 262 ms
 *   2 - 524 ms
 *   3 - 1.049 s
 *   4 - 2.097 s
 *   5 - 4.194 s
 *   6 - 8.389 s
 *   7 - 16.78 s
 */
/* Common sysfs show helper: read the rise or fall time code at @base. */
static ssize_t show_risefalltime(struct device *dev,
					struct device_attribute *attr,
					char *buf, u8 base)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct lm3533_led *led = to_lm3533_led(led_cdev);
	ssize_t ret;
	u8 reg;
	u8 val;

	reg = lm3533_led_get_pattern_reg(led, base);
	ret = lm3533_read(led->lm3533, reg, &val);
	if (ret)
		return ret;

	return scnprintf(buf, PAGE_SIZE, "%x\n", val);
}
368
/* sysfs: show the pattern-generator rise-time code. */
static ssize_t show_risetime(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return show_risefalltime(dev, attr, buf,
					LM3533_REG_PATTERN_RISETIME_BASE);
}
375
/* sysfs: show the pattern-generator fall-time code. */
static ssize_t show_falltime(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	return show_risefalltime(dev, attr, buf,
					LM3533_REG_PATTERN_FALLTIME_BASE);
}
382
383static ssize_t store_risefalltime(struct device *dev,
384 struct device_attribute *attr,
385 const char *buf, size_t len, u8 base)
386{
387 struct led_classdev *led_cdev = dev_get_drvdata(dev);
388 struct lm3533_led *led = to_lm3533_led(led_cdev);
389 u8 val;
390 u8 reg;
391 int ret;
392
393 if (kstrtou8(buf, 0, &val) || val > LM3533_RISEFALLTIME_MAX)
394 return -EINVAL;
395
396 reg = lm3533_led_get_pattern_reg(led, base);
397 ret = lm3533_write(led->lm3533, reg, val);
398 if (ret)
399 return ret;
400
401 return len;
402}
403
/* sysfs: set the pattern-generator rise-time code. */
static ssize_t store_risetime(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	return store_risefalltime(dev, attr, buf, len,
					LM3533_REG_PATTERN_RISETIME_BASE);
}
411
/* sysfs: set the pattern-generator fall-time code. */
static ssize_t store_falltime(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	return store_risefalltime(dev, attr, buf, len,
					LM3533_REG_PATTERN_FALLTIME_BASE);
}
419
/*
 * sysfs: show the ALS channel (1 or 2) this LED's bank is mapped to.
 * The hardware stores channel - 1 in the BCONF channel bit.
 */
static ssize_t show_als_channel(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct lm3533_led *led = to_lm3533_led(led_cdev);
	unsigned channel;
	u8 reg;
	u8 val;
	int ret;

	reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
	ret = lm3533_read(led->lm3533, reg, &val);
	if (ret)
		return ret;

	channel = (val & LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK) + 1;

	return scnprintf(buf, PAGE_SIZE, "%u\n", channel);
}
439
440static ssize_t store_als_channel(struct device *dev,
441 struct device_attribute *attr,
442 const char *buf, size_t len)
443{
444 struct led_classdev *led_cdev = dev_get_drvdata(dev);
445 struct lm3533_led *led = to_lm3533_led(led_cdev);
446 unsigned channel;
447 u8 reg;
448 u8 val;
449 u8 mask;
450 int ret;
451
452 if (kstrtouint(buf, 0, &channel))
453 return -EINVAL;
454
455 if (channel < LM3533_ALS_CHANNEL_LV_MIN ||
456 channel > LM3533_ALS_CHANNEL_LV_MAX)
457 return -EINVAL;
458
459 reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
460 mask = LM3533_REG_CTRLBANK_BCONF_ALS_CHANNEL_MASK;
461 val = channel - 1;
462
463 ret = lm3533_update(led->lm3533, reg, val, mask);
464 if (ret)
465 return ret;
466
467 return len;
468}
469
/* sysfs: show whether ALS control is enabled for this LED's bank. */
static ssize_t show_als_en(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct lm3533_led *led = to_lm3533_led(led_cdev);
	bool enable;
	u8 reg;
	u8 val;
	int ret;

	reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
	ret = lm3533_read(led->lm3533, reg, &val);
	if (ret)
		return ret;

	enable = val & LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK;

	return scnprintf(buf, PAGE_SIZE, "%d\n", enable);
}
489
/*
 * sysfs: enable (any non-zero value) or disable ALS control for this
 * LED's bank.
 */
static ssize_t store_als_en(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct lm3533_led *led = to_lm3533_led(led_cdev);
	unsigned enable;
	u8 reg;
	u8 mask;
	u8 val;
	int ret;

	if (kstrtouint(buf, 0, &enable))
		return -EINVAL;

	reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
	mask = LM3533_REG_CTRLBANK_BCONF_ALS_EN_MASK;

	if (enable)
		val = mask;
	else
		val = 0;

	ret = lm3533_update(led->lm3533, reg, val, mask);
	if (ret)
		return ret;

	return len;
}
519
520static ssize_t show_linear(struct device *dev,
521 struct device_attribute *attr, char *buf)
522{
523 struct led_classdev *led_cdev = dev_get_drvdata(dev);
524 struct lm3533_led *led = to_lm3533_led(led_cdev);
525 u8 reg;
526 u8 val;
527 int linear;
528 int ret;
529
530 reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
531 ret = lm3533_read(led->lm3533, reg, &val);
532 if (ret)
533 return ret;
534
535 if (val & LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK)
536 linear = 1;
537 else
538 linear = 0;
539
540 return scnprintf(buf, PAGE_SIZE, "%x\n", linear);
541}
542
/*
 * sysfs: select linear (non-zero) or exponential (0) brightness
 * mapping for this LED's bank.
 */
static ssize_t store_linear(struct device *dev,
					struct device_attribute *attr,
					const char *buf, size_t len)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct lm3533_led *led = to_lm3533_led(led_cdev);
	unsigned long linear;
	u8 reg;
	u8 mask;
	u8 val;
	int ret;

	if (kstrtoul(buf, 0, &linear))
		return -EINVAL;

	reg = lm3533_led_get_lv_reg(led, LM3533_REG_CTRLBANK_BCONF_BASE);
	mask = LM3533_REG_CTRLBANK_BCONF_MAPPING_MASK;

	if (linear)
		val = mask;
	else
		val = 0;

	ret = lm3533_update(led->lm3533, reg, val, mask);
	if (ret)
		return ret;

	return len;
}
572
/* sysfs: show the control bank's PWM-configuration register value. */
static ssize_t show_pwm(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct led_classdev *led_cdev = dev_get_drvdata(dev);
	struct lm3533_led *led = to_lm3533_led(led_cdev);
	u8 val;
	int ret;

	ret = lm3533_ctrlbank_get_pwm(&led->cb, &val);
	if (ret)
		return ret;

	return scnprintf(buf, PAGE_SIZE, "%u\n", val);
}
588
589static ssize_t store_pwm(struct device *dev,
590 struct device_attribute *attr,
591 const char *buf, size_t len)
592{
593 struct led_classdev *led_cdev = dev_get_drvdata(dev);
594 struct lm3533_led *led = to_lm3533_led(led_cdev);
595 u8 val;
596 int ret;
597
598 if (kstrtou8(buf, 0, &val))
599 return -EINVAL;
600
601 ret = lm3533_ctrlbank_set_pwm(&led->cb, val);
602 if (ret)
603 return ret;
604
605 return len;
606}
607
/* Per-LED sysfs attributes; ALS attributes are hidden by is_visible
 * on devices without an ALS block. */
static LM3533_ATTR_RW(als_channel);
static LM3533_ATTR_RW(als_en);
static LM3533_ATTR_RW(falltime);
static LM3533_ATTR_RO(id);
static LM3533_ATTR_RW(linear);
static LM3533_ATTR_RW(pwm);
static LM3533_ATTR_RW(risetime);

static struct attribute *lm3533_led_attributes[] = {
	&dev_attr_als_channel.attr,
	&dev_attr_als_en.attr,
	&dev_attr_falltime.attr,
	&dev_attr_id.attr,
	&dev_attr_linear.attr,
	&dev_attr_pwm.attr,
	&dev_attr_risetime.attr,
	NULL,
};
626
627static umode_t lm3533_led_attr_is_visible(struct kobject *kobj,
628 struct attribute *attr, int n)
629{
630 struct device *dev = container_of(kobj, struct device, kobj);
631 struct led_classdev *led_cdev = dev_get_drvdata(dev);
632 struct lm3533_led *led = to_lm3533_led(led_cdev);
633 umode_t mode = attr->mode;
634
635 if (attr == &dev_attr_als_channel.attr ||
636 attr == &dev_attr_als_en.attr) {
637 if (!led->lm3533->have_als)
638 mode = 0;
639 }
640
641 return mode;
642};
643
/* Attribute group registered on the LED class device at probe. */
static struct attribute_group lm3533_led_attribute_group = {
	.is_visible	= lm3533_led_attr_is_visible,
	.attrs		= lm3533_led_attributes
};
648
/*
 * Apply platform-data defaults (maximum current and PWM configuration)
 * to the LED's control bank. Returns 0 or a negative error code.
 */
static int __devinit lm3533_led_setup(struct lm3533_led *led,
					struct lm3533_led_platform_data *pdata)
{
	int ret;

	ret = lm3533_ctrlbank_set_max_current(&led->cb, pdata->max_current);
	if (ret)
		return ret;

	return lm3533_ctrlbank_set_pwm(&led->cb, pdata->pwm);
}
660
/*
 * Register one low-voltage control-bank LED.
 *
 * pdev->id selects which of the four low-voltage banks this LED uses
 * and must lie in [0, LM3533_LVCTRLBANK_COUNT). On failure all
 * intermediate registrations are unwound via the goto labels below.
 */
static int __devinit lm3533_led_probe(struct platform_device *pdev)
{
	struct lm3533 *lm3533;
	struct lm3533_led_platform_data *pdata;
	struct lm3533_led *led;
	int ret;

	dev_dbg(&pdev->dev, "%s\n", __func__);

	lm3533 = dev_get_drvdata(pdev->dev.parent);
	if (!lm3533)
		return -EINVAL;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data\n");
		return -EINVAL;
	}

	if (pdev->id < 0 || pdev->id >= LM3533_LVCTRLBANK_COUNT) {
		dev_err(&pdev->dev, "illegal LED id %d\n", pdev->id);
		return -EINVAL;
	}

	led = devm_kzalloc(&pdev->dev, sizeof(*led), GFP_KERNEL);
	if (!led)
		return -ENOMEM;

	led->lm3533 = lm3533;
	led->cdev.name = pdata->name;
	led->cdev.default_trigger = pdata->default_trigger;
	led->cdev.brightness_set = lm3533_led_set;
	led->cdev.brightness_get = lm3533_led_get;
	led->cdev.blink_set = lm3533_led_blink_set;
	led->cdev.brightness = LED_OFF;
	led->id = pdev->id;

	mutex_init(&led->mutex);
	INIT_WORK(&led->work, lm3533_led_work);

	/* The class framework makes a callback to get brightness during
	 * registration so use parent device (for error reporting) until
	 * registered.
	 */
	led->cb.lm3533 = lm3533;
	led->cb.id = lm3533_led_get_ctrlbank_id(led);
	led->cb.dev = lm3533->dev;

	platform_set_drvdata(pdev, led);

	ret = led_classdev_register(pdev->dev.parent, &led->cdev);
	if (ret) {
		dev_err(&pdev->dev, "failed to register LED %d\n", pdev->id);
		return ret;
	}

	/* Class device now exists; report further errors against it. */
	led->cb.dev = led->cdev.dev;

	ret = sysfs_create_group(&led->cdev.dev->kobj,
						&lm3533_led_attribute_group);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to create sysfs attributes\n");
		goto err_unregister;
	}

	ret = lm3533_led_setup(led, pdata);
	if (ret)
		goto err_sysfs_remove;

	ret = lm3533_ctrlbank_enable(&led->cb);
	if (ret)
		goto err_sysfs_remove;

	return 0;

err_sysfs_remove:
	sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
err_unregister:
	led_classdev_unregister(&led->cdev);
	flush_work_sync(&led->work);

	return ret;
}
744
/*
 * Tear down one LED: disable the bank, remove the sysfs attributes,
 * unregister the class device and flush any pending brightness work.
 */
static int __devexit lm3533_led_remove(struct platform_device *pdev)
{
	struct lm3533_led *led = platform_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);

	lm3533_ctrlbank_disable(&led->cb);
	sysfs_remove_group(&led->cdev.dev->kobj, &lm3533_led_attribute_group);
	led_classdev_unregister(&led->cdev);
	flush_work_sync(&led->work);

	return 0;
}
758
759static void lm3533_led_shutdown(struct platform_device *pdev)
760{
761
762 struct lm3533_led *led = platform_get_drvdata(pdev);
763
764 dev_dbg(&pdev->dev, "%s\n", __func__);
765
766 lm3533_ctrlbank_disable(&led->cb);
767 lm3533_led_set(&led->cdev, LED_OFF); /* disable blink */
768 flush_work_sync(&led->work);
769}
770
/* Platform driver for the "lm3533-leds" cells created by the MFD core. */
static struct platform_driver lm3533_led_driver = {
	.driver = {
		.name = "lm3533-leds",
		.owner = THIS_MODULE,
	},
	.probe		= lm3533_led_probe,
	.remove		= __devexit_p(lm3533_led_remove),
	.shutdown	= lm3533_led_shutdown,
};
module_platform_driver(lm3533_led_driver);

MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
MODULE_DESCRIPTION("LM3533 LED driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:lm3533-leds");
diff --git a/drivers/leds/leds-lp5521.c b/drivers/leds/leds-lp5521.c
index 410a723b8691..23815624f35e 100644
--- a/drivers/leds/leds-lp5521.c
+++ b/drivers/leds/leds-lp5521.c
@@ -193,9 +193,14 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
193 193
194 /* move current engine to direct mode and remember the state */ 194 /* move current engine to direct mode and remember the state */
195 ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT); 195 ret = lp5521_set_engine_mode(eng, LP5521_CMD_DIRECT);
196 if (ret)
197 return ret;
198
196 /* Mode change requires min 500 us delay. 1 - 2 ms with margin */ 199 /* Mode change requires min 500 us delay. 1 - 2 ms with margin */
197 usleep_range(1000, 2000); 200 usleep_range(1000, 2000);
198 ret |= lp5521_read(client, LP5521_REG_OP_MODE, &mode); 201 ret = lp5521_read(client, LP5521_REG_OP_MODE, &mode);
202 if (ret)
203 return ret;
199 204
200 /* For loading, all the engines to load mode */ 205 /* For loading, all the engines to load mode */
201 lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT); 206 lp5521_write(client, LP5521_REG_OP_MODE, LP5521_CMD_DIRECT);
@@ -211,8 +216,7 @@ static int lp5521_load_program(struct lp5521_engine *eng, const u8 *pattern)
211 LP5521_PROG_MEM_SIZE, 216 LP5521_PROG_MEM_SIZE,
212 pattern); 217 pattern);
213 218
214 ret |= lp5521_write(client, LP5521_REG_OP_MODE, mode); 219 return lp5521_write(client, LP5521_REG_OP_MODE, mode);
215 return ret;
216} 220}
217 221
218static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr) 222static int lp5521_set_led_current(struct lp5521_chip *chip, int led, u8 curr)
@@ -785,7 +789,7 @@ static int __devinit lp5521_probe(struct i2c_client *client,
785 * LP5521_REG_ENABLE register will not have any effect - strange! 789 * LP5521_REG_ENABLE register will not have any effect - strange!
786 */ 790 */
787 ret = lp5521_read(client, LP5521_REG_R_CURRENT, &buf); 791 ret = lp5521_read(client, LP5521_REG_R_CURRENT, &buf);
788 if (buf != LP5521_REG_R_CURR_DEFAULT) { 792 if (ret || buf != LP5521_REG_R_CURR_DEFAULT) {
789 dev_err(&client->dev, "error in resetting chip\n"); 793 dev_err(&client->dev, "error in resetting chip\n");
790 goto fail2; 794 goto fail2;
791 } 795 }
diff --git a/drivers/leds/leds-mc13783.c b/drivers/leds/leds-mc13783.c
index 8bc491541550..4cc6a2e3df34 100644
--- a/drivers/leds/leds-mc13783.c
+++ b/drivers/leds/leds-mc13783.c
@@ -280,7 +280,7 @@ static int __devinit mc13783_led_probe(struct platform_device *pdev)
280 return -EINVAL; 280 return -EINVAL;
281 } 281 }
282 282
283 led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL); 283 led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL);
284 if (led == NULL) { 284 if (led == NULL) {
285 dev_err(&pdev->dev, "failed to alloc memory\n"); 285 dev_err(&pdev->dev, "failed to alloc memory\n");
286 return -ENOMEM; 286 return -ENOMEM;
diff --git a/drivers/leds/leds-pca955x.c b/drivers/leds/leds-pca955x.c
index dcc3bc3d38db..5f462dbf0dbb 100644
--- a/drivers/leds/leds-pca955x.c
+++ b/drivers/leds/leds-pca955x.c
@@ -101,11 +101,16 @@ static const struct i2c_device_id pca955x_id[] = {
101}; 101};
102MODULE_DEVICE_TABLE(i2c, pca955x_id); 102MODULE_DEVICE_TABLE(i2c, pca955x_id);
103 103
104struct pca955x_led { 104struct pca955x {
105 struct mutex lock;
106 struct pca955x_led *leds;
105 struct pca955x_chipdef *chipdef; 107 struct pca955x_chipdef *chipdef;
106 struct i2c_client *client; 108 struct i2c_client *client;
109};
110
111struct pca955x_led {
112 struct pca955x *pca955x;
107 struct work_struct work; 113 struct work_struct work;
108 spinlock_t lock;
109 enum led_brightness brightness; 114 enum led_brightness brightness;
110 struct led_classdev led_cdev; 115 struct led_classdev led_cdev;
111 int led_num; /* 0 .. 15 potentially */ 116 int led_num; /* 0 .. 15 potentially */
@@ -140,7 +145,7 @@ static inline u8 pca955x_ledsel(u8 oldval, int led_num, int state)
140 */ 145 */
141static void pca955x_write_psc(struct i2c_client *client, int n, u8 val) 146static void pca955x_write_psc(struct i2c_client *client, int n, u8 val)
142{ 147{
143 struct pca955x_led *pca955x = i2c_get_clientdata(client); 148 struct pca955x *pca955x = i2c_get_clientdata(client);
144 149
145 i2c_smbus_write_byte_data(client, 150 i2c_smbus_write_byte_data(client,
146 pca95xx_num_input_regs(pca955x->chipdef->bits) + 2*n, 151 pca95xx_num_input_regs(pca955x->chipdef->bits) + 2*n,
@@ -156,7 +161,7 @@ static void pca955x_write_psc(struct i2c_client *client, int n, u8 val)
156 */ 161 */
157static void pca955x_write_pwm(struct i2c_client *client, int n, u8 val) 162static void pca955x_write_pwm(struct i2c_client *client, int n, u8 val)
158{ 163{
159 struct pca955x_led *pca955x = i2c_get_clientdata(client); 164 struct pca955x *pca955x = i2c_get_clientdata(client);
160 165
161 i2c_smbus_write_byte_data(client, 166 i2c_smbus_write_byte_data(client,
162 pca95xx_num_input_regs(pca955x->chipdef->bits) + 1 + 2*n, 167 pca95xx_num_input_regs(pca955x->chipdef->bits) + 1 + 2*n,
@@ -169,7 +174,7 @@ static void pca955x_write_pwm(struct i2c_client *client, int n, u8 val)
169 */ 174 */
170static void pca955x_write_ls(struct i2c_client *client, int n, u8 val) 175static void pca955x_write_ls(struct i2c_client *client, int n, u8 val)
171{ 176{
172 struct pca955x_led *pca955x = i2c_get_clientdata(client); 177 struct pca955x *pca955x = i2c_get_clientdata(client);
173 178
174 i2c_smbus_write_byte_data(client, 179 i2c_smbus_write_byte_data(client,
175 pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n, 180 pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n,
@@ -182,7 +187,7 @@ static void pca955x_write_ls(struct i2c_client *client, int n, u8 val)
182 */ 187 */
183static u8 pca955x_read_ls(struct i2c_client *client, int n) 188static u8 pca955x_read_ls(struct i2c_client *client, int n)
184{ 189{
185 struct pca955x_led *pca955x = i2c_get_clientdata(client); 190 struct pca955x *pca955x = i2c_get_clientdata(client);
186 191
187 return (u8) i2c_smbus_read_byte_data(client, 192 return (u8) i2c_smbus_read_byte_data(client,
188 pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n); 193 pca95xx_num_input_regs(pca955x->chipdef->bits) + 4 + n);
@@ -190,18 +195,23 @@ static u8 pca955x_read_ls(struct i2c_client *client, int n)
190 195
191static void pca955x_led_work(struct work_struct *work) 196static void pca955x_led_work(struct work_struct *work)
192{ 197{
193 struct pca955x_led *pca955x; 198 struct pca955x_led *pca955x_led;
199 struct pca955x *pca955x;
194 u8 ls; 200 u8 ls;
195 int chip_ls; /* which LSx to use (0-3 potentially) */ 201 int chip_ls; /* which LSx to use (0-3 potentially) */
196 int ls_led; /* which set of bits within LSx to use (0-3) */ 202 int ls_led; /* which set of bits within LSx to use (0-3) */
197 203
198 pca955x = container_of(work, struct pca955x_led, work); 204 pca955x_led = container_of(work, struct pca955x_led, work);
199 chip_ls = pca955x->led_num / 4; 205 pca955x = pca955x_led->pca955x;
200 ls_led = pca955x->led_num % 4; 206
207 chip_ls = pca955x_led->led_num / 4;
208 ls_led = pca955x_led->led_num % 4;
209
210 mutex_lock(&pca955x->lock);
201 211
202 ls = pca955x_read_ls(pca955x->client, chip_ls); 212 ls = pca955x_read_ls(pca955x->client, chip_ls);
203 213
204 switch (pca955x->brightness) { 214 switch (pca955x_led->brightness) {
205 case LED_FULL: 215 case LED_FULL:
206 ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_ON); 216 ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_LED_ON);
207 break; 217 break;
@@ -219,12 +229,15 @@ static void pca955x_led_work(struct work_struct *work)
219 * OFF, HALF, or FULL. But, this is probably better than 229 * OFF, HALF, or FULL. But, this is probably better than
220 * just turning off for all other values. 230 * just turning off for all other values.
221 */ 231 */
222 pca955x_write_pwm(pca955x->client, 1, 255-pca955x->brightness); 232 pca955x_write_pwm(pca955x->client, 1,
233 255 - pca955x_led->brightness);
223 ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK1); 234 ls = pca955x_ledsel(ls, ls_led, PCA955X_LS_BLINK1);
224 break; 235 break;
225 } 236 }
226 237
227 pca955x_write_ls(pca955x->client, chip_ls, ls); 238 pca955x_write_ls(pca955x->client, chip_ls, ls);
239
240 mutex_unlock(&pca955x->lock);
228} 241}
229 242
230static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness value) 243static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness value)
@@ -233,7 +246,6 @@ static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness v
233 246
234 pca955x = container_of(led_cdev, struct pca955x_led, led_cdev); 247 pca955x = container_of(led_cdev, struct pca955x_led, led_cdev);
235 248
236 spin_lock(&pca955x->lock);
237 pca955x->brightness = value; 249 pca955x->brightness = value;
238 250
239 /* 251 /*
@@ -241,14 +253,13 @@ static void pca955x_led_set(struct led_classdev *led_cdev, enum led_brightness v
241 * can sleep. 253 * can sleep.
242 */ 254 */
243 schedule_work(&pca955x->work); 255 schedule_work(&pca955x->work);
244
245 spin_unlock(&pca955x->lock);
246} 256}
247 257
248static int __devinit pca955x_probe(struct i2c_client *client, 258static int __devinit pca955x_probe(struct i2c_client *client,
249 const struct i2c_device_id *id) 259 const struct i2c_device_id *id)
250{ 260{
251 struct pca955x_led *pca955x; 261 struct pca955x *pca955x;
262 struct pca955x_led *pca955x_led;
252 struct pca955x_chipdef *chip; 263 struct pca955x_chipdef *chip;
253 struct i2c_adapter *adapter; 264 struct i2c_adapter *adapter;
254 struct led_platform_data *pdata; 265 struct led_platform_data *pdata;
@@ -282,39 +293,48 @@ static int __devinit pca955x_probe(struct i2c_client *client,
282 } 293 }
283 } 294 }
284 295
285 pca955x = kzalloc(sizeof(*pca955x) * chip->bits, GFP_KERNEL); 296 pca955x = kzalloc(sizeof(*pca955x), GFP_KERNEL);
286 if (!pca955x) 297 if (!pca955x)
287 return -ENOMEM; 298 return -ENOMEM;
288 299
300 pca955x->leds = kzalloc(sizeof(*pca955x_led) * chip->bits, GFP_KERNEL);
301 if (!pca955x->leds) {
302 err = -ENOMEM;
303 goto exit_nomem;
304 }
305
289 i2c_set_clientdata(client, pca955x); 306 i2c_set_clientdata(client, pca955x);
290 307
308 mutex_init(&pca955x->lock);
309 pca955x->client = client;
310 pca955x->chipdef = chip;
311
291 for (i = 0; i < chip->bits; i++) { 312 for (i = 0; i < chip->bits; i++) {
292 pca955x[i].chipdef = chip; 313 pca955x_led = &pca955x->leds[i];
293 pca955x[i].client = client; 314 pca955x_led->led_num = i;
294 pca955x[i].led_num = i; 315 pca955x_led->pca955x = pca955x;
295 316
296 /* Platform data can specify LED names and default triggers */ 317 /* Platform data can specify LED names and default triggers */
297 if (pdata) { 318 if (pdata) {
298 if (pdata->leds[i].name) 319 if (pdata->leds[i].name)
299 snprintf(pca955x[i].name, 320 snprintf(pca955x_led->name,
300 sizeof(pca955x[i].name), "pca955x:%s", 321 sizeof(pca955x_led->name), "pca955x:%s",
301 pdata->leds[i].name); 322 pdata->leds[i].name);
302 if (pdata->leds[i].default_trigger) 323 if (pdata->leds[i].default_trigger)
303 pca955x[i].led_cdev.default_trigger = 324 pca955x_led->led_cdev.default_trigger =
304 pdata->leds[i].default_trigger; 325 pdata->leds[i].default_trigger;
305 } else { 326 } else {
306 snprintf(pca955x[i].name, sizeof(pca955x[i].name), 327 snprintf(pca955x_led->name, sizeof(pca955x_led->name),
307 "pca955x:%d", i); 328 "pca955x:%d", i);
308 } 329 }
309 330
310 spin_lock_init(&pca955x[i].lock); 331 pca955x_led->led_cdev.name = pca955x_led->name;
311 332 pca955x_led->led_cdev.brightness_set = pca955x_led_set;
312 pca955x[i].led_cdev.name = pca955x[i].name;
313 pca955x[i].led_cdev.brightness_set = pca955x_led_set;
314 333
315 INIT_WORK(&pca955x[i].work, pca955x_led_work); 334 INIT_WORK(&pca955x_led->work, pca955x_led_work);
316 335
317 err = led_classdev_register(&client->dev, &pca955x[i].led_cdev); 336 err = led_classdev_register(&client->dev,
337 &pca955x_led->led_cdev);
318 if (err < 0) 338 if (err < 0)
319 goto exit; 339 goto exit;
320 } 340 }
@@ -337,10 +357,12 @@ static int __devinit pca955x_probe(struct i2c_client *client,
337 357
338exit: 358exit:
339 while (i--) { 359 while (i--) {
340 led_classdev_unregister(&pca955x[i].led_cdev); 360 led_classdev_unregister(&pca955x->leds[i].led_cdev);
341 cancel_work_sync(&pca955x[i].work); 361 cancel_work_sync(&pca955x->leds[i].work);
342 } 362 }
343 363
364 kfree(pca955x->leds);
365exit_nomem:
344 kfree(pca955x); 366 kfree(pca955x);
345 367
346 return err; 368 return err;
@@ -348,14 +370,15 @@ exit:
348 370
349static int __devexit pca955x_remove(struct i2c_client *client) 371static int __devexit pca955x_remove(struct i2c_client *client)
350{ 372{
351 struct pca955x_led *pca955x = i2c_get_clientdata(client); 373 struct pca955x *pca955x = i2c_get_clientdata(client);
352 int i; 374 int i;
353 375
354 for (i = 0; i < pca955x->chipdef->bits; i++) { 376 for (i = 0; i < pca955x->chipdef->bits; i++) {
355 led_classdev_unregister(&pca955x[i].led_cdev); 377 led_classdev_unregister(&pca955x->leds[i].led_cdev);
356 cancel_work_sync(&pca955x[i].work); 378 cancel_work_sync(&pca955x->leds[i].work);
357 } 379 }
358 380
381 kfree(pca955x->leds);
359 kfree(pca955x); 382 kfree(pca955x);
360 383
361 return 0; 384 return 0;
diff --git a/drivers/leds/ledtrig-backlight.c b/drivers/leds/ledtrig-backlight.c
index 2b513a2ad7de..e2726867c5d4 100644
--- a/drivers/leds/ledtrig-backlight.c
+++ b/drivers/leds/ledtrig-backlight.c
@@ -120,6 +120,7 @@ static void bl_trig_activate(struct led_classdev *led)
120 ret = fb_register_client(&n->notifier); 120 ret = fb_register_client(&n->notifier);
121 if (ret) 121 if (ret)
122 dev_err(led->dev, "unable to register backlight trigger\n"); 122 dev_err(led->dev, "unable to register backlight trigger\n");
123 led->activated = true;
123 124
124 return; 125 return;
125 126
@@ -133,10 +134,11 @@ static void bl_trig_deactivate(struct led_classdev *led)
133 struct bl_trig_notifier *n = 134 struct bl_trig_notifier *n =
134 (struct bl_trig_notifier *) led->trigger_data; 135 (struct bl_trig_notifier *) led->trigger_data;
135 136
136 if (n) { 137 if (led->activated) {
137 device_remove_file(led->dev, &dev_attr_inverted); 138 device_remove_file(led->dev, &dev_attr_inverted);
138 fb_unregister_client(&n->notifier); 139 fb_unregister_client(&n->notifier);
139 kfree(n); 140 kfree(n);
141 led->activated = false;
140 } 142 }
141} 143}
142 144
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index ecc4bf3f37a9..f057c101b896 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -200,6 +200,7 @@ static void gpio_trig_activate(struct led_classdev *led)
200 gpio_data->led = led; 200 gpio_data->led = led;
201 led->trigger_data = gpio_data; 201 led->trigger_data = gpio_data;
202 INIT_WORK(&gpio_data->work, gpio_trig_work); 202 INIT_WORK(&gpio_data->work, gpio_trig_work);
203 led->activated = true;
203 204
204 return; 205 return;
205 206
@@ -217,7 +218,7 @@ static void gpio_trig_deactivate(struct led_classdev *led)
217{ 218{
218 struct gpio_trig_data *gpio_data = led->trigger_data; 219 struct gpio_trig_data *gpio_data = led->trigger_data;
219 220
220 if (gpio_data) { 221 if (led->activated) {
221 device_remove_file(led->dev, &dev_attr_gpio); 222 device_remove_file(led->dev, &dev_attr_gpio);
222 device_remove_file(led->dev, &dev_attr_inverted); 223 device_remove_file(led->dev, &dev_attr_inverted);
223 device_remove_file(led->dev, &dev_attr_desired_brightness); 224 device_remove_file(led->dev, &dev_attr_desired_brightness);
@@ -225,6 +226,7 @@ static void gpio_trig_deactivate(struct led_classdev *led)
225 if (gpio_data->gpio != 0) 226 if (gpio_data->gpio != 0)
226 free_irq(gpio_to_irq(gpio_data->gpio), led); 227 free_irq(gpio_to_irq(gpio_data->gpio), led);
227 kfree(gpio_data); 228 kfree(gpio_data);
229 led->activated = false;
228 } 230 }
229} 231}
230 232
diff --git a/drivers/leds/ledtrig-heartbeat.c b/drivers/leds/ledtrig-heartbeat.c
index 759c0bba4a8f..41dc76db4311 100644
--- a/drivers/leds/ledtrig-heartbeat.c
+++ b/drivers/leds/ledtrig-heartbeat.c
@@ -18,6 +18,7 @@
18#include <linux/timer.h> 18#include <linux/timer.h>
19#include <linux/sched.h> 19#include <linux/sched.h>
20#include <linux/leds.h> 20#include <linux/leds.h>
21#include <linux/reboot.h>
21#include "leds.h" 22#include "leds.h"
22 23
23struct heartbeat_trig_data { 24struct heartbeat_trig_data {
@@ -83,15 +84,17 @@ static void heartbeat_trig_activate(struct led_classdev *led_cdev)
83 led_heartbeat_function, (unsigned long) led_cdev); 84 led_heartbeat_function, (unsigned long) led_cdev);
84 heartbeat_data->phase = 0; 85 heartbeat_data->phase = 0;
85 led_heartbeat_function(heartbeat_data->timer.data); 86 led_heartbeat_function(heartbeat_data->timer.data);
87 led_cdev->activated = true;
86} 88}
87 89
88static void heartbeat_trig_deactivate(struct led_classdev *led_cdev) 90static void heartbeat_trig_deactivate(struct led_classdev *led_cdev)
89{ 91{
90 struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data; 92 struct heartbeat_trig_data *heartbeat_data = led_cdev->trigger_data;
91 93
92 if (heartbeat_data) { 94 if (led_cdev->activated) {
93 del_timer_sync(&heartbeat_data->timer); 95 del_timer_sync(&heartbeat_data->timer);
94 kfree(heartbeat_data); 96 kfree(heartbeat_data);
97 led_cdev->activated = false;
95 } 98 }
96} 99}
97 100
@@ -101,13 +104,38 @@ static struct led_trigger heartbeat_led_trigger = {
101 .deactivate = heartbeat_trig_deactivate, 104 .deactivate = heartbeat_trig_deactivate,
102}; 105};
103 106
107static int heartbeat_reboot_notifier(struct notifier_block *nb,
108 unsigned long code, void *unused)
109{
110 led_trigger_unregister(&heartbeat_led_trigger);
111 return NOTIFY_DONE;
112}
113
114static struct notifier_block heartbeat_reboot_nb = {
115 .notifier_call = heartbeat_reboot_notifier,
116};
117
118static struct notifier_block heartbeat_panic_nb = {
119 .notifier_call = heartbeat_reboot_notifier,
120};
121
104static int __init heartbeat_trig_init(void) 122static int __init heartbeat_trig_init(void)
105{ 123{
106 return led_trigger_register(&heartbeat_led_trigger); 124 int rc = led_trigger_register(&heartbeat_led_trigger);
125
126 if (!rc) {
127 atomic_notifier_chain_register(&panic_notifier_list,
128 &heartbeat_panic_nb);
129 register_reboot_notifier(&heartbeat_reboot_nb);
130 }
131 return rc;
107} 132}
108 133
109static void __exit heartbeat_trig_exit(void) 134static void __exit heartbeat_trig_exit(void)
110{ 135{
136 unregister_reboot_notifier(&heartbeat_reboot_nb);
137 atomic_notifier_chain_unregister(&panic_notifier_list,
138 &heartbeat_panic_nb);
111 led_trigger_unregister(&heartbeat_led_trigger); 139 led_trigger_unregister(&heartbeat_led_trigger);
112} 140}
113 141
diff --git a/drivers/leds/ledtrig-timer.c b/drivers/leds/ledtrig-timer.c
index 328c64c0841c..9010f7abaf2c 100644
--- a/drivers/leds/ledtrig-timer.c
+++ b/drivers/leds/ledtrig-timer.c
@@ -31,21 +31,17 @@ static ssize_t led_delay_on_store(struct device *dev,
31 struct device_attribute *attr, const char *buf, size_t size) 31 struct device_attribute *attr, const char *buf, size_t size)
32{ 32{
33 struct led_classdev *led_cdev = dev_get_drvdata(dev); 33 struct led_classdev *led_cdev = dev_get_drvdata(dev);
34 int ret = -EINVAL; 34 unsigned long state;
35 char *after; 35 ssize_t ret = -EINVAL;
36 unsigned long state = simple_strtoul(buf, &after, 10); 36
37 size_t count = after - buf; 37 ret = kstrtoul(buf, 10, &state);
38 38 if (ret)
39 if (isspace(*after)) 39 return ret;
40 count++;
41
42 if (count == size) {
43 led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
44 led_cdev->blink_delay_on = state;
45 ret = count;
46 }
47 40
48 return ret; 41 led_blink_set(led_cdev, &state, &led_cdev->blink_delay_off);
42 led_cdev->blink_delay_on = state;
43
44 return size;
49} 45}
50 46
51static ssize_t led_delay_off_show(struct device *dev, 47static ssize_t led_delay_off_show(struct device *dev,
@@ -60,21 +56,17 @@ static ssize_t led_delay_off_store(struct device *dev,
60 struct device_attribute *attr, const char *buf, size_t size) 56 struct device_attribute *attr, const char *buf, size_t size)
61{ 57{
62 struct led_classdev *led_cdev = dev_get_drvdata(dev); 58 struct led_classdev *led_cdev = dev_get_drvdata(dev);
63 int ret = -EINVAL; 59 unsigned long state;
64 char *after; 60 ssize_t ret = -EINVAL;
65 unsigned long state = simple_strtoul(buf, &after, 10);
66 size_t count = after - buf;
67
68 if (isspace(*after))
69 count++;
70
71 if (count == size) {
72 led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
73 led_cdev->blink_delay_off = state;
74 ret = count;
75 }
76 61
77 return ret; 62 ret = kstrtoul(buf, 10, &state);
63 if (ret)
64 return ret;
65
66 led_blink_set(led_cdev, &led_cdev->blink_delay_on, &state);
67 led_cdev->blink_delay_off = state;
68
69 return size;
78} 70}
79 71
80static DEVICE_ATTR(delay_on, 0644, led_delay_on_show, led_delay_on_store); 72static DEVICE_ATTR(delay_on, 0644, led_delay_on_show, led_delay_on_store);
@@ -95,8 +87,7 @@ static void timer_trig_activate(struct led_classdev *led_cdev)
95 87
96 led_blink_set(led_cdev, &led_cdev->blink_delay_on, 88 led_blink_set(led_cdev, &led_cdev->blink_delay_on,
97 &led_cdev->blink_delay_off); 89 &led_cdev->blink_delay_off);
98 90 led_cdev->activated = true;
99 led_cdev->trigger_data = (void *)1;
100 91
101 return; 92 return;
102 93
@@ -106,9 +97,10 @@ err_out_delayon:
106 97
107static void timer_trig_deactivate(struct led_classdev *led_cdev) 98static void timer_trig_deactivate(struct led_classdev *led_cdev)
108{ 99{
109 if (led_cdev->trigger_data) { 100 if (led_cdev->activated) {
110 device_remove_file(led_cdev->dev, &dev_attr_delay_on); 101 device_remove_file(led_cdev->dev, &dev_attr_delay_on);
111 device_remove_file(led_cdev->dev, &dev_attr_delay_off); 102 device_remove_file(led_cdev->dev, &dev_attr_delay_off);
103 led_cdev->activated = false;
112 } 104 }
113 105
114 /* Stop blinking */ 106 /* Stop blinking */
diff --git a/drivers/leds/ledtrig-transient.c b/drivers/leds/ledtrig-transient.c
new file mode 100644
index 000000000000..83179f435e1e
--- /dev/null
+++ b/drivers/leds/ledtrig-transient.c
@@ -0,0 +1,237 @@
1/*
2 * LED Kernel Transient Trigger
3 *
4 * Copyright (C) 2012 Shuah Khan <shuahkhan@gmail.com>
5 *
6 * Based on Richard Purdie's ledtrig-timer.c and Atsushi Nemoto's
7 * ledtrig-heartbeat.c
8 * Design and use-case input from Jonas Bonn <jonas@southpole.se> and
9 * Neil Brown <neilb@suse.de>
10 *
11 * This program is free software; you can redistribute it and/or modify
12 * it under the terms of the GNU General Public License version 2 as
13 * published by the Free Software Foundation.
14 *
15 */
16/*
17 * Transient trigger allows one shot timer activation. Please refer to
18 * Documentation/leds/ledtrig-transient.txt for details
19*/
20
21#include <linux/module.h>
22#include <linux/kernel.h>
23#include <linux/init.h>
24#include <linux/device.h>
25#include <linux/slab.h>
26#include <linux/timer.h>
27#include <linux/leds.h>
28#include "leds.h"
29
30struct transient_trig_data {
31 int activate;
32 int state;
33 int restore_state;
34 unsigned long duration;
35 struct timer_list timer;
36};
37
38static void transient_timer_function(unsigned long data)
39{
40 struct led_classdev *led_cdev = (struct led_classdev *) data;
41 struct transient_trig_data *transient_data = led_cdev->trigger_data;
42
43 transient_data->activate = 0;
44 led_set_brightness(led_cdev, transient_data->restore_state);
45}
46
47static ssize_t transient_activate_show(struct device *dev,
48 struct device_attribute *attr, char *buf)
49{
50 struct led_classdev *led_cdev = dev_get_drvdata(dev);
51 struct transient_trig_data *transient_data = led_cdev->trigger_data;
52
53 return sprintf(buf, "%d\n", transient_data->activate);
54}
55
56static ssize_t transient_activate_store(struct device *dev,
57 struct device_attribute *attr, const char *buf, size_t size)
58{
59 struct led_classdev *led_cdev = dev_get_drvdata(dev);
60 struct transient_trig_data *transient_data = led_cdev->trigger_data;
61 unsigned long state;
62 ssize_t ret;
63
64 ret = kstrtoul(buf, 10, &state);
65 if (ret)
66 return ret;
67
68 if (state != 1 && state != 0)
69 return -EINVAL;
70
71 /* cancel the running timer */
72 if (state == 0 && transient_data->activate == 1) {
73 del_timer(&transient_data->timer);
74 transient_data->activate = state;
75 led_set_brightness(led_cdev, transient_data->restore_state);
76 return size;
77 }
78
79 /* start timer if there is no active timer */
80 if (state == 1 && transient_data->activate == 0 &&
81 transient_data->duration != 0) {
82 transient_data->activate = state;
83 led_set_brightness(led_cdev, transient_data->state);
84 transient_data->restore_state =
85 (transient_data->state == LED_FULL) ? LED_OFF : LED_FULL;
86 mod_timer(&transient_data->timer,
87 jiffies + transient_data->duration);
88 }
89
90 /* state == 0 && transient_data->activate == 0
91 timer is not active - just return */
92 /* state == 1 && transient_data->activate == 1
93 timer is already active - just return */
94
95 return size;
96}
97
98static ssize_t transient_duration_show(struct device *dev,
99 struct device_attribute *attr, char *buf)
100{
101 struct led_classdev *led_cdev = dev_get_drvdata(dev);
102 struct transient_trig_data *transient_data = led_cdev->trigger_data;
103
104 return sprintf(buf, "%lu\n", transient_data->duration);
105}
106
107static ssize_t transient_duration_store(struct device *dev,
108 struct device_attribute *attr, const char *buf, size_t size)
109{
110 struct led_classdev *led_cdev = dev_get_drvdata(dev);
111 struct transient_trig_data *transient_data = led_cdev->trigger_data;
112 unsigned long state;
113 ssize_t ret;
114
115 ret = kstrtoul(buf, 10, &state);
116 if (ret)
117 return ret;
118
119 transient_data->duration = state;
120 return size;
121}
122
123static ssize_t transient_state_show(struct device *dev,
124 struct device_attribute *attr, char *buf)
125{
126 struct led_classdev *led_cdev = dev_get_drvdata(dev);
127 struct transient_trig_data *transient_data = led_cdev->trigger_data;
128 int state;
129
130 state = (transient_data->state == LED_FULL) ? 1 : 0;
131 return sprintf(buf, "%d\n", state);
132}
133
134static ssize_t transient_state_store(struct device *dev,
135 struct device_attribute *attr, const char *buf, size_t size)
136{
137 struct led_classdev *led_cdev = dev_get_drvdata(dev);
138 struct transient_trig_data *transient_data = led_cdev->trigger_data;
139 unsigned long state;
140 ssize_t ret;
141
142 ret = kstrtoul(buf, 10, &state);
143 if (ret)
144 return ret;
145
146 if (state != 1 && state != 0)
147 return -EINVAL;
148
149 transient_data->state = (state == 1) ? LED_FULL : LED_OFF;
150 return size;
151}
152
153static DEVICE_ATTR(activate, 0644, transient_activate_show,
154 transient_activate_store);
155static DEVICE_ATTR(duration, 0644, transient_duration_show,
156 transient_duration_store);
157static DEVICE_ATTR(state, 0644, transient_state_show, transient_state_store);
158
159static void transient_trig_activate(struct led_classdev *led_cdev)
160{
161 int rc;
162 struct transient_trig_data *tdata;
163
164 tdata = kzalloc(sizeof(struct transient_trig_data), GFP_KERNEL);
165 if (!tdata) {
166 dev_err(led_cdev->dev,
167 "unable to allocate transient trigger\n");
168 return;
169 }
170 led_cdev->trigger_data = tdata;
171
172 rc = device_create_file(led_cdev->dev, &dev_attr_activate);
173 if (rc)
174 goto err_out;
175
176 rc = device_create_file(led_cdev->dev, &dev_attr_duration);
177 if (rc)
178 goto err_out_duration;
179
180 rc = device_create_file(led_cdev->dev, &dev_attr_state);
181 if (rc)
182 goto err_out_state;
183
184 setup_timer(&tdata->timer, transient_timer_function,
185 (unsigned long) led_cdev);
186 led_cdev->activated = true;
187
188 return;
189
190err_out_state:
191 device_remove_file(led_cdev->dev, &dev_attr_duration);
192err_out_duration:
193 device_remove_file(led_cdev->dev, &dev_attr_activate);
194err_out:
195 dev_err(led_cdev->dev, "unable to register transient trigger\n");
196 led_cdev->trigger_data = NULL;
197 kfree(tdata);
198}
199
200static void transient_trig_deactivate(struct led_classdev *led_cdev)
201{
202 struct transient_trig_data *transient_data = led_cdev->trigger_data;
203
204 if (led_cdev->activated) {
205 del_timer_sync(&transient_data->timer);
206 led_set_brightness(led_cdev, transient_data->restore_state);
207 device_remove_file(led_cdev->dev, &dev_attr_activate);
208 device_remove_file(led_cdev->dev, &dev_attr_duration);
209 device_remove_file(led_cdev->dev, &dev_attr_state);
210 led_cdev->trigger_data = NULL;
211 led_cdev->activated = false;
212 kfree(transient_data);
213 }
214}
215
216static struct led_trigger transient_trigger = {
217 .name = "transient",
218 .activate = transient_trig_activate,
219 .deactivate = transient_trig_deactivate,
220};
221
222static int __init transient_trig_init(void)
223{
224 return led_trigger_register(&transient_trigger);
225}
226
227static void __exit transient_trig_exit(void)
228{
229 led_trigger_unregister(&transient_trigger);
230}
231
232module_init(transient_trig_init);
233module_exit(transient_trig_exit);
234
235MODULE_AUTHOR("Shuah Khan <shuahkhan@gmail.com>");
236MODULE_DESCRIPTION("Transient LED trigger");
237MODULE_LICENSE("GPL");
diff --git a/drivers/media/video/mx3_camera.c b/drivers/media/video/mx3_camera.c
index 02d54a057b60..f13643d31353 100644
--- a/drivers/media/video/mx3_camera.c
+++ b/drivers/media/video/mx3_camera.c
@@ -511,7 +511,7 @@ static void mx3_camera_activate(struct mx3_camera_dev *mx3_cam,
511 /* ipu_csi_init_interface() */ 511 /* ipu_csi_init_interface() */
512 csi_reg_write(mx3_cam, conf, CSI_SENS_CONF); 512 csi_reg_write(mx3_cam, conf, CSI_SENS_CONF);
513 513
514 clk_enable(mx3_cam->clk); 514 clk_prepare_enable(mx3_cam->clk);
515 rate = clk_round_rate(mx3_cam->clk, mx3_cam->mclk); 515 rate = clk_round_rate(mx3_cam->clk, mx3_cam->mclk);
516 dev_dbg(icd->parent, "Set SENS_CONF to %x, rate %ld\n", conf, rate); 516 dev_dbg(icd->parent, "Set SENS_CONF to %x, rate %ld\n", conf, rate);
517 if (rate) 517 if (rate)
@@ -552,7 +552,7 @@ static void mx3_camera_remove_device(struct soc_camera_device *icd)
552 *ichan = NULL; 552 *ichan = NULL;
553 } 553 }
554 554
555 clk_disable(mx3_cam->clk); 555 clk_disable_unprepare(mx3_cam->clk);
556 556
557 mx3_cam->icd = NULL; 557 mx3_cam->icd = NULL;
558 558
diff --git a/drivers/mfd/Kconfig b/drivers/mfd/Kconfig
index f4b4dad77391..e129c820df7d 100644
--- a/drivers/mfd/Kconfig
+++ b/drivers/mfd/Kconfig
@@ -106,6 +106,19 @@ config UCB1400_CORE
106 To compile this driver as a module, choose M here: the 106 To compile this driver as a module, choose M here: the
107 module will be called ucb1400_core. 107 module will be called ucb1400_core.
108 108
109config MFD_LM3533
110 tristate "LM3533 Lighting Power chip"
111 depends on I2C
112 select MFD_CORE
113 select REGMAP_I2C
114 help
115 Say yes here to enable support for National Semiconductor / TI
116 LM3533 Lighting Power chips.
117
118 This driver provides common support for accessing the device;
119 additional drivers must be enabled in order to use the LED,
120 backlight or ambient-light-sensor functionality of the device.
121
109config TPS6105X 122config TPS6105X
110 tristate "TPS61050/61052 Boost Converters" 123 tristate "TPS61050/61052 Boost Converters"
111 depends on I2C 124 depends on I2C
@@ -177,8 +190,8 @@ config MFD_TPS65910
177 bool "TPS65910 Power Management chip" 190 bool "TPS65910 Power Management chip"
178 depends on I2C=y && GPIOLIB 191 depends on I2C=y && GPIOLIB
179 select MFD_CORE 192 select MFD_CORE
180 select GPIO_TPS65910
181 select REGMAP_I2C 193 select REGMAP_I2C
194 select IRQ_DOMAIN
182 help 195 help
183 if you say yes here you get support for the TPS65910 series of 196 if you say yes here you get support for the TPS65910 series of
184 Power Management chips. 197 Power Management chips.
@@ -409,6 +422,19 @@ config PMIC_ADP5520
409 individual components like LCD backlight, LEDs, GPIOs and Kepad 422 individual components like LCD backlight, LEDs, GPIOs and Kepad
410 under the corresponding menus. 423 under the corresponding menus.
411 424
425config MFD_MAX77693
426 bool "Maxim Semiconductor MAX77693 PMIC Support"
427 depends on I2C=y && GENERIC_HARDIRQS
428 select MFD_CORE
429 select REGMAP_I2C
430 help
431 Say yes here to support for Maxim Semiconductor MAX77693.
432 This is a companion Power Management IC with Flash, Haptic, Charger,
433 and MUIC(Micro USB Interface Controller) controls on chip.
434 This driver provides common support for accessing the device;
435 additional drivers must be enabled in order to use the functionality
436 of the device.
437
412config MFD_MAX8925 438config MFD_MAX8925
413 bool "Maxim Semiconductor MAX8925 PMIC Support" 439 bool "Maxim Semiconductor MAX8925 PMIC Support"
414 depends on I2C=y && GENERIC_HARDIRQS 440 depends on I2C=y && GENERIC_HARDIRQS
@@ -454,9 +480,9 @@ config MFD_S5M_CORE
454 of the device 480 of the device
455 481
456config MFD_WM8400 482config MFD_WM8400
457 tristate "Support Wolfson Microelectronics WM8400" 483 bool "Support Wolfson Microelectronics WM8400"
458 select MFD_CORE 484 select MFD_CORE
459 depends on I2C 485 depends on I2C=y
460 select REGMAP_I2C 486 select REGMAP_I2C
461 help 487 help
462 Support for the Wolfson Microelecronics WM8400 PMIC and audio 488 Support for the Wolfson Microelecronics WM8400 PMIC and audio
@@ -473,6 +499,7 @@ config MFD_WM831X_I2C
473 select MFD_CORE 499 select MFD_CORE
474 select MFD_WM831X 500 select MFD_WM831X
475 select REGMAP_I2C 501 select REGMAP_I2C
502 select IRQ_DOMAIN
476 depends on I2C=y && GENERIC_HARDIRQS 503 depends on I2C=y && GENERIC_HARDIRQS
477 help 504 help
478 Support for the Wolfson Microelecronics WM831x and WM832x PMICs 505 Support for the Wolfson Microelecronics WM831x and WM832x PMICs
@@ -485,6 +512,7 @@ config MFD_WM831X_SPI
485 select MFD_CORE 512 select MFD_CORE
486 select MFD_WM831X 513 select MFD_WM831X
487 select REGMAP_SPI 514 select REGMAP_SPI
515 select IRQ_DOMAIN
488 depends on SPI_MASTER && GENERIC_HARDIRQS 516 depends on SPI_MASTER && GENERIC_HARDIRQS
489 help 517 help
490 Support for the Wolfson Microelecronics WM831x and WM832x PMICs 518 Support for the Wolfson Microelecronics WM831x and WM832x PMICs
@@ -597,17 +625,32 @@ config MFD_MC13783
597 tristate 625 tristate
598 626
599config MFD_MC13XXX 627config MFD_MC13XXX
600 tristate "Support Freescale MC13783 and MC13892" 628 tristate
601 depends on SPI_MASTER 629 depends on SPI_MASTER || I2C
602 select MFD_CORE 630 select MFD_CORE
603 select MFD_MC13783 631 select MFD_MC13783
604 help 632 help
605 Support for the Freescale (Atlas) PMIC and audio CODECs 633 Enable support for the Freescale MC13783 and MC13892 PMICs.
606 MC13783 and MC13892. 634 This driver provides common support for accessing the device,
607 This driver provides common support for accessing the device,
608 additional drivers must be enabled in order to use the 635 additional drivers must be enabled in order to use the
609 functionality of the device. 636 functionality of the device.
610 637
638config MFD_MC13XXX_SPI
639 tristate "Freescale MC13783 and MC13892 SPI interface"
640 depends on SPI_MASTER
641 select REGMAP_SPI
642 select MFD_MC13XXX
643 help
644 Select this if your MC13xxx is connected via an SPI bus.
645
646config MFD_MC13XXX_I2C
647 tristate "Freescale MC13892 I2C interface"
648 depends on I2C
649 select REGMAP_I2C
650 select MFD_MC13XXX
651 help
652 Select this if your MC13xxx is connected via an I2C bus.
653
611config ABX500_CORE 654config ABX500_CORE
612 bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions" 655 bool "ST-Ericsson ABX500 Mixed Signal Circuit register functions"
613 default y if ARCH_U300 || ARCH_U8500 656 default y if ARCH_U300 || ARCH_U8500
@@ -651,7 +694,7 @@ config EZX_PCAP
651 694
652config AB8500_CORE 695config AB8500_CORE
653 bool "ST-Ericsson AB8500 Mixed Signal Power Management chip" 696 bool "ST-Ericsson AB8500 Mixed Signal Power Management chip"
654 depends on GENERIC_HARDIRQS && ABX500_CORE 697 depends on GENERIC_HARDIRQS && ABX500_CORE && MFD_DB8500_PRCMU
655 select MFD_CORE 698 select MFD_CORE
656 help 699 help
657 Select this option to enable access to AB8500 power management 700 Select this option to enable access to AB8500 power management
@@ -722,6 +765,16 @@ config LPC_SCH
722 LPC bridge function of the Intel SCH provides support for 765 LPC bridge function of the Intel SCH provides support for
723 System Management Bus and General Purpose I/O. 766 System Management Bus and General Purpose I/O.
724 767
768config LPC_ICH
769 tristate "Intel ICH LPC"
770 depends on PCI
771 select MFD_CORE
772 help
773 The LPC bridge function of the Intel ICH provides support for
774 many functional units. This driver provides needed support for
775 other drivers to control these functions, currently GPIO and
776 watchdog.
777
725config MFD_RDC321X 778config MFD_RDC321X
726 tristate "Support for RDC-R321x southbridge" 779 tristate "Support for RDC-R321x southbridge"
727 select MFD_CORE 780 select MFD_CORE
@@ -854,6 +907,11 @@ config MFD_RC5T583
854 Additional drivers must be enabled in order to use the 907 Additional drivers must be enabled in order to use the
855 different functionality of the device. 908 different functionality of the device.
856 909
910config MFD_STA2X11
911 bool "STA2X11 multi function device support"
912 depends on STA2X11
913 select MFD_CORE
914
857config MFD_ANATOP 915config MFD_ANATOP
858 bool "Support for Freescale i.MX on-chip ANATOP controller" 916 bool "Support for Freescale i.MX on-chip ANATOP controller"
859 depends on SOC_IMX6Q 917 depends on SOC_IMX6Q
diff --git a/drivers/mfd/Makefile b/drivers/mfd/Makefile
index 43672b87805a..75f6ed68a4b9 100644
--- a/drivers/mfd/Makefile
+++ b/drivers/mfd/Makefile
@@ -15,6 +15,7 @@ obj-$(CONFIG_MFD_DAVINCI_VOICECODEC) += davinci_voicecodec.o
15obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o 15obj-$(CONFIG_MFD_DM355EVM_MSP) += dm355evm_msp.o
16obj-$(CONFIG_MFD_TI_SSP) += ti-ssp.o 16obj-$(CONFIG_MFD_TI_SSP) += ti-ssp.o
17 17
18obj-$(CONFIG_MFD_STA2X11) += sta2x11-mfd.o
18obj-$(CONFIG_MFD_STMPE) += stmpe.o 19obj-$(CONFIG_MFD_STMPE) += stmpe.o
19obj-$(CONFIG_STMPE_I2C) += stmpe-i2c.o 20obj-$(CONFIG_STMPE_I2C) += stmpe-i2c.o
20obj-$(CONFIG_STMPE_SPI) += stmpe-spi.o 21obj-$(CONFIG_STMPE_SPI) += stmpe-spi.o
@@ -54,6 +55,8 @@ obj-$(CONFIG_TWL6030_PWM) += twl6030-pwm.o
54obj-$(CONFIG_TWL6040_CORE) += twl6040-core.o twl6040-irq.o 55obj-$(CONFIG_TWL6040_CORE) += twl6040-core.o twl6040-irq.o
55 56
56obj-$(CONFIG_MFD_MC13XXX) += mc13xxx-core.o 57obj-$(CONFIG_MFD_MC13XXX) += mc13xxx-core.o
58obj-$(CONFIG_MFD_MC13XXX_SPI) += mc13xxx-spi.o
59obj-$(CONFIG_MFD_MC13XXX_I2C) += mc13xxx-i2c.o
57 60
58obj-$(CONFIG_MFD_CORE) += mfd-core.o 61obj-$(CONFIG_MFD_CORE) += mfd-core.o
59 62
@@ -75,6 +78,7 @@ obj-$(CONFIG_PMIC_DA9052) += da9052-core.o
75obj-$(CONFIG_MFD_DA9052_SPI) += da9052-spi.o 78obj-$(CONFIG_MFD_DA9052_SPI) += da9052-spi.o
76obj-$(CONFIG_MFD_DA9052_I2C) += da9052-i2c.o 79obj-$(CONFIG_MFD_DA9052_I2C) += da9052-i2c.o
77 80
81obj-$(CONFIG_MFD_MAX77693) += max77693.o max77693-irq.o
78max8925-objs := max8925-core.o max8925-i2c.o 82max8925-objs := max8925-core.o max8925-i2c.o
79obj-$(CONFIG_MFD_MAX8925) += max8925.o 83obj-$(CONFIG_MFD_MAX8925) += max8925.o
80obj-$(CONFIG_MFD_MAX8997) += max8997.o max8997-irq.o 84obj-$(CONFIG_MFD_MAX8997) += max8997.o max8997-irq.o
@@ -87,15 +91,15 @@ obj-$(CONFIG_PCF50633_GPIO) += pcf50633-gpio.o
87obj-$(CONFIG_ABX500_CORE) += abx500-core.o 91obj-$(CONFIG_ABX500_CORE) += abx500-core.o
88obj-$(CONFIG_AB3100_CORE) += ab3100-core.o 92obj-$(CONFIG_AB3100_CORE) += ab3100-core.o
89obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o 93obj-$(CONFIG_AB3100_OTP) += ab3100-otp.o
90obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
91obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o 94obj-$(CONFIG_AB8500_DEBUG) += ab8500-debugfs.o
92obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o 95obj-$(CONFIG_AB8500_GPADC) += ab8500-gpadc.o
93obj-$(CONFIG_MFD_DB8500_PRCMU) += db8500-prcmu.o 96obj-$(CONFIG_MFD_DB8500_PRCMU) += db8500-prcmu.o
94# ab8500-i2c need to come after db8500-prcmu (which provides the channel) 97# ab8500-core need to come after db8500-prcmu (which provides the channel)
95obj-$(CONFIG_AB8500_I2C_CORE) += ab8500-i2c.o 98obj-$(CONFIG_AB8500_CORE) += ab8500-core.o ab8500-sysctrl.o
96obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o 99obj-$(CONFIG_MFD_TIMBERDALE) += timberdale.o
97obj-$(CONFIG_PMIC_ADP5520) += adp5520.o 100obj-$(CONFIG_PMIC_ADP5520) += adp5520.o
98obj-$(CONFIG_LPC_SCH) += lpc_sch.o 101obj-$(CONFIG_LPC_SCH) += lpc_sch.o
102obj-$(CONFIG_LPC_ICH) += lpc_ich.o
99obj-$(CONFIG_MFD_RDC321X) += rdc321x-southbridge.o 103obj-$(CONFIG_MFD_RDC321X) += rdc321x-southbridge.o
100obj-$(CONFIG_MFD_JANZ_CMODIO) += janz-cmodio.o 104obj-$(CONFIG_MFD_JANZ_CMODIO) += janz-cmodio.o
101obj-$(CONFIG_MFD_JZ4740_ADC) += jz4740-adc.o 105obj-$(CONFIG_MFD_JZ4740_ADC) += jz4740-adc.o
diff --git a/drivers/mfd/ab8500-core.c b/drivers/mfd/ab8500-core.c
index 1f08704f7ae8..dac0e2998603 100644
--- a/drivers/mfd/ab8500-core.c
+++ b/drivers/mfd/ab8500-core.c
@@ -18,7 +18,10 @@
18#include <linux/mfd/core.h> 18#include <linux/mfd/core.h>
19#include <linux/mfd/abx500.h> 19#include <linux/mfd/abx500.h>
20#include <linux/mfd/abx500/ab8500.h> 20#include <linux/mfd/abx500/ab8500.h>
21#include <linux/mfd/dbx500-prcmu.h>
21#include <linux/regulator/ab8500.h> 22#include <linux/regulator/ab8500.h>
23#include <linux/of.h>
24#include <linux/of_device.h>
22 25
23/* 26/*
24 * Interrupt register offsets 27 * Interrupt register offsets
@@ -91,12 +94,24 @@
91#define AB8500_IT_MASK23_REG 0x56 94#define AB8500_IT_MASK23_REG 0x56
92#define AB8500_IT_MASK24_REG 0x57 95#define AB8500_IT_MASK24_REG 0x57
93 96
97/*
98 * latch hierarchy registers
99 */
100#define AB8500_IT_LATCHHIER1_REG 0x60
101#define AB8500_IT_LATCHHIER2_REG 0x61
102#define AB8500_IT_LATCHHIER3_REG 0x62
103
104#define AB8500_IT_LATCHHIER_NUM 3
105
94#define AB8500_REV_REG 0x80 106#define AB8500_REV_REG 0x80
95#define AB8500_IC_NAME_REG 0x82 107#define AB8500_IC_NAME_REG 0x82
96#define AB8500_SWITCH_OFF_STATUS 0x00 108#define AB8500_SWITCH_OFF_STATUS 0x00
97 109
98#define AB8500_TURN_ON_STATUS 0x00 110#define AB8500_TURN_ON_STATUS 0x00
99 111
112static bool no_bm; /* No battery management */
113module_param(no_bm, bool, S_IRUGO);
114
100#define AB9540_MODEM_CTRL2_REG 0x23 115#define AB9540_MODEM_CTRL2_REG 0x23
101#define AB9540_MODEM_CTRL2_SWDBBRSTN_BIT BIT(2) 116#define AB9540_MODEM_CTRL2_SWDBBRSTN_BIT BIT(2)
102 117
@@ -125,6 +140,41 @@ static const char ab8500_version_str[][7] = {
125 [AB8500_VERSION_AB8540] = "AB8540", 140 [AB8500_VERSION_AB8540] = "AB8540",
126}; 141};
127 142
143static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
144{
145 int ret;
146
147 ret = prcmu_abb_write((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
148 if (ret < 0)
149 dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
150 return ret;
151}
152
153static int ab8500_i2c_write_masked(struct ab8500 *ab8500, u16 addr, u8 mask,
154 u8 data)
155{
156 int ret;
157
158 ret = prcmu_abb_write_masked((u8)(addr >> 8), (u8)(addr & 0xFF), &data,
159 &mask, 1);
160 if (ret < 0)
161 dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
162 return ret;
163}
164
165static int ab8500_i2c_read(struct ab8500 *ab8500, u16 addr)
166{
167 int ret;
168 u8 data;
169
170 ret = prcmu_abb_read((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
171 if (ret < 0) {
172 dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
173 return ret;
174 }
175 return (int)data;
176}
177
128static int ab8500_get_chip_id(struct device *dev) 178static int ab8500_get_chip_id(struct device *dev)
129{ 179{
130 struct ab8500 *ab8500; 180 struct ab8500 *ab8500;
@@ -161,9 +211,13 @@ static int set_register_interruptible(struct ab8500 *ab8500, u8 bank,
161static int ab8500_set_register(struct device *dev, u8 bank, 211static int ab8500_set_register(struct device *dev, u8 bank,
162 u8 reg, u8 value) 212 u8 reg, u8 value)
163{ 213{
214 int ret;
164 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent); 215 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
165 216
166 return set_register_interruptible(ab8500, bank, reg, value); 217 atomic_inc(&ab8500->transfer_ongoing);
218 ret = set_register_interruptible(ab8500, bank, reg, value);
219 atomic_dec(&ab8500->transfer_ongoing);
220 return ret;
167} 221}
168 222
169static int get_register_interruptible(struct ab8500 *ab8500, u8 bank, 223static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
@@ -192,9 +246,13 @@ static int get_register_interruptible(struct ab8500 *ab8500, u8 bank,
192static int ab8500_get_register(struct device *dev, u8 bank, 246static int ab8500_get_register(struct device *dev, u8 bank,
193 u8 reg, u8 *value) 247 u8 reg, u8 *value)
194{ 248{
249 int ret;
195 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent); 250 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
196 251
197 return get_register_interruptible(ab8500, bank, reg, value); 252 atomic_inc(&ab8500->transfer_ongoing);
253 ret = get_register_interruptible(ab8500, bank, reg, value);
254 atomic_dec(&ab8500->transfer_ongoing);
255 return ret;
198} 256}
199 257
200static int mask_and_set_register_interruptible(struct ab8500 *ab8500, u8 bank, 258static int mask_and_set_register_interruptible(struct ab8500 *ab8500, u8 bank,
@@ -241,11 +299,14 @@ out:
241static int ab8500_mask_and_set_register(struct device *dev, 299static int ab8500_mask_and_set_register(struct device *dev,
242 u8 bank, u8 reg, u8 bitmask, u8 bitvalues) 300 u8 bank, u8 reg, u8 bitmask, u8 bitvalues)
243{ 301{
302 int ret;
244 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent); 303 struct ab8500 *ab8500 = dev_get_drvdata(dev->parent);
245 304
246 return mask_and_set_register_interruptible(ab8500, bank, reg, 305 atomic_inc(&ab8500->transfer_ongoing);
247 bitmask, bitvalues); 306 ret= mask_and_set_register_interruptible(ab8500, bank, reg,
248 307 bitmask, bitvalues);
308 atomic_dec(&ab8500->transfer_ongoing);
309 return ret;
249} 310}
250 311
251static struct abx500_ops ab8500_ops = { 312static struct abx500_ops ab8500_ops = {
@@ -264,6 +325,7 @@ static void ab8500_irq_lock(struct irq_data *data)
264 struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data); 325 struct ab8500 *ab8500 = irq_data_get_irq_chip_data(data);
265 326
266 mutex_lock(&ab8500->irq_lock); 327 mutex_lock(&ab8500->irq_lock);
328 atomic_inc(&ab8500->transfer_ongoing);
267} 329}
268 330
269static void ab8500_irq_sync_unlock(struct irq_data *data) 331static void ab8500_irq_sync_unlock(struct irq_data *data)
@@ -292,7 +354,7 @@ static void ab8500_irq_sync_unlock(struct irq_data *data)
292 reg = AB8500_IT_MASK1_REG + ab8500->irq_reg_offset[i]; 354 reg = AB8500_IT_MASK1_REG + ab8500->irq_reg_offset[i];
293 set_register_interruptible(ab8500, AB8500_INTERRUPT, reg, new); 355 set_register_interruptible(ab8500, AB8500_INTERRUPT, reg, new);
294 } 356 }
295 357 atomic_dec(&ab8500->transfer_ongoing);
296 mutex_unlock(&ab8500->irq_lock); 358 mutex_unlock(&ab8500->irq_lock);
297} 359}
298 360
@@ -325,6 +387,90 @@ static struct irq_chip ab8500_irq_chip = {
325 .irq_unmask = ab8500_irq_unmask, 387 .irq_unmask = ab8500_irq_unmask,
326}; 388};
327 389
390static int ab8500_handle_hierarchical_line(struct ab8500 *ab8500,
391 int latch_offset, u8 latch_val)
392{
393 int int_bit = __ffs(latch_val);
394 int line, i;
395
396 do {
397 int_bit = __ffs(latch_val);
398
399 for (i = 0; i < ab8500->mask_size; i++)
400 if (ab8500->irq_reg_offset[i] == latch_offset)
401 break;
402
403 if (i >= ab8500->mask_size) {
404 dev_err(ab8500->dev, "Register offset 0x%2x not declared\n",
405 latch_offset);
406 return -ENXIO;
407 }
408
409 line = (i << 3) + int_bit;
410 latch_val &= ~(1 << int_bit);
411
412 handle_nested_irq(ab8500->irq_base + line);
413 } while (latch_val);
414
415 return 0;
416}
417
418static int ab8500_handle_hierarchical_latch(struct ab8500 *ab8500,
419 int hier_offset, u8 hier_val)
420{
421 int latch_bit, status;
422 u8 latch_offset, latch_val;
423
424 do {
425 latch_bit = __ffs(hier_val);
426 latch_offset = (hier_offset << 3) + latch_bit;
427
428 /* Fix inconsistent ITFromLatch25 bit mapping... */
429 if (unlikely(latch_offset == 17))
430 latch_offset = 24;
431
432 status = get_register_interruptible(ab8500,
433 AB8500_INTERRUPT,
434 AB8500_IT_LATCH1_REG + latch_offset,
435 &latch_val);
436 if (status < 0 || latch_val == 0)
437 goto discard;
438
439 status = ab8500_handle_hierarchical_line(ab8500,
440 latch_offset, latch_val);
441 if (status < 0)
442 return status;
443discard:
444 hier_val &= ~(1 << latch_bit);
445 } while (hier_val);
446
447 return 0;
448}
449
450static irqreturn_t ab8500_hierarchical_irq(int irq, void *dev)
451{
452 struct ab8500 *ab8500 = dev;
453 u8 i;
454
455 dev_vdbg(ab8500->dev, "interrupt\n");
456
457 /* Hierarchical interrupt version */
458 for (i = 0; i < AB8500_IT_LATCHHIER_NUM; i++) {
459 int status;
460 u8 hier_val;
461
462 status = get_register_interruptible(ab8500, AB8500_INTERRUPT,
463 AB8500_IT_LATCHHIER1_REG + i, &hier_val);
464 if (status < 0 || hier_val == 0)
465 continue;
466
467 status = ab8500_handle_hierarchical_latch(ab8500, i, hier_val);
468 if (status < 0)
469 break;
470 }
471 return IRQ_HANDLED;
472}
473
328static irqreturn_t ab8500_irq(int irq, void *dev) 474static irqreturn_t ab8500_irq(int irq, void *dev)
329{ 475{
330 struct ab8500 *ab8500 = dev; 476 struct ab8500 *ab8500 = dev;
@@ -332,6 +478,8 @@ static irqreturn_t ab8500_irq(int irq, void *dev)
332 478
333 dev_vdbg(ab8500->dev, "interrupt\n"); 479 dev_vdbg(ab8500->dev, "interrupt\n");
334 480
481 atomic_inc(&ab8500->transfer_ongoing);
482
335 for (i = 0; i < ab8500->mask_size; i++) { 483 for (i = 0; i < ab8500->mask_size; i++) {
336 int regoffset = ab8500->irq_reg_offset[i]; 484 int regoffset = ab8500->irq_reg_offset[i];
337 int status; 485 int status;
@@ -355,9 +503,10 @@ static irqreturn_t ab8500_irq(int irq, void *dev)
355 503
356 handle_nested_irq(ab8500->irq_base + line); 504 handle_nested_irq(ab8500->irq_base + line);
357 value &= ~(1 << bit); 505 value &= ~(1 << bit);
506
358 } while (value); 507 } while (value);
359 } 508 }
360 509 atomic_dec(&ab8500->transfer_ongoing);
361 return IRQ_HANDLED; 510 return IRQ_HANDLED;
362} 511}
363 512
@@ -411,6 +560,14 @@ static void ab8500_irq_remove(struct ab8500 *ab8500)
411 } 560 }
412} 561}
413 562
563int ab8500_suspend(struct ab8500 *ab8500)
564{
565 if (atomic_read(&ab8500->transfer_ongoing))
566 return -EINVAL;
567 else
568 return 0;
569}
570
414/* AB8500 GPIO Resources */ 571/* AB8500 GPIO Resources */
415static struct resource __devinitdata ab8500_gpio_resources[] = { 572static struct resource __devinitdata ab8500_gpio_resources[] = {
416 { 573 {
@@ -744,6 +901,39 @@ static struct resource __devinitdata ab8500_usb_resources[] = {
744 }, 901 },
745}; 902};
746 903
904static struct resource __devinitdata ab8505_iddet_resources[] = {
905 {
906 .name = "KeyDeglitch",
907 .start = AB8505_INT_KEYDEGLITCH,
908 .end = AB8505_INT_KEYDEGLITCH,
909 .flags = IORESOURCE_IRQ,
910 },
911 {
912 .name = "KP",
913 .start = AB8505_INT_KP,
914 .end = AB8505_INT_KP,
915 .flags = IORESOURCE_IRQ,
916 },
917 {
918 .name = "IKP",
919 .start = AB8505_INT_IKP,
920 .end = AB8505_INT_IKP,
921 .flags = IORESOURCE_IRQ,
922 },
923 {
924 .name = "IKR",
925 .start = AB8505_INT_IKR,
926 .end = AB8505_INT_IKR,
927 .flags = IORESOURCE_IRQ,
928 },
929 {
930 .name = "KeyStuck",
931 .start = AB8505_INT_KEYSTUCK,
932 .end = AB8505_INT_KEYSTUCK,
933 .flags = IORESOURCE_IRQ,
934 },
935};
936
747static struct resource __devinitdata ab8500_temp_resources[] = { 937static struct resource __devinitdata ab8500_temp_resources[] = {
748 { 938 {
749 .name = "AB8500_TEMP_WARM", 939 .name = "AB8500_TEMP_WARM",
@@ -778,35 +968,11 @@ static struct mfd_cell __devinitdata abx500_common_devs[] = {
778 .resources = ab8500_rtc_resources, 968 .resources = ab8500_rtc_resources,
779 }, 969 },
780 { 970 {
781 .name = "ab8500-charger",
782 .num_resources = ARRAY_SIZE(ab8500_charger_resources),
783 .resources = ab8500_charger_resources,
784 },
785 {
786 .name = "ab8500-btemp",
787 .num_resources = ARRAY_SIZE(ab8500_btemp_resources),
788 .resources = ab8500_btemp_resources,
789 },
790 {
791 .name = "ab8500-fg",
792 .num_resources = ARRAY_SIZE(ab8500_fg_resources),
793 .resources = ab8500_fg_resources,
794 },
795 {
796 .name = "ab8500-chargalg",
797 .num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
798 .resources = ab8500_chargalg_resources,
799 },
800 {
801 .name = "ab8500-acc-det", 971 .name = "ab8500-acc-det",
802 .num_resources = ARRAY_SIZE(ab8500_av_acc_detect_resources), 972 .num_resources = ARRAY_SIZE(ab8500_av_acc_detect_resources),
803 .resources = ab8500_av_acc_detect_resources, 973 .resources = ab8500_av_acc_detect_resources,
804 }, 974 },
805 { 975 {
806 .name = "ab8500-codec",
807 },
808
809 {
810 .name = "ab8500-poweron-key", 976 .name = "ab8500-poweron-key",
811 .num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources), 977 .num_resources = ARRAY_SIZE(ab8500_poweronkey_db_resources),
812 .resources = ab8500_poweronkey_db_resources, 978 .resources = ab8500_poweronkey_db_resources,
@@ -834,6 +1000,29 @@ static struct mfd_cell __devinitdata abx500_common_devs[] = {
834 }, 1000 },
835}; 1001};
836 1002
1003static struct mfd_cell __devinitdata ab8500_bm_devs[] = {
1004 {
1005 .name = "ab8500-charger",
1006 .num_resources = ARRAY_SIZE(ab8500_charger_resources),
1007 .resources = ab8500_charger_resources,
1008 },
1009 {
1010 .name = "ab8500-btemp",
1011 .num_resources = ARRAY_SIZE(ab8500_btemp_resources),
1012 .resources = ab8500_btemp_resources,
1013 },
1014 {
1015 .name = "ab8500-fg",
1016 .num_resources = ARRAY_SIZE(ab8500_fg_resources),
1017 .resources = ab8500_fg_resources,
1018 },
1019 {
1020 .name = "ab8500-chargalg",
1021 .num_resources = ARRAY_SIZE(ab8500_chargalg_resources),
1022 .resources = ab8500_chargalg_resources,
1023 },
1024};
1025
837static struct mfd_cell __devinitdata ab8500_devs[] = { 1026static struct mfd_cell __devinitdata ab8500_devs[] = {
838 { 1027 {
839 .name = "ab8500-gpio", 1028 .name = "ab8500-gpio",
@@ -845,6 +1034,9 @@ static struct mfd_cell __devinitdata ab8500_devs[] = {
845 .num_resources = ARRAY_SIZE(ab8500_usb_resources), 1034 .num_resources = ARRAY_SIZE(ab8500_usb_resources),
846 .resources = ab8500_usb_resources, 1035 .resources = ab8500_usb_resources,
847 }, 1036 },
1037 {
1038 .name = "ab8500-codec",
1039 },
848}; 1040};
849 1041
850static struct mfd_cell __devinitdata ab9540_devs[] = { 1042static struct mfd_cell __devinitdata ab9540_devs[] = {
@@ -858,6 +1050,18 @@ static struct mfd_cell __devinitdata ab9540_devs[] = {
858 .num_resources = ARRAY_SIZE(ab8500_usb_resources), 1050 .num_resources = ARRAY_SIZE(ab8500_usb_resources),
859 .resources = ab8500_usb_resources, 1051 .resources = ab8500_usb_resources,
860 }, 1052 },
1053 {
1054 .name = "ab9540-codec",
1055 },
1056};
1057
1058/* Device list common to ab9540 and ab8505 */
1059static struct mfd_cell __devinitdata ab9540_ab8505_devs[] = {
1060 {
1061 .name = "ab-iddet",
1062 .num_resources = ARRAY_SIZE(ab8505_iddet_resources),
1063 .resources = ab8505_iddet_resources,
1064 },
861}; 1065};
862 1066
863static ssize_t show_chip_id(struct device *dev, 1067static ssize_t show_chip_id(struct device *dev,
@@ -1003,18 +1207,66 @@ static struct attribute_group ab9540_attr_group = {
1003 .attrs = ab9540_sysfs_entries, 1207 .attrs = ab9540_sysfs_entries,
1004}; 1208};
1005 1209
1006int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version) 1210static const struct of_device_id ab8500_match[] = {
1211 {
1212 .compatible = "stericsson,ab8500",
1213 .data = (void *)AB8500_VERSION_AB8500,
1214 },
1215 {},
1216};
1217
1218static int __devinit ab8500_probe(struct platform_device *pdev)
1007{ 1219{
1008 struct ab8500_platform_data *plat = dev_get_platdata(ab8500->dev); 1220 struct ab8500_platform_data *plat = dev_get_platdata(&pdev->dev);
1221 const struct platform_device_id *platid = platform_get_device_id(pdev);
1222 enum ab8500_version version = AB8500_VERSION_UNDEFINED;
1223 struct device_node *np = pdev->dev.of_node;
1224 struct ab8500 *ab8500;
1225 struct resource *resource;
1009 int ret; 1226 int ret;
1010 int i; 1227 int i;
1011 u8 value; 1228 u8 value;
1012 1229
1230 ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
1231 if (!ab8500)
1232 return -ENOMEM;
1233
1013 if (plat) 1234 if (plat)
1014 ab8500->irq_base = plat->irq_base; 1235 ab8500->irq_base = plat->irq_base;
1236 else if (np)
1237 ret = of_property_read_u32(np, "stericsson,irq-base", &ab8500->irq_base);
1238
1239 if (!ab8500->irq_base) {
1240 dev_info(&pdev->dev, "couldn't find irq-base\n");
1241 ret = -EINVAL;
1242 goto out_free_ab8500;
1243 }
1244
1245 ab8500->dev = &pdev->dev;
1246
1247 resource = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1248 if (!resource) {
1249 ret = -ENODEV;
1250 goto out_free_ab8500;
1251 }
1252
1253 ab8500->irq = resource->start;
1254
1255 ab8500->read = ab8500_i2c_read;
1256 ab8500->write = ab8500_i2c_write;
1257 ab8500->write_masked = ab8500_i2c_write_masked;
1015 1258
1016 mutex_init(&ab8500->lock); 1259 mutex_init(&ab8500->lock);
1017 mutex_init(&ab8500->irq_lock); 1260 mutex_init(&ab8500->irq_lock);
1261 atomic_set(&ab8500->transfer_ongoing, 0);
1262
1263 platform_set_drvdata(pdev, ab8500);
1264
1265 if (platid)
1266 version = platid->driver_data;
1267 else if (np)
1268 version = (unsigned int)
1269 of_match_device(ab8500_match, &pdev->dev)->data;
1018 1270
1019 if (version != AB8500_VERSION_UNDEFINED) 1271 if (version != AB8500_VERSION_UNDEFINED)
1020 ab8500->version = version; 1272 ab8500->version = version;
@@ -1022,7 +1274,7 @@ int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version)
1022 ret = get_register_interruptible(ab8500, AB8500_MISC, 1274 ret = get_register_interruptible(ab8500, AB8500_MISC,
1023 AB8500_IC_NAME_REG, &value); 1275 AB8500_IC_NAME_REG, &value);
1024 if (ret < 0) 1276 if (ret < 0)
1025 return ret; 1277 goto out_free_ab8500;
1026 1278
1027 ab8500->version = value; 1279 ab8500->version = value;
1028 } 1280 }
@@ -1030,7 +1282,7 @@ int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version)
1030 ret = get_register_interruptible(ab8500, AB8500_MISC, 1282 ret = get_register_interruptible(ab8500, AB8500_MISC,
1031 AB8500_REV_REG, &value); 1283 AB8500_REV_REG, &value);
1032 if (ret < 0) 1284 if (ret < 0)
1033 return ret; 1285 goto out_free_ab8500;
1034 1286
1035 ab8500->chip_id = value; 1287 ab8500->chip_id = value;
1036 1288
@@ -1105,30 +1357,57 @@ int __devinit ab8500_init(struct ab8500 *ab8500, enum ab8500_version version)
1105 if (ret) 1357 if (ret)
1106 goto out_freeoldmask; 1358 goto out_freeoldmask;
1107 1359
1108 ret = request_threaded_irq(ab8500->irq, NULL, ab8500_irq, 1360 /* Activate this feature only in ab9540 */
1109 IRQF_ONESHOT | IRQF_NO_SUSPEND, 1361 /* till tests are done on ab8500 1p2 or later*/
1110 "ab8500", ab8500); 1362 if (is_ab9540(ab8500))
1363 ret = request_threaded_irq(ab8500->irq, NULL,
1364 ab8500_hierarchical_irq,
1365 IRQF_ONESHOT | IRQF_NO_SUSPEND,
1366 "ab8500", ab8500);
1367 else
1368 ret = request_threaded_irq(ab8500->irq, NULL,
1369 ab8500_irq,
1370 IRQF_ONESHOT | IRQF_NO_SUSPEND,
1371 "ab8500", ab8500);
1111 if (ret) 1372 if (ret)
1112 goto out_removeirq; 1373 goto out_removeirq;
1113 } 1374 }
1114 1375
1115 ret = mfd_add_devices(ab8500->dev, 0, abx500_common_devs, 1376 if (!np) {
1116 ARRAY_SIZE(abx500_common_devs), NULL, 1377 ret = mfd_add_devices(ab8500->dev, 0, abx500_common_devs,
1117 ab8500->irq_base); 1378 ARRAY_SIZE(abx500_common_devs), NULL,
1379 ab8500->irq_base);
1118 1380
1119 if (ret) 1381 if (ret)
1120 goto out_freeirq; 1382 goto out_freeirq;
1383
1384 if (is_ab9540(ab8500))
1385 ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs,
1386 ARRAY_SIZE(ab9540_devs), NULL,
1387 ab8500->irq_base);
1388 else
1389 ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs,
1390 ARRAY_SIZE(ab8500_devs), NULL,
1391 ab8500->irq_base);
1392 if (ret)
1393 goto out_freeirq;
1121 1394
1122 if (is_ab9540(ab8500)) 1395 if (is_ab9540(ab8500) || is_ab8505(ab8500))
1123 ret = mfd_add_devices(ab8500->dev, 0, ab9540_devs, 1396 ret = mfd_add_devices(ab8500->dev, 0, ab9540_ab8505_devs,
1124 ARRAY_SIZE(ab9540_devs), NULL, 1397 ARRAY_SIZE(ab9540_ab8505_devs), NULL,
1125 ab8500->irq_base); 1398 ab8500->irq_base);
1126 else 1399 if (ret)
1127 ret = mfd_add_devices(ab8500->dev, 0, ab8500_devs, 1400 goto out_freeirq;
1128 ARRAY_SIZE(ab9540_devs), NULL, 1401 }
1129 ab8500->irq_base); 1402
1130 if (ret) 1403 if (!no_bm) {
1131 goto out_freeirq; 1404 /* Add battery management devices */
1405 ret = mfd_add_devices(ab8500->dev, 0, ab8500_bm_devs,
1406 ARRAY_SIZE(ab8500_bm_devs), NULL,
1407 ab8500->irq_base);
1408 if (ret)
1409 dev_err(ab8500->dev, "error adding bm devices\n");
1410 }
1132 1411
1133 if (is_ab9540(ab8500)) 1412 if (is_ab9540(ab8500))
1134 ret = sysfs_create_group(&ab8500->dev->kobj, 1413 ret = sysfs_create_group(&ab8500->dev->kobj,
@@ -1151,12 +1430,16 @@ out_freeoldmask:
1151 kfree(ab8500->oldmask); 1430 kfree(ab8500->oldmask);
1152out_freemask: 1431out_freemask:
1153 kfree(ab8500->mask); 1432 kfree(ab8500->mask);
1433out_free_ab8500:
1434 kfree(ab8500);
1154 1435
1155 return ret; 1436 return ret;
1156} 1437}
1157 1438
1158int __devexit ab8500_exit(struct ab8500 *ab8500) 1439static int __devexit ab8500_remove(struct platform_device *pdev)
1159{ 1440{
1441 struct ab8500 *ab8500 = platform_get_drvdata(pdev);
1442
1160 if (is_ab9540(ab8500)) 1443 if (is_ab9540(ab8500))
1161 sysfs_remove_group(&ab8500->dev->kobj, &ab9540_attr_group); 1444 sysfs_remove_group(&ab8500->dev->kobj, &ab9540_attr_group);
1162 else 1445 else
@@ -1168,10 +1451,42 @@ int __devexit ab8500_exit(struct ab8500 *ab8500)
1168 } 1451 }
1169 kfree(ab8500->oldmask); 1452 kfree(ab8500->oldmask);
1170 kfree(ab8500->mask); 1453 kfree(ab8500->mask);
1454 kfree(ab8500);
1171 1455
1172 return 0; 1456 return 0;
1173} 1457}
1174 1458
1459static const struct platform_device_id ab8500_id[] = {
1460 { "ab8500-core", AB8500_VERSION_AB8500 },
1461 { "ab8505-i2c", AB8500_VERSION_AB8505 },
1462 { "ab9540-i2c", AB8500_VERSION_AB9540 },
1463 { "ab8540-i2c", AB8500_VERSION_AB8540 },
1464 { }
1465};
1466
1467static struct platform_driver ab8500_core_driver = {
1468 .driver = {
1469 .name = "ab8500-core",
1470 .owner = THIS_MODULE,
1471 .of_match_table = ab8500_match,
1472 },
1473 .probe = ab8500_probe,
1474 .remove = __devexit_p(ab8500_remove),
1475 .id_table = ab8500_id,
1476};
1477
1478static int __init ab8500_core_init(void)
1479{
1480 return platform_driver_register(&ab8500_core_driver);
1481}
1482
1483static void __exit ab8500_core_exit(void)
1484{
1485 platform_driver_unregister(&ab8500_core_driver);
1486}
1487arch_initcall(ab8500_core_init);
1488module_exit(ab8500_core_exit);
1489
1175MODULE_AUTHOR("Mattias Wallin, Srinidhi Kasagar, Rabin Vincent"); 1490MODULE_AUTHOR("Mattias Wallin, Srinidhi Kasagar, Rabin Vincent");
1176MODULE_DESCRIPTION("AB8500 MFD core"); 1491MODULE_DESCRIPTION("AB8500 MFD core");
1177MODULE_LICENSE("GPL v2"); 1492MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-debugfs.c b/drivers/mfd/ab8500-debugfs.c
index 9a0211aa8897..50c4c89ab220 100644
--- a/drivers/mfd/ab8500-debugfs.c
+++ b/drivers/mfd/ab8500-debugfs.c
@@ -608,10 +608,16 @@ static int __devexit ab8500_debug_remove(struct platform_device *plf)
608 return 0; 608 return 0;
609} 609}
610 610
611static const struct of_device_id ab8500_debug_match[] = {
612 { .compatible = "stericsson,ab8500-debug", },
613 {}
614};
615
611static struct platform_driver ab8500_debug_driver = { 616static struct platform_driver ab8500_debug_driver = {
612 .driver = { 617 .driver = {
613 .name = "ab8500-debug", 618 .name = "ab8500-debug",
614 .owner = THIS_MODULE, 619 .owner = THIS_MODULE,
620 .of_match_table = ab8500_debug_match,
615 }, 621 },
616 .probe = ab8500_debug_probe, 622 .probe = ab8500_debug_probe,
617 .remove = __devexit_p(ab8500_debug_remove) 623 .remove = __devexit_p(ab8500_debug_remove)
diff --git a/drivers/mfd/ab8500-gpadc.c b/drivers/mfd/ab8500-gpadc.c
index c39fc716e1dc..b86fd8e1ec3f 100644
--- a/drivers/mfd/ab8500-gpadc.c
+++ b/drivers/mfd/ab8500-gpadc.c
@@ -584,7 +584,7 @@ static int __devinit ab8500_gpadc_probe(struct platform_device *pdev)
584 584
585 gpadc->irq = platform_get_irq_byname(pdev, "SW_CONV_END"); 585 gpadc->irq = platform_get_irq_byname(pdev, "SW_CONV_END");
586 if (gpadc->irq < 0) { 586 if (gpadc->irq < 0) {
587 dev_err(gpadc->dev, "failed to get platform irq-%d\n", 587 dev_err(&pdev->dev, "failed to get platform irq-%d\n",
588 gpadc->irq); 588 gpadc->irq);
589 ret = gpadc->irq; 589 ret = gpadc->irq;
590 goto fail; 590 goto fail;
@@ -648,12 +648,18 @@ static int __devexit ab8500_gpadc_remove(struct platform_device *pdev)
648 return 0; 648 return 0;
649} 649}
650 650
651static const struct of_device_id ab8500_gpadc_match[] = {
652 { .compatible = "stericsson,ab8500-gpadc", },
653 {}
654};
655
651static struct platform_driver ab8500_gpadc_driver = { 656static struct platform_driver ab8500_gpadc_driver = {
652 .probe = ab8500_gpadc_probe, 657 .probe = ab8500_gpadc_probe,
653 .remove = __devexit_p(ab8500_gpadc_remove), 658 .remove = __devexit_p(ab8500_gpadc_remove),
654 .driver = { 659 .driver = {
655 .name = "ab8500-gpadc", 660 .name = "ab8500-gpadc",
656 .owner = THIS_MODULE, 661 .owner = THIS_MODULE,
662 .of_match_table = ab8500_gpadc_match,
657 }, 663 },
658}; 664};
659 665
diff --git a/drivers/mfd/ab8500-i2c.c b/drivers/mfd/ab8500-i2c.c
deleted file mode 100644
index b83045f102be..000000000000
--- a/drivers/mfd/ab8500-i2c.c
+++ /dev/null
@@ -1,128 +0,0 @@
1/*
2 * Copyright (C) ST-Ericsson SA 2010
3 * Author: Mattias Wallin <mattias.wallin@stericsson.com> for ST-Ericsson.
4 * License Terms: GNU General Public License v2
5 * This file was based on drivers/mfd/ab8500-spi.c
6 */
7
8#include <linux/kernel.h>
9#include <linux/slab.h>
10#include <linux/init.h>
11#include <linux/module.h>
12#include <linux/platform_device.h>
13#include <linux/mfd/abx500/ab8500.h>
14#include <linux/mfd/dbx500-prcmu.h>
15
16static int ab8500_i2c_write(struct ab8500 *ab8500, u16 addr, u8 data)
17{
18 int ret;
19
20 ret = prcmu_abb_write((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
21 if (ret < 0)
22 dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
23 return ret;
24}
25
26static int ab8500_i2c_write_masked(struct ab8500 *ab8500, u16 addr, u8 mask,
27 u8 data)
28{
29 int ret;
30
31 ret = prcmu_abb_write_masked((u8)(addr >> 8), (u8)(addr & 0xFF), &data,
32 &mask, 1);
33 if (ret < 0)
34 dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
35 return ret;
36}
37
38static int ab8500_i2c_read(struct ab8500 *ab8500, u16 addr)
39{
40 int ret;
41 u8 data;
42
43 ret = prcmu_abb_read((u8)(addr >> 8), (u8)(addr & 0xFF), &data, 1);
44 if (ret < 0) {
45 dev_err(ab8500->dev, "prcmu i2c error %d\n", ret);
46 return ret;
47 }
48 return (int)data;
49}
50
51static int __devinit ab8500_i2c_probe(struct platform_device *plf)
52{
53 const struct platform_device_id *platid = platform_get_device_id(plf);
54 struct ab8500 *ab8500;
55 struct resource *resource;
56 int ret;
57
58 ab8500 = kzalloc(sizeof *ab8500, GFP_KERNEL);
59 if (!ab8500)
60 return -ENOMEM;
61
62 ab8500->dev = &plf->dev;
63
64 resource = platform_get_resource(plf, IORESOURCE_IRQ, 0);
65 if (!resource) {
66 kfree(ab8500);
67 return -ENODEV;
68 }
69
70 ab8500->irq = resource->start;
71
72 ab8500->read = ab8500_i2c_read;
73 ab8500->write = ab8500_i2c_write;
74 ab8500->write_masked = ab8500_i2c_write_masked;
75
76 platform_set_drvdata(plf, ab8500);
77
78 ret = ab8500_init(ab8500, platid->driver_data);
79 if (ret)
80 kfree(ab8500);
81
82
83 return ret;
84}
85
86static int __devexit ab8500_i2c_remove(struct platform_device *plf)
87{
88 struct ab8500 *ab8500 = platform_get_drvdata(plf);
89
90 ab8500_exit(ab8500);
91 kfree(ab8500);
92
93 return 0;
94}
95
96static const struct platform_device_id ab8500_id[] = {
97 { "ab8500-i2c", AB8500_VERSION_AB8500 },
98 { "ab8505-i2c", AB8500_VERSION_AB8505 },
99 { "ab9540-i2c", AB8500_VERSION_AB9540 },
100 { "ab8540-i2c", AB8500_VERSION_AB8540 },
101 { }
102};
103
104static struct platform_driver ab8500_i2c_driver = {
105 .driver = {
106 .name = "ab8500-i2c",
107 .owner = THIS_MODULE,
108 },
109 .probe = ab8500_i2c_probe,
110 .remove = __devexit_p(ab8500_i2c_remove),
111 .id_table = ab8500_id,
112};
113
114static int __init ab8500_i2c_init(void)
115{
116 return platform_driver_register(&ab8500_i2c_driver);
117}
118
119static void __exit ab8500_i2c_exit(void)
120{
121 platform_driver_unregister(&ab8500_i2c_driver);
122}
123arch_initcall(ab8500_i2c_init);
124module_exit(ab8500_i2c_exit);
125
126MODULE_AUTHOR("Mattias WALLIN <mattias.wallin@stericsson.com");
127MODULE_DESCRIPTION("AB8500 Core access via PRCMU I2C");
128MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/ab8500-sysctrl.c b/drivers/mfd/ab8500-sysctrl.c
index c28d4eb1eff0..5a3e51ccf258 100644
--- a/drivers/mfd/ab8500-sysctrl.c
+++ b/drivers/mfd/ab8500-sysctrl.c
@@ -61,10 +61,16 @@ static int __devexit ab8500_sysctrl_remove(struct platform_device *pdev)
61 return 0; 61 return 0;
62} 62}
63 63
64static const struct of_device_id ab8500_sysctrl_match[] = {
65 { .compatible = "stericsson,ab8500-sysctrl", },
66 {}
67};
68
64static struct platform_driver ab8500_sysctrl_driver = { 69static struct platform_driver ab8500_sysctrl_driver = {
65 .driver = { 70 .driver = {
66 .name = "ab8500-sysctrl", 71 .name = "ab8500-sysctrl",
67 .owner = THIS_MODULE, 72 .owner = THIS_MODULE,
73 .of_match_table = ab8500_sysctrl_match,
68 }, 74 },
69 .probe = ab8500_sysctrl_probe, 75 .probe = ab8500_sysctrl_probe,
70 .remove = __devexit_p(ab8500_sysctrl_remove), 76 .remove = __devexit_p(ab8500_sysctrl_remove),
diff --git a/drivers/mfd/anatop-mfd.c b/drivers/mfd/anatop-mfd.c
index 2af42480635e..6da06341f6c9 100644
--- a/drivers/mfd/anatop-mfd.c
+++ b/drivers/mfd/anatop-mfd.c
@@ -41,39 +41,26 @@
41#include <linux/of_address.h> 41#include <linux/of_address.h>
42#include <linux/mfd/anatop.h> 42#include <linux/mfd/anatop.h>
43 43
44u32 anatop_get_bits(struct anatop *adata, u32 addr, int bit_shift, 44u32 anatop_read_reg(struct anatop *adata, u32 addr)
45 int bit_width)
46{ 45{
47 u32 val, mask; 46 return readl(adata->ioreg + addr);
48
49 if (bit_width == 32)
50 mask = ~0;
51 else
52 mask = (1 << bit_width) - 1;
53
54 val = readl(adata->ioreg + addr);
55 val = (val >> bit_shift) & mask;
56
57 return val;
58} 47}
59EXPORT_SYMBOL_GPL(anatop_get_bits); 48EXPORT_SYMBOL_GPL(anatop_read_reg);
60 49
61void anatop_set_bits(struct anatop *adata, u32 addr, int bit_shift, 50void anatop_write_reg(struct anatop *adata, u32 addr, u32 data, u32 mask)
62 int bit_width, u32 data)
63{ 51{
64 u32 val, mask; 52 u32 val;
65 53
66 if (bit_width == 32) 54 data &= mask;
67 mask = ~0;
68 else
69 mask = (1 << bit_width) - 1;
70 55
71 spin_lock(&adata->reglock); 56 spin_lock(&adata->reglock);
72 val = readl(adata->ioreg + addr) & ~(mask << bit_shift); 57 val = readl(adata->ioreg + addr);
73 writel((data << bit_shift) | val, adata->ioreg + addr); 58 val &= ~mask;
59 val |= data;
60 writel(val, adata->ioreg + addr);
74 spin_unlock(&adata->reglock); 61 spin_unlock(&adata->reglock);
75} 62}
76EXPORT_SYMBOL_GPL(anatop_set_bits); 63EXPORT_SYMBOL_GPL(anatop_write_reg);
77 64
78static const struct of_device_id of_anatop_match[] = { 65static const struct of_device_id of_anatop_match[] = {
79 { .compatible = "fsl,imx6q-anatop", }, 66 { .compatible = "fsl,imx6q-anatop", },
diff --git a/drivers/mfd/asic3.c b/drivers/mfd/asic3.c
index 1582c3d95257..383421bf5760 100644
--- a/drivers/mfd/asic3.c
+++ b/drivers/mfd/asic3.c
@@ -353,12 +353,28 @@ static int asic3_gpio_irq_type(struct irq_data *data, unsigned int type)
353 return 0; 353 return 0;
354} 354}
355 355
356static int asic3_gpio_irq_set_wake(struct irq_data *data, unsigned int on)
357{
358 struct asic3 *asic = irq_data_get_irq_chip_data(data);
359 u32 bank, index;
360 u16 bit;
361
362 bank = asic3_irq_to_bank(asic, data->irq);
363 index = asic3_irq_to_index(asic, data->irq);
364 bit = 1<<index;
365
366 asic3_set_register(asic, bank + ASIC3_GPIO_SLEEP_MASK, bit, !on);
367
368 return 0;
369}
370
356static struct irq_chip asic3_gpio_irq_chip = { 371static struct irq_chip asic3_gpio_irq_chip = {
357 .name = "ASIC3-GPIO", 372 .name = "ASIC3-GPIO",
358 .irq_ack = asic3_mask_gpio_irq, 373 .irq_ack = asic3_mask_gpio_irq,
359 .irq_mask = asic3_mask_gpio_irq, 374 .irq_mask = asic3_mask_gpio_irq,
360 .irq_unmask = asic3_unmask_gpio_irq, 375 .irq_unmask = asic3_unmask_gpio_irq,
361 .irq_set_type = asic3_gpio_irq_type, 376 .irq_set_type = asic3_gpio_irq_type,
377 .irq_set_wake = asic3_gpio_irq_set_wake,
362}; 378};
363 379
364static struct irq_chip asic3_irq_chip = { 380static struct irq_chip asic3_irq_chip = {
@@ -529,7 +545,7 @@ static int asic3_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
529{ 545{
530 struct asic3 *asic = container_of(chip, struct asic3, gpio); 546 struct asic3 *asic = container_of(chip, struct asic3, gpio);
531 547
532 return (offset < ASIC3_NUM_GPIOS) ? asic->irq_base + offset : -ENXIO; 548 return asic->irq_base + offset;
533} 549}
534 550
535static __init int asic3_gpio_probe(struct platform_device *pdev, 551static __init int asic3_gpio_probe(struct platform_device *pdev,
@@ -894,10 +910,13 @@ static int __init asic3_mfd_probe(struct platform_device *pdev,
894 asic3_mmc_resources[0].start >>= asic->bus_shift; 910 asic3_mmc_resources[0].start >>= asic->bus_shift;
895 asic3_mmc_resources[0].end >>= asic->bus_shift; 911 asic3_mmc_resources[0].end >>= asic->bus_shift;
896 912
897 ret = mfd_add_devices(&pdev->dev, pdev->id, 913 if (pdata->clock_rate) {
914 ds1wm_pdata.clock_rate = pdata->clock_rate;
915 ret = mfd_add_devices(&pdev->dev, pdev->id,
898 &asic3_cell_ds1wm, 1, mem, asic->irq_base); 916 &asic3_cell_ds1wm, 1, mem, asic->irq_base);
899 if (ret < 0) 917 if (ret < 0)
900 goto out; 918 goto out;
919 }
901 920
902 if (mem_sdio && (irq >= 0)) { 921 if (mem_sdio && (irq >= 0)) {
903 ret = mfd_add_devices(&pdev->dev, pdev->id, 922 ret = mfd_add_devices(&pdev->dev, pdev->id,
@@ -1000,6 +1019,9 @@ static int __init asic3_probe(struct platform_device *pdev)
1000 1019
1001 asic3_mfd_probe(pdev, pdata, mem); 1020 asic3_mfd_probe(pdev, pdata, mem);
1002 1021
1022 asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
1023 (ASIC3_EXTCF_CF0_BUF_EN|ASIC3_EXTCF_CF0_PWAIT_EN), 1);
1024
1003 dev_info(asic->dev, "ASIC3 Core driver\n"); 1025 dev_info(asic->dev, "ASIC3 Core driver\n");
1004 1026
1005 return 0; 1027 return 0;
@@ -1021,6 +1043,9 @@ static int __devexit asic3_remove(struct platform_device *pdev)
1021 int ret; 1043 int ret;
1022 struct asic3 *asic = platform_get_drvdata(pdev); 1044 struct asic3 *asic = platform_get_drvdata(pdev);
1023 1045
1046 asic3_set_register(asic, ASIC3_OFFSET(EXTCF, SELECT),
1047 (ASIC3_EXTCF_CF0_BUF_EN|ASIC3_EXTCF_CF0_PWAIT_EN), 0);
1048
1024 asic3_mfd_remove(pdev); 1049 asic3_mfd_remove(pdev);
1025 1050
1026 ret = asic3_gpio_remove(pdev); 1051 ret = asic3_gpio_remove(pdev);
diff --git a/drivers/mfd/cs5535-mfd.c b/drivers/mfd/cs5535-mfd.c
index 315fef5d466a..3419e726de47 100644
--- a/drivers/mfd/cs5535-mfd.c
+++ b/drivers/mfd/cs5535-mfd.c
@@ -186,18 +186,7 @@ static struct pci_driver cs5535_mfd_driver = {
186 .remove = __devexit_p(cs5535_mfd_remove), 186 .remove = __devexit_p(cs5535_mfd_remove),
187}; 187};
188 188
189static int __init cs5535_mfd_init(void) 189module_pci_driver(cs5535_mfd_driver);
190{
191 return pci_register_driver(&cs5535_mfd_driver);
192}
193
194static void __exit cs5535_mfd_exit(void)
195{
196 pci_unregister_driver(&cs5535_mfd_driver);
197}
198
199module_init(cs5535_mfd_init);
200module_exit(cs5535_mfd_exit);
201 190
202MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>"); 191MODULE_AUTHOR("Andres Salomon <dilinger@queued.net>");
203MODULE_DESCRIPTION("MFD driver for CS5535/CS5536 southbridge's ISA PCI device"); 192MODULE_DESCRIPTION("MFD driver for CS5535/CS5536 southbridge's ISA PCI device");
diff --git a/drivers/mfd/da9052-core.c b/drivers/mfd/da9052-core.c
index 7776aff46269..1f1313c90573 100644
--- a/drivers/mfd/da9052-core.c
+++ b/drivers/mfd/da9052-core.c
@@ -318,6 +318,135 @@ static bool da9052_reg_volatile(struct device *dev, unsigned int reg)
318 } 318 }
319} 319}
320 320
321/*
322 * TBAT look-up table is computed from the R90 reg (8 bit register)
323 * reading as below. The battery temperature is in milliCentigrade
324 * TBAT = (1/(t1+1/298) - 273) * 1000 mC
325 * where t1 = (1/B)* ln(( ADCval * 2.5)/(R25*ITBAT*255))
326 * Default values are R25 = 10e3, B = 3380, ITBAT = 50e-6
327 * Example:
328 * R25=10E3, B=3380, ITBAT=50e-6, ADCVAL=62d calculates
329 * TBAT = 20015 mili degrees Centrigrade
330 *
331*/
332static const int32_t tbat_lookup[255] = {
333 183258, 144221, 124334, 111336, 101826, 94397, 88343, 83257,
334 78889, 75071, 71688, 68656, 65914, 63414, 61120, 59001,
335 570366, 55204, 53490, 51881, 50364, 48931, 47574, 46285,
336 45059, 43889, 42772, 41703, 40678, 39694, 38748, 37838,
337 36961, 36115, 35297, 34507, 33743, 33002, 32284, 31588,
338 30911, 30254, 29615, 28994, 28389, 27799, 27225, 26664,
339 26117, 25584, 25062, 24553, 24054, 23567, 23091, 22624,
340 22167, 21719, 21281, 20851, 20429, 20015, 19610, 19211,
341 18820, 18436, 18058, 17688, 17323, 16965, 16612, 16266,
342 15925, 15589, 15259, 14933, 14613, 14298, 13987, 13681,
343 13379, 13082, 12788, 12499, 12214, 11933, 11655, 11382,
344 11112, 10845, 10582, 10322, 10066, 9812, 9562, 9315,
345 9071, 8830, 8591, 8356, 8123, 7893, 7665, 7440,
346 7218, 6998, 6780, 6565, 6352, 6141, 5933, 5726,
347 5522, 5320, 5120, 4922, 4726, 4532, 4340, 4149,
348 3961, 3774, 3589, 3406, 3225, 3045, 2867, 2690,
349 2516, 2342, 2170, 2000, 1831, 1664, 1498, 1334,
350 1171, 1009, 849, 690, 532, 376, 221, 67,
351 -84, -236, -386, -535, -683, -830, -975, -1119,
352 -1263, -1405, -1546, -1686, -1825, -1964, -2101, -2237,
353 -2372, -2506, -2639, -2771, -2902, -3033, -3162, -3291,
354 -3418, -3545, -3671, -3796, -3920, -4044, -4166, -4288,
355 -4409, -4529, -4649, -4767, -4885, -5002, -5119, -5235,
356 -5349, -5464, -5577, -5690, -5802, -5913, -6024, -6134,
357 -6244, -6352, -6461, -6568, -6675, -6781, -6887, -6992,
358 -7096, -7200, -7303, -7406, -7508, -7609, -7710, -7810,
359 -7910, -8009, -8108, -8206, -8304, -8401, -8497, -8593,
360 -8689, -8784, -8878, -8972, -9066, -9159, -9251, -9343,
361 -9435, -9526, -9617, -9707, -9796, -9886, -9975, -10063,
362 -10151, -10238, -10325, -10412, -10839, -10923, -11007, -11090,
363 -11173, -11256, -11338, -11420, -11501, -11583, -11663, -11744,
364 -11823, -11903, -11982
365};
366
367static const u8 chan_mux[DA9052_ADC_VBBAT + 1] = {
368 [DA9052_ADC_VDDOUT] = DA9052_ADC_MAN_MUXSEL_VDDOUT,
369 [DA9052_ADC_ICH] = DA9052_ADC_MAN_MUXSEL_ICH,
370 [DA9052_ADC_TBAT] = DA9052_ADC_MAN_MUXSEL_TBAT,
371 [DA9052_ADC_VBAT] = DA9052_ADC_MAN_MUXSEL_VBAT,
372 [DA9052_ADC_IN4] = DA9052_ADC_MAN_MUXSEL_AD4,
373 [DA9052_ADC_IN5] = DA9052_ADC_MAN_MUXSEL_AD5,
374 [DA9052_ADC_IN6] = DA9052_ADC_MAN_MUXSEL_AD6,
375 [DA9052_ADC_VBBAT] = DA9052_ADC_MAN_MUXSEL_VBBAT
376};
377
378int da9052_adc_manual_read(struct da9052 *da9052, unsigned char channel)
379{
380 int ret;
381 unsigned short calc_data;
382 unsigned short data;
383 unsigned char mux_sel;
384
385 if (channel > DA9052_ADC_VBBAT)
386 return -EINVAL;
387
388 mutex_lock(&da9052->auxadc_lock);
389
390 /* Channel gets activated on enabling the Conversion bit */
391 mux_sel = chan_mux[channel] | DA9052_ADC_MAN_MAN_CONV;
392
393 ret = da9052_reg_write(da9052, DA9052_ADC_MAN_REG, mux_sel);
394 if (ret < 0)
395 goto err;
396
397 /* Wait for an interrupt */
398 if (!wait_for_completion_timeout(&da9052->done,
399 msecs_to_jiffies(500))) {
400 dev_err(da9052->dev,
401 "timeout waiting for ADC conversion interrupt\n");
402 ret = -ETIMEDOUT;
403 goto err;
404 }
405
406 ret = da9052_reg_read(da9052, DA9052_ADC_RES_H_REG);
407 if (ret < 0)
408 goto err;
409
410 calc_data = (unsigned short)ret;
411 data = calc_data << 2;
412
413 ret = da9052_reg_read(da9052, DA9052_ADC_RES_L_REG);
414 if (ret < 0)
415 goto err;
416
417 calc_data = (unsigned short)(ret & DA9052_ADC_RES_LSB);
418 data |= calc_data;
419
420 ret = data;
421
422err:
423 mutex_unlock(&da9052->auxadc_lock);
424 return ret;
425}
426EXPORT_SYMBOL_GPL(da9052_adc_manual_read);
427
428static irqreturn_t da9052_auxadc_irq(int irq, void *irq_data)
429{
430 struct da9052 *da9052 = irq_data;
431
432 complete(&da9052->done);
433
434 return IRQ_HANDLED;
435}
436
437int da9052_adc_read_temp(struct da9052 *da9052)
438{
439 int tbat;
440
441 tbat = da9052_reg_read(da9052, DA9052_TBAT_RES_REG);
442 if (tbat <= 0)
443 return tbat;
444
445 /* ARRAY_SIZE check is not needed since TBAT is a 8-bit register */
446 return tbat_lookup[tbat - 1];
447}
448EXPORT_SYMBOL_GPL(da9052_adc_read_temp);
449
321static struct resource da9052_rtc_resource = { 450static struct resource da9052_rtc_resource = {
322 .name = "ALM", 451 .name = "ALM",
323 .start = DA9052_IRQ_ALARM, 452 .start = DA9052_IRQ_ALARM,
@@ -646,6 +775,9 @@ int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
646 struct irq_desc *desc; 775 struct irq_desc *desc;
647 int ret; 776 int ret;
648 777
778 mutex_init(&da9052->auxadc_lock);
779 init_completion(&da9052->done);
780
649 if (pdata && pdata->init != NULL) 781 if (pdata && pdata->init != NULL)
650 pdata->init(da9052); 782 pdata->init(da9052);
651 783
@@ -665,6 +797,12 @@ int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
665 797
666 da9052->irq_base = regmap_irq_chip_get_base(da9052->irq_data); 798 da9052->irq_base = regmap_irq_chip_get_base(da9052->irq_data);
667 799
800 ret = request_threaded_irq(DA9052_IRQ_ADC_EOM, NULL, da9052_auxadc_irq,
801 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
802 "adc irq", da9052);
803 if (ret != 0)
804 dev_err(da9052->dev, "DA9052 ADC IRQ failed ret=%d\n", ret);
805
668 ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info, 806 ret = mfd_add_devices(da9052->dev, -1, da9052_subdev_info,
669 ARRAY_SIZE(da9052_subdev_info), NULL, 0); 807 ARRAY_SIZE(da9052_subdev_info), NULL, 0);
670 if (ret) 808 if (ret)
@@ -673,6 +811,7 @@ int __devinit da9052_device_init(struct da9052 *da9052, u8 chip_id)
673 return 0; 811 return 0;
674 812
675err: 813err:
814 free_irq(DA9052_IRQ_ADC_EOM, da9052);
676 mfd_remove_devices(da9052->dev); 815 mfd_remove_devices(da9052->dev);
677regmap_err: 816regmap_err:
678 return ret; 817 return ret;
@@ -680,6 +819,7 @@ regmap_err:
680 819
681void da9052_device_exit(struct da9052 *da9052) 820void da9052_device_exit(struct da9052 *da9052)
682{ 821{
822 free_irq(DA9052_IRQ_ADC_EOM, da9052);
683 regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data); 823 regmap_del_irq_chip(da9052->chip_irq, da9052->irq_data);
684 mfd_remove_devices(da9052->dev); 824 mfd_remove_devices(da9052->dev);
685} 825}
diff --git a/drivers/mfd/da9052-i2c.c b/drivers/mfd/da9052-i2c.c
index 36b88e395499..82c9d6450286 100644
--- a/drivers/mfd/da9052-i2c.c
+++ b/drivers/mfd/da9052-i2c.c
@@ -22,6 +22,11 @@
22#include <linux/mfd/da9052/da9052.h> 22#include <linux/mfd/da9052/da9052.h>
23#include <linux/mfd/da9052/reg.h> 23#include <linux/mfd/da9052/reg.h>
24 24
25#ifdef CONFIG_OF
26#include <linux/of.h>
27#include <linux/of_device.h>
28#endif
29
25static int da9052_i2c_enable_multiwrite(struct da9052 *da9052) 30static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
26{ 31{
27 int reg_val, ret; 32 int reg_val, ret;
@@ -41,13 +46,31 @@ static int da9052_i2c_enable_multiwrite(struct da9052 *da9052)
41 return 0; 46 return 0;
42} 47}
43 48
49static struct i2c_device_id da9052_i2c_id[] = {
50 {"da9052", DA9052},
51 {"da9053-aa", DA9053_AA},
52 {"da9053-ba", DA9053_BA},
53 {"da9053-bb", DA9053_BB},
54 {}
55};
56
57#ifdef CONFIG_OF
58static const struct of_device_id dialog_dt_ids[] = {
59 { .compatible = "dlg,da9052", .data = &da9052_i2c_id[0] },
60 { .compatible = "dlg,da9053-aa", .data = &da9052_i2c_id[1] },
61 { .compatible = "dlg,da9053-ab", .data = &da9052_i2c_id[2] },
62 { .compatible = "dlg,da9053-bb", .data = &da9052_i2c_id[3] },
63 { /* sentinel */ }
64};
65#endif
66
44static int __devinit da9052_i2c_probe(struct i2c_client *client, 67static int __devinit da9052_i2c_probe(struct i2c_client *client,
45 const struct i2c_device_id *id) 68 const struct i2c_device_id *id)
46{ 69{
47 struct da9052 *da9052; 70 struct da9052 *da9052;
48 int ret; 71 int ret;
49 72
50 da9052 = kzalloc(sizeof(struct da9052), GFP_KERNEL); 73 da9052 = devm_kzalloc(&client->dev, sizeof(struct da9052), GFP_KERNEL);
51 if (!da9052) 74 if (!da9052)
52 return -ENOMEM; 75 return -ENOMEM;
53 76
@@ -55,8 +78,7 @@ static int __devinit da9052_i2c_probe(struct i2c_client *client,
55 I2C_FUNC_SMBUS_BYTE_DATA)) { 78 I2C_FUNC_SMBUS_BYTE_DATA)) {
56 dev_info(&client->dev, "Error in %s:i2c_check_functionality\n", 79 dev_info(&client->dev, "Error in %s:i2c_check_functionality\n",
57 __func__); 80 __func__);
58 ret = -ENODEV; 81 return -ENODEV;
59 goto err;
60 } 82 }
61 83
62 da9052->dev = &client->dev; 84 da9052->dev = &client->dev;
@@ -64,29 +86,39 @@ static int __devinit da9052_i2c_probe(struct i2c_client *client,
64 86
65 i2c_set_clientdata(client, da9052); 87 i2c_set_clientdata(client, da9052);
66 88
67 da9052->regmap = regmap_init_i2c(client, &da9052_regmap_config); 89 da9052->regmap = devm_regmap_init_i2c(client, &da9052_regmap_config);
68 if (IS_ERR(da9052->regmap)) { 90 if (IS_ERR(da9052->regmap)) {
69 ret = PTR_ERR(da9052->regmap); 91 ret = PTR_ERR(da9052->regmap);
70 dev_err(&client->dev, "Failed to allocate register map: %d\n", 92 dev_err(&client->dev, "Failed to allocate register map: %d\n",
71 ret); 93 ret);
72 goto err; 94 return ret;
73 } 95 }
74 96
75 ret = da9052_i2c_enable_multiwrite(da9052); 97 ret = da9052_i2c_enable_multiwrite(da9052);
76 if (ret < 0) 98 if (ret < 0)
77 goto err_regmap; 99 return ret;
100
101#ifdef CONFIG_OF
102 if (!id) {
103 struct device_node *np = client->dev.of_node;
104 const struct of_device_id *deviceid;
105
106 deviceid = of_match_node(dialog_dt_ids, np);
107 id = (const struct i2c_device_id *)deviceid->data;
108 }
109#endif
110
111 if (!id) {
112 ret = -ENODEV;
113 dev_err(&client->dev, "id is null.\n");
114 return ret;
115 }
78 116
79 ret = da9052_device_init(da9052, id->driver_data); 117 ret = da9052_device_init(da9052, id->driver_data);
80 if (ret != 0) 118 if (ret != 0)
81 goto err_regmap; 119 return ret;
82 120
83 return 0; 121 return 0;
84
85err_regmap:
86 regmap_exit(da9052->regmap);
87err:
88 kfree(da9052);
89 return ret;
90} 122}
91 123
92static int __devexit da9052_i2c_remove(struct i2c_client *client) 124static int __devexit da9052_i2c_remove(struct i2c_client *client)
@@ -94,20 +126,9 @@ static int __devexit da9052_i2c_remove(struct i2c_client *client)
94 struct da9052 *da9052 = i2c_get_clientdata(client); 126 struct da9052 *da9052 = i2c_get_clientdata(client);
95 127
96 da9052_device_exit(da9052); 128 da9052_device_exit(da9052);
97 regmap_exit(da9052->regmap);
98 kfree(da9052);
99
100 return 0; 129 return 0;
101} 130}
102 131
103static struct i2c_device_id da9052_i2c_id[] = {
104 {"da9052", DA9052},
105 {"da9053-aa", DA9053_AA},
106 {"da9053-ba", DA9053_BA},
107 {"da9053-bb", DA9053_BB},
108 {}
109};
110
111static struct i2c_driver da9052_i2c_driver = { 132static struct i2c_driver da9052_i2c_driver = {
112 .probe = da9052_i2c_probe, 133 .probe = da9052_i2c_probe,
113 .remove = __devexit_p(da9052_i2c_remove), 134 .remove = __devexit_p(da9052_i2c_remove),
@@ -115,6 +136,9 @@ static struct i2c_driver da9052_i2c_driver = {
115 .driver = { 136 .driver = {
116 .name = "da9052", 137 .name = "da9052",
117 .owner = THIS_MODULE, 138 .owner = THIS_MODULE,
139#ifdef CONFIG_OF
140 .of_match_table = dialog_dt_ids,
141#endif
118 }, 142 },
119}; 143};
120 144
diff --git a/drivers/mfd/da9052-spi.c b/drivers/mfd/da9052-spi.c
index 6faf149e8d94..dbeadc5a6436 100644
--- a/drivers/mfd/da9052-spi.c
+++ b/drivers/mfd/da9052-spi.c
@@ -25,8 +25,9 @@ static int __devinit da9052_spi_probe(struct spi_device *spi)
25{ 25{
26 int ret; 26 int ret;
27 const struct spi_device_id *id = spi_get_device_id(spi); 27 const struct spi_device_id *id = spi_get_device_id(spi);
28 struct da9052 *da9052 = kzalloc(sizeof(struct da9052), GFP_KERNEL); 28 struct da9052 *da9052;
29 29
30 da9052 = devm_kzalloc(&spi->dev, sizeof(struct da9052), GFP_KERNEL);
30 if (!da9052) 31 if (!da9052)
31 return -ENOMEM; 32 return -ENOMEM;
32 33
@@ -42,25 +43,19 @@ static int __devinit da9052_spi_probe(struct spi_device *spi)
42 da9052_regmap_config.read_flag_mask = 1; 43 da9052_regmap_config.read_flag_mask = 1;
43 da9052_regmap_config.write_flag_mask = 0; 44 da9052_regmap_config.write_flag_mask = 0;
44 45
45 da9052->regmap = regmap_init_spi(spi, &da9052_regmap_config); 46 da9052->regmap = devm_regmap_init_spi(spi, &da9052_regmap_config);
46 if (IS_ERR(da9052->regmap)) { 47 if (IS_ERR(da9052->regmap)) {
47 ret = PTR_ERR(da9052->regmap); 48 ret = PTR_ERR(da9052->regmap);
48 dev_err(&spi->dev, "Failed to allocate register map: %d\n", 49 dev_err(&spi->dev, "Failed to allocate register map: %d\n",
49 ret); 50 ret);
50 goto err; 51 return ret;
51 } 52 }
52 53
53 ret = da9052_device_init(da9052, id->driver_data); 54 ret = da9052_device_init(da9052, id->driver_data);
54 if (ret != 0) 55 if (ret != 0)
55 goto err_regmap; 56 return ret;
56 57
57 return 0; 58 return 0;
58
59err_regmap:
60 regmap_exit(da9052->regmap);
61err:
62 kfree(da9052);
63 return ret;
64} 59}
65 60
66static int __devexit da9052_spi_remove(struct spi_device *spi) 61static int __devexit da9052_spi_remove(struct spi_device *spi)
@@ -68,9 +63,6 @@ static int __devexit da9052_spi_remove(struct spi_device *spi)
68 struct da9052 *da9052 = dev_get_drvdata(&spi->dev); 63 struct da9052 *da9052 = dev_get_drvdata(&spi->dev);
69 64
70 da9052_device_exit(da9052); 65 da9052_device_exit(da9052);
71 regmap_exit(da9052->regmap);
72 kfree(da9052);
73
74 return 0; 66 return 0;
75} 67}
76 68
@@ -88,7 +80,6 @@ static struct spi_driver da9052_spi_driver = {
88 .id_table = da9052_spi_id, 80 .id_table = da9052_spi_id,
89 .driver = { 81 .driver = {
90 .name = "da9052", 82 .name = "da9052",
91 .bus = &spi_bus_type,
92 .owner = THIS_MODULE, 83 .owner = THIS_MODULE,
93 }, 84 },
94}; 85};
diff --git a/drivers/mfd/db8500-prcmu.c b/drivers/mfd/db8500-prcmu.c
index 5be32489714f..671c8bc14bbc 100644
--- a/drivers/mfd/db8500-prcmu.c
+++ b/drivers/mfd/db8500-prcmu.c
@@ -2720,6 +2720,7 @@ static struct regulator_consumer_supply db8500_vape_consumers[] = {
2720 REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"), 2720 REGULATOR_SUPPLY("v-i2c", "nmk-i2c.1"),
2721 REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"), 2721 REGULATOR_SUPPLY("v-i2c", "nmk-i2c.2"),
2722 REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"), 2722 REGULATOR_SUPPLY("v-i2c", "nmk-i2c.3"),
2723 REGULATOR_SUPPLY("v-i2c", "nmk-i2c.4"),
2723 /* "v-mmc" changed to "vcore" in the mainline kernel */ 2724 /* "v-mmc" changed to "vcore" in the mainline kernel */
2724 REGULATOR_SUPPLY("vcore", "sdi0"), 2725 REGULATOR_SUPPLY("vcore", "sdi0"),
2725 REGULATOR_SUPPLY("vcore", "sdi1"), 2726 REGULATOR_SUPPLY("vcore", "sdi1"),
@@ -2958,9 +2959,10 @@ static struct mfd_cell db8500_prcmu_devs[] = {
2958 * prcmu_fw_init - arch init call for the Linux PRCMU fw init logic 2959 * prcmu_fw_init - arch init call for the Linux PRCMU fw init logic
2959 * 2960 *
2960 */ 2961 */
2961static int __init db8500_prcmu_probe(struct platform_device *pdev) 2962static int __devinit db8500_prcmu_probe(struct platform_device *pdev)
2962{ 2963{
2963 int err = 0; 2964 struct device_node *np = pdev->dev.of_node;
2965 int irq = 0, err = 0;
2964 2966
2965 if (ux500_is_svp()) 2967 if (ux500_is_svp())
2966 return -ENODEV; 2968 return -ENODEV;
@@ -2970,8 +2972,14 @@ static int __init db8500_prcmu_probe(struct platform_device *pdev)
2970 /* Clean up the mailbox interrupts after pre-kernel code. */ 2972 /* Clean up the mailbox interrupts after pre-kernel code. */
2971 writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR); 2973 writel(ALL_MBOX_BITS, PRCM_ARM_IT1_CLR);
2972 2974
2973 err = request_threaded_irq(IRQ_DB8500_PRCMU1, prcmu_irq_handler, 2975 if (np)
2974 prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL); 2976 irq = platform_get_irq(pdev, 0);
2977
2978 if (!np || irq <= 0)
2979 irq = IRQ_DB8500_PRCMU1;
2980
2981 err = request_threaded_irq(irq, prcmu_irq_handler,
2982 prcmu_irq_thread_fn, IRQF_NO_SUSPEND, "prcmu", NULL);
2975 if (err < 0) { 2983 if (err < 0) {
2976 pr_err("prcmu: Failed to allocate IRQ_DB8500_PRCMU1.\n"); 2984 pr_err("prcmu: Failed to allocate IRQ_DB8500_PRCMU1.\n");
2977 err = -EBUSY; 2985 err = -EBUSY;
@@ -2981,14 +2989,16 @@ static int __init db8500_prcmu_probe(struct platform_device *pdev)
2981 if (cpu_is_u8500v20_or_later()) 2989 if (cpu_is_u8500v20_or_later())
2982 prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET); 2990 prcmu_config_esram0_deep_sleep(ESRAM0_DEEP_SLEEP_STATE_RET);
2983 2991
2984 err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs, 2992 if (!np) {
2985 ARRAY_SIZE(db8500_prcmu_devs), NULL, 2993 err = mfd_add_devices(&pdev->dev, 0, db8500_prcmu_devs,
2986 0); 2994 ARRAY_SIZE(db8500_prcmu_devs), NULL, 0);
2995 if (err) {
2996 pr_err("prcmu: Failed to add subdevices\n");
2997 return err;
2998 }
2999 }
2987 3000
2988 if (err) 3001 pr_info("DB8500 PRCMU initialized\n");
2989 pr_err("prcmu: Failed to add subdevices\n");
2990 else
2991 pr_info("DB8500 PRCMU initialized\n");
2992 3002
2993no_irq_return: 3003no_irq_return:
2994 return err; 3004 return err;
@@ -2999,11 +3009,12 @@ static struct platform_driver db8500_prcmu_driver = {
2999 .name = "db8500-prcmu", 3009 .name = "db8500-prcmu",
3000 .owner = THIS_MODULE, 3010 .owner = THIS_MODULE,
3001 }, 3011 },
3012 .probe = db8500_prcmu_probe,
3002}; 3013};
3003 3014
3004static int __init db8500_prcmu_init(void) 3015static int __init db8500_prcmu_init(void)
3005{ 3016{
3006 return platform_driver_probe(&db8500_prcmu_driver, db8500_prcmu_probe); 3017 return platform_driver_register(&db8500_prcmu_driver);
3007} 3018}
3008 3019
3009arch_initcall(db8500_prcmu_init); 3020arch_initcall(db8500_prcmu_init);
diff --git a/drivers/mfd/intel_msic.c b/drivers/mfd/intel_msic.c
index b76657eb0c51..59df5584cb58 100644
--- a/drivers/mfd/intel_msic.c
+++ b/drivers/mfd/intel_msic.c
@@ -406,7 +406,7 @@ static int __devinit intel_msic_probe(struct platform_device *pdev)
406 return -ENXIO; 406 return -ENXIO;
407 } 407 }
408 408
409 msic = kzalloc(sizeof(*msic), GFP_KERNEL); 409 msic = devm_kzalloc(&pdev->dev, sizeof(*msic), GFP_KERNEL);
410 if (!msic) 410 if (!msic)
411 return -ENOMEM; 411 return -ENOMEM;
412 412
@@ -421,21 +421,13 @@ static int __devinit intel_msic_probe(struct platform_device *pdev)
421 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 421 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
422 if (!res) { 422 if (!res) {
423 dev_err(&pdev->dev, "failed to get SRAM iomem resource\n"); 423 dev_err(&pdev->dev, "failed to get SRAM iomem resource\n");
424 ret = -ENODEV; 424 return -ENODEV;
425 goto fail_free_msic;
426 } 425 }
427 426
428 res = request_mem_region(res->start, resource_size(res), pdev->name); 427 msic->irq_base = devm_request_and_ioremap(&pdev->dev, res);
429 if (!res) {
430 ret = -EBUSY;
431 goto fail_free_msic;
432 }
433
434 msic->irq_base = ioremap_nocache(res->start, resource_size(res));
435 if (!msic->irq_base) { 428 if (!msic->irq_base) {
436 dev_err(&pdev->dev, "failed to map SRAM memory\n"); 429 dev_err(&pdev->dev, "failed to map SRAM memory\n");
437 ret = -ENOMEM; 430 return -ENOMEM;
438 goto fail_release_region;
439 } 431 }
440 432
441 platform_set_drvdata(pdev, msic); 433 platform_set_drvdata(pdev, msic);
@@ -443,7 +435,7 @@ static int __devinit intel_msic_probe(struct platform_device *pdev)
443 ret = intel_msic_init_devices(msic); 435 ret = intel_msic_init_devices(msic);
444 if (ret) { 436 if (ret) {
445 dev_err(&pdev->dev, "failed to initialize MSIC devices\n"); 437 dev_err(&pdev->dev, "failed to initialize MSIC devices\n");
446 goto fail_unmap_mem; 438 return ret;
447 } 439 }
448 440
449 dev_info(&pdev->dev, "Intel MSIC version %c%d (vendor %#x)\n", 441 dev_info(&pdev->dev, "Intel MSIC version %c%d (vendor %#x)\n",
@@ -451,27 +443,14 @@ static int __devinit intel_msic_probe(struct platform_device *pdev)
451 msic->vendor); 443 msic->vendor);
452 444
453 return 0; 445 return 0;
454
455fail_unmap_mem:
456 iounmap(msic->irq_base);
457fail_release_region:
458 release_mem_region(res->start, resource_size(res));
459fail_free_msic:
460 kfree(msic);
461
462 return ret;
463} 446}
464 447
465static int __devexit intel_msic_remove(struct platform_device *pdev) 448static int __devexit intel_msic_remove(struct platform_device *pdev)
466{ 449{
467 struct intel_msic *msic = platform_get_drvdata(pdev); 450 struct intel_msic *msic = platform_get_drvdata(pdev);
468 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
469 451
470 intel_msic_remove_devices(msic); 452 intel_msic_remove_devices(msic);
471 platform_set_drvdata(pdev, NULL); 453 platform_set_drvdata(pdev, NULL);
472 iounmap(msic->irq_base);
473 release_mem_region(res->start, resource_size(res));
474 kfree(msic);
475 454
476 return 0; 455 return 0;
477} 456}
diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
index a9223ed1b7c5..2ea99989551a 100644
--- a/drivers/mfd/janz-cmodio.c
+++ b/drivers/mfd/janz-cmodio.c
@@ -283,23 +283,8 @@ static struct pci_driver cmodio_pci_driver = {
283 .remove = __devexit_p(cmodio_pci_remove), 283 .remove = __devexit_p(cmodio_pci_remove),
284}; 284};
285 285
286/* 286module_pci_driver(cmodio_pci_driver);
287 * Module Init / Exit
288 */
289
290static int __init cmodio_init(void)
291{
292 return pci_register_driver(&cmodio_pci_driver);
293}
294
295static void __exit cmodio_exit(void)
296{
297 pci_unregister_driver(&cmodio_pci_driver);
298}
299 287
300MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>"); 288MODULE_AUTHOR("Ira W. Snyder <iws@ovro.caltech.edu>");
301MODULE_DESCRIPTION("Janz CMOD-IO PCI MODULbus Carrier Board Driver"); 289MODULE_DESCRIPTION("Janz CMOD-IO PCI MODULbus Carrier Board Driver");
302MODULE_LICENSE("GPL"); 290MODULE_LICENSE("GPL");
303
304module_init(cmodio_init);
305module_exit(cmodio_exit);
diff --git a/drivers/mfd/lm3533-core.c b/drivers/mfd/lm3533-core.c
new file mode 100644
index 000000000000..0b2879b87fd9
--- /dev/null
+++ b/drivers/mfd/lm3533-core.c
@@ -0,0 +1,667 @@
1/*
2 * lm3533-core.c -- LM3533 Core
3 *
4 * Copyright (C) 2011-2012 Texas Instruments
5 *
6 * Author: Johan Hovold <jhovold@gmail.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/kernel.h>
17#include <linux/err.h>
18#include <linux/gpio.h>
19#include <linux/i2c.h>
20#include <linux/mfd/core.h>
21#include <linux/regmap.h>
22#include <linux/seq_file.h>
23#include <linux/slab.h>
24#include <linux/uaccess.h>
25
26#include <linux/mfd/lm3533.h>
27
28
29#define LM3533_BOOST_OVP_MASK 0x06
30#define LM3533_BOOST_OVP_SHIFT 1
31
32#define LM3533_BOOST_FREQ_MASK 0x01
33#define LM3533_BOOST_FREQ_SHIFT 0
34
35#define LM3533_BL_ID_MASK 1
36#define LM3533_LED_ID_MASK 3
37#define LM3533_BL_ID_MAX 1
38#define LM3533_LED_ID_MAX 3
39
40#define LM3533_HVLED_ID_MAX 2
41#define LM3533_LVLED_ID_MAX 5
42
43#define LM3533_REG_OUTPUT_CONF1 0x10
44#define LM3533_REG_OUTPUT_CONF2 0x11
45#define LM3533_REG_BOOST_PWM 0x2c
46
47#define LM3533_REG_MAX 0xb2
48
49
50static struct mfd_cell lm3533_als_devs[] = {
51 {
52 .name = "lm3533-als",
53 .id = -1,
54 },
55};
56
57static struct mfd_cell lm3533_bl_devs[] = {
58 {
59 .name = "lm3533-backlight",
60 .id = 0,
61 },
62 {
63 .name = "lm3533-backlight",
64 .id = 1,
65 },
66};
67
68static struct mfd_cell lm3533_led_devs[] = {
69 {
70 .name = "lm3533-leds",
71 .id = 0,
72 },
73 {
74 .name = "lm3533-leds",
75 .id = 1,
76 },
77 {
78 .name = "lm3533-leds",
79 .id = 2,
80 },
81 {
82 .name = "lm3533-leds",
83 .id = 3,
84 },
85};
86
87int lm3533_read(struct lm3533 *lm3533, u8 reg, u8 *val)
88{
89 int tmp;
90 int ret;
91
92 ret = regmap_read(lm3533->regmap, reg, &tmp);
93 if (ret < 0) {
94 dev_err(lm3533->dev, "failed to read register %02x: %d\n",
95 reg, ret);
96 return ret;
97 }
98
99 *val = tmp;
100
101 dev_dbg(lm3533->dev, "read [%02x]: %02x\n", reg, *val);
102
103 return ret;
104}
105EXPORT_SYMBOL_GPL(lm3533_read);
106
107int lm3533_write(struct lm3533 *lm3533, u8 reg, u8 val)
108{
109 int ret;
110
111 dev_dbg(lm3533->dev, "write [%02x]: %02x\n", reg, val);
112
113 ret = regmap_write(lm3533->regmap, reg, val);
114 if (ret < 0) {
115 dev_err(lm3533->dev, "failed to write register %02x: %d\n",
116 reg, ret);
117 }
118
119 return ret;
120}
121EXPORT_SYMBOL_GPL(lm3533_write);
122
123int lm3533_update(struct lm3533 *lm3533, u8 reg, u8 val, u8 mask)
124{
125 int ret;
126
127 dev_dbg(lm3533->dev, "update [%02x]: %02x/%02x\n", reg, val, mask);
128
129 ret = regmap_update_bits(lm3533->regmap, reg, mask, val);
130 if (ret < 0) {
131 dev_err(lm3533->dev, "failed to update register %02x: %d\n",
132 reg, ret);
133 }
134
135 return ret;
136}
137EXPORT_SYMBOL_GPL(lm3533_update);
138
139static int lm3533_set_boost_freq(struct lm3533 *lm3533,
140 enum lm3533_boost_freq freq)
141{
142 int ret;
143
144 ret = lm3533_update(lm3533, LM3533_REG_BOOST_PWM,
145 freq << LM3533_BOOST_FREQ_SHIFT,
146 LM3533_BOOST_FREQ_MASK);
147 if (ret)
148 dev_err(lm3533->dev, "failed to set boost frequency\n");
149
150 return ret;
151}
152
153
154static int lm3533_set_boost_ovp(struct lm3533 *lm3533,
155 enum lm3533_boost_ovp ovp)
156{
157 int ret;
158
159 ret = lm3533_update(lm3533, LM3533_REG_BOOST_PWM,
160 ovp << LM3533_BOOST_OVP_SHIFT,
161 LM3533_BOOST_OVP_MASK);
162 if (ret)
163 dev_err(lm3533->dev, "failed to set boost ovp\n");
164
165 return ret;
166}
167
168/*
169 * HVLED output config -- output hvled controlled by backlight bl
170 */
171static int lm3533_set_hvled_config(struct lm3533 *lm3533, u8 hvled, u8 bl)
172{
173 u8 val;
174 u8 mask;
175 int shift;
176 int ret;
177
178 if (hvled == 0 || hvled > LM3533_HVLED_ID_MAX)
179 return -EINVAL;
180
181 if (bl > LM3533_BL_ID_MAX)
182 return -EINVAL;
183
184 shift = hvled - 1;
185 mask = LM3533_BL_ID_MASK << shift;
186 val = bl << shift;
187
188 ret = lm3533_update(lm3533, LM3533_REG_OUTPUT_CONF1, val, mask);
189 if (ret)
190 dev_err(lm3533->dev, "failed to set hvled config\n");
191
192 return ret;
193}
194
195/*
196 * LVLED output config -- output lvled controlled by LED led
197 */
198static int lm3533_set_lvled_config(struct lm3533 *lm3533, u8 lvled, u8 led)
199{
200 u8 reg;
201 u8 val;
202 u8 mask;
203 int shift;
204 int ret;
205
206 if (lvled == 0 || lvled > LM3533_LVLED_ID_MAX)
207 return -EINVAL;
208
209 if (led > LM3533_LED_ID_MAX)
210 return -EINVAL;
211
212 if (lvled < 4) {
213 reg = LM3533_REG_OUTPUT_CONF1;
214 shift = 2 * lvled;
215 } else {
216 reg = LM3533_REG_OUTPUT_CONF2;
217 shift = 2 * (lvled - 4);
218 }
219
220 mask = LM3533_LED_ID_MASK << shift;
221 val = led << shift;
222
223 ret = lm3533_update(lm3533, reg, val, mask);
224 if (ret)
225 dev_err(lm3533->dev, "failed to set lvled config\n");
226
227 return ret;
228}
229
230static void lm3533_enable(struct lm3533 *lm3533)
231{
232 if (gpio_is_valid(lm3533->gpio_hwen))
233 gpio_set_value(lm3533->gpio_hwen, 1);
234}
235
236static void lm3533_disable(struct lm3533 *lm3533)
237{
238 if (gpio_is_valid(lm3533->gpio_hwen))
239 gpio_set_value(lm3533->gpio_hwen, 0);
240}
241
242enum lm3533_attribute_type {
243 LM3533_ATTR_TYPE_BACKLIGHT,
244 LM3533_ATTR_TYPE_LED,
245};
246
247struct lm3533_device_attribute {
248 struct device_attribute dev_attr;
249 enum lm3533_attribute_type type;
250 union {
251 struct {
252 u8 id;
253 } output;
254 } u;
255};
256
257#define to_lm3533_dev_attr(_attr) \
258 container_of(_attr, struct lm3533_device_attribute, dev_attr)
259
260static ssize_t show_output(struct device *dev,
261 struct device_attribute *attr, char *buf)
262{
263 struct lm3533 *lm3533 = dev_get_drvdata(dev);
264 struct lm3533_device_attribute *lattr = to_lm3533_dev_attr(attr);
265 int id = lattr->u.output.id;
266 u8 reg;
267 u8 val;
268 u8 mask;
269 int shift;
270 int ret;
271
272 if (lattr->type == LM3533_ATTR_TYPE_BACKLIGHT) {
273 reg = LM3533_REG_OUTPUT_CONF1;
274 shift = id - 1;
275 mask = LM3533_BL_ID_MASK << shift;
276 } else {
277 if (id < 4) {
278 reg = LM3533_REG_OUTPUT_CONF1;
279 shift = 2 * id;
280 } else {
281 reg = LM3533_REG_OUTPUT_CONF2;
282 shift = 2 * (id - 4);
283 }
284 mask = LM3533_LED_ID_MASK << shift;
285 }
286
287 ret = lm3533_read(lm3533, reg, &val);
288 if (ret)
289 return ret;
290
291 val = (val & mask) >> shift;
292
293 return scnprintf(buf, PAGE_SIZE, "%u\n", val);
294}
295
296static ssize_t store_output(struct device *dev,
297 struct device_attribute *attr,
298 const char *buf, size_t len)
299{
300 struct lm3533 *lm3533 = dev_get_drvdata(dev);
301 struct lm3533_device_attribute *lattr = to_lm3533_dev_attr(attr);
302 int id = lattr->u.output.id;
303 u8 val;
304 int ret;
305
306 if (kstrtou8(buf, 0, &val))
307 return -EINVAL;
308
309 if (lattr->type == LM3533_ATTR_TYPE_BACKLIGHT)
310 ret = lm3533_set_hvled_config(lm3533, id, val);
311 else
312 ret = lm3533_set_lvled_config(lm3533, id, val);
313
314 if (ret)
315 return ret;
316
317 return len;
318}
319
320#define LM3533_OUTPUT_ATTR(_name, _mode, _show, _store, _type, _id) \
321 struct lm3533_device_attribute lm3533_dev_attr_##_name = \
322 { .dev_attr = __ATTR(_name, _mode, _show, _store), \
323 .type = _type, \
324 .u.output = { .id = _id }, }
325
326#define LM3533_OUTPUT_ATTR_RW(_name, _type, _id) \
327 LM3533_OUTPUT_ATTR(output_##_name, S_IRUGO | S_IWUSR, \
328 show_output, store_output, _type, _id)
329
330#define LM3533_OUTPUT_HVLED_ATTR_RW(_nr) \
331 LM3533_OUTPUT_ATTR_RW(hvled##_nr, LM3533_ATTR_TYPE_BACKLIGHT, _nr)
332#define LM3533_OUTPUT_LVLED_ATTR_RW(_nr) \
333 LM3533_OUTPUT_ATTR_RW(lvled##_nr, LM3533_ATTR_TYPE_LED, _nr)
334/*
335 * Output config:
336 *
337 * output_hvled<nr> 0-1
338 * output_lvled<nr> 0-3
339 */
340static LM3533_OUTPUT_HVLED_ATTR_RW(1);
341static LM3533_OUTPUT_HVLED_ATTR_RW(2);
342static LM3533_OUTPUT_LVLED_ATTR_RW(1);
343static LM3533_OUTPUT_LVLED_ATTR_RW(2);
344static LM3533_OUTPUT_LVLED_ATTR_RW(3);
345static LM3533_OUTPUT_LVLED_ATTR_RW(4);
346static LM3533_OUTPUT_LVLED_ATTR_RW(5);
347
348static struct attribute *lm3533_attributes[] = {
349 &lm3533_dev_attr_output_hvled1.dev_attr.attr,
350 &lm3533_dev_attr_output_hvled2.dev_attr.attr,
351 &lm3533_dev_attr_output_lvled1.dev_attr.attr,
352 &lm3533_dev_attr_output_lvled2.dev_attr.attr,
353 &lm3533_dev_attr_output_lvled3.dev_attr.attr,
354 &lm3533_dev_attr_output_lvled4.dev_attr.attr,
355 &lm3533_dev_attr_output_lvled5.dev_attr.attr,
356 NULL,
357};
358
359#define to_dev_attr(_attr) \
360 container_of(_attr, struct device_attribute, attr)
361
362static umode_t lm3533_attr_is_visible(struct kobject *kobj,
363 struct attribute *attr, int n)
364{
365 struct device *dev = container_of(kobj, struct device, kobj);
366 struct lm3533 *lm3533 = dev_get_drvdata(dev);
367 struct device_attribute *dattr = to_dev_attr(attr);
368 struct lm3533_device_attribute *lattr = to_lm3533_dev_attr(dattr);
369 enum lm3533_attribute_type type = lattr->type;
370 umode_t mode = attr->mode;
371
372 if (!lm3533->have_backlights && type == LM3533_ATTR_TYPE_BACKLIGHT)
373 mode = 0;
374 else if (!lm3533->have_leds && type == LM3533_ATTR_TYPE_LED)
375 mode = 0;
376
377 return mode;
378};
379
380static struct attribute_group lm3533_attribute_group = {
381 .is_visible = lm3533_attr_is_visible,
382 .attrs = lm3533_attributes
383};
384
385static int __devinit lm3533_device_als_init(struct lm3533 *lm3533)
386{
387 struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
388 int ret;
389
390 if (!pdata->als)
391 return 0;
392
393 lm3533_als_devs[0].platform_data = pdata->als;
394 lm3533_als_devs[0].pdata_size = sizeof(*pdata->als);
395
396 ret = mfd_add_devices(lm3533->dev, 0, lm3533_als_devs, 1, NULL, 0);
397 if (ret) {
398 dev_err(lm3533->dev, "failed to add ALS device\n");
399 return ret;
400 }
401
402 lm3533->have_als = 1;
403
404 return 0;
405}
406
407static int __devinit lm3533_device_bl_init(struct lm3533 *lm3533)
408{
409 struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
410 int i;
411 int ret;
412
413 if (!pdata->backlights || pdata->num_backlights == 0)
414 return 0;
415
416 if (pdata->num_backlights > ARRAY_SIZE(lm3533_bl_devs))
417 pdata->num_backlights = ARRAY_SIZE(lm3533_bl_devs);
418
419 for (i = 0; i < pdata->num_backlights; ++i) {
420 lm3533_bl_devs[i].platform_data = &pdata->backlights[i];
421 lm3533_bl_devs[i].pdata_size = sizeof(pdata->backlights[i]);
422 }
423
424 ret = mfd_add_devices(lm3533->dev, 0, lm3533_bl_devs,
425 pdata->num_backlights, NULL, 0);
426 if (ret) {
427 dev_err(lm3533->dev, "failed to add backlight devices\n");
428 return ret;
429 }
430
431 lm3533->have_backlights = 1;
432
433 return 0;
434}
435
436static int __devinit lm3533_device_led_init(struct lm3533 *lm3533)
437{
438 struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
439 int i;
440 int ret;
441
442 if (!pdata->leds || pdata->num_leds == 0)
443 return 0;
444
445 if (pdata->num_leds > ARRAY_SIZE(lm3533_led_devs))
446 pdata->num_leds = ARRAY_SIZE(lm3533_led_devs);
447
448 for (i = 0; i < pdata->num_leds; ++i) {
449 lm3533_led_devs[i].platform_data = &pdata->leds[i];
450 lm3533_led_devs[i].pdata_size = sizeof(pdata->leds[i]);
451 }
452
453 ret = mfd_add_devices(lm3533->dev, 0, lm3533_led_devs,
454 pdata->num_leds, NULL, 0);
455 if (ret) {
456 dev_err(lm3533->dev, "failed to add LED devices\n");
457 return ret;
458 }
459
460 lm3533->have_leds = 1;
461
462 return 0;
463}
464
465static int __devinit lm3533_device_setup(struct lm3533 *lm3533,
466 struct lm3533_platform_data *pdata)
467{
468 int ret;
469
470 ret = lm3533_set_boost_freq(lm3533, pdata->boost_freq);
471 if (ret)
472 return ret;
473
474 ret = lm3533_set_boost_ovp(lm3533, pdata->boost_ovp);
475 if (ret)
476 return ret;
477
478 return 0;
479}
480
/*
 * One-time device bring-up: claim the optional HWEN GPIO, enable the
 * chip, program the boost converter, register child devices, and create
 * the sysfs attribute group.
 *
 * Returns 0 on success or a negative errno; on failure the chip is
 * disabled again and the HWEN GPIO released.
 */
static int __devinit lm3533_device_init(struct lm3533 *lm3533)
{
        struct lm3533_platform_data *pdata = lm3533->dev->platform_data;
        int ret;

        dev_dbg(lm3533->dev, "%s\n", __func__);

        if (!pdata) {
                dev_err(lm3533->dev, "no platform data\n");
                return -EINVAL;
        }

        lm3533->gpio_hwen = pdata->gpio_hwen;

        dev_set_drvdata(lm3533->dev, lm3533);

        /* HWEN GPIO is optional; drive it low (chip disabled) initially. */
        if (gpio_is_valid(lm3533->gpio_hwen)) {
                ret = gpio_request_one(lm3533->gpio_hwen, GPIOF_OUT_INIT_LOW,
                                                        "lm3533-hwen");
                if (ret < 0) {
                        dev_err(lm3533->dev,
                                "failed to request HWEN GPIO %d\n",
                                lm3533->gpio_hwen);
                        return ret;
                }
        }

        lm3533_enable(lm3533);

        ret = lm3533_device_setup(lm3533, pdata);
        if (ret)
                goto err_disable;

        /*
         * NOTE(review): the return values of the three child-init calls
         * are ignored — child registration failures appear to be treated
         * as non-fatal (best effort); confirm this is intentional.
         */
        lm3533_device_als_init(lm3533);
        lm3533_device_bl_init(lm3533);
        lm3533_device_led_init(lm3533);

        ret = sysfs_create_group(&lm3533->dev->kobj, &lm3533_attribute_group);
        if (ret < 0) {
                dev_err(lm3533->dev, "failed to create sysfs attributes\n");
                goto err_unregister;
        }

        return 0;

err_unregister:
        /* Removes every child added by the *_init() calls above. */
        mfd_remove_devices(lm3533->dev);
err_disable:
        lm3533_disable(lm3533);
        if (gpio_is_valid(lm3533->gpio_hwen))
                gpio_free(lm3533->gpio_hwen);

        return ret;
}
535
/*
 * Tear down everything lm3533_device_init() set up, in reverse order:
 * sysfs group, child devices, chip enable, HWEN GPIO.
 */
static void __devexit lm3533_device_exit(struct lm3533 *lm3533)
{
        dev_dbg(lm3533->dev, "%s\n", __func__);

        sysfs_remove_group(&lm3533->dev->kobj, &lm3533_attribute_group);

        mfd_remove_devices(lm3533->dev);
        lm3533_disable(lm3533);
        if (gpio_is_valid(lm3533->gpio_hwen))
                gpio_free(lm3533->gpio_hwen);
}
547
548static bool lm3533_readable_register(struct device *dev, unsigned int reg)
549{
550 switch (reg) {
551 case 0x10 ... 0x2c:
552 case 0x30 ... 0x38:
553 case 0x40 ... 0x45:
554 case 0x50 ... 0x57:
555 case 0x60 ... 0x6e:
556 case 0x70 ... 0x75:
557 case 0x80 ... 0x85:
558 case 0x90 ... 0x95:
559 case 0xa0 ... 0xa5:
560 case 0xb0 ... 0xb2:
561 return true;
562 default:
563 return false;
564 }
565}
566
567static bool lm3533_volatile_register(struct device *dev, unsigned int reg)
568{
569 switch (reg) {
570 case 0x34 ... 0x36: /* zone */
571 case 0x37 ... 0x38: /* adc */
572 case 0xb0 ... 0xb1: /* fault */
573 return true;
574 default:
575 return false;
576 }
577}
578
579static bool lm3533_precious_register(struct device *dev, unsigned int reg)
580{
581 switch (reg) {
582 case 0x34: /* zone */
583 return true;
584 default:
585 return false;
586 }
587}
588
589static struct regmap_config regmap_config = {
590 .reg_bits = 8,
591 .val_bits = 8,
592 .max_register = LM3533_REG_MAX,
593 .readable_reg = lm3533_readable_register,
594 .volatile_reg = lm3533_volatile_register,
595 .precious_reg = lm3533_precious_register,
596};
597
598static int __devinit lm3533_i2c_probe(struct i2c_client *i2c,
599 const struct i2c_device_id *id)
600{
601 struct lm3533 *lm3533;
602 int ret;
603
604 dev_dbg(&i2c->dev, "%s\n", __func__);
605
606 lm3533 = devm_kzalloc(&i2c->dev, sizeof(*lm3533), GFP_KERNEL);
607 if (!lm3533)
608 return -ENOMEM;
609
610 i2c_set_clientdata(i2c, lm3533);
611
612 lm3533->regmap = devm_regmap_init_i2c(i2c, &regmap_config);
613 if (IS_ERR(lm3533->regmap))
614 return PTR_ERR(lm3533->regmap);
615
616 lm3533->dev = &i2c->dev;
617 lm3533->irq = i2c->irq;
618
619 ret = lm3533_device_init(lm3533);
620 if (ret)
621 return ret;
622
623 return 0;
624}
625
/* I2C remove: tear down the device; memory is devm-managed. */
static int __devexit lm3533_i2c_remove(struct i2c_client *i2c)
{
        struct lm3533 *lm3533 = i2c_get_clientdata(i2c);

        dev_dbg(&i2c->dev, "%s\n", __func__);

        lm3533_device_exit(lm3533);

        return 0;
}
636
/* Supported I2C device ids (also used for module autoloading). */
static const struct i2c_device_id lm3533_i2c_ids[] = {
        { "lm3533", 0 },
        { },
};
MODULE_DEVICE_TABLE(i2c, lm3533_i2c_ids);
642
/* I2C driver glue for the LM3533 core. */
static struct i2c_driver lm3533_i2c_driver = {
        .driver = {
                .name = "lm3533",
                .owner = THIS_MODULE,
        },
        .id_table = lm3533_i2c_ids,
        .probe = lm3533_i2c_probe,
        .remove = __devexit_p(lm3533_i2c_remove),
};
652
static int __init lm3533_i2c_init(void)
{
        return i2c_add_driver(&lm3533_i2c_driver);
}
/*
 * Registered at subsys_initcall time (earlier than module_init) —
 * presumably so the MFD child drivers can bind after the core; confirm.
 */
subsys_initcall(lm3533_i2c_init);
658
/* Module unload: unregister the I2C driver. */
static void __exit lm3533_i2c_exit(void)
{
        i2c_del_driver(&lm3533_i2c_driver);
}
module_exit(lm3533_i2c_exit);
664
665MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
666MODULE_DESCRIPTION("LM3533 Core");
667MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/lm3533-ctrlbank.c b/drivers/mfd/lm3533-ctrlbank.c
new file mode 100644
index 000000000000..a4cb7a5220a7
--- /dev/null
+++ b/drivers/mfd/lm3533-ctrlbank.c
@@ -0,0 +1,148 @@
1/*
2 * lm3533-ctrlbank.c -- LM3533 Generic Control Bank interface
3 *
4 * Copyright (C) 2011-2012 Texas Instruments
5 *
6 * Author: Johan Hovold <jhovold@gmail.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/device.h>
15#include <linux/module.h>
16
17#include <linux/mfd/lm3533.h>
18
19
/* Full-scale current limits (uA) and register step size (uA/step). */
#define LM3533_MAX_CURRENT_MIN          5000
#define LM3533_MAX_CURRENT_MAX          29800
#define LM3533_MAX_CURRENT_STEP         800

/* Maximum raw values accepted by the generated set_brightness/set_pwm. */
#define LM3533_BRIGHTNESS_MAX           255
#define LM3533_PWM_MAX                  0x3f

/* Per-bank register bases; the control-bank id is added to each base. */
#define LM3533_REG_PWM_BASE             0x14
#define LM3533_REG_MAX_CURRENT_BASE     0x1f
#define LM3533_REG_CTRLBANK_ENABLE      0x27
#define LM3533_REG_BRIGHTNESS_BASE      0x40
32
/* Per-bank registers are laid out contiguously: base register + bank id. */
static inline u8 lm3533_ctrlbank_get_reg(struct lm3533_ctrlbank *cb, u8 base)
{
        return base + cb->id;
}
37
38int lm3533_ctrlbank_enable(struct lm3533_ctrlbank *cb)
39{
40 u8 mask;
41 int ret;
42
43 dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id);
44
45 mask = 1 << cb->id;
46 ret = lm3533_update(cb->lm3533, LM3533_REG_CTRLBANK_ENABLE,
47 mask, mask);
48 if (ret)
49 dev_err(cb->dev, "failed to enable ctrlbank %d\n", cb->id);
50
51 return ret;
52}
53EXPORT_SYMBOL_GPL(lm3533_ctrlbank_enable);
54
55int lm3533_ctrlbank_disable(struct lm3533_ctrlbank *cb)
56{
57 u8 mask;
58 int ret;
59
60 dev_dbg(cb->dev, "%s - %d\n", __func__, cb->id);
61
62 mask = 1 << cb->id;
63 ret = lm3533_update(cb->lm3533, LM3533_REG_CTRLBANK_ENABLE, 0, mask);
64 if (ret)
65 dev_err(cb->dev, "failed to disable ctrlbank %d\n", cb->id);
66
67 return ret;
68}
69EXPORT_SYMBOL_GPL(lm3533_ctrlbank_disable);
70
71/*
72 * Full-scale current.
73 *
74 * imax 5000 - 29800 uA (800 uA step)
75 */
76int lm3533_ctrlbank_set_max_current(struct lm3533_ctrlbank *cb, u16 imax)
77{
78 u8 reg;
79 u8 val;
80 int ret;
81
82 if (imax < LM3533_MAX_CURRENT_MIN || imax > LM3533_MAX_CURRENT_MAX)
83 return -EINVAL;
84
85 val = (imax - LM3533_MAX_CURRENT_MIN) / LM3533_MAX_CURRENT_STEP;
86
87 reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_MAX_CURRENT_BASE);
88 ret = lm3533_write(cb->lm3533, reg, val);
89 if (ret)
90 dev_err(cb->dev, "failed to set max current\n");
91
92 return ret;
93}
94EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_max_current);
95
/*
 * Generate an exported setter lm3533_ctrlbank_set_<_name>(): range-check
 * val against LM3533_<_NAME>_MAX and write it to the bank's register at
 * LM3533_REG_<_NAME>_BASE + bank id.
 */
#define lm3533_ctrlbank_set(_name, _NAME) \
int lm3533_ctrlbank_set_##_name(struct lm3533_ctrlbank *cb, u8 val) \
{ \
        u8 reg; \
        int ret; \
 \
        if (val > LM3533_##_NAME##_MAX) \
                return -EINVAL; \
 \
        reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_##_NAME##_BASE); \
        ret = lm3533_write(cb->lm3533, reg, val); \
        if (ret) \
                dev_err(cb->dev, "failed to set " #_name "\n"); \
 \
        return ret; \
} \
EXPORT_SYMBOL_GPL(lm3533_ctrlbank_set_##_name);
113
/*
 * Generate an exported getter lm3533_ctrlbank_get_<_name>(): read the
 * bank's register at LM3533_REG_<_NAME>_BASE + bank id into *val.
 */
#define lm3533_ctrlbank_get(_name, _NAME) \
int lm3533_ctrlbank_get_##_name(struct lm3533_ctrlbank *cb, u8 *val) \
{ \
        u8 reg; \
        int ret; \
 \
        reg = lm3533_ctrlbank_get_reg(cb, LM3533_REG_##_NAME##_BASE); \
        ret = lm3533_read(cb->lm3533, reg, val); \
        if (ret) \
                dev_err(cb->dev, "failed to get " #_name "\n"); \
 \
        return ret; \
} \
EXPORT_SYMBOL_GPL(lm3533_ctrlbank_get_##_name);
128
/* Brightness accessors: raw value 0 - LM3533_BRIGHTNESS_MAX. */
lm3533_ctrlbank_set(brightness, BRIGHTNESS);
lm3533_ctrlbank_get(brightness, BRIGHTNESS);

/*
 * PWM-input control mask:
 *
 *   bit 5 - PWM-input enabled in Zone 4
 *   bit 4 - PWM-input enabled in Zone 3
 *   bit 3 - PWM-input enabled in Zone 2
 *   bit 2 - PWM-input enabled in Zone 1
 *   bit 1 - PWM-input enabled in Zone 0
 *   bit 0 - PWM-input enabled
 */
lm3533_ctrlbank_set(pwm, PWM);
lm3533_ctrlbank_get(pwm, PWM);
144
145
146MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
147MODULE_DESCRIPTION("LM3533 Control Bank interface");
148MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/lpc_ich.c b/drivers/mfd/lpc_ich.c
new file mode 100644
index 000000000000..027cc8f86132
--- /dev/null
+++ b/drivers/mfd/lpc_ich.c
@@ -0,0 +1,888 @@
1/*
2 * lpc_ich.c - LPC interface for Intel ICH
3 *
4 * LPC bridge function of the Intel ICH contains many other
5 * functional units, such as Interrupt controllers, Timers,
6 * Power Management, System Management, GPIO, RTC, and LPC
7 * Configuration Registers.
8 *
9 * This driver is derived from lpc_sch.
10
11 * Copyright (c) 2011 Extreme Engineering Solution, Inc.
12 * Author: Aaron Sierra <asierra@xes-inc.com>
13 *
14 * This program is free software; you can redistribute it and/or modify
15 * it under the terms of the GNU General Public License 2 as published
16 * by the Free Software Foundation.
17 *
18 * This program is distributed in the hope that it will be useful,
19 * but WITHOUT ANY WARRANTY; without even the implied warranty of
20 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
21 * GNU General Public License for more details.
22 *
23 * You should have received a copy of the GNU General Public License
24 * along with this program; see the file COPYING. If not, write to
25 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
26 *
27 * This driver supports the following I/O Controller hubs:
28 * (See the intel documentation on http://developer.intel.com.)
29 * document number 290655-003, 290677-014: 82801AA (ICH), 82801AB (ICHO)
30 * document number 290687-002, 298242-027: 82801BA (ICH2)
31 * document number 290733-003, 290739-013: 82801CA (ICH3-S)
32 * document number 290716-001, 290718-007: 82801CAM (ICH3-M)
33 * document number 290744-001, 290745-025: 82801DB (ICH4)
34 * document number 252337-001, 252663-008: 82801DBM (ICH4-M)
35 * document number 273599-001, 273645-002: 82801E (C-ICH)
36 * document number 252516-001, 252517-028: 82801EB (ICH5), 82801ER (ICH5R)
37 * document number 300641-004, 300884-013: 6300ESB
38 * document number 301473-002, 301474-026: 82801F (ICH6)
39 * document number 313082-001, 313075-006: 631xESB, 632xESB
40 * document number 307013-003, 307014-024: 82801G (ICH7)
41 * document number 322896-001, 322897-001: NM10
42 * document number 313056-003, 313057-017: 82801H (ICH8)
43 * document number 316972-004, 316973-012: 82801I (ICH9)
44 * document number 319973-002, 319974-002: 82801J (ICH10)
45 * document number 322169-001, 322170-003: 5 Series, 3400 Series (PCH)
46 * document number 320066-003, 320257-008: EP80597 (IICH)
47 * document number 324645-001, 324646-001: Cougar Point (CPT)
48 * document number TBD : Patsburg (PBG)
49 * document number TBD : DH89xxCC
50 * document number TBD : Panther Point
51 * document number TBD : Lynx Point
52 */
53
54#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
55
56#include <linux/init.h>
57#include <linux/kernel.h>
58#include <linux/module.h>
59#include <linux/errno.h>
60#include <linux/acpi.h>
61#include <linux/pci.h>
62#include <linux/mfd/core.h>
63#include <linux/mfd/lpc_ich.h>
64
/*
 * PCI config-space offsets (ACPIBASE/ACPICTRL/GPIOBASE/GPIOCTRL/RCBABASE)
 * and offsets/bounds of the I/O regions carved out of the ACPI space.
 */
#define ACPIBASE                0x40
#define ACPIBASE_GPE_OFF        0x28
#define ACPIBASE_GPE_END        0x2f
#define ACPIBASE_SMI_OFF        0x30
#define ACPIBASE_SMI_END        0x33
#define ACPIBASE_TCO_OFF        0x60
#define ACPIBASE_TCO_END        0x7f
#define ACPICTRL                0x44

#define ACPIBASE_GCS_OFF        0x3410
#define ACPIBASE_GCS_END        0x3414

#define GPIOBASE                0x48
#define GPIOCTRL                0x4C

#define RCBABASE                0xf0

/* Index helpers into wdt_ich_res[] (I/O resources first, then memory). */
#define wdt_io_res(i) wdt_res(0, i)
#define wdt_mem_res(i) wdt_res(ICH_RES_MEM_OFF, i)
#define wdt_res(b, i) (&wdt_ich_res[(b) + (i)])

/* Original ACPICTRL/GPIOCTRL bytes saved before enabling decode; -1 = none. */
static int lpc_ich_acpi_save = -1;
static int lpc_ich_gpio_save = -1;
88
/* Resources for the iTCO_wdt cell; start/end are filled in at probe. */
static struct resource wdt_ich_res[] = {
        /* ACPI - TCO */
        {
                .flags = IORESOURCE_IO,
        },
        /* ACPI - SMI */
        {
                .flags = IORESOURCE_IO,
        },
        /* GCS */
        {
                .flags = IORESOURCE_MEM,
        },
};
103
/* Resources for the gpio_ich cell; start/end are filled in at probe. */
static struct resource gpio_ich_res[] = {
        /* GPIO */
        {
                .flags = IORESOURCE_IO,
        },
        /* ACPI - GPE0 */
        {
                .flags = IORESOURCE_IO,
        },
};
114
/* Indices into lpc_ich_cells[]. */
enum lpc_cells {
        LPC_WDT = 0,
        LPC_GPIO,
};
119
/*
 * MFD cells for the watchdog and GPIO functions. Resource conflicts are
 * ignored here because conflicts are checked explicitly via ACPI before
 * the cells are registered.
 */
static struct mfd_cell lpc_ich_cells[] = {
        [LPC_WDT] = {
                .name = "iTCO_wdt",
                .num_resources = ARRAY_SIZE(wdt_ich_res),
                .resources = wdt_ich_res,
                .ignore_resource_conflicts = true,
        },
        [LPC_GPIO] = {
                .name = "gpio_ich",
                .num_resources = ARRAY_SIZE(gpio_ich_res),
                .resources = gpio_ich_res,
                .ignore_resource_conflicts = true,
        },
};
134
/*
 * Chipset-related info: one entry per supported chipset, used as the
 * driver_data index into lpc_chipset_info[] from lpc_ich_ids[].
 */
enum lpc_chipsets {
        LPC_ICH = 0,    /* ICH */
        LPC_ICH0,       /* ICH0 */
        LPC_ICH2,       /* ICH2 */
        LPC_ICH2M,      /* ICH2-M */
        LPC_ICH3,       /* ICH3-S */
        LPC_ICH3M,      /* ICH3-M */
        LPC_ICH4,       /* ICH4 */
        LPC_ICH4M,      /* ICH4-M */
        LPC_CICH,       /* C-ICH */
        LPC_ICH5,       /* ICH5 & ICH5R */
        LPC_6300ESB,    /* 6300ESB */
        LPC_ICH6,       /* ICH6 & ICH6R */
        LPC_ICH6M,      /* ICH6-M */
        LPC_ICH6W,      /* ICH6W & ICH6RW */
        LPC_631XESB,    /* 631xESB/632xESB */
        LPC_ICH7,       /* ICH7 & ICH7R */
        LPC_ICH7DH,     /* ICH7DH */
        LPC_ICH7M,      /* ICH7-M & ICH7-U */
        LPC_ICH7MDH,    /* ICH7-M DH */
        LPC_NM10,       /* NM10 */
        LPC_ICH8,       /* ICH8 & ICH8R */
        LPC_ICH8DH,     /* ICH8DH */
        LPC_ICH8DO,     /* ICH8DO */
        LPC_ICH8M,      /* ICH8M */
        LPC_ICH8ME,     /* ICH8M-E */
        LPC_ICH9,       /* ICH9 */
        LPC_ICH9R,      /* ICH9R */
        LPC_ICH9DH,     /* ICH9DH */
        LPC_ICH9DO,     /* ICH9DO */
        LPC_ICH9M,      /* ICH9M */
        LPC_ICH9ME,     /* ICH9M-E */
        LPC_ICH10,      /* ICH10 */
        LPC_ICH10R,     /* ICH10R */
        LPC_ICH10D,     /* ICH10D */
        LPC_ICH10DO,    /* ICH10DO */
        LPC_PCH,        /* PCH Desktop Full Featured */
        LPC_PCHM,       /* PCH Mobile Full Featured */
        LPC_P55,        /* P55 */
        LPC_PM55,       /* PM55 */
        LPC_H55,        /* H55 */
        LPC_QM57,       /* QM57 */
        LPC_H57,        /* H57 */
        LPC_HM55,       /* HM55 */
        LPC_Q57,        /* Q57 */
        LPC_HM57,       /* HM57 */
        LPC_PCHMSFF,    /* PCH Mobile SFF Full Featured */
        LPC_QS57,       /* QS57 */
        LPC_3400,       /* 3400 */
        LPC_3420,       /* 3420 */
        LPC_3450,       /* 3450 */
        LPC_EP80579,    /* EP80579 */
        LPC_CPT,        /* Cougar Point */
        LPC_CPTD,       /* Cougar Point Desktop */
        LPC_CPTM,       /* Cougar Point Mobile */
        LPC_PBG,        /* Patsburg */
        LPC_DH89XXCC,   /* DH89xxCC */
        LPC_PPT,        /* Panther Point */
        LPC_LPT,        /* Lynx Point */
};
196
197struct lpc_ich_info lpc_chipset_info[] __devinitdata = {
198 [LPC_ICH] = {
199 .name = "ICH",
200 .iTCO_version = 1,
201 },
202 [LPC_ICH0] = {
203 .name = "ICH0",
204 .iTCO_version = 1,
205 },
206 [LPC_ICH2] = {
207 .name = "ICH2",
208 .iTCO_version = 1,
209 },
210 [LPC_ICH2M] = {
211 .name = "ICH2-M",
212 .iTCO_version = 1,
213 },
214 [LPC_ICH3] = {
215 .name = "ICH3-S",
216 .iTCO_version = 1,
217 },
218 [LPC_ICH3M] = {
219 .name = "ICH3-M",
220 .iTCO_version = 1,
221 },
222 [LPC_ICH4] = {
223 .name = "ICH4",
224 .iTCO_version = 1,
225 },
226 [LPC_ICH4M] = {
227 .name = "ICH4-M",
228 .iTCO_version = 1,
229 },
230 [LPC_CICH] = {
231 .name = "C-ICH",
232 .iTCO_version = 1,
233 },
234 [LPC_ICH5] = {
235 .name = "ICH5 or ICH5R",
236 .iTCO_version = 1,
237 },
238 [LPC_6300ESB] = {
239 .name = "6300ESB",
240 .iTCO_version = 1,
241 },
242 [LPC_ICH6] = {
243 .name = "ICH6 or ICH6R",
244 .iTCO_version = 2,
245 .gpio_version = ICH_V6_GPIO,
246 },
247 [LPC_ICH6M] = {
248 .name = "ICH6-M",
249 .iTCO_version = 2,
250 .gpio_version = ICH_V6_GPIO,
251 },
252 [LPC_ICH6W] = {
253 .name = "ICH6W or ICH6RW",
254 .iTCO_version = 2,
255 .gpio_version = ICH_V6_GPIO,
256 },
257 [LPC_631XESB] = {
258 .name = "631xESB/632xESB",
259 .iTCO_version = 2,
260 .gpio_version = ICH_V6_GPIO,
261 },
262 [LPC_ICH7] = {
263 .name = "ICH7 or ICH7R",
264 .iTCO_version = 2,
265 .gpio_version = ICH_V7_GPIO,
266 },
267 [LPC_ICH7DH] = {
268 .name = "ICH7DH",
269 .iTCO_version = 2,
270 .gpio_version = ICH_V7_GPIO,
271 },
272 [LPC_ICH7M] = {
273 .name = "ICH7-M or ICH7-U",
274 .iTCO_version = 2,
275 .gpio_version = ICH_V7_GPIO,
276 },
277 [LPC_ICH7MDH] = {
278 .name = "ICH7-M DH",
279 .iTCO_version = 2,
280 .gpio_version = ICH_V7_GPIO,
281 },
282 [LPC_NM10] = {
283 .name = "NM10",
284 .iTCO_version = 2,
285 },
286 [LPC_ICH8] = {
287 .name = "ICH8 or ICH8R",
288 .iTCO_version = 2,
289 .gpio_version = ICH_V7_GPIO,
290 },
291 [LPC_ICH8DH] = {
292 .name = "ICH8DH",
293 .iTCO_version = 2,
294 .gpio_version = ICH_V7_GPIO,
295 },
296 [LPC_ICH8DO] = {
297 .name = "ICH8DO",
298 .iTCO_version = 2,
299 .gpio_version = ICH_V7_GPIO,
300 },
301 [LPC_ICH8M] = {
302 .name = "ICH8M",
303 .iTCO_version = 2,
304 .gpio_version = ICH_V7_GPIO,
305 },
306 [LPC_ICH8ME] = {
307 .name = "ICH8M-E",
308 .iTCO_version = 2,
309 .gpio_version = ICH_V7_GPIO,
310 },
311 [LPC_ICH9] = {
312 .name = "ICH9",
313 .iTCO_version = 2,
314 .gpio_version = ICH_V9_GPIO,
315 },
316 [LPC_ICH9R] = {
317 .name = "ICH9R",
318 .iTCO_version = 2,
319 .gpio_version = ICH_V9_GPIO,
320 },
321 [LPC_ICH9DH] = {
322 .name = "ICH9DH",
323 .iTCO_version = 2,
324 .gpio_version = ICH_V9_GPIO,
325 },
326 [LPC_ICH9DO] = {
327 .name = "ICH9DO",
328 .iTCO_version = 2,
329 .gpio_version = ICH_V9_GPIO,
330 },
331 [LPC_ICH9M] = {
332 .name = "ICH9M",
333 .iTCO_version = 2,
334 .gpio_version = ICH_V9_GPIO,
335 },
336 [LPC_ICH9ME] = {
337 .name = "ICH9M-E",
338 .iTCO_version = 2,
339 .gpio_version = ICH_V9_GPIO,
340 },
341 [LPC_ICH10] = {
342 .name = "ICH10",
343 .iTCO_version = 2,
344 .gpio_version = ICH_V10CONS_GPIO,
345 },
346 [LPC_ICH10R] = {
347 .name = "ICH10R",
348 .iTCO_version = 2,
349 .gpio_version = ICH_V10CONS_GPIO,
350 },
351 [LPC_ICH10D] = {
352 .name = "ICH10D",
353 .iTCO_version = 2,
354 .gpio_version = ICH_V10CORP_GPIO,
355 },
356 [LPC_ICH10DO] = {
357 .name = "ICH10DO",
358 .iTCO_version = 2,
359 .gpio_version = ICH_V10CORP_GPIO,
360 },
361 [LPC_PCH] = {
362 .name = "PCH Desktop Full Featured",
363 .iTCO_version = 2,
364 .gpio_version = ICH_V5_GPIO,
365 },
366 [LPC_PCHM] = {
367 .name = "PCH Mobile Full Featured",
368 .iTCO_version = 2,
369 .gpio_version = ICH_V5_GPIO,
370 },
371 [LPC_P55] = {
372 .name = "P55",
373 .iTCO_version = 2,
374 .gpio_version = ICH_V5_GPIO,
375 },
376 [LPC_PM55] = {
377 .name = "PM55",
378 .iTCO_version = 2,
379 .gpio_version = ICH_V5_GPIO,
380 },
381 [LPC_H55] = {
382 .name = "H55",
383 .iTCO_version = 2,
384 .gpio_version = ICH_V5_GPIO,
385 },
386 [LPC_QM57] = {
387 .name = "QM57",
388 .iTCO_version = 2,
389 .gpio_version = ICH_V5_GPIO,
390 },
391 [LPC_H57] = {
392 .name = "H57",
393 .iTCO_version = 2,
394 .gpio_version = ICH_V5_GPIO,
395 },
396 [LPC_HM55] = {
397 .name = "HM55",
398 .iTCO_version = 2,
399 .gpio_version = ICH_V5_GPIO,
400 },
401 [LPC_Q57] = {
402 .name = "Q57",
403 .iTCO_version = 2,
404 .gpio_version = ICH_V5_GPIO,
405 },
406 [LPC_HM57] = {
407 .name = "HM57",
408 .iTCO_version = 2,
409 .gpio_version = ICH_V5_GPIO,
410 },
411 [LPC_PCHMSFF] = {
412 .name = "PCH Mobile SFF Full Featured",
413 .iTCO_version = 2,
414 .gpio_version = ICH_V5_GPIO,
415 },
416 [LPC_QS57] = {
417 .name = "QS57",
418 .iTCO_version = 2,
419 .gpio_version = ICH_V5_GPIO,
420 },
421 [LPC_3400] = {
422 .name = "3400",
423 .iTCO_version = 2,
424 .gpio_version = ICH_V5_GPIO,
425 },
426 [LPC_3420] = {
427 .name = "3420",
428 .iTCO_version = 2,
429 .gpio_version = ICH_V5_GPIO,
430 },
431 [LPC_3450] = {
432 .name = "3450",
433 .iTCO_version = 2,
434 .gpio_version = ICH_V5_GPIO,
435 },
436 [LPC_EP80579] = {
437 .name = "EP80579",
438 .iTCO_version = 2,
439 },
440 [LPC_CPT] = {
441 .name = "Cougar Point",
442 .iTCO_version = 2,
443 .gpio_version = ICH_V5_GPIO,
444 },
445 [LPC_CPTD] = {
446 .name = "Cougar Point Desktop",
447 .iTCO_version = 2,
448 .gpio_version = ICH_V5_GPIO,
449 },
450 [LPC_CPTM] = {
451 .name = "Cougar Point Mobile",
452 .iTCO_version = 2,
453 .gpio_version = ICH_V5_GPIO,
454 },
455 [LPC_PBG] = {
456 .name = "Patsburg",
457 .iTCO_version = 2,
458 },
459 [LPC_DH89XXCC] = {
460 .name = "DH89xxCC",
461 .iTCO_version = 2,
462 },
463 [LPC_PPT] = {
464 .name = "Panther Point",
465 .iTCO_version = 2,
466 },
467 [LPC_LPT] = {
468 .name = "Lynx Point",
469 .iTCO_version = 2,
470 },
471};
472
473/*
474 * This data only exists for exporting the supported PCI ids
475 * via MODULE_DEVICE_TABLE. We do not actually register a
476 * pci_driver, because the I/O Controller Hub has also other
477 * functions that probably will be registered by other drivers.
478 */
/* driver_data of each entry is an enum lpc_chipsets index. */
static DEFINE_PCI_DEVICE_TABLE(lpc_ich_ids) = {
        { PCI_VDEVICE(INTEL, 0x2410), LPC_ICH},
        { PCI_VDEVICE(INTEL, 0x2420), LPC_ICH0},
        { PCI_VDEVICE(INTEL, 0x2440), LPC_ICH2},
        { PCI_VDEVICE(INTEL, 0x244c), LPC_ICH2M},
        { PCI_VDEVICE(INTEL, 0x2480), LPC_ICH3},
        { PCI_VDEVICE(INTEL, 0x248c), LPC_ICH3M},
        { PCI_VDEVICE(INTEL, 0x24c0), LPC_ICH4},
        { PCI_VDEVICE(INTEL, 0x24cc), LPC_ICH4M},
        { PCI_VDEVICE(INTEL, 0x2450), LPC_CICH},
        { PCI_VDEVICE(INTEL, 0x24d0), LPC_ICH5},
        { PCI_VDEVICE(INTEL, 0x25a1), LPC_6300ESB},
        { PCI_VDEVICE(INTEL, 0x2640), LPC_ICH6},
        { PCI_VDEVICE(INTEL, 0x2641), LPC_ICH6M},
        { PCI_VDEVICE(INTEL, 0x2642), LPC_ICH6W},
        { PCI_VDEVICE(INTEL, 0x2670), LPC_631XESB},
        { PCI_VDEVICE(INTEL, 0x2671), LPC_631XESB},
        { PCI_VDEVICE(INTEL, 0x2672), LPC_631XESB},
        { PCI_VDEVICE(INTEL, 0x2673), LPC_631XESB},
        { PCI_VDEVICE(INTEL, 0x2674), LPC_631XESB},
        { PCI_VDEVICE(INTEL, 0x2675), LPC_631XESB},
        { PCI_VDEVICE(INTEL, 0x2676), LPC_631XESB},
        { PCI_VDEVICE(INTEL, 0x2677), LPC_631XESB},
        { PCI_VDEVICE(INTEL, 0x2678), LPC_631XESB},
        { PCI_VDEVICE(INTEL, 0x2679), LPC_631XESB},
        { PCI_VDEVICE(INTEL, 0x267a), LPC_631XESB},
        { PCI_VDEVICE(INTEL, 0x267b), LPC_631XESB},
        { PCI_VDEVICE(INTEL, 0x267c), LPC_631XESB},
        { PCI_VDEVICE(INTEL, 0x267d), LPC_631XESB},
        { PCI_VDEVICE(INTEL, 0x267e), LPC_631XESB},
        { PCI_VDEVICE(INTEL, 0x267f), LPC_631XESB},
        { PCI_VDEVICE(INTEL, 0x27b8), LPC_ICH7},
        { PCI_VDEVICE(INTEL, 0x27b0), LPC_ICH7DH},
        { PCI_VDEVICE(INTEL, 0x27b9), LPC_ICH7M},
        { PCI_VDEVICE(INTEL, 0x27bd), LPC_ICH7MDH},
        { PCI_VDEVICE(INTEL, 0x27bc), LPC_NM10},
        { PCI_VDEVICE(INTEL, 0x2810), LPC_ICH8},
        { PCI_VDEVICE(INTEL, 0x2812), LPC_ICH8DH},
        { PCI_VDEVICE(INTEL, 0x2814), LPC_ICH8DO},
        { PCI_VDEVICE(INTEL, 0x2815), LPC_ICH8M},
        { PCI_VDEVICE(INTEL, 0x2811), LPC_ICH8ME},
        { PCI_VDEVICE(INTEL, 0x2918), LPC_ICH9},
        { PCI_VDEVICE(INTEL, 0x2916), LPC_ICH9R},
        { PCI_VDEVICE(INTEL, 0x2912), LPC_ICH9DH},
        { PCI_VDEVICE(INTEL, 0x2914), LPC_ICH9DO},
        { PCI_VDEVICE(INTEL, 0x2919), LPC_ICH9M},
        { PCI_VDEVICE(INTEL, 0x2917), LPC_ICH9ME},
        { PCI_VDEVICE(INTEL, 0x3a18), LPC_ICH10},
        { PCI_VDEVICE(INTEL, 0x3a16), LPC_ICH10R},
        { PCI_VDEVICE(INTEL, 0x3a1a), LPC_ICH10D},
        { PCI_VDEVICE(INTEL, 0x3a14), LPC_ICH10DO},
        { PCI_VDEVICE(INTEL, 0x3b00), LPC_PCH},
        { PCI_VDEVICE(INTEL, 0x3b01), LPC_PCHM},
        { PCI_VDEVICE(INTEL, 0x3b02), LPC_P55},
        { PCI_VDEVICE(INTEL, 0x3b03), LPC_PM55},
        { PCI_VDEVICE(INTEL, 0x3b06), LPC_H55},
        { PCI_VDEVICE(INTEL, 0x3b07), LPC_QM57},
        { PCI_VDEVICE(INTEL, 0x3b08), LPC_H57},
        { PCI_VDEVICE(INTEL, 0x3b09), LPC_HM55},
        { PCI_VDEVICE(INTEL, 0x3b0a), LPC_Q57},
        { PCI_VDEVICE(INTEL, 0x3b0b), LPC_HM57},
        { PCI_VDEVICE(INTEL, 0x3b0d), LPC_PCHMSFF},
        { PCI_VDEVICE(INTEL, 0x3b0f), LPC_QS57},
        { PCI_VDEVICE(INTEL, 0x3b12), LPC_3400},
        { PCI_VDEVICE(INTEL, 0x3b14), LPC_3420},
        { PCI_VDEVICE(INTEL, 0x3b16), LPC_3450},
        { PCI_VDEVICE(INTEL, 0x5031), LPC_EP80579},
        { PCI_VDEVICE(INTEL, 0x1c41), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c42), LPC_CPTD},
        { PCI_VDEVICE(INTEL, 0x1c43), LPC_CPTM},
        { PCI_VDEVICE(INTEL, 0x1c44), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c45), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c46), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c47), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c48), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c49), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c4a), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c4b), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c4c), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c4d), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c4e), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c4f), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c50), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c51), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c52), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c53), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c54), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c55), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c56), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c57), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c58), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c59), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c5a), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c5b), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c5c), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c5d), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c5e), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1c5f), LPC_CPT},
        { PCI_VDEVICE(INTEL, 0x1d40), LPC_PBG},
        { PCI_VDEVICE(INTEL, 0x1d41), LPC_PBG},
        { PCI_VDEVICE(INTEL, 0x2310), LPC_DH89XXCC},
        { PCI_VDEVICE(INTEL, 0x1e40), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e41), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e42), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e43), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e44), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e45), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e46), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e47), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e48), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e49), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e4a), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e4b), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e4c), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e4d), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e4e), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e4f), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e50), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e51), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e52), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e53), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e54), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e55), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e56), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e57), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e58), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e59), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e5a), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e5b), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e5c), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e5d), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e5e), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x1e5f), LPC_PPT},
        { PCI_VDEVICE(INTEL, 0x8c40), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c41), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c42), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c43), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c44), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c45), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c46), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c47), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c48), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c49), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c4a), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c4b), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c4c), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c4d), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c4e), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c4f), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c50), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c51), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c52), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c53), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c54), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c55), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c56), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c57), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c58), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c59), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c5a), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c5b), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c5c), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c5d), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c5e), LPC_LPT},
        { PCI_VDEVICE(INTEL, 0x8c5f), LPC_LPT},
        { 0, },                 /* End of list */
};
646MODULE_DEVICE_TABLE(pci, lpc_ich_ids);
647
/*
 * Write back the original ACPICTRL/GPIOCTRL bytes saved by the enable
 * helpers, then clear the saved values so this is one-shot.
 */
static void lpc_ich_restore_config_space(struct pci_dev *dev)
{
        if (lpc_ich_acpi_save >= 0) {
                pci_write_config_byte(dev, ACPICTRL, lpc_ich_acpi_save);
                lpc_ich_acpi_save = -1;
        }

        if (lpc_ich_gpio_save >= 0) {
                pci_write_config_byte(dev, GPIOCTRL, lpc_ich_gpio_save);
                lpc_ich_gpio_save = -1;
        }
}
660
661static void __devinit lpc_ich_enable_acpi_space(struct pci_dev *dev)
662{
663 u8 reg_save;
664
665 pci_read_config_byte(dev, ACPICTRL, &reg_save);
666 pci_write_config_byte(dev, ACPICTRL, reg_save | 0x10);
667 lpc_ich_acpi_save = reg_save;
668}
669
670static void __devinit lpc_ich_enable_gpio_space(struct pci_dev *dev)
671{
672 u8 reg_save;
673
674 pci_read_config_byte(dev, GPIOCTRL, &reg_save);
675 pci_write_config_byte(dev, GPIOCTRL, reg_save | 0x10);
676 lpc_ich_gpio_save = reg_save;
677}
678
/* Attach this device's chipset info to the cell before registration. */
static void __devinit lpc_ich_finalize_cell(struct mfd_cell *cell,
                                const struct pci_device_id *id)
{
        cell->platform_data = &lpc_chipset_info[id->driver_data];
        cell->pdata_size = sizeof(struct lpc_ich_info);
}
685
/*
 * Discover the ACPI (GPE0) and GPIO I/O regions from PCI config space,
 * check them for ACPI conflicts, enable their decode, and register the
 * gpio_ich MFD cell.
 *
 * Note: mutates the global lpc_ich_cells[LPC_GPIO].num_resources when
 * the GPE0 resource is unavailable — this relies on GPE0 being the last
 * entry in gpio_ich_res[] and on a single LPC device per system.
 *
 * Returns 0 on success or a negative errno (GPE0 problems alone are
 * non-fatal; only a missing/conflicting GPIO region fails the init).
 */
static int __devinit lpc_ich_init_gpio(struct pci_dev *dev,
                                const struct pci_device_id *id)
{
        u32 base_addr_cfg;
        u32 base_addr;
        int ret;
        bool acpi_conflict = false;
        struct resource *res;

        /* Setup power management base register */
        pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
        base_addr = base_addr_cfg & 0x0000ff80;
        if (!base_addr) {
                dev_err(&dev->dev, "I/O space for ACPI uninitialized\n");
                lpc_ich_cells[LPC_GPIO].num_resources--;
                goto gpe0_done;
        }

        res = &gpio_ich_res[ICH_RES_GPE0];
        res->start = base_addr + ACPIBASE_GPE_OFF;
        res->end = base_addr + ACPIBASE_GPE_END;
        ret = acpi_check_resource_conflict(res);
        if (ret) {
                /*
                 * This isn't fatal for the GPIO, but we have to make sure that
                 * the platform_device subsystem doesn't see this resource
                 * or it will register an invalid region.
                 */
                lpc_ich_cells[LPC_GPIO].num_resources--;
                acpi_conflict = true;
        } else {
                lpc_ich_enable_acpi_space(dev);
        }

gpe0_done:
        /* Setup GPIO base register */
        pci_read_config_dword(dev, GPIOBASE, &base_addr_cfg);
        base_addr = base_addr_cfg & 0x0000ff80;
        if (!base_addr) {
                dev_err(&dev->dev, "I/O space for GPIO uninitialized\n");
                ret = -ENODEV;
                goto gpio_done;
        }

        /* Older devices provide fewer GPIO and have a smaller resource size. */
        res = &gpio_ich_res[ICH_RES_GPIO];
        res->start = base_addr;
        switch (lpc_chipset_info[id->driver_data].gpio_version) {
        case ICH_V5_GPIO:
        case ICH_V10CORP_GPIO:
                res->end = res->start + 128 - 1;
                break;
        default:
                res->end = res->start + 64 - 1;
                break;
        }

        ret = acpi_check_resource_conflict(res);
        if (ret) {
                /* this isn't necessarily fatal for the GPIO */
                acpi_conflict = true;
                goto gpio_done;
        }
        lpc_ich_enable_gpio_space(dev);

        lpc_ich_finalize_cell(&lpc_ich_cells[LPC_GPIO], id);
        ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_GPIO],
                              1, NULL, 0);

gpio_done:
        if (acpi_conflict)
                pr_warn("Resource conflict(s) found affecting %s\n",
                                lpc_ich_cells[LPC_GPIO].name);
        return ret;
}
761
762static int __devinit lpc_ich_init_wdt(struct pci_dev *dev,
763 const struct pci_device_id *id)
764{
765 u32 base_addr_cfg;
766 u32 base_addr;
767 int ret;
768 bool acpi_conflict = false;
769 struct resource *res;
770
771 /* Setup power management base register */
772 pci_read_config_dword(dev, ACPIBASE, &base_addr_cfg);
773 base_addr = base_addr_cfg & 0x0000ff80;
774 if (!base_addr) {
775 dev_err(&dev->dev, "I/O space for ACPI uninitialized\n");
776 ret = -ENODEV;
777 goto wdt_done;
778 }
779
780 res = wdt_io_res(ICH_RES_IO_TCO);
781 res->start = base_addr + ACPIBASE_TCO_OFF;
782 res->end = base_addr + ACPIBASE_TCO_END;
783 ret = acpi_check_resource_conflict(res);
784 if (ret) {
785 acpi_conflict = true;
786 goto wdt_done;
787 }
788
789 res = wdt_io_res(ICH_RES_IO_SMI);
790 res->start = base_addr + ACPIBASE_SMI_OFF;
791 res->end = base_addr + ACPIBASE_SMI_END;
792 ret = acpi_check_resource_conflict(res);
793 if (ret) {
794 acpi_conflict = true;
795 goto wdt_done;
796 }
797 lpc_ich_enable_acpi_space(dev);
798
799 /*
800 * Get the Memory-Mapped GCS register. To get access to it
801 * we have to read RCBA from PCI Config space 0xf0 and use
802 * it as base. GCS = RCBA + ICH6_GCS(0x3410).
803 */
804 if (lpc_chipset_info[id->driver_data].iTCO_version == 2) {
805 pci_read_config_dword(dev, RCBABASE, &base_addr_cfg);
806 base_addr = base_addr_cfg & 0xffffc000;
807 if (!(base_addr_cfg & 1)) {
808 pr_err("RCBA is disabled by hardware/BIOS, "
809 "device disabled\n");
810 ret = -ENODEV;
811 goto wdt_done;
812 }
813 res = wdt_mem_res(ICH_RES_MEM_GCS);
814 res->start = base_addr + ACPIBASE_GCS_OFF;
815 res->end = base_addr + ACPIBASE_GCS_END;
816 ret = acpi_check_resource_conflict(res);
817 if (ret) {
818 acpi_conflict = true;
819 goto wdt_done;
820 }
821 }
822
823 lpc_ich_finalize_cell(&lpc_ich_cells[LPC_WDT], id);
824 ret = mfd_add_devices(&dev->dev, -1, &lpc_ich_cells[LPC_WDT],
825 1, NULL, 0);
826
827wdt_done:
828 if (acpi_conflict)
829 pr_warn("Resource conflict(s) found affecting %s\n",
830 lpc_ich_cells[LPC_WDT].name);
831 return ret;
832}
833
834static int __devinit lpc_ich_probe(struct pci_dev *dev,
835 const struct pci_device_id *id)
836{
837 int ret;
838 bool cell_added = false;
839
840 ret = lpc_ich_init_wdt(dev, id);
841 if (!ret)
842 cell_added = true;
843
844 ret = lpc_ich_init_gpio(dev, id);
845 if (!ret)
846 cell_added = true;
847
848 /*
849 * We only care if at least one or none of the cells registered
850 * successfully.
851 */
852 if (!cell_added) {
853 lpc_ich_restore_config_space(dev);
854 return -ENODEV;
855 }
856
857 return 0;
858}
859
860static void __devexit lpc_ich_remove(struct pci_dev *dev)
861{
862 mfd_remove_devices(&dev->dev);
863 lpc_ich_restore_config_space(dev);
864}
865
866static struct pci_driver lpc_ich_driver = {
867 .name = "lpc_ich",
868 .id_table = lpc_ich_ids,
869 .probe = lpc_ich_probe,
870 .remove = __devexit_p(lpc_ich_remove),
871};
872
873static int __init lpc_ich_init(void)
874{
875 return pci_register_driver(&lpc_ich_driver);
876}
877
878static void __exit lpc_ich_exit(void)
879{
880 pci_unregister_driver(&lpc_ich_driver);
881}
882
883module_init(lpc_ich_init);
884module_exit(lpc_ich_exit);
885
886MODULE_AUTHOR("Aaron Sierra <asierra@xes-inc.com>");
887MODULE_DESCRIPTION("LPC interface for Intel ICH");
888MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/lpc_sch.c b/drivers/mfd/lpc_sch.c
index abc421364a45..9f20abc5e393 100644
--- a/drivers/mfd/lpc_sch.c
+++ b/drivers/mfd/lpc_sch.c
@@ -36,6 +36,7 @@
36 36
37#define GPIOBASE 0x44 37#define GPIOBASE 0x44
38#define GPIO_IO_SIZE 64 38#define GPIO_IO_SIZE 64
39#define GPIO_IO_SIZE_CENTERTON 128
39 40
40#define WDTBASE 0x84 41#define WDTBASE 0x84
41#define WDT_IO_SIZE 64 42#define WDT_IO_SIZE 64
@@ -68,7 +69,7 @@ static struct resource wdt_sch_resource = {
68 69
69static struct mfd_cell tunnelcreek_cells[] = { 70static struct mfd_cell tunnelcreek_cells[] = {
70 { 71 {
71 .name = "tunnelcreek_wdt", 72 .name = "ie6xx_wdt",
72 .num_resources = 1, 73 .num_resources = 1,
73 .resources = &wdt_sch_resource, 74 .resources = &wdt_sch_resource,
74 }, 75 },
@@ -77,6 +78,7 @@ static struct mfd_cell tunnelcreek_cells[] = {
77static DEFINE_PCI_DEVICE_TABLE(lpc_sch_ids) = { 78static DEFINE_PCI_DEVICE_TABLE(lpc_sch_ids) = {
78 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) }, 79 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_SCH_LPC) },
79 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ITC_LPC) }, 80 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ITC_LPC) },
81 { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CENTERTON_ILB) },
80 { 0, } 82 { 0, }
81}; 83};
82MODULE_DEVICE_TABLE(pci, lpc_sch_ids); 84MODULE_DEVICE_TABLE(pci, lpc_sch_ids);
@@ -115,7 +117,11 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
115 } 117 }
116 118
117 gpio_sch_resource.start = base_addr; 119 gpio_sch_resource.start = base_addr;
118 gpio_sch_resource.end = base_addr + GPIO_IO_SIZE - 1; 120
121 if (id->device == PCI_DEVICE_ID_INTEL_CENTERTON_ILB)
122 gpio_sch_resource.end = base_addr + GPIO_IO_SIZE_CENTERTON - 1;
123 else
124 gpio_sch_resource.end = base_addr + GPIO_IO_SIZE - 1;
119 125
120 for (i=0; i < ARRAY_SIZE(lpc_sch_cells); i++) 126 for (i=0; i < ARRAY_SIZE(lpc_sch_cells); i++)
121 lpc_sch_cells[i].id = id->device; 127 lpc_sch_cells[i].id = id->device;
@@ -125,7 +131,8 @@ static int __devinit lpc_sch_probe(struct pci_dev *dev,
125 if (ret) 131 if (ret)
126 goto out_dev; 132 goto out_dev;
127 133
128 if (id->device == PCI_DEVICE_ID_INTEL_ITC_LPC) { 134 if (id->device == PCI_DEVICE_ID_INTEL_ITC_LPC
135 || id->device == PCI_DEVICE_ID_INTEL_CENTERTON_ILB) {
129 pci_read_config_dword(dev, WDTBASE, &base_addr_cfg); 136 pci_read_config_dword(dev, WDTBASE, &base_addr_cfg);
130 if (!(base_addr_cfg & (1 << 31))) { 137 if (!(base_addr_cfg & (1 << 31))) {
131 dev_err(&dev->dev, "Decode of the WDT I/O range disabled\n"); 138 dev_err(&dev->dev, "Decode of the WDT I/O range disabled\n");
@@ -167,18 +174,7 @@ static struct pci_driver lpc_sch_driver = {
167 .remove = __devexit_p(lpc_sch_remove), 174 .remove = __devexit_p(lpc_sch_remove),
168}; 175};
169 176
170static int __init lpc_sch_init(void) 177module_pci_driver(lpc_sch_driver);
171{
172 return pci_register_driver(&lpc_sch_driver);
173}
174
175static void __exit lpc_sch_exit(void)
176{
177 pci_unregister_driver(&lpc_sch_driver);
178}
179
180module_init(lpc_sch_init);
181module_exit(lpc_sch_exit);
182 178
183MODULE_AUTHOR("Denis Turischev <denis@compulab.co.il>"); 179MODULE_AUTHOR("Denis Turischev <denis@compulab.co.il>");
184MODULE_DESCRIPTION("LPC interface for Intel Poulsbo SCH"); 180MODULE_DESCRIPTION("LPC interface for Intel Poulsbo SCH");
diff --git a/drivers/mfd/max77693-irq.c b/drivers/mfd/max77693-irq.c
new file mode 100644
index 000000000000..2b403569e0a6
--- /dev/null
+++ b/drivers/mfd/max77693-irq.c
@@ -0,0 +1,309 @@
1/*
2 * max77693-irq.c - Interrupt controller support for MAX77693
3 *
4 * Copyright (C) 2012 Samsung Electronics Co.Ltd
5 * SangYoung Son <hello.son@samsung.com>
6 *
7 * This program is not provided / owned by Maxim Integrated Products.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 * This driver is based on max8997-irq.c
24 */
25
26#include <linux/err.h>
27#include <linux/irq.h>
28#include <linux/interrupt.h>
29#include <linux/module.h>
30#include <linux/irqdomain.h>
31#include <linux/mfd/max77693.h>
32#include <linux/mfd/max77693-private.h>
33
34static const u8 max77693_mask_reg[] = {
35 [LED_INT] = MAX77693_LED_REG_FLASH_INT_MASK,
36 [TOPSYS_INT] = MAX77693_PMIC_REG_TOPSYS_INT_MASK,
37 [CHG_INT] = MAX77693_CHG_REG_CHG_INT_MASK,
38 [MUIC_INT1] = MAX77693_MUIC_REG_INTMASK1,
39 [MUIC_INT2] = MAX77693_MUIC_REG_INTMASK2,
40 [MUIC_INT3] = MAX77693_MUIC_REG_INTMASK3,
41};
42
43static struct regmap *max77693_get_regmap(struct max77693_dev *max77693,
44 enum max77693_irq_source src)
45{
46 switch (src) {
47 case LED_INT ... CHG_INT:
48 return max77693->regmap;
49 case MUIC_INT1 ... MUIC_INT3:
50 return max77693->regmap_muic;
51 default:
52 return ERR_PTR(-EINVAL);
53 }
54}
55
56struct max77693_irq_data {
57 int mask;
58 enum max77693_irq_source group;
59};
60
61#define DECLARE_IRQ(idx, _group, _mask) \
62 [(idx)] = { .group = (_group), .mask = (_mask) }
63static const struct max77693_irq_data max77693_irqs[] = {
64 DECLARE_IRQ(MAX77693_LED_IRQ_FLED2_OPEN, LED_INT, 1 << 0),
65 DECLARE_IRQ(MAX77693_LED_IRQ_FLED2_SHORT, LED_INT, 1 << 1),
66 DECLARE_IRQ(MAX77693_LED_IRQ_FLED1_OPEN, LED_INT, 1 << 2),
67 DECLARE_IRQ(MAX77693_LED_IRQ_FLED1_SHORT, LED_INT, 1 << 3),
68 DECLARE_IRQ(MAX77693_LED_IRQ_MAX_FLASH, LED_INT, 1 << 4),
69
70 DECLARE_IRQ(MAX77693_TOPSYS_IRQ_T120C_INT, TOPSYS_INT, 1 << 0),
71 DECLARE_IRQ(MAX77693_TOPSYS_IRQ_T140C_INT, TOPSYS_INT, 1 << 1),
72 DECLARE_IRQ(MAX77693_TOPSYS_IRQ_LOWSYS_INT, TOPSYS_INT, 1 << 3),
73
74 DECLARE_IRQ(MAX77693_CHG_IRQ_BYP_I, CHG_INT, 1 << 0),
75 DECLARE_IRQ(MAX77693_CHG_IRQ_THM_I, CHG_INT, 1 << 2),
76 DECLARE_IRQ(MAX77693_CHG_IRQ_BAT_I, CHG_INT, 1 << 3),
77 DECLARE_IRQ(MAX77693_CHG_IRQ_CHG_I, CHG_INT, 1 << 4),
78 DECLARE_IRQ(MAX77693_CHG_IRQ_CHGIN_I, CHG_INT, 1 << 6),
79
80 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC, MUIC_INT1, 1 << 0),
81 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC_LOW, MUIC_INT1, 1 << 1),
82 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC_ERR, MUIC_INT1, 1 << 2),
83 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT1_ADC1K, MUIC_INT1, 1 << 3),
84
85 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_CHGTYP, MUIC_INT2, 1 << 0),
86 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_CHGDETREUN, MUIC_INT2, 1 << 1),
87 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_DCDTMR, MUIC_INT2, 1 << 2),
88 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_DXOVP, MUIC_INT2, 1 << 3),
89 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_VBVOLT, MUIC_INT2, 1 << 4),
90 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT2_VIDRM, MUIC_INT2, 1 << 5),
91
92 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_EOC, MUIC_INT3, 1 << 0),
93 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_CGMBC, MUIC_INT3, 1 << 1),
94 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_OVP, MUIC_INT3, 1 << 2),
95 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_MBCCHG_ERR, MUIC_INT3, 1 << 3),
96 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_CHG_ENABLED, MUIC_INT3, 1 << 4),
97 DECLARE_IRQ(MAX77693_MUIC_IRQ_INT3_BAT_DET, MUIC_INT3, 1 << 5),
98};
99
100static void max77693_irq_lock(struct irq_data *data)
101{
102 struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
103
104 mutex_lock(&max77693->irqlock);
105}
106
107static void max77693_irq_sync_unlock(struct irq_data *data)
108{
109 struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
110 int i;
111
112 for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
113 u8 mask_reg = max77693_mask_reg[i];
114 struct regmap *map = max77693_get_regmap(max77693, i);
115
116 if (mask_reg == MAX77693_REG_INVALID ||
117 IS_ERR_OR_NULL(map))
118 continue;
119 max77693->irq_masks_cache[i] = max77693->irq_masks_cur[i];
120
121 max77693_write_reg(map, max77693_mask_reg[i],
122 max77693->irq_masks_cur[i]);
123 }
124
125 mutex_unlock(&max77693->irqlock);
126}
127
128static const inline struct max77693_irq_data *
129irq_to_max77693_irq(struct max77693_dev *max77693, int irq)
130{
131 return &max77693_irqs[irq];
132}
133
134static void max77693_irq_mask(struct irq_data *data)
135{
136 struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
137 const struct max77693_irq_data *irq_data =
138 irq_to_max77693_irq(max77693, data->irq);
139
140 if (irq_data->group >= MUIC_INT1 && irq_data->group <= MUIC_INT3)
141 max77693->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
142 else
143 max77693->irq_masks_cur[irq_data->group] |= irq_data->mask;
144}
145
146static void max77693_irq_unmask(struct irq_data *data)
147{
148 struct max77693_dev *max77693 = irq_get_chip_data(data->irq);
149 const struct max77693_irq_data *irq_data =
150 irq_to_max77693_irq(max77693, data->irq);
151
152 if (irq_data->group >= MUIC_INT1 && irq_data->group <= MUIC_INT3)
153 max77693->irq_masks_cur[irq_data->group] |= irq_data->mask;
154 else
155 max77693->irq_masks_cur[irq_data->group] &= ~irq_data->mask;
156}
157
158static struct irq_chip max77693_irq_chip = {
159 .name = "max77693",
160 .irq_bus_lock = max77693_irq_lock,
161 .irq_bus_sync_unlock = max77693_irq_sync_unlock,
162 .irq_mask = max77693_irq_mask,
163 .irq_unmask = max77693_irq_unmask,
164};
165
166#define MAX77693_IRQSRC_CHG (1 << 0)
167#define MAX77693_IRQSRC_TOP (1 << 1)
168#define MAX77693_IRQSRC_FLASH (1 << 2)
169#define MAX77693_IRQSRC_MUIC (1 << 3)
170static irqreturn_t max77693_irq_thread(int irq, void *data)
171{
172 struct max77693_dev *max77693 = data;
173 u8 irq_reg[MAX77693_IRQ_GROUP_NR] = {};
174 u8 irq_src;
175 int ret;
176 int i, cur_irq;
177
178 ret = max77693_read_reg(max77693->regmap, MAX77693_PMIC_REG_INTSRC,
179 &irq_src);
180 if (ret < 0) {
181 dev_err(max77693->dev, "Failed to read interrupt source: %d\n",
182 ret);
183 return IRQ_NONE;
184 }
185
186 if (irq_src & MAX77693_IRQSRC_CHG)
187 /* CHG_INT */
188 ret = max77693_read_reg(max77693->regmap, MAX77693_CHG_REG_CHG_INT,
189 &irq_reg[CHG_INT]);
190
191 if (irq_src & MAX77693_IRQSRC_TOP)
192 /* TOPSYS_INT */
193 ret = max77693_read_reg(max77693->regmap,
194 MAX77693_PMIC_REG_TOPSYS_INT, &irq_reg[TOPSYS_INT]);
195
196 if (irq_src & MAX77693_IRQSRC_FLASH)
197 /* LED_INT */
198 ret = max77693_read_reg(max77693->regmap,
199 MAX77693_LED_REG_FLASH_INT, &irq_reg[LED_INT]);
200
201 if (irq_src & MAX77693_IRQSRC_MUIC)
202 /* MUIC INT1 ~ INT3 */
203 max77693_bulk_read(max77693->regmap, MAX77693_MUIC_REG_INT1,
204 MAX77693_NUM_IRQ_MUIC_REGS, &irq_reg[MUIC_INT1]);
205
206 /* Apply masking */
207 for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
208 if (i >= MUIC_INT1 && i <= MUIC_INT3)
209 irq_reg[i] &= max77693->irq_masks_cur[i];
210 else
211 irq_reg[i] &= ~max77693->irq_masks_cur[i];
212 }
213
214 /* Report */
215 for (i = 0; i < MAX77693_IRQ_NR; i++) {
216 if (irq_reg[max77693_irqs[i].group] & max77693_irqs[i].mask) {
217 cur_irq = irq_find_mapping(max77693->irq_domain, i);
218 if (cur_irq)
219 handle_nested_irq(cur_irq);
220 }
221 }
222
223 return IRQ_HANDLED;
224}
225
226int max77693_irq_resume(struct max77693_dev *max77693)
227{
228 if (max77693->irq)
229 max77693_irq_thread(0, max77693);
230
231 return 0;
232}
233
234static int max77693_irq_domain_map(struct irq_domain *d, unsigned int irq,
235 irq_hw_number_t hw)
236{
237 struct max77693_dev *max77693 = d->host_data;
238
239 irq_set_chip_data(irq, max77693);
240 irq_set_chip_and_handler(irq, &max77693_irq_chip, handle_edge_irq);
241 irq_set_nested_thread(irq, 1);
242#ifdef CONFIG_ARM
243 set_irq_flags(irq, IRQF_VALID);
244#else
245 irq_set_noprobe(irq);
246#endif
247 return 0;
248}
249
250static struct irq_domain_ops max77693_irq_domain_ops = {
251 .map = max77693_irq_domain_map,
252};
253
254int max77693_irq_init(struct max77693_dev *max77693)
255{
256 struct irq_domain *domain;
257 int i;
258 int ret;
259
260 mutex_init(&max77693->irqlock);
261
262 /* Mask individual interrupt sources */
263 for (i = 0; i < MAX77693_IRQ_GROUP_NR; i++) {
264 struct regmap *map;
265 /* MUIC IRQ 0:MASK 1:NOT MASK */
266 /* Other IRQ 1:MASK 0:NOT MASK */
267 if (i >= MUIC_INT1 && i <= MUIC_INT3) {
268 max77693->irq_masks_cur[i] = 0x00;
269 max77693->irq_masks_cache[i] = 0x00;
270 } else {
271 max77693->irq_masks_cur[i] = 0xff;
272 max77693->irq_masks_cache[i] = 0xff;
273 }
274 map = max77693_get_regmap(max77693, i);
275
276 if (IS_ERR_OR_NULL(map))
277 continue;
278 if (max77693_mask_reg[i] == MAX77693_REG_INVALID)
279 continue;
280 if (i >= MUIC_INT1 && i <= MUIC_INT3)
281 max77693_write_reg(map, max77693_mask_reg[i], 0x00);
282 else
283 max77693_write_reg(map, max77693_mask_reg[i], 0xff);
284 }
285
286 domain = irq_domain_add_linear(NULL, MAX77693_IRQ_NR,
287 &max77693_irq_domain_ops, max77693);
288 if (!domain) {
289 dev_err(max77693->dev, "could not create irq domain\n");
290 return -ENODEV;
291 }
292 max77693->irq_domain = domain;
293
294 ret = request_threaded_irq(max77693->irq, NULL, max77693_irq_thread,
295 IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
296 "max77693-irq", max77693);
297
298 if (ret)
299 dev_err(max77693->dev, "Failed to request IRQ %d: %d\n",
300 max77693->irq, ret);
301
302 return 0;
303}
304
305void max77693_irq_exit(struct max77693_dev *max77693)
306{
307 if (max77693->irq)
308 free_irq(max77693->irq, max77693);
309}
diff --git a/drivers/mfd/max77693.c b/drivers/mfd/max77693.c
new file mode 100644
index 000000000000..e9e4278722f3
--- /dev/null
+++ b/drivers/mfd/max77693.c
@@ -0,0 +1,249 @@
1/*
2 * max77693.c - mfd core driver for the MAX 77693
3 *
4 * Copyright (C) 2012 Samsung Electronics
5 * SangYoung Son <hello.son@smasung.com>
6 *
7 * This program is not provided / owned by Maxim Integrated Products.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 * This driver is based on max8997.c
24 */
25
26#include <linux/module.h>
27#include <linux/slab.h>
28#include <linux/i2c.h>
29#include <linux/err.h>
30#include <linux/interrupt.h>
31#include <linux/pm_runtime.h>
32#include <linux/mutex.h>
33#include <linux/mfd/core.h>
34#include <linux/mfd/max77693.h>
35#include <linux/mfd/max77693-private.h>
36#include <linux/regulator/machine.h>
37#include <linux/regmap.h>
38
39#define I2C_ADDR_PMIC (0xCC >> 1) /* Charger, Flash LED */
40#define I2C_ADDR_MUIC (0x4A >> 1)
41#define I2C_ADDR_HAPTIC (0x90 >> 1)
42
43static struct mfd_cell max77693_devs[] = {
44 { .name = "max77693-pmic", },
45 { .name = "max77693-charger", },
46 { .name = "max77693-flash", },
47 { .name = "max77693-muic", },
48 { .name = "max77693-haptic", },
49};
50
51int max77693_read_reg(struct regmap *map, u8 reg, u8 *dest)
52{
53 unsigned int val;
54 int ret;
55
56 ret = regmap_read(map, reg, &val);
57 *dest = val;
58
59 return ret;
60}
61EXPORT_SYMBOL_GPL(max77693_read_reg);
62
63int max77693_bulk_read(struct regmap *map, u8 reg, int count, u8 *buf)
64{
65 int ret;
66
67 ret = regmap_bulk_read(map, reg, buf, count);
68
69 return ret;
70}
71EXPORT_SYMBOL_GPL(max77693_bulk_read);
72
73int max77693_write_reg(struct regmap *map, u8 reg, u8 value)
74{
75 int ret;
76
77 ret = regmap_write(map, reg, value);
78
79 return ret;
80}
81EXPORT_SYMBOL_GPL(max77693_write_reg);
82
83int max77693_bulk_write(struct regmap *map, u8 reg, int count, u8 *buf)
84{
85 int ret;
86
87 ret = regmap_bulk_write(map, reg, buf, count);
88
89 return ret;
90}
91EXPORT_SYMBOL_GPL(max77693_bulk_write);
92
93int max77693_update_reg(struct regmap *map, u8 reg, u8 val, u8 mask)
94{
95 int ret;
96
97 ret = regmap_update_bits(map, reg, mask, val);
98
99 return ret;
100}
101EXPORT_SYMBOL_GPL(max77693_update_reg);
102
103static const struct regmap_config max77693_regmap_config = {
104 .reg_bits = 8,
105 .val_bits = 8,
106 .max_register = MAX77693_PMIC_REG_END,
107};
108
109static int max77693_i2c_probe(struct i2c_client *i2c,
110 const struct i2c_device_id *id)
111{
112 struct max77693_dev *max77693;
113 struct max77693_platform_data *pdata = i2c->dev.platform_data;
114 u8 reg_data;
115 int ret = 0;
116
117 max77693 = devm_kzalloc(&i2c->dev,
118 sizeof(struct max77693_dev), GFP_KERNEL);
119 if (max77693 == NULL)
120 return -ENOMEM;
121
122 max77693->regmap = devm_regmap_init_i2c(i2c, &max77693_regmap_config);
123 if (IS_ERR(max77693->regmap)) {
124 ret = PTR_ERR(max77693->regmap);
125 dev_err(max77693->dev,"failed to allocate register map: %d\n",
126 ret);
127 goto err_regmap;
128 }
129
130 i2c_set_clientdata(i2c, max77693);
131 max77693->dev = &i2c->dev;
132 max77693->i2c = i2c;
133 max77693->irq = i2c->irq;
134 max77693->type = id->driver_data;
135
136 if (!pdata)
137 goto err_regmap;
138
139 max77693->wakeup = pdata->wakeup;
140
141 mutex_init(&max77693->iolock);
142
143 if (max77693_read_reg(max77693->regmap,
144 MAX77693_PMIC_REG_PMIC_ID2, &reg_data) < 0) {
145 dev_err(max77693->dev, "device not found on this channel\n");
146 ret = -ENODEV;
147 goto err_regmap;
148 } else
149 dev_info(max77693->dev, "device ID: 0x%x\n", reg_data);
150
151 max77693->muic = i2c_new_dummy(i2c->adapter, I2C_ADDR_MUIC);
152 i2c_set_clientdata(max77693->muic, max77693);
153
154 max77693->haptic = i2c_new_dummy(i2c->adapter, I2C_ADDR_HAPTIC);
155 i2c_set_clientdata(max77693->haptic, max77693);
156
157 ret = max77693_irq_init(max77693);
158 if (ret < 0)
159 goto err_mfd;
160
161 pm_runtime_set_active(max77693->dev);
162
163 ret = mfd_add_devices(max77693->dev, -1, max77693_devs,
164 ARRAY_SIZE(max77693_devs), NULL, 0);
165 if (ret < 0)
166 goto err_mfd;
167
168 device_init_wakeup(max77693->dev, pdata->wakeup);
169
170 return ret;
171
172err_mfd:
173 i2c_unregister_device(max77693->muic);
174 i2c_unregister_device(max77693->haptic);
175err_regmap:
176 kfree(max77693);
177
178 return ret;
179}
180
181static int max77693_i2c_remove(struct i2c_client *i2c)
182{
183 struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
184
185 mfd_remove_devices(max77693->dev);
186 i2c_unregister_device(max77693->muic);
187 i2c_unregister_device(max77693->haptic);
188
189 return 0;
190}
191
192static const struct i2c_device_id max77693_i2c_id[] = {
193 { "max77693", TYPE_MAX77693 },
194 { }
195};
196MODULE_DEVICE_TABLE(i2c, max77693_i2c_id);
197
198static int max77693_suspend(struct device *dev)
199{
200 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
201 struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
202
203 if (device_may_wakeup(dev))
204 irq_set_irq_wake(max77693->irq, 1);
205 return 0;
206}
207
208static int max77693_resume(struct device *dev)
209{
210 struct i2c_client *i2c = container_of(dev, struct i2c_client, dev);
211 struct max77693_dev *max77693 = i2c_get_clientdata(i2c);
212
213 if (device_may_wakeup(dev))
214 irq_set_irq_wake(max77693->irq, 0);
215 return max77693_irq_resume(max77693);
216}
217
/* Suspend/resume ops wired into max77693_i2c_driver below.
 * NOTE(review): not declared 'static' — presumably intentional only if an
 * unseen header declares it extern; otherwise consider making it static
 * (e.g. via SIMPLE_DEV_PM_OPS) to avoid polluting the global namespace —
 * TODO confirm against max77693-private.h. */
218const struct dev_pm_ops max77693_pm = {
219	.suspend = max77693_suspend,
220	.resume = max77693_resume,
221};
222
223static struct i2c_driver max77693_i2c_driver = {
224 .driver = {
225 .name = "max77693",
226 .owner = THIS_MODULE,
227 .pm = &max77693_pm,
228 },
229 .probe = max77693_i2c_probe,
230 .remove = max77693_i2c_remove,
231 .id_table = max77693_i2c_id,
232};
233
234static int __init max77693_i2c_init(void)
235{
236 return i2c_add_driver(&max77693_i2c_driver);
237}
238/* init early so consumer devices can complete system boot */
239subsys_initcall(max77693_i2c_init);
240
241static void __exit max77693_i2c_exit(void)
242{
243 i2c_del_driver(&max77693_i2c_driver);
244}
245module_exit(max77693_i2c_exit);
246
247MODULE_DESCRIPTION("MAXIM 77693 multi-function core driver");
248MODULE_AUTHOR("SangYoung, Son <hello.son@samsung.com>");
249MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/mc13xxx-core.c b/drivers/mfd/mc13xxx-core.c
index 738722cdecaa..f0ea3b8b3e4a 100644
--- a/drivers/mfd/mc13xxx-core.c
+++ b/drivers/mfd/mc13xxx-core.c
@@ -15,24 +15,13 @@
15#include <linux/platform_device.h> 15#include <linux/platform_device.h>
16#include <linux/mutex.h> 16#include <linux/mutex.h>
17#include <linux/interrupt.h> 17#include <linux/interrupt.h>
18#include <linux/spi/spi.h>
19#include <linux/mfd/core.h> 18#include <linux/mfd/core.h>
20#include <linux/mfd/mc13xxx.h> 19#include <linux/mfd/mc13xxx.h>
21#include <linux/of.h> 20#include <linux/of.h>
22#include <linux/of_device.h> 21#include <linux/of_device.h>
23#include <linux/of_gpio.h> 22#include <linux/of_gpio.h>
24 23
25struct mc13xxx { 24#include "mc13xxx.h"
26 struct spi_device *spidev;
27 struct mutex lock;
28 int irq;
29 int flags;
30
31 irq_handler_t irqhandler[MC13XXX_NUM_IRQ];
32 void *irqdata[MC13XXX_NUM_IRQ];
33
34 int adcflags;
35};
36 25
37#define MC13XXX_IRQSTAT0 0 26#define MC13XXX_IRQSTAT0 0
38#define MC13XXX_IRQSTAT0_ADCDONEI (1 << 0) 27#define MC13XXX_IRQSTAT0_ADCDONEI (1 << 0)
@@ -139,34 +128,29 @@ struct mc13xxx {
139 128
140#define MC13XXX_ADC2 45 129#define MC13XXX_ADC2 45
141 130
142#define MC13XXX_NUMREGS 0x3f
143
144void mc13xxx_lock(struct mc13xxx *mc13xxx) 131void mc13xxx_lock(struct mc13xxx *mc13xxx)
145{ 132{
146 if (!mutex_trylock(&mc13xxx->lock)) { 133 if (!mutex_trylock(&mc13xxx->lock)) {
147 dev_dbg(&mc13xxx->spidev->dev, "wait for %s from %pf\n", 134 dev_dbg(mc13xxx->dev, "wait for %s from %pf\n",
148 __func__, __builtin_return_address(0)); 135 __func__, __builtin_return_address(0));
149 136
150 mutex_lock(&mc13xxx->lock); 137 mutex_lock(&mc13xxx->lock);
151 } 138 }
152 dev_dbg(&mc13xxx->spidev->dev, "%s from %pf\n", 139 dev_dbg(mc13xxx->dev, "%s from %pf\n",
153 __func__, __builtin_return_address(0)); 140 __func__, __builtin_return_address(0));
154} 141}
155EXPORT_SYMBOL(mc13xxx_lock); 142EXPORT_SYMBOL(mc13xxx_lock);
156 143
157void mc13xxx_unlock(struct mc13xxx *mc13xxx) 144void mc13xxx_unlock(struct mc13xxx *mc13xxx)
158{ 145{
159 dev_dbg(&mc13xxx->spidev->dev, "%s from %pf\n", 146 dev_dbg(mc13xxx->dev, "%s from %pf\n",
160 __func__, __builtin_return_address(0)); 147 __func__, __builtin_return_address(0));
161 mutex_unlock(&mc13xxx->lock); 148 mutex_unlock(&mc13xxx->lock);
162} 149}
163EXPORT_SYMBOL(mc13xxx_unlock); 150EXPORT_SYMBOL(mc13xxx_unlock);
164 151
165#define MC13XXX_REGOFFSET_SHIFT 25
166int mc13xxx_reg_read(struct mc13xxx *mc13xxx, unsigned int offset, u32 *val) 152int mc13xxx_reg_read(struct mc13xxx *mc13xxx, unsigned int offset, u32 *val)
167{ 153{
168 struct spi_transfer t;
169 struct spi_message m;
170 int ret; 154 int ret;
171 155
172 BUG_ON(!mutex_is_locked(&mc13xxx->lock)); 156 BUG_ON(!mutex_is_locked(&mc13xxx->lock));
@@ -174,84 +158,35 @@ int mc13xxx_reg_read(struct mc13xxx *mc13xxx, unsigned int offset, u32 *val)
174 if (offset > MC13XXX_NUMREGS) 158 if (offset > MC13XXX_NUMREGS)
175 return -EINVAL; 159 return -EINVAL;
176 160
177 *val = offset << MC13XXX_REGOFFSET_SHIFT; 161 ret = regmap_read(mc13xxx->regmap, offset, val);
178 162 dev_vdbg(mc13xxx->dev, "[0x%02x] -> 0x%06x\n", offset, *val);
179 memset(&t, 0, sizeof(t));
180
181 t.tx_buf = val;
182 t.rx_buf = val;
183 t.len = sizeof(u32);
184
185 spi_message_init(&m);
186 spi_message_add_tail(&t, &m);
187
188 ret = spi_sync(mc13xxx->spidev, &m);
189
190 /* error in message.status implies error return from spi_sync */
191 BUG_ON(!ret && m.status);
192 163
193 if (ret) 164 return ret;
194 return ret;
195
196 *val &= 0xffffff;
197
198 dev_vdbg(&mc13xxx->spidev->dev, "[0x%02x] -> 0x%06x\n", offset, *val);
199
200 return 0;
201} 165}
202EXPORT_SYMBOL(mc13xxx_reg_read); 166EXPORT_SYMBOL(mc13xxx_reg_read);
203 167
204int mc13xxx_reg_write(struct mc13xxx *mc13xxx, unsigned int offset, u32 val) 168int mc13xxx_reg_write(struct mc13xxx *mc13xxx, unsigned int offset, u32 val)
205{ 169{
206 u32 buf;
207 struct spi_transfer t;
208 struct spi_message m;
209 int ret;
210
211 BUG_ON(!mutex_is_locked(&mc13xxx->lock)); 170 BUG_ON(!mutex_is_locked(&mc13xxx->lock));
212 171
213 dev_vdbg(&mc13xxx->spidev->dev, "[0x%02x] <- 0x%06x\n", offset, val); 172 dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x\n", offset, val);
214 173
215 if (offset > MC13XXX_NUMREGS || val > 0xffffff) 174 if (offset > MC13XXX_NUMREGS || val > 0xffffff)
216 return -EINVAL; 175 return -EINVAL;
217 176
218 buf = 1 << 31 | offset << MC13XXX_REGOFFSET_SHIFT | val; 177 return regmap_write(mc13xxx->regmap, offset, val);
219
220 memset(&t, 0, sizeof(t));
221
222 t.tx_buf = &buf;
223 t.rx_buf = &buf;
224 t.len = sizeof(u32);
225
226 spi_message_init(&m);
227 spi_message_add_tail(&t, &m);
228
229 ret = spi_sync(mc13xxx->spidev, &m);
230
231 BUG_ON(!ret && m.status);
232
233 if (ret)
234 return ret;
235
236 return 0;
237} 178}
238EXPORT_SYMBOL(mc13xxx_reg_write); 179EXPORT_SYMBOL(mc13xxx_reg_write);
239 180
240int mc13xxx_reg_rmw(struct mc13xxx *mc13xxx, unsigned int offset, 181int mc13xxx_reg_rmw(struct mc13xxx *mc13xxx, unsigned int offset,
241 u32 mask, u32 val) 182 u32 mask, u32 val)
242{ 183{
243 int ret; 184 BUG_ON(!mutex_is_locked(&mc13xxx->lock));
244 u32 valread;
245
246 BUG_ON(val & ~mask); 185 BUG_ON(val & ~mask);
186 dev_vdbg(mc13xxx->dev, "[0x%02x] <- 0x%06x (mask: 0x%06x)\n",
187 offset, val, mask);
247 188
248 ret = mc13xxx_reg_read(mc13xxx, offset, &valread); 189 return regmap_update_bits(mc13xxx->regmap, offset, mask, val);
249 if (ret)
250 return ret;
251
252 valread = (valread & ~mask) | val;
253
254 return mc13xxx_reg_write(mc13xxx, offset, valread);
255} 190}
256EXPORT_SYMBOL(mc13xxx_reg_rmw); 191EXPORT_SYMBOL(mc13xxx_reg_rmw);
257 192
@@ -439,7 +374,7 @@ static int mc13xxx_irq_handle(struct mc13xxx *mc13xxx,
439 if (handled == IRQ_HANDLED) 374 if (handled == IRQ_HANDLED)
440 num_handled++; 375 num_handled++;
441 } else { 376 } else {
442 dev_err(&mc13xxx->spidev->dev, 377 dev_err(mc13xxx->dev,
443 "BUG: irq %u but no handler\n", 378 "BUG: irq %u but no handler\n",
444 baseirq + irq); 379 baseirq + irq);
445 380
@@ -475,25 +410,23 @@ static irqreturn_t mc13xxx_irq_thread(int irq, void *data)
475 return IRQ_RETVAL(handled); 410 return IRQ_RETVAL(handled);
476} 411}
477 412
478enum mc13xxx_id {
479 MC13XXX_ID_MC13783,
480 MC13XXX_ID_MC13892,
481 MC13XXX_ID_INVALID,
482};
483
484static const char *mc13xxx_chipname[] = { 413static const char *mc13xxx_chipname[] = {
485 [MC13XXX_ID_MC13783] = "mc13783", 414 [MC13XXX_ID_MC13783] = "mc13783",
486 [MC13XXX_ID_MC13892] = "mc13892", 415 [MC13XXX_ID_MC13892] = "mc13892",
487}; 416};
488 417
489#define maskval(reg, mask) (((reg) & (mask)) >> __ffs(mask)) 418#define maskval(reg, mask) (((reg) & (mask)) >> __ffs(mask))
490static int mc13xxx_identify(struct mc13xxx *mc13xxx, enum mc13xxx_id *id) 419static int mc13xxx_identify(struct mc13xxx *mc13xxx)
491{ 420{
492 u32 icid; 421 u32 icid;
493 u32 revision; 422 u32 revision;
494 const char *name;
495 int ret; 423 int ret;
496 424
425 /*
426 * Get the generation ID from register 46, as apparently some older
427 * IC revisions only have this info at this location. Newer ICs seem to
428 * have both.
429 */
497 ret = mc13xxx_reg_read(mc13xxx, 46, &icid); 430 ret = mc13xxx_reg_read(mc13xxx, 46, &icid);
498 if (ret) 431 if (ret)
499 return ret; 432 return ret;
@@ -502,26 +435,23 @@ static int mc13xxx_identify(struct mc13xxx *mc13xxx, enum mc13xxx_id *id)
502 435
503 switch (icid) { 436 switch (icid) {
504 case 2: 437 case 2:
505 *id = MC13XXX_ID_MC13783; 438 mc13xxx->ictype = MC13XXX_ID_MC13783;
506 name = "mc13783";
507 break; 439 break;
508 case 7: 440 case 7:
509 *id = MC13XXX_ID_MC13892; 441 mc13xxx->ictype = MC13XXX_ID_MC13892;
510 name = "mc13892";
511 break; 442 break;
512 default: 443 default:
513 *id = MC13XXX_ID_INVALID; 444 mc13xxx->ictype = MC13XXX_ID_INVALID;
514 break; 445 break;
515 } 446 }
516 447
517 if (*id == MC13XXX_ID_MC13783 || *id == MC13XXX_ID_MC13892) { 448 if (mc13xxx->ictype == MC13XXX_ID_MC13783 ||
449 mc13xxx->ictype == MC13XXX_ID_MC13892) {
518 ret = mc13xxx_reg_read(mc13xxx, MC13XXX_REVISION, &revision); 450 ret = mc13xxx_reg_read(mc13xxx, MC13XXX_REVISION, &revision);
519 if (ret)
520 return ret;
521 451
522 dev_info(&mc13xxx->spidev->dev, "%s: rev: %d.%d, " 452 dev_info(mc13xxx->dev, "%s: rev: %d.%d, "
523 "fin: %d, fab: %d, icid: %d/%d\n", 453 "fin: %d, fab: %d, icid: %d/%d\n",
524 mc13xxx_chipname[*id], 454 mc13xxx_chipname[mc13xxx->ictype],
525 maskval(revision, MC13XXX_REVISION_REVFULL), 455 maskval(revision, MC13XXX_REVISION_REVFULL),
526 maskval(revision, MC13XXX_REVISION_REVMETAL), 456 maskval(revision, MC13XXX_REVISION_REVMETAL),
527 maskval(revision, MC13XXX_REVISION_FIN), 457 maskval(revision, MC13XXX_REVISION_FIN),
@@ -530,26 +460,12 @@ static int mc13xxx_identify(struct mc13xxx *mc13xxx, enum mc13xxx_id *id)
530 maskval(revision, MC13XXX_REVISION_ICIDCODE)); 460 maskval(revision, MC13XXX_REVISION_ICIDCODE));
531 } 461 }
532 462
533 if (*id != MC13XXX_ID_INVALID) { 463 return (mc13xxx->ictype == MC13XXX_ID_INVALID) ? -ENODEV : 0;
534 const struct spi_device_id *devid =
535 spi_get_device_id(mc13xxx->spidev);
536 if (!devid || devid->driver_data != *id)
537 dev_warn(&mc13xxx->spidev->dev, "device id doesn't "
538 "match auto detection!\n");
539 }
540
541 return 0;
542} 464}
543 465
544static const char *mc13xxx_get_chipname(struct mc13xxx *mc13xxx) 466static const char *mc13xxx_get_chipname(struct mc13xxx *mc13xxx)
545{ 467{
546 const struct spi_device_id *devid = 468 return mc13xxx_chipname[mc13xxx->ictype];
547 spi_get_device_id(mc13xxx->spidev);
548
549 if (!devid)
550 return NULL;
551
552 return mc13xxx_chipname[devid->driver_data];
553} 469}
554 470
555int mc13xxx_get_flags(struct mc13xxx *mc13xxx) 471int mc13xxx_get_flags(struct mc13xxx *mc13xxx)
@@ -592,7 +508,7 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
592 }; 508 };
593 init_completion(&adcdone_data.done); 509 init_completion(&adcdone_data.done);
594 510
595 dev_dbg(&mc13xxx->spidev->dev, "%s\n", __func__); 511 dev_dbg(mc13xxx->dev, "%s\n", __func__);
596 512
597 mc13xxx_lock(mc13xxx); 513 mc13xxx_lock(mc13xxx);
598 514
@@ -637,7 +553,8 @@ int mc13xxx_adc_do_conversion(struct mc13xxx *mc13xxx, unsigned int mode,
637 adc1 |= ato << MC13783_ADC1_ATO_SHIFT; 553 adc1 |= ato << MC13783_ADC1_ATO_SHIFT;
638 if (atox) 554 if (atox)
639 adc1 |= MC13783_ADC1_ATOX; 555 adc1 |= MC13783_ADC1_ATOX;
640 dev_dbg(&mc13xxx->spidev->dev, "%s: request irq\n", __func__); 556
557 dev_dbg(mc13xxx->dev, "%s: request irq\n", __func__);
641 mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE, 558 mc13xxx_irq_request(mc13xxx, MC13XXX_IRQ_ADCDONE,
642 mc13xxx_handler_adcdone, __func__, &adcdone_data); 559 mc13xxx_handler_adcdone, __func__, &adcdone_data);
643 mc13xxx_irq_ack(mc13xxx, MC13XXX_IRQ_ADCDONE); 560 mc13xxx_irq_ack(mc13xxx, MC13XXX_IRQ_ADCDONE);
@@ -695,7 +612,7 @@ static int mc13xxx_add_subdevice_pdata(struct mc13xxx *mc13xxx,
695 if (!cell.name) 612 if (!cell.name)
696 return -ENOMEM; 613 return -ENOMEM;
697 614
698 return mfd_add_devices(&mc13xxx->spidev->dev, -1, &cell, 1, NULL, 0); 615 return mfd_add_devices(mc13xxx->dev, -1, &cell, 1, NULL, 0);
699} 616}
700 617
701static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format) 618static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format)
@@ -706,7 +623,7 @@ static int mc13xxx_add_subdevice(struct mc13xxx *mc13xxx, const char *format)
706#ifdef CONFIG_OF 623#ifdef CONFIG_OF
707static int mc13xxx_probe_flags_dt(struct mc13xxx *mc13xxx) 624static int mc13xxx_probe_flags_dt(struct mc13xxx *mc13xxx)
708{ 625{
709 struct device_node *np = mc13xxx->spidev->dev.of_node; 626 struct device_node *np = mc13xxx->dev->of_node;
710 627
711 if (!np) 628 if (!np)
712 return -ENODEV; 629 return -ENODEV;
@@ -732,55 +649,15 @@ static inline int mc13xxx_probe_flags_dt(struct mc13xxx *mc13xxx)
732} 649}
733#endif 650#endif
734 651
735static const struct spi_device_id mc13xxx_device_id[] = { 652int mc13xxx_common_init(struct mc13xxx *mc13xxx,
736 { 653 struct mc13xxx_platform_data *pdata, int irq)
737 .name = "mc13783",
738 .driver_data = MC13XXX_ID_MC13783,
739 }, {
740 .name = "mc13892",
741 .driver_data = MC13XXX_ID_MC13892,
742 }, {
743 /* sentinel */
744 }
745};
746MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
747
748static const struct of_device_id mc13xxx_dt_ids[] = {
749 { .compatible = "fsl,mc13783", .data = (void *) MC13XXX_ID_MC13783, },
750 { .compatible = "fsl,mc13892", .data = (void *) MC13XXX_ID_MC13892, },
751 { /* sentinel */ }
752};
753MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
754
755static int mc13xxx_probe(struct spi_device *spi)
756{ 654{
757 const struct of_device_id *of_id;
758 struct spi_driver *sdrv = to_spi_driver(spi->dev.driver);
759 struct mc13xxx *mc13xxx;
760 struct mc13xxx_platform_data *pdata = dev_get_platdata(&spi->dev);
761 enum mc13xxx_id id;
762 int ret; 655 int ret;
763 656
764 of_id = of_match_device(mc13xxx_dt_ids, &spi->dev);
765 if (of_id)
766 sdrv->id_table = &mc13xxx_device_id[(enum mc13xxx_id) of_id->data];
767
768 mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
769 if (!mc13xxx)
770 return -ENOMEM;
771
772 dev_set_drvdata(&spi->dev, mc13xxx);
773 spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
774 spi->bits_per_word = 32;
775 spi_setup(spi);
776
777 mc13xxx->spidev = spi;
778
779 mutex_init(&mc13xxx->lock);
780 mc13xxx_lock(mc13xxx); 657 mc13xxx_lock(mc13xxx);
781 658
782 ret = mc13xxx_identify(mc13xxx, &id); 659 ret = mc13xxx_identify(mc13xxx);
783 if (ret || id == MC13XXX_ID_INVALID) 660 if (ret)
784 goto err_revision; 661 goto err_revision;
785 662
786 /* mask all irqs */ 663 /* mask all irqs */
@@ -792,18 +669,19 @@ static int mc13xxx_probe(struct spi_device *spi)
792 if (ret) 669 if (ret)
793 goto err_mask; 670 goto err_mask;
794 671
795 ret = request_threaded_irq(spi->irq, NULL, mc13xxx_irq_thread, 672 ret = request_threaded_irq(irq, NULL, mc13xxx_irq_thread,
796 IRQF_ONESHOT | IRQF_TRIGGER_HIGH, "mc13xxx", mc13xxx); 673 IRQF_ONESHOT | IRQF_TRIGGER_HIGH, "mc13xxx", mc13xxx);
797 674
798 if (ret) { 675 if (ret) {
799err_mask: 676err_mask:
800err_revision: 677err_revision:
801 mc13xxx_unlock(mc13xxx); 678 mc13xxx_unlock(mc13xxx);
802 dev_set_drvdata(&spi->dev, NULL);
803 kfree(mc13xxx); 679 kfree(mc13xxx);
804 return ret; 680 return ret;
805 } 681 }
806 682
683 mc13xxx->irq = irq;
684
807 mc13xxx_unlock(mc13xxx); 685 mc13xxx_unlock(mc13xxx);
808 686
809 if (mc13xxx_probe_flags_dt(mc13xxx) < 0 && pdata) 687 if (mc13xxx_probe_flags_dt(mc13xxx) < 0 && pdata)
@@ -838,42 +716,19 @@ err_revision:
838 716
839 return 0; 717 return 0;
840} 718}
719EXPORT_SYMBOL_GPL(mc13xxx_common_init);
841 720
842static int __devexit mc13xxx_remove(struct spi_device *spi) 721void mc13xxx_common_cleanup(struct mc13xxx *mc13xxx)
843{ 722{
844 struct mc13xxx *mc13xxx = dev_get_drvdata(&spi->dev); 723 free_irq(mc13xxx->irq, mc13xxx);
845 724
846 free_irq(mc13xxx->spidev->irq, mc13xxx); 725 mfd_remove_devices(mc13xxx->dev);
847 726
848 mfd_remove_devices(&spi->dev); 727 regmap_exit(mc13xxx->regmap);
849 728
850 kfree(mc13xxx); 729 kfree(mc13xxx);
851
852 return 0;
853}
854
855static struct spi_driver mc13xxx_driver = {
856 .id_table = mc13xxx_device_id,
857 .driver = {
858 .name = "mc13xxx",
859 .owner = THIS_MODULE,
860 .of_match_table = mc13xxx_dt_ids,
861 },
862 .probe = mc13xxx_probe,
863 .remove = __devexit_p(mc13xxx_remove),
864};
865
866static int __init mc13xxx_init(void)
867{
868 return spi_register_driver(&mc13xxx_driver);
869}
870subsys_initcall(mc13xxx_init);
871
872static void __exit mc13xxx_exit(void)
873{
874 spi_unregister_driver(&mc13xxx_driver);
875} 730}
876module_exit(mc13xxx_exit); 731EXPORT_SYMBOL_GPL(mc13xxx_common_cleanup);
877 732
878MODULE_DESCRIPTION("Core driver for Freescale MC13XXX PMIC"); 733MODULE_DESCRIPTION("Core driver for Freescale MC13XXX PMIC");
879MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>"); 734MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
diff --git a/drivers/mfd/mc13xxx-i2c.c b/drivers/mfd/mc13xxx-i2c.c
new file mode 100644
index 000000000000..d22501dad6a6
--- /dev/null
+++ b/drivers/mfd/mc13xxx-i2c.c
@@ -0,0 +1,128 @@
1/*
2 * Copyright 2009-2010 Creative Product Design
3 * Marc Reilly marc@cpdesign.com.au
4 *
5 * This program is free software; you can redistribute it and/or modify it under
6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation.
8 */
9
10#include <linux/slab.h>
11#include <linux/module.h>
12#include <linux/platform_device.h>
13#include <linux/mutex.h>
14#include <linux/mfd/core.h>
15#include <linux/mfd/mc13xxx.h>
16#include <linux/of.h>
17#include <linux/of_device.h>
18#include <linux/of_gpio.h>
19#include <linux/i2c.h>
20#include <linux/err.h>
21
22#include "mc13xxx.h"
23
24static const struct i2c_device_id mc13xxx_i2c_device_id[] = {
25 {
26 .name = "mc13892",
27 .driver_data = MC13XXX_ID_MC13892,
28 }, {
29 /* sentinel */
30 }
31};
32MODULE_DEVICE_TABLE(i2c, mc13xxx_i2c_device_id);
33
34static const struct of_device_id mc13xxx_dt_ids[] = {
35 {
36 .compatible = "fsl,mc13892",
37 .data = (void *) &mc13xxx_i2c_device_id[0],
38 }, {
39 /* sentinel */
40 }
41};
42MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
43
44static struct regmap_config mc13xxx_regmap_i2c_config = {
45 .reg_bits = 8,
46 .val_bits = 24,
47
48 .max_register = MC13XXX_NUMREGS,
49
50 .cache_type = REGCACHE_NONE,
51};
52
53static int mc13xxx_i2c_probe(struct i2c_client *client,
54 const struct i2c_device_id *id)
55{
56 const struct of_device_id *of_id;
57 struct i2c_driver *idrv = to_i2c_driver(client->dev.driver);
58 struct mc13xxx *mc13xxx;
59 struct mc13xxx_platform_data *pdata = dev_get_platdata(&client->dev);
60 int ret;
61
62 of_id = of_match_device(mc13xxx_dt_ids, &client->dev);
63 if (of_id)
64 idrv->id_table = (const struct i2c_device_id*) of_id->data;
65
66 mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
67 if (!mc13xxx)
68 return -ENOMEM;
69
70 dev_set_drvdata(&client->dev, mc13xxx);
71
72 mc13xxx->dev = &client->dev;
73 mutex_init(&mc13xxx->lock);
74
75 mc13xxx->regmap = regmap_init_i2c(client, &mc13xxx_regmap_i2c_config);
76 if (IS_ERR(mc13xxx->regmap)) {
77 ret = PTR_ERR(mc13xxx->regmap);
78 dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
79 ret);
80 dev_set_drvdata(&client->dev, NULL);
81 kfree(mc13xxx);
82 return ret;
83 }
84
85 ret = mc13xxx_common_init(mc13xxx, pdata, client->irq);
86
87 if (ret == 0 && (id->driver_data != mc13xxx->ictype))
88 dev_warn(mc13xxx->dev,
89 "device id doesn't match auto detection!\n");
90
91 return ret;
92}
93
94static int __devexit mc13xxx_i2c_remove(struct i2c_client *client)
95{
96 struct mc13xxx *mc13xxx = dev_get_drvdata(&client->dev);
97
98 mc13xxx_common_cleanup(mc13xxx);
99
100 return 0;
101}
102
103static struct i2c_driver mc13xxx_i2c_driver = {
104 .id_table = mc13xxx_i2c_device_id,
105 .driver = {
106 .owner = THIS_MODULE,
107 .name = "mc13xxx",
108 .of_match_table = mc13xxx_dt_ids,
109 },
110 .probe = mc13xxx_i2c_probe,
111 .remove = __devexit_p(mc13xxx_i2c_remove),
112};
113
114static int __init mc13xxx_i2c_init(void)
115{
116 return i2c_add_driver(&mc13xxx_i2c_driver);
117}
118subsys_initcall(mc13xxx_i2c_init);
119
120static void __exit mc13xxx_i2c_exit(void)
121{
122 i2c_del_driver(&mc13xxx_i2c_driver);
123}
124module_exit(mc13xxx_i2c_exit);
125
126MODULE_DESCRIPTION("i2c driver for Freescale MC13XXX PMIC");
127MODULE_AUTHOR("Marc Reilly <marc@cpdesign.com.au");
128MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/mc13xxx-spi.c b/drivers/mfd/mc13xxx-spi.c
new file mode 100644
index 000000000000..3fcdab3eb8eb
--- /dev/null
+++ b/drivers/mfd/mc13xxx-spi.c
@@ -0,0 +1,140 @@
1/*
2 * Copyright 2009-2010 Pengutronix
3 * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>
4 *
5 * loosely based on an earlier driver that has
6 * Copyright 2009 Pengutronix, Sascha Hauer <s.hauer@pengutronix.de>
7 *
8 * This program is free software; you can redistribute it and/or modify it under
9 * the terms of the GNU General Public License version 2 as published by the
10 * Free Software Foundation.
11 */
12
13#include <linux/slab.h>
14#include <linux/module.h>
15#include <linux/platform_device.h>
16#include <linux/mutex.h>
17#include <linux/interrupt.h>
18#include <linux/mfd/core.h>
19#include <linux/mfd/mc13xxx.h>
20#include <linux/of.h>
21#include <linux/of_device.h>
22#include <linux/of_gpio.h>
23#include <linux/err.h>
24#include <linux/spi/spi.h>
25
26#include "mc13xxx.h"
27
28static const struct spi_device_id mc13xxx_device_id[] = {
29 {
30 .name = "mc13783",
31 .driver_data = MC13XXX_ID_MC13783,
32 }, {
33 .name = "mc13892",
34 .driver_data = MC13XXX_ID_MC13892,
35 }, {
36 /* sentinel */
37 }
38};
39MODULE_DEVICE_TABLE(spi, mc13xxx_device_id);
40
41static const struct of_device_id mc13xxx_dt_ids[] = {
42 { .compatible = "fsl,mc13783", .data = (void *) MC13XXX_ID_MC13783, },
43 { .compatible = "fsl,mc13892", .data = (void *) MC13XXX_ID_MC13892, },
44 { /* sentinel */ }
45};
46MODULE_DEVICE_TABLE(of, mc13xxx_dt_ids);
47
48static struct regmap_config mc13xxx_regmap_spi_config = {
49 .reg_bits = 7,
50 .pad_bits = 1,
51 .val_bits = 24,
52
53 .max_register = MC13XXX_NUMREGS,
54
55 .cache_type = REGCACHE_NONE,
56};
57
58static int mc13xxx_spi_probe(struct spi_device *spi)
59{
60 const struct of_device_id *of_id;
61 struct spi_driver *sdrv = to_spi_driver(spi->dev.driver);
62 struct mc13xxx *mc13xxx;
63 struct mc13xxx_platform_data *pdata = dev_get_platdata(&spi->dev);
64 int ret;
65
66 of_id = of_match_device(mc13xxx_dt_ids, &spi->dev);
67 if (of_id)
68 sdrv->id_table = &mc13xxx_device_id[(enum mc13xxx_id) of_id->data];
69
70 mc13xxx = kzalloc(sizeof(*mc13xxx), GFP_KERNEL);
71 if (!mc13xxx)
72 return -ENOMEM;
73
74 dev_set_drvdata(&spi->dev, mc13xxx);
75 spi->mode = SPI_MODE_0 | SPI_CS_HIGH;
76 spi->bits_per_word = 32;
77
78 mc13xxx->dev = &spi->dev;
79 mutex_init(&mc13xxx->lock);
80
81 mc13xxx->regmap = regmap_init_spi(spi, &mc13xxx_regmap_spi_config);
82 if (IS_ERR(mc13xxx->regmap)) {
83 ret = PTR_ERR(mc13xxx->regmap);
84 dev_err(mc13xxx->dev, "Failed to initialize register map: %d\n",
85 ret);
86 dev_set_drvdata(&spi->dev, NULL);
87 kfree(mc13xxx);
88 return ret;
89 }
90
91 ret = mc13xxx_common_init(mc13xxx, pdata, spi->irq);
92
93 if (ret) {
94 dev_set_drvdata(&spi->dev, NULL);
95 } else {
96 const struct spi_device_id *devid =
97 spi_get_device_id(spi);
98 if (!devid || devid->driver_data != mc13xxx->ictype)
99 dev_warn(mc13xxx->dev,
100 "device id doesn't match auto detection!\n");
101 }
102
103 return ret;
104}
105
106static int __devexit mc13xxx_spi_remove(struct spi_device *spi)
107{
108 struct mc13xxx *mc13xxx = dev_get_drvdata(&spi->dev);
109
110 mc13xxx_common_cleanup(mc13xxx);
111
112 return 0;
113}
114
115static struct spi_driver mc13xxx_spi_driver = {
116 .id_table = mc13xxx_device_id,
117 .driver = {
118 .name = "mc13xxx",
119 .owner = THIS_MODULE,
120 .of_match_table = mc13xxx_dt_ids,
121 },
122 .probe = mc13xxx_spi_probe,
123 .remove = __devexit_p(mc13xxx_spi_remove),
124};
125
126static int __init mc13xxx_init(void)
127{
128 return spi_register_driver(&mc13xxx_spi_driver);
129}
130subsys_initcall(mc13xxx_init);
131
132static void __exit mc13xxx_exit(void)
133{
134 spi_unregister_driver(&mc13xxx_spi_driver);
135}
136module_exit(mc13xxx_exit);
137
138MODULE_DESCRIPTION("Core driver for Freescale MC13XXX PMIC");
139MODULE_AUTHOR("Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de>");
140MODULE_LICENSE("GPL v2");
diff --git a/drivers/mfd/mc13xxx.h b/drivers/mfd/mc13xxx.h
new file mode 100644
index 000000000000..bbba06feea06
--- /dev/null
+++ b/drivers/mfd/mc13xxx.h
@@ -0,0 +1,45 @@
1/*
2 * Copyright 2012 Creative Product Design
3 * Marc Reilly <marc@cpdesign.com.au>
4 *
5 * This program is free software; you can redistribute it and/or modify it under
6 * the terms of the GNU General Public License version 2 as published by the
7 * Free Software Foundation.
8 */
9#ifndef __DRIVERS_MFD_MC13XXX_H
10#define __DRIVERS_MFD_MC13XXX_H
11
12#include <linux/mutex.h>
13#include <linux/regmap.h>
14#include <linux/mfd/mc13xxx.h>
15
16enum mc13xxx_id {
17 MC13XXX_ID_MC13783,
18 MC13XXX_ID_MC13892,
19 MC13XXX_ID_INVALID,
20};
21
22#define MC13XXX_NUMREGS 0x3f
23
24struct mc13xxx {
25 struct regmap *regmap;
26
27 struct device *dev;
28 enum mc13xxx_id ictype;
29
30 struct mutex lock;
31 int irq;
32 int flags;
33
34 irq_handler_t irqhandler[MC13XXX_NUM_IRQ];
35 void *irqdata[MC13XXX_NUM_IRQ];
36
37 int adcflags;
38};
39
40int mc13xxx_common_init(struct mc13xxx *mc13xxx,
41 struct mc13xxx_platform_data *pdata, int irq);
42
43void mc13xxx_common_cleanup(struct mc13xxx *mc13xxx);
44
45#endif /* __DRIVERS_MFD_MC13XXX_H */
diff --git a/drivers/mfd/pcf50633-core.c b/drivers/mfd/pcf50633-core.c
index 189c2f07b83f..29c122bf28ea 100644
--- a/drivers/mfd/pcf50633-core.c
+++ b/drivers/mfd/pcf50633-core.c
@@ -204,7 +204,7 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
204 return -ENOENT; 204 return -ENOENT;
205 } 205 }
206 206
207 pcf = kzalloc(sizeof(*pcf), GFP_KERNEL); 207 pcf = devm_kzalloc(&client->dev, sizeof(*pcf), GFP_KERNEL);
208 if (!pcf) 208 if (!pcf)
209 return -ENOMEM; 209 return -ENOMEM;
210 210
@@ -212,12 +212,11 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
212 212
213 mutex_init(&pcf->lock); 213 mutex_init(&pcf->lock);
214 214
215 pcf->regmap = regmap_init_i2c(client, &pcf50633_regmap_config); 215 pcf->regmap = devm_regmap_init_i2c(client, &pcf50633_regmap_config);
216 if (IS_ERR(pcf->regmap)) { 216 if (IS_ERR(pcf->regmap)) {
217 ret = PTR_ERR(pcf->regmap); 217 ret = PTR_ERR(pcf->regmap);
218 dev_err(pcf->dev, "Failed to allocate register map: %d\n", 218 dev_err(pcf->dev, "Failed to allocate register map: %d\n", ret);
219 ret); 219 return ret;
220 goto err_free;
221 } 220 }
222 221
223 i2c_set_clientdata(client, pcf); 222 i2c_set_clientdata(client, pcf);
@@ -228,7 +227,7 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
228 if (version < 0 || variant < 0) { 227 if (version < 0 || variant < 0) {
229 dev_err(pcf->dev, "Unable to probe pcf50633\n"); 228 dev_err(pcf->dev, "Unable to probe pcf50633\n");
230 ret = -ENODEV; 229 ret = -ENODEV;
231 goto err_regmap; 230 return ret;
232 } 231 }
233 232
234 dev_info(pcf->dev, "Probed device version %d variant %d\n", 233 dev_info(pcf->dev, "Probed device version %d variant %d\n",
@@ -237,16 +236,11 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
237 pcf50633_irq_init(pcf, client->irq); 236 pcf50633_irq_init(pcf, client->irq);
238 237
239 /* Create sub devices */ 238 /* Create sub devices */
240 pcf50633_client_dev_register(pcf, "pcf50633-input", 239 pcf50633_client_dev_register(pcf, "pcf50633-input", &pcf->input_pdev);
241 &pcf->input_pdev); 240 pcf50633_client_dev_register(pcf, "pcf50633-rtc", &pcf->rtc_pdev);
242 pcf50633_client_dev_register(pcf, "pcf50633-rtc", 241 pcf50633_client_dev_register(pcf, "pcf50633-mbc", &pcf->mbc_pdev);
243 &pcf->rtc_pdev); 242 pcf50633_client_dev_register(pcf, "pcf50633-adc", &pcf->adc_pdev);
244 pcf50633_client_dev_register(pcf, "pcf50633-mbc", 243 pcf50633_client_dev_register(pcf, "pcf50633-backlight", &pcf->bl_pdev);
245 &pcf->mbc_pdev);
246 pcf50633_client_dev_register(pcf, "pcf50633-adc",
247 &pcf->adc_pdev);
248 pcf50633_client_dev_register(pcf, "pcf50633-backlight",
249 &pcf->bl_pdev);
250 244
251 245
252 for (i = 0; i < PCF50633_NUM_REGULATORS; i++) { 246 for (i = 0; i < PCF50633_NUM_REGULATORS; i++) {
@@ -274,13 +268,6 @@ static int __devinit pcf50633_probe(struct i2c_client *client,
274 pdata->probe_done(pcf); 268 pdata->probe_done(pcf);
275 269
276 return 0; 270 return 0;
277
278err_regmap:
279 regmap_exit(pcf->regmap);
280err_free:
281 kfree(pcf);
282
283 return ret;
284} 271}
285 272
286static int __devexit pcf50633_remove(struct i2c_client *client) 273static int __devexit pcf50633_remove(struct i2c_client *client)
@@ -300,9 +287,6 @@ static int __devexit pcf50633_remove(struct i2c_client *client)
300 for (i = 0; i < PCF50633_NUM_REGULATORS; i++) 287 for (i = 0; i < PCF50633_NUM_REGULATORS; i++)
301 platform_device_unregister(pcf->regulator_pdev[i]); 288 platform_device_unregister(pcf->regulator_pdev[i]);
302 289
303 regmap_exit(pcf->regmap);
304 kfree(pcf);
305
306 return 0; 290 return 0;
307} 291}
308 292
diff --git a/drivers/mfd/rc5t583.c b/drivers/mfd/rc5t583.c
index 44afae0a69ce..cdc1df7fa0e9 100644
--- a/drivers/mfd/rc5t583.c
+++ b/drivers/mfd/rc5t583.c
@@ -75,6 +75,7 @@ static struct deepsleep_control_data deepsleep_data[] = {
75 (RC5T583_EXT_PWRREQ1_CONTROL | RC5T583_EXT_PWRREQ2_CONTROL) 75 (RC5T583_EXT_PWRREQ1_CONTROL | RC5T583_EXT_PWRREQ2_CONTROL)
76 76
77static struct mfd_cell rc5t583_subdevs[] = { 77static struct mfd_cell rc5t583_subdevs[] = {
78 {.name = "rc5t583-gpio",},
78 {.name = "rc5t583-regulator",}, 79 {.name = "rc5t583-regulator",},
79 {.name = "rc5t583-rtc", }, 80 {.name = "rc5t583-rtc", },
80 {.name = "rc5t583-key", } 81 {.name = "rc5t583-key", }
@@ -267,7 +268,7 @@ static int __devinit rc5t583_i2c_probe(struct i2c_client *i2c,
267 rc5t583->dev = &i2c->dev; 268 rc5t583->dev = &i2c->dev;
268 i2c_set_clientdata(i2c, rc5t583); 269 i2c_set_clientdata(i2c, rc5t583);
269 270
270 rc5t583->regmap = regmap_init_i2c(i2c, &rc5t583_regmap_config); 271 rc5t583->regmap = devm_regmap_init_i2c(i2c, &rc5t583_regmap_config);
271 if (IS_ERR(rc5t583->regmap)) { 272 if (IS_ERR(rc5t583->regmap)) {
272 ret = PTR_ERR(rc5t583->regmap); 273 ret = PTR_ERR(rc5t583->regmap);
273 dev_err(&i2c->dev, "regmap initialization failed: %d\n", ret); 274 dev_err(&i2c->dev, "regmap initialization failed: %d\n", ret);
@@ -276,7 +277,7 @@ static int __devinit rc5t583_i2c_probe(struct i2c_client *i2c,
276 277
277 ret = rc5t583_clear_ext_power_req(rc5t583, pdata); 278 ret = rc5t583_clear_ext_power_req(rc5t583, pdata);
278 if (ret < 0) 279 if (ret < 0)
279 goto err_irq_init; 280 return ret;
280 281
281 if (i2c->irq) { 282 if (i2c->irq) {
282 ret = rc5t583_irq_init(rc5t583, i2c->irq, pdata->irq_base); 283 ret = rc5t583_irq_init(rc5t583, i2c->irq, pdata->irq_base);
@@ -299,8 +300,6 @@ static int __devinit rc5t583_i2c_probe(struct i2c_client *i2c,
299err_add_devs: 300err_add_devs:
300 if (irq_init_success) 301 if (irq_init_success)
301 rc5t583_irq_exit(rc5t583); 302 rc5t583_irq_exit(rc5t583);
302err_irq_init:
303 regmap_exit(rc5t583->regmap);
304 return ret; 303 return ret;
305} 304}
306 305
@@ -310,7 +309,6 @@ static int __devexit rc5t583_i2c_remove(struct i2c_client *i2c)
310 309
311 mfd_remove_devices(rc5t583->dev); 310 mfd_remove_devices(rc5t583->dev);
312 rc5t583_irq_exit(rc5t583); 311 rc5t583_irq_exit(rc5t583);
313 regmap_exit(rc5t583->regmap);
314 return 0; 312 return 0;
315} 313}
316 314
diff --git a/drivers/mfd/rdc321x-southbridge.c b/drivers/mfd/rdc321x-southbridge.c
index 809bd4a61089..685d61e431ad 100644
--- a/drivers/mfd/rdc321x-southbridge.c
+++ b/drivers/mfd/rdc321x-southbridge.c
@@ -108,18 +108,7 @@ static struct pci_driver rdc321x_sb_driver = {
108 .remove = __devexit_p(rdc321x_sb_remove), 108 .remove = __devexit_p(rdc321x_sb_remove),
109}; 109};
110 110
111static int __init rdc321x_sb_init(void) 111module_pci_driver(rdc321x_sb_driver);
112{
113 return pci_register_driver(&rdc321x_sb_driver);
114}
115
116static void __exit rdc321x_sb_exit(void)
117{
118 pci_unregister_driver(&rdc321x_sb_driver);
119}
120
121module_init(rdc321x_sb_init);
122module_exit(rdc321x_sb_exit);
123 112
124MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>"); 113MODULE_AUTHOR("Florian Fainelli <florian@openwrt.org>");
125MODULE_LICENSE("GPL"); 114MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/s5m-core.c b/drivers/mfd/s5m-core.c
index 48949d998d10..dd170307e60e 100644
--- a/drivers/mfd/s5m-core.c
+++ b/drivers/mfd/s5m-core.c
@@ -114,12 +114,12 @@ static int s5m87xx_i2c_probe(struct i2c_client *i2c,
114 s5m87xx->wakeup = pdata->wakeup; 114 s5m87xx->wakeup = pdata->wakeup;
115 } 115 }
116 116
117 s5m87xx->regmap = regmap_init_i2c(i2c, &s5m_regmap_config); 117 s5m87xx->regmap = devm_regmap_init_i2c(i2c, &s5m_regmap_config);
118 if (IS_ERR(s5m87xx->regmap)) { 118 if (IS_ERR(s5m87xx->regmap)) {
119 ret = PTR_ERR(s5m87xx->regmap); 119 ret = PTR_ERR(s5m87xx->regmap);
120 dev_err(&i2c->dev, "Failed to allocate register map: %d\n", 120 dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
121 ret); 121 ret);
122 goto err; 122 return ret;
123 } 123 }
124 124
125 s5m87xx->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR); 125 s5m87xx->rtc = i2c_new_dummy(i2c->adapter, RTC_I2C_ADDR);
@@ -159,7 +159,6 @@ err:
159 mfd_remove_devices(s5m87xx->dev); 159 mfd_remove_devices(s5m87xx->dev);
160 s5m_irq_exit(s5m87xx); 160 s5m_irq_exit(s5m87xx);
161 i2c_unregister_device(s5m87xx->rtc); 161 i2c_unregister_device(s5m87xx->rtc);
162 regmap_exit(s5m87xx->regmap);
163 return ret; 162 return ret;
164} 163}
165 164
@@ -170,7 +169,6 @@ static int s5m87xx_i2c_remove(struct i2c_client *i2c)
170 mfd_remove_devices(s5m87xx->dev); 169 mfd_remove_devices(s5m87xx->dev);
171 s5m_irq_exit(s5m87xx); 170 s5m_irq_exit(s5m87xx);
172 i2c_unregister_device(s5m87xx->rtc); 171 i2c_unregister_device(s5m87xx->rtc);
173 regmap_exit(s5m87xx->regmap);
174 return 0; 172 return 0;
175} 173}
176 174
diff --git a/drivers/mfd/sta2x11-mfd.c b/drivers/mfd/sta2x11-mfd.c
new file mode 100644
index 000000000000..d31fed07aefb
--- /dev/null
+++ b/drivers/mfd/sta2x11-mfd.c
@@ -0,0 +1,467 @@
1/*
2 * Copyright (c) 2009-2011 Wind River Systems, Inc.
3 * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini)
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
12 * See the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 */
19
20#include <linux/kernel.h>
21#include <linux/module.h>
22#include <linux/spinlock.h>
23#include <linux/errno.h>
24#include <linux/device.h>
25#include <linux/slab.h>
26#include <linux/list.h>
27#include <linux/io.h>
28#include <linux/ioport.h>
29#include <linux/pci.h>
30#include <linux/debugfs.h>
31#include <linux/seq_file.h>
32#include <linux/platform_device.h>
33#include <linux/mfd/core.h>
34#include <linux/mfd/sta2x11-mfd.h>
35
36#include <asm/sta2x11.h>
37
38/* This describes STA2X11 MFD chip for us, we may have several */
39struct sta2x11_mfd {
40 struct sta2x11_instance *instance;
41 spinlock_t lock;
42 struct list_head list;
43 void __iomem *sctl_regs;
44 void __iomem *apbreg_regs;
45};
46
47static LIST_HEAD(sta2x11_mfd_list);
48
49/* Three functions to act on the list */
50static struct sta2x11_mfd *sta2x11_mfd_find(struct pci_dev *pdev)
51{
52 struct sta2x11_instance *instance;
53 struct sta2x11_mfd *mfd;
54
55 if (!pdev && !list_empty(&sta2x11_mfd_list)) {
56 pr_warning("%s: Unspecified device, "
57 "using first instance\n", __func__);
58 return list_entry(sta2x11_mfd_list.next,
59 struct sta2x11_mfd, list);
60 }
61
62 instance = sta2x11_get_instance(pdev);
63 if (!instance)
64 return NULL;
65 list_for_each_entry(mfd, &sta2x11_mfd_list, list) {
66 if (mfd->instance == instance)
67 return mfd;
68 }
69 return NULL;
70}
71
72static int __devinit sta2x11_mfd_add(struct pci_dev *pdev, gfp_t flags)
73{
74 struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
75 struct sta2x11_instance *instance;
76
77 if (mfd)
78 return -EBUSY;
79 instance = sta2x11_get_instance(pdev);
80 if (!instance)
81 return -EINVAL;
82 mfd = kzalloc(sizeof(*mfd), flags);
83 if (!mfd)
84 return -ENOMEM;
85 INIT_LIST_HEAD(&mfd->list);
86 spin_lock_init(&mfd->lock);
87 mfd->instance = instance;
88 list_add(&mfd->list, &sta2x11_mfd_list);
89 return 0;
90}
91
92static int __devexit mfd_remove(struct pci_dev *pdev)
93{
94 struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
95
96 if (!mfd)
97 return -ENODEV;
98 list_del(&mfd->list);
99 kfree(mfd);
100 return 0;
101}
102
103/* These two functions are exported and are not expected to fail */
104u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
105{
106 struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
107 u32 r;
108 unsigned long flags;
109
110 if (!mfd) {
111 dev_warn(&pdev->dev, ": can't access sctl regs\n");
112 return 0;
113 }
114 if (!mfd->sctl_regs) {
115 dev_warn(&pdev->dev, ": system ctl not initialized\n");
116 return 0;
117 }
118 spin_lock_irqsave(&mfd->lock, flags);
119 r = readl(mfd->sctl_regs + reg);
120 r &= ~mask;
121 r |= val;
122 if (mask)
123 writel(r, mfd->sctl_regs + reg);
124 spin_unlock_irqrestore(&mfd->lock, flags);
125 return r;
126}
127EXPORT_SYMBOL(sta2x11_sctl_mask);
128
129u32 sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val)
130{
131 struct sta2x11_mfd *mfd = sta2x11_mfd_find(pdev);
132 u32 r;
133 unsigned long flags;
134
135 if (!mfd) {
136 dev_warn(&pdev->dev, ": can't access apb regs\n");
137 return 0;
138 }
139 if (!mfd->apbreg_regs) {
140 dev_warn(&pdev->dev, ": apb bridge not initialized\n");
141 return 0;
142 }
143 spin_lock_irqsave(&mfd->lock, flags);
144 r = readl(mfd->apbreg_regs + reg);
145 r &= ~mask;
146 r |= val;
147 if (mask)
148 writel(r, mfd->apbreg_regs + reg);
149 spin_unlock_irqrestore(&mfd->lock, flags);
150 return r;
151}
152EXPORT_SYMBOL(sta2x11_apbreg_mask);
153
154/* Two debugfs files, for our registers (FIXME: one instance only) */
155#define REG(regname) {.name = #regname, .offset = SCTL_ ## regname}
156static struct debugfs_reg32 sta2x11_sctl_regs[] = {
157 REG(SCCTL), REG(ARMCFG), REG(SCPLLCTL), REG(SCPLLFCTRL),
158 REG(SCRESFRACT), REG(SCRESCTRL1), REG(SCRESXTRL2), REG(SCPEREN0),
159 REG(SCPEREN1), REG(SCPEREN2), REG(SCGRST), REG(SCPCIPMCR1),
160 REG(SCPCIPMCR2), REG(SCPCIPMSR1), REG(SCPCIPMSR2), REG(SCPCIPMSR3),
161 REG(SCINTREN), REG(SCRISR), REG(SCCLKSTAT0), REG(SCCLKSTAT1),
162 REG(SCCLKSTAT2), REG(SCRSTSTA),
163};
164#undef REG
165
166static struct debugfs_regset32 sctl_regset = {
167 .regs = sta2x11_sctl_regs,
168 .nregs = ARRAY_SIZE(sta2x11_sctl_regs),
169};
170
171#define REG(regname) {.name = #regname, .offset = regname}
172static struct debugfs_reg32 sta2x11_apbreg_regs[] = {
173 REG(APBREG_BSR), REG(APBREG_PAER), REG(APBREG_PWAC), REG(APBREG_PRAC),
174 REG(APBREG_PCG), REG(APBREG_PUR), REG(APBREG_EMU_PCG),
175};
176#undef REG
177
178static struct debugfs_regset32 apbreg_regset = {
179 .regs = sta2x11_apbreg_regs,
180 .nregs = ARRAY_SIZE(sta2x11_apbreg_regs),
181};
182
183static struct dentry *sta2x11_sctl_debugfs;
184static struct dentry *sta2x11_apbreg_debugfs;
185
186/* Probe for the two platform devices */
187static int sta2x11_sctl_probe(struct platform_device *dev)
188{
189 struct pci_dev **pdev;
190 struct sta2x11_mfd *mfd;
191 struct resource *res;
192
193 pdev = dev->dev.platform_data;
194 mfd = sta2x11_mfd_find(*pdev);
195 if (!mfd)
196 return -ENODEV;
197
198 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
199 if (!res)
200 return -ENOMEM;
201
202 if (!request_mem_region(res->start, resource_size(res),
203 "sta2x11-sctl"))
204 return -EBUSY;
205
206 mfd->sctl_regs = ioremap(res->start, resource_size(res));
207 if (!mfd->sctl_regs) {
208 release_mem_region(res->start, resource_size(res));
209 return -ENOMEM;
210 }
211 sctl_regset.base = mfd->sctl_regs;
212 sta2x11_sctl_debugfs = debugfs_create_regset32("sta2x11-sctl",
213 S_IFREG | S_IRUGO,
214 NULL, &sctl_regset);
215 return 0;
216}
217
218static int sta2x11_apbreg_probe(struct platform_device *dev)
219{
220 struct pci_dev **pdev;
221 struct sta2x11_mfd *mfd;
222 struct resource *res;
223
224 pdev = dev->dev.platform_data;
225 dev_dbg(&dev->dev, "%s: pdata is %p\n", __func__, pdev);
226 dev_dbg(&dev->dev, "%s: *pdata is %p\n", __func__, *pdev);
227
228 mfd = sta2x11_mfd_find(*pdev);
229 if (!mfd)
230 return -ENODEV;
231
232 res = platform_get_resource(dev, IORESOURCE_MEM, 0);
233 if (!res)
234 return -ENOMEM;
235
236 if (!request_mem_region(res->start, resource_size(res),
237 "sta2x11-apbreg"))
238 return -EBUSY;
239
240 mfd->apbreg_regs = ioremap(res->start, resource_size(res));
241 if (!mfd->apbreg_regs) {
242 release_mem_region(res->start, resource_size(res));
243 return -ENOMEM;
244 }
245 dev_dbg(&dev->dev, "%s: regbase %p\n", __func__, mfd->apbreg_regs);
246
247 apbreg_regset.base = mfd->apbreg_regs;
248 sta2x11_apbreg_debugfs = debugfs_create_regset32("sta2x11-apbreg",
249 S_IFREG | S_IRUGO,
250 NULL, &apbreg_regset);
251 return 0;
252}
253
254/* The two platform drivers */
255static struct platform_driver sta2x11_sctl_platform_driver = {
256 .driver = {
257 .name = "sta2x11-sctl",
258 .owner = THIS_MODULE,
259 },
260 .probe = sta2x11_sctl_probe,
261};
262
263static int __init sta2x11_sctl_init(void)
264{
265 pr_info("%s\n", __func__);
266 return platform_driver_register(&sta2x11_sctl_platform_driver);
267}
268
269static struct platform_driver sta2x11_platform_driver = {
270 .driver = {
271 .name = "sta2x11-apbreg",
272 .owner = THIS_MODULE,
273 },
274 .probe = sta2x11_apbreg_probe,
275};
276
277static int __init sta2x11_apbreg_init(void)
278{
279 pr_info("%s\n", __func__);
280 return platform_driver_register(&sta2x11_platform_driver);
281}
282
283/*
284 * What follows is the PCI device that hosts the above two pdevs.
285 * Each logic block is 4kB and they are all consecutive: we use this info.
286 */
287
288/* Bar 0 */
289enum bar0_cells {
290 STA2X11_GPIO_0 = 0,
291 STA2X11_GPIO_1,
292 STA2X11_GPIO_2,
293 STA2X11_GPIO_3,
294 STA2X11_SCTL,
295 STA2X11_SCR,
296 STA2X11_TIME,
297};
298/* Bar 1 */
299enum bar1_cells {
300 STA2X11_APBREG = 0,
301};
302#define CELL_4K(_name, _cell) { \
303 .name = _name, \
304 .start = _cell * 4096, .end = _cell * 4096 + 4095, \
305 .flags = IORESOURCE_MEM, \
306 }
307
308static const __devinitconst struct resource gpio_resources[] = {
309 {
310 .name = "sta2x11_gpio", /* 4 consecutive cells, 1 driver */
311 .start = 0,
312 .end = (4 * 4096) - 1,
313 .flags = IORESOURCE_MEM,
314 }
315};
316static const __devinitconst struct resource sctl_resources[] = {
317 CELL_4K("sta2x11-sctl", STA2X11_SCTL),
318};
319static const __devinitconst struct resource scr_resources[] = {
320 CELL_4K("sta2x11-scr", STA2X11_SCR),
321};
322static const __devinitconst struct resource time_resources[] = {
323 CELL_4K("sta2x11-time", STA2X11_TIME),
324};
325
326static const __devinitconst struct resource apbreg_resources[] = {
327 CELL_4K("sta2x11-apbreg", STA2X11_APBREG),
328};
329
330#define DEV(_name, _r) \
331 { .name = _name, .num_resources = ARRAY_SIZE(_r), .resources = _r, }
332
333static __devinitdata struct mfd_cell sta2x11_mfd_bar0[] = {
334 DEV("sta2x11-gpio", gpio_resources), /* offset 0: we add pdata later */
335 DEV("sta2x11-sctl", sctl_resources),
336 DEV("sta2x11-scr", scr_resources),
337 DEV("sta2x11-time", time_resources),
338};
339
340static __devinitdata struct mfd_cell sta2x11_mfd_bar1[] = {
341 DEV("sta2x11-apbreg", apbreg_resources),
342};
343
344static int sta2x11_mfd_suspend(struct pci_dev *pdev, pm_message_t state)
345{
346 pci_save_state(pdev);
347 pci_disable_device(pdev);
348 pci_set_power_state(pdev, pci_choose_state(pdev, state));
349
350 return 0;
351}
352
353static int sta2x11_mfd_resume(struct pci_dev *pdev)
354{
355 int err;
356
357 pci_set_power_state(pdev, 0);
358 err = pci_enable_device(pdev);
359 if (err)
360 return err;
361 pci_restore_state(pdev);
362
363 return 0;
364}
365
366static int __devinit sta2x11_mfd_probe(struct pci_dev *pdev,
367 const struct pci_device_id *pci_id)
368{
369 int err, i;
370 struct sta2x11_gpio_pdata *gpio_data;
371
372 dev_info(&pdev->dev, "%s\n", __func__);
373
374 err = pci_enable_device(pdev);
375 if (err) {
376 dev_err(&pdev->dev, "Can't enable device.\n");
377 return err;
378 }
379
380 err = pci_enable_msi(pdev);
381 if (err)
382 dev_info(&pdev->dev, "Enable msi failed\n");
383
384 /* Read gpio config data as pci device's platform data */
385 gpio_data = dev_get_platdata(&pdev->dev);
386 if (!gpio_data)
387 dev_warn(&pdev->dev, "no gpio configuration\n");
388
389 dev_dbg(&pdev->dev, "%s, gpio_data = %p (%p)\n", __func__,
390 gpio_data, &gpio_data);
391 dev_dbg(&pdev->dev, "%s, pdev = %p (%p)\n", __func__,
392 pdev, &pdev);
393
394 /* platform data is the pci device for all of them */
395 for (i = 0; i < ARRAY_SIZE(sta2x11_mfd_bar0); i++) {
396 sta2x11_mfd_bar0[i].pdata_size = sizeof(pdev);
397 sta2x11_mfd_bar0[i].platform_data = &pdev;
398 }
399 sta2x11_mfd_bar1[0].pdata_size = sizeof(pdev);
400 sta2x11_mfd_bar1[0].platform_data = &pdev;
401
402 /* Record this pdev before mfd_add_devices: their probe looks for it */
403 sta2x11_mfd_add(pdev, GFP_ATOMIC);
404
405
406 err = mfd_add_devices(&pdev->dev, -1,
407 sta2x11_mfd_bar0,
408 ARRAY_SIZE(sta2x11_mfd_bar0),
409 &pdev->resource[0],
410 0);
411 if (err) {
412 dev_err(&pdev->dev, "mfd_add_devices[0] failed: %d\n", err);
413 goto err_disable;
414 }
415
416 err = mfd_add_devices(&pdev->dev, -1,
417 sta2x11_mfd_bar1,
418 ARRAY_SIZE(sta2x11_mfd_bar1),
419 &pdev->resource[1],
420 0);
421 if (err) {
422 dev_err(&pdev->dev, "mfd_add_devices[1] failed: %d\n", err);
423 goto err_disable;
424 }
425
426 return 0;
427
428err_disable:
429 mfd_remove_devices(&pdev->dev);
430 pci_disable_device(pdev);
431 pci_disable_msi(pdev);
432 return err;
433}
434
435static DEFINE_PCI_DEVICE_TABLE(sta2x11_mfd_tbl) = {
436 {PCI_DEVICE(PCI_VENDOR_ID_STMICRO, PCI_DEVICE_ID_STMICRO_GPIO)},
437 {0,},
438};
439
440static struct pci_driver sta2x11_mfd_driver = {
441 .name = "sta2x11-mfd",
442 .id_table = sta2x11_mfd_tbl,
443 .probe = sta2x11_mfd_probe,
444 .suspend = sta2x11_mfd_suspend,
445 .resume = sta2x11_mfd_resume,
446};
447
448static int __init sta2x11_mfd_init(void)
449{
450 pr_info("%s\n", __func__);
451 return pci_register_driver(&sta2x11_mfd_driver);
452}
453
454/*
455 * All of this must be ready before "normal" devices like MMCI appear.
456 * But MFD (the pci device) can't be too early. The following choice
457 * prepares platform drivers very early and probe the PCI device later,
458 * but before other PCI devices.
459 */
460subsys_initcall(sta2x11_apbreg_init);
461subsys_initcall(sta2x11_sctl_init);
462rootfs_initcall(sta2x11_mfd_init);
463
464MODULE_LICENSE("GPL v2");
465MODULE_AUTHOR("Wind River");
466MODULE_DESCRIPTION("STA2x11 mfd for GPIO, SCTL and APBREG");
467MODULE_DEVICE_TABLE(pci, sta2x11_mfd_tbl);
diff --git a/drivers/mfd/stmpe-spi.c b/drivers/mfd/stmpe-spi.c
index b58c43c7ea93..afd459013ecb 100644
--- a/drivers/mfd/stmpe-spi.c
+++ b/drivers/mfd/stmpe-spi.c
@@ -122,7 +122,6 @@ MODULE_DEVICE_TABLE(spi, stmpe_id);
122static struct spi_driver stmpe_spi_driver = { 122static struct spi_driver stmpe_spi_driver = {
123 .driver = { 123 .driver = {
124 .name = "stmpe-spi", 124 .name = "stmpe-spi",
125 .bus = &spi_bus_type,
126 .owner = THIS_MODULE, 125 .owner = THIS_MODULE,
127#ifdef CONFIG_PM 126#ifdef CONFIG_PM
128 .pm = &stmpe_dev_pm_ops, 127 .pm = &stmpe_dev_pm_ops,
diff --git a/drivers/mfd/tps65090.c b/drivers/mfd/tps65090.c
index 47f802bf1848..396b9d1b6bd6 100644
--- a/drivers/mfd/tps65090.c
+++ b/drivers/mfd/tps65090.c
@@ -283,27 +283,24 @@ static int __devinit tps65090_i2c_probe(struct i2c_client *client,
283 } 283 }
284 } 284 }
285 285
286 tps65090->rmap = regmap_init_i2c(tps65090->client, 286 tps65090->rmap = devm_regmap_init_i2c(tps65090->client,
287 &tps65090_regmap_config); 287 &tps65090_regmap_config);
288 if (IS_ERR(tps65090->rmap)) { 288 if (IS_ERR(tps65090->rmap)) {
289 dev_err(&client->dev, "regmap_init failed with err: %ld\n", 289 ret = PTR_ERR(tps65090->rmap);
290 PTR_ERR(tps65090->rmap)); 290 dev_err(&client->dev, "regmap_init failed with err: %d\n", ret);
291 goto err_irq_exit; 291 goto err_irq_exit;
292 }; 292 }
293 293
294 ret = mfd_add_devices(tps65090->dev, -1, tps65090s, 294 ret = mfd_add_devices(tps65090->dev, -1, tps65090s,
295 ARRAY_SIZE(tps65090s), NULL, 0); 295 ARRAY_SIZE(tps65090s), NULL, 0);
296 if (ret) { 296 if (ret) {
297 dev_err(&client->dev, "add mfd devices failed with err: %d\n", 297 dev_err(&client->dev, "add mfd devices failed with err: %d\n",
298 ret); 298 ret);
299 goto err_regmap_exit; 299 goto err_irq_exit;
300 } 300 }
301 301
302 return 0; 302 return 0;
303 303
304err_regmap_exit:
305 regmap_exit(tps65090->rmap);
306
307err_irq_exit: 304err_irq_exit:
308 if (client->irq) 305 if (client->irq)
309 free_irq(client->irq, tps65090); 306 free_irq(client->irq, tps65090);
@@ -316,29 +313,34 @@ static int __devexit tps65090_i2c_remove(struct i2c_client *client)
316 struct tps65090 *tps65090 = i2c_get_clientdata(client); 313 struct tps65090 *tps65090 = i2c_get_clientdata(client);
317 314
318 mfd_remove_devices(tps65090->dev); 315 mfd_remove_devices(tps65090->dev);
319 regmap_exit(tps65090->rmap);
320 if (client->irq) 316 if (client->irq)
321 free_irq(client->irq, tps65090); 317 free_irq(client->irq, tps65090);
322 318
323 return 0; 319 return 0;
324} 320}
325 321
326#ifdef CONFIG_PM 322#ifdef CONFIG_PM_SLEEP
327static int tps65090_i2c_suspend(struct i2c_client *client, pm_message_t state) 323static int tps65090_suspend(struct device *dev)
328{ 324{
325 struct i2c_client *client = to_i2c_client(dev);
329 if (client->irq) 326 if (client->irq)
330 disable_irq(client->irq); 327 disable_irq(client->irq);
331 return 0; 328 return 0;
332} 329}
333 330
334static int tps65090_i2c_resume(struct i2c_client *client) 331static int tps65090_resume(struct device *dev)
335{ 332{
333 struct i2c_client *client = to_i2c_client(dev);
336 if (client->irq) 334 if (client->irq)
337 enable_irq(client->irq); 335 enable_irq(client->irq);
338 return 0; 336 return 0;
339} 337}
340#endif 338#endif
341 339
340static const struct dev_pm_ops tps65090_pm_ops = {
341 SET_SYSTEM_SLEEP_PM_OPS(tps65090_suspend, tps65090_resume)
342};
343
342static const struct i2c_device_id tps65090_id_table[] = { 344static const struct i2c_device_id tps65090_id_table[] = {
343 { "tps65090", 0 }, 345 { "tps65090", 0 },
344 { }, 346 { },
@@ -349,13 +351,10 @@ static struct i2c_driver tps65090_driver = {
349 .driver = { 351 .driver = {
350 .name = "tps65090", 352 .name = "tps65090",
351 .owner = THIS_MODULE, 353 .owner = THIS_MODULE,
354 .pm = &tps65090_pm_ops,
352 }, 355 },
353 .probe = tps65090_i2c_probe, 356 .probe = tps65090_i2c_probe,
354 .remove = __devexit_p(tps65090_i2c_remove), 357 .remove = __devexit_p(tps65090_i2c_remove),
355#ifdef CONFIG_PM
356 .suspend = tps65090_i2c_suspend,
357 .resume = tps65090_i2c_resume,
358#endif
359 .id_table = tps65090_id_table, 358 .id_table = tps65090_id_table,
360}; 359};
361 360
diff --git a/drivers/mfd/tps65217.c b/drivers/mfd/tps65217.c
index f7d854e4cc62..db194e433c08 100644
--- a/drivers/mfd/tps65217.c
+++ b/drivers/mfd/tps65217.c
@@ -96,7 +96,7 @@ EXPORT_SYMBOL_GPL(tps65217_reg_write);
96 * @val: Value to write. 96 * @val: Value to write.
97 * @level: Password protected level 97 * @level: Password protected level
98 */ 98 */
99int tps65217_update_bits(struct tps65217 *tps, unsigned int reg, 99static int tps65217_update_bits(struct tps65217 *tps, unsigned int reg,
100 unsigned int mask, unsigned int val, unsigned int level) 100 unsigned int mask, unsigned int val, unsigned int level)
101{ 101{
102 int ret; 102 int ret;
@@ -150,7 +150,7 @@ static int __devinit tps65217_probe(struct i2c_client *client,
150 return -ENOMEM; 150 return -ENOMEM;
151 151
152 tps->pdata = pdata; 152 tps->pdata = pdata;
153 tps->regmap = regmap_init_i2c(client, &tps65217_regmap_config); 153 tps->regmap = devm_regmap_init_i2c(client, &tps65217_regmap_config);
154 if (IS_ERR(tps->regmap)) { 154 if (IS_ERR(tps->regmap)) {
155 ret = PTR_ERR(tps->regmap); 155 ret = PTR_ERR(tps->regmap);
156 dev_err(tps->dev, "Failed to allocate register map: %d\n", 156 dev_err(tps->dev, "Failed to allocate register map: %d\n",
@@ -163,9 +163,9 @@ static int __devinit tps65217_probe(struct i2c_client *client,
163 163
164 ret = tps65217_reg_read(tps, TPS65217_REG_CHIPID, &version); 164 ret = tps65217_reg_read(tps, TPS65217_REG_CHIPID, &version);
165 if (ret < 0) { 165 if (ret < 0) {
166 dev_err(tps->dev, "Failed to read revision" 166 dev_err(tps->dev, "Failed to read revision register: %d\n",
167 " register: %d\n", ret); 167 ret);
168 goto err_regmap; 168 return ret;
169 } 169 }
170 170
171 dev_info(tps->dev, "TPS65217 ID %#x version 1.%d\n", 171 dev_info(tps->dev, "TPS65217 ID %#x version 1.%d\n",
@@ -190,11 +190,6 @@ static int __devinit tps65217_probe(struct i2c_client *client,
190 } 190 }
191 191
192 return 0; 192 return 0;
193
194err_regmap:
195 regmap_exit(tps->regmap);
196
197 return ret;
198} 193}
199 194
200static int __devexit tps65217_remove(struct i2c_client *client) 195static int __devexit tps65217_remove(struct i2c_client *client)
@@ -205,8 +200,6 @@ static int __devexit tps65217_remove(struct i2c_client *client)
205 for (i = 0; i < TPS65217_NUM_REGULATOR; i++) 200 for (i = 0; i < TPS65217_NUM_REGULATOR; i++)
206 platform_device_unregister(tps->regulator_pdev[i]); 201 platform_device_unregister(tps->regulator_pdev[i]);
207 202
208 regmap_exit(tps->regmap);
209
210 return 0; 203 return 0;
211} 204}
212 205
diff --git a/drivers/mfd/tps65910-irq.c b/drivers/mfd/tps65910-irq.c
index c9ed5c00a621..09aab3e4776d 100644
--- a/drivers/mfd/tps65910-irq.c
+++ b/drivers/mfd/tps65910-irq.c
@@ -20,15 +20,10 @@
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/interrupt.h> 21#include <linux/interrupt.h>
22#include <linux/irq.h> 22#include <linux/irq.h>
23#include <linux/irqdomain.h>
23#include <linux/gpio.h> 24#include <linux/gpio.h>
24#include <linux/mfd/tps65910.h> 25#include <linux/mfd/tps65910.h>
25 26
26static inline int irq_to_tps65910_irq(struct tps65910 *tps65910,
27 int irq)
28{
29 return (irq - tps65910->irq_base);
30}
31
32/* 27/*
33 * This is a threaded IRQ handler so can access I2C/SPI. Since all 28 * This is a threaded IRQ handler so can access I2C/SPI. Since all
34 * interrupts are clear on read the IRQ line will be reasserted and 29 * interrupts are clear on read the IRQ line will be reasserted and
@@ -41,28 +36,28 @@ static inline int irq_to_tps65910_irq(struct tps65910 *tps65910,
41static irqreturn_t tps65910_irq(int irq, void *irq_data) 36static irqreturn_t tps65910_irq(int irq, void *irq_data)
42{ 37{
43 struct tps65910 *tps65910 = irq_data; 38 struct tps65910 *tps65910 = irq_data;
39 unsigned int reg;
44 u32 irq_sts; 40 u32 irq_sts;
45 u32 irq_mask; 41 u32 irq_mask;
46 u8 reg;
47 int i; 42 int i;
48 43
49 tps65910->read(tps65910, TPS65910_INT_STS, 1, &reg); 44 tps65910_reg_read(tps65910, TPS65910_INT_STS, &reg);
50 irq_sts = reg; 45 irq_sts = reg;
51 tps65910->read(tps65910, TPS65910_INT_STS2, 1, &reg); 46 tps65910_reg_read(tps65910, TPS65910_INT_STS2, &reg);
52 irq_sts |= reg << 8; 47 irq_sts |= reg << 8;
53 switch (tps65910_chip_id(tps65910)) { 48 switch (tps65910_chip_id(tps65910)) {
54 case TPS65911: 49 case TPS65911:
55 tps65910->read(tps65910, TPS65910_INT_STS3, 1, &reg); 50 tps65910_reg_read(tps65910, TPS65910_INT_STS3, &reg);
56 irq_sts |= reg << 16; 51 irq_sts |= reg << 16;
57 } 52 }
58 53
59 tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg); 54 tps65910_reg_read(tps65910, TPS65910_INT_MSK, &reg);
60 irq_mask = reg; 55 irq_mask = reg;
61 tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg); 56 tps65910_reg_read(tps65910, TPS65910_INT_MSK2, &reg);
62 irq_mask |= reg << 8; 57 irq_mask |= reg << 8;
63 switch (tps65910_chip_id(tps65910)) { 58 switch (tps65910_chip_id(tps65910)) {
64 case TPS65911: 59 case TPS65911:
65 tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg); 60 tps65910_reg_read(tps65910, TPS65910_INT_MSK3, &reg);
66 irq_mask |= reg << 16; 61 irq_mask |= reg << 16;
67 } 62 }
68 63
@@ -76,19 +71,19 @@ static irqreturn_t tps65910_irq(int irq, void *irq_data)
76 if (!(irq_sts & (1 << i))) 71 if (!(irq_sts & (1 << i)))
77 continue; 72 continue;
78 73
79 handle_nested_irq(tps65910->irq_base + i); 74 handle_nested_irq(irq_find_mapping(tps65910->domain, i));
80 } 75 }
81 76
82 /* Write the STS register back to clear IRQs we handled */ 77 /* Write the STS register back to clear IRQs we handled */
83 reg = irq_sts & 0xFF; 78 reg = irq_sts & 0xFF;
84 irq_sts >>= 8; 79 irq_sts >>= 8;
85 tps65910->write(tps65910, TPS65910_INT_STS, 1, &reg); 80 tps65910_reg_write(tps65910, TPS65910_INT_STS, reg);
86 reg = irq_sts & 0xFF; 81 reg = irq_sts & 0xFF;
87 tps65910->write(tps65910, TPS65910_INT_STS2, 1, &reg); 82 tps65910_reg_write(tps65910, TPS65910_INT_STS2, reg);
88 switch (tps65910_chip_id(tps65910)) { 83 switch (tps65910_chip_id(tps65910)) {
89 case TPS65911: 84 case TPS65911:
90 reg = irq_sts >> 8; 85 reg = irq_sts >> 8;
91 tps65910->write(tps65910, TPS65910_INT_STS3, 1, &reg); 86 tps65910_reg_write(tps65910, TPS65910_INT_STS3, reg);
92 } 87 }
93 88
94 return IRQ_HANDLED; 89 return IRQ_HANDLED;
@@ -105,27 +100,27 @@ static void tps65910_irq_sync_unlock(struct irq_data *data)
105{ 100{
106 struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data); 101 struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
107 u32 reg_mask; 102 u32 reg_mask;
108 u8 reg; 103 unsigned int reg;
109 104
110 tps65910->read(tps65910, TPS65910_INT_MSK, 1, &reg); 105 tps65910_reg_read(tps65910, TPS65910_INT_MSK, &reg);
111 reg_mask = reg; 106 reg_mask = reg;
112 tps65910->read(tps65910, TPS65910_INT_MSK2, 1, &reg); 107 tps65910_reg_read(tps65910, TPS65910_INT_MSK2, &reg);
113 reg_mask |= reg << 8; 108 reg_mask |= reg << 8;
114 switch (tps65910_chip_id(tps65910)) { 109 switch (tps65910_chip_id(tps65910)) {
115 case TPS65911: 110 case TPS65911:
116 tps65910->read(tps65910, TPS65910_INT_MSK3, 1, &reg); 111 tps65910_reg_read(tps65910, TPS65910_INT_MSK3, &reg);
117 reg_mask |= reg << 16; 112 reg_mask |= reg << 16;
118 } 113 }
119 114
120 if (tps65910->irq_mask != reg_mask) { 115 if (tps65910->irq_mask != reg_mask) {
121 reg = tps65910->irq_mask & 0xFF; 116 reg = tps65910->irq_mask & 0xFF;
122 tps65910->write(tps65910, TPS65910_INT_MSK, 1, &reg); 117 tps65910_reg_write(tps65910, TPS65910_INT_MSK, reg);
123 reg = tps65910->irq_mask >> 8 & 0xFF; 118 reg = tps65910->irq_mask >> 8 & 0xFF;
124 tps65910->write(tps65910, TPS65910_INT_MSK2, 1, &reg); 119 tps65910_reg_write(tps65910, TPS65910_INT_MSK2, reg);
125 switch (tps65910_chip_id(tps65910)) { 120 switch (tps65910_chip_id(tps65910)) {
126 case TPS65911: 121 case TPS65911:
127 reg = tps65910->irq_mask >> 16; 122 reg = tps65910->irq_mask >> 16;
128 tps65910->write(tps65910, TPS65910_INT_MSK3, 1, &reg); 123 tps65910_reg_write(tps65910, TPS65910_INT_MSK3, reg);
129 } 124 }
130 } 125 }
131 mutex_unlock(&tps65910->irq_lock); 126 mutex_unlock(&tps65910->irq_lock);
@@ -135,14 +130,14 @@ static void tps65910_irq_enable(struct irq_data *data)
135{ 130{
136 struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data); 131 struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
137 132
138 tps65910->irq_mask &= ~( 1 << irq_to_tps65910_irq(tps65910, data->irq)); 133 tps65910->irq_mask &= ~(1 << data->hwirq);
139} 134}
140 135
141static void tps65910_irq_disable(struct irq_data *data) 136static void tps65910_irq_disable(struct irq_data *data)
142{ 137{
143 struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data); 138 struct tps65910 *tps65910 = irq_data_get_irq_chip_data(data);
144 139
145 tps65910->irq_mask |= ( 1 << irq_to_tps65910_irq(tps65910, data->irq)); 140 tps65910->irq_mask |= (1 << data->hwirq);
146} 141}
147 142
148#ifdef CONFIG_PM_SLEEP 143#ifdef CONFIG_PM_SLEEP
@@ -164,10 +159,35 @@ static struct irq_chip tps65910_irq_chip = {
164 .irq_set_wake = tps65910_irq_set_wake, 159 .irq_set_wake = tps65910_irq_set_wake,
165}; 160};
166 161
162static int tps65910_irq_map(struct irq_domain *h, unsigned int virq,
163 irq_hw_number_t hw)
164{
165 struct tps65910 *tps65910 = h->host_data;
166
167 irq_set_chip_data(virq, tps65910);
168 irq_set_chip_and_handler(virq, &tps65910_irq_chip, handle_edge_irq);
169 irq_set_nested_thread(virq, 1);
170
171 /* ARM needs us to explicitly flag the IRQ as valid
172 * and will set them noprobe when we do so. */
173#ifdef CONFIG_ARM
174 set_irq_flags(virq, IRQF_VALID);
175#else
176 irq_set_noprobe(virq);
177#endif
178
179 return 0;
180}
181
182static struct irq_domain_ops tps65910_domain_ops = {
183 .map = tps65910_irq_map,
184 .xlate = irq_domain_xlate_twocell,
185};
186
167int tps65910_irq_init(struct tps65910 *tps65910, int irq, 187int tps65910_irq_init(struct tps65910 *tps65910, int irq,
168 struct tps65910_platform_data *pdata) 188 struct tps65910_platform_data *pdata)
169{ 189{
170 int ret, cur_irq; 190 int ret;
171 int flags = IRQF_ONESHOT; 191 int flags = IRQF_ONESHOT;
172 192
173 if (!irq) { 193 if (!irq) {
@@ -175,17 +195,11 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,
175 return -EINVAL; 195 return -EINVAL;
176 } 196 }
177 197
178 if (!pdata || !pdata->irq_base) { 198 if (!pdata) {
179 dev_warn(tps65910->dev, "No interrupt support, no IRQ base\n"); 199 dev_warn(tps65910->dev, "No interrupt support, no pdata\n");
180 return -EINVAL; 200 return -EINVAL;
181 } 201 }
182 202
183 tps65910->irq_mask = 0xFFFFFF;
184
185 mutex_init(&tps65910->irq_lock);
186 tps65910->chip_irq = irq;
187 tps65910->irq_base = pdata->irq_base;
188
189 switch (tps65910_chip_id(tps65910)) { 203 switch (tps65910_chip_id(tps65910)) {
190 case TPS65910: 204 case TPS65910:
191 tps65910->irq_num = TPS65910_NUM_IRQ; 205 tps65910->irq_num = TPS65910_NUM_IRQ;
@@ -195,22 +209,36 @@ int tps65910_irq_init(struct tps65910 *tps65910, int irq,
195 break; 209 break;
196 } 210 }
197 211
198 /* Register with genirq */ 212 if (pdata->irq_base > 0) {
199 for (cur_irq = tps65910->irq_base; 213 pdata->irq_base = irq_alloc_descs(pdata->irq_base, 0,
200 cur_irq < tps65910->irq_num + tps65910->irq_base; 214 tps65910->irq_num, -1);
201 cur_irq++) { 215 if (pdata->irq_base < 0) {
202 irq_set_chip_data(cur_irq, tps65910); 216 dev_warn(tps65910->dev, "Failed to alloc IRQs: %d\n",
203 irq_set_chip_and_handler(cur_irq, &tps65910_irq_chip, 217 pdata->irq_base);
204 handle_edge_irq); 218 return pdata->irq_base;
205 irq_set_nested_thread(cur_irq, 1); 219 }
206 220 }
207 /* ARM needs us to explicitly flag the IRQ as valid 221
208 * and will set them noprobe when we do so. */ 222 tps65910->irq_mask = 0xFFFFFF;
209#ifdef CONFIG_ARM 223
210 set_irq_flags(cur_irq, IRQF_VALID); 224 mutex_init(&tps65910->irq_lock);
211#else 225 tps65910->chip_irq = irq;
212 irq_set_noprobe(cur_irq); 226 tps65910->irq_base = pdata->irq_base;
213#endif 227
228 if (pdata->irq_base > 0)
229 tps65910->domain = irq_domain_add_legacy(tps65910->dev->of_node,
230 tps65910->irq_num,
231 pdata->irq_base,
232 0,
233 &tps65910_domain_ops, tps65910);
234 else
235 tps65910->domain = irq_domain_add_linear(tps65910->dev->of_node,
236 tps65910->irq_num,
237 &tps65910_domain_ops, tps65910);
238
239 if (!tps65910->domain) {
240 dev_err(tps65910->dev, "Failed to create IRQ domain\n");
241 return -ENOMEM;
214 } 242 }
215 243
216 ret = request_threaded_irq(irq, NULL, tps65910_irq, flags, 244 ret = request_threaded_irq(irq, NULL, tps65910_irq, flags,
diff --git a/drivers/mfd/tps65910.c b/drivers/mfd/tps65910.c
index bf2b25ebf2ca..be9e07b77325 100644
--- a/drivers/mfd/tps65910.c
+++ b/drivers/mfd/tps65910.c
@@ -19,13 +19,16 @@
19#include <linux/err.h> 19#include <linux/err.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/i2c.h> 21#include <linux/i2c.h>
22#include <linux/gpio.h>
23#include <linux/mfd/core.h> 22#include <linux/mfd/core.h>
24#include <linux/regmap.h> 23#include <linux/regmap.h>
25#include <linux/mfd/tps65910.h> 24#include <linux/mfd/tps65910.h>
25#include <linux/of_device.h>
26 26
27static struct mfd_cell tps65910s[] = { 27static struct mfd_cell tps65910s[] = {
28 { 28 {
29 .name = "tps65910-gpio",
30 },
31 {
29 .name = "tps65910-pmic", 32 .name = "tps65910-pmic",
30 }, 33 },
31 { 34 {
@@ -37,30 +40,6 @@ static struct mfd_cell tps65910s[] = {
37}; 40};
38 41
39 42
40static int tps65910_i2c_read(struct tps65910 *tps65910, u8 reg,
41 int bytes, void *dest)
42{
43 return regmap_bulk_read(tps65910->regmap, reg, dest, bytes);
44}
45
46static int tps65910_i2c_write(struct tps65910 *tps65910, u8 reg,
47 int bytes, void *src)
48{
49 return regmap_bulk_write(tps65910->regmap, reg, src, bytes);
50}
51
52int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
53{
54 return regmap_update_bits(tps65910->regmap, reg, mask, mask);
55}
56EXPORT_SYMBOL_GPL(tps65910_set_bits);
57
58int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask)
59{
60 return regmap_update_bits(tps65910->regmap, reg, mask, 0);
61}
62EXPORT_SYMBOL_GPL(tps65910_clear_bits);
63
64static bool is_volatile_reg(struct device *dev, unsigned int reg) 43static bool is_volatile_reg(struct device *dev, unsigned int reg)
65{ 44{
66 struct tps65910 *tps65910 = dev_get_drvdata(dev); 45 struct tps65910 *tps65910 = dev_get_drvdata(dev);
@@ -85,80 +64,197 @@ static const struct regmap_config tps65910_regmap_config = {
85 .reg_bits = 8, 64 .reg_bits = 8,
86 .val_bits = 8, 65 .val_bits = 8,
87 .volatile_reg = is_volatile_reg, 66 .volatile_reg = is_volatile_reg,
88 .max_register = TPS65910_MAX_REGISTER, 67 .max_register = TPS65910_MAX_REGISTER - 1,
89 .num_reg_defaults_raw = TPS65910_MAX_REGISTER,
90 .cache_type = REGCACHE_RBTREE, 68 .cache_type = REGCACHE_RBTREE,
91}; 69};
92 70
93static int tps65910_i2c_probe(struct i2c_client *i2c, 71static int __devinit tps65910_sleepinit(struct tps65910 *tps65910,
94 const struct i2c_device_id *id) 72 struct tps65910_board *pmic_pdata)
73{
74 struct device *dev = NULL;
75 int ret = 0;
76
77 dev = tps65910->dev;
78
79 if (!pmic_pdata->en_dev_slp)
80 return 0;
81
82 /* enabling SLEEP device state */
83 ret = tps65910_reg_set_bits(tps65910, TPS65910_DEVCTRL,
84 DEVCTRL_DEV_SLP_MASK);
85 if (ret < 0) {
86 dev_err(dev, "set dev_slp failed: %d\n", ret);
87 goto err_sleep_init;
88 }
89
90 /* Return if there is no sleep keepon data. */
91 if (!pmic_pdata->slp_keepon)
92 return 0;
93
94 if (pmic_pdata->slp_keepon->therm_keepon) {
95 ret = tps65910_reg_set_bits(tps65910,
96 TPS65910_SLEEP_KEEP_RES_ON,
97 SLEEP_KEEP_RES_ON_THERM_KEEPON_MASK);
98 if (ret < 0) {
99 dev_err(dev, "set therm_keepon failed: %d\n", ret);
100 goto disable_dev_slp;
101 }
102 }
103
104 if (pmic_pdata->slp_keepon->clkout32k_keepon) {
105 ret = tps65910_reg_set_bits(tps65910,
106 TPS65910_SLEEP_KEEP_RES_ON,
107 SLEEP_KEEP_RES_ON_CLKOUT32K_KEEPON_MASK);
108 if (ret < 0) {
109 dev_err(dev, "set clkout32k_keepon failed: %d\n", ret);
110 goto disable_dev_slp;
111 }
112 }
113
114 if (pmic_pdata->slp_keepon->i2chs_keepon) {
115 ret = tps65910_reg_set_bits(tps65910,
116 TPS65910_SLEEP_KEEP_RES_ON,
117 SLEEP_KEEP_RES_ON_I2CHS_KEEPON_MASK);
118 if (ret < 0) {
119 dev_err(dev, "set i2chs_keepon failed: %d\n", ret);
120 goto disable_dev_slp;
121 }
122 }
123
124 return 0;
125
126disable_dev_slp:
127 tps65910_reg_clear_bits(tps65910, TPS65910_DEVCTRL,
128 DEVCTRL_DEV_SLP_MASK);
129
130err_sleep_init:
131 return ret;
132}
133
134#ifdef CONFIG_OF
135static struct of_device_id tps65910_of_match[] = {
136 { .compatible = "ti,tps65910", .data = (void *)TPS65910},
137 { .compatible = "ti,tps65911", .data = (void *)TPS65911},
138 { },
139};
140MODULE_DEVICE_TABLE(of, tps65910_of_match);
141
142static struct tps65910_board *tps65910_parse_dt(struct i2c_client *client,
143 int *chip_id)
144{
145 struct device_node *np = client->dev.of_node;
146 struct tps65910_board *board_info;
147 unsigned int prop;
148 const struct of_device_id *match;
149 int ret = 0;
150
151 match = of_match_device(tps65910_of_match, &client->dev);
152 if (!match) {
153 dev_err(&client->dev, "Failed to find matching dt id\n");
154 return NULL;
155 }
156
157 *chip_id = (int)match->data;
158
159 board_info = devm_kzalloc(&client->dev, sizeof(*board_info),
160 GFP_KERNEL);
161 if (!board_info) {
162 dev_err(&client->dev, "Failed to allocate pdata\n");
163 return NULL;
164 }
165
166 ret = of_property_read_u32(np, "ti,vmbch-threshold", &prop);
167 if (!ret)
168 board_info->vmbch_threshold = prop;
169 else if (*chip_id == TPS65911)
170 dev_warn(&client->dev, "VMBCH-Threshold not specified");
171
172 ret = of_property_read_u32(np, "ti,vmbch2-threshold", &prop);
173 if (!ret)
174 board_info->vmbch2_threshold = prop;
175 else if (*chip_id == TPS65911)
176 dev_warn(&client->dev, "VMBCH2-Threshold not specified");
177
178 board_info->irq = client->irq;
179 board_info->irq_base = -1;
180
181 return board_info;
182}
183#else
184static inline
185struct tps65910_board *tps65910_parse_dt(struct i2c_client *client,
186 int *chip_id)
187{
188 return NULL;
189}
190#endif
191
192static __devinit int tps65910_i2c_probe(struct i2c_client *i2c,
193 const struct i2c_device_id *id)
95{ 194{
96 struct tps65910 *tps65910; 195 struct tps65910 *tps65910;
97 struct tps65910_board *pmic_plat_data; 196 struct tps65910_board *pmic_plat_data;
197 struct tps65910_board *of_pmic_plat_data = NULL;
98 struct tps65910_platform_data *init_data; 198 struct tps65910_platform_data *init_data;
99 int ret = 0; 199 int ret = 0;
200 int chip_id = id->driver_data;
100 201
101 pmic_plat_data = dev_get_platdata(&i2c->dev); 202 pmic_plat_data = dev_get_platdata(&i2c->dev);
203
204 if (!pmic_plat_data && i2c->dev.of_node) {
205 pmic_plat_data = tps65910_parse_dt(i2c, &chip_id);
206 of_pmic_plat_data = pmic_plat_data;
207 }
208
102 if (!pmic_plat_data) 209 if (!pmic_plat_data)
103 return -EINVAL; 210 return -EINVAL;
104 211
105 init_data = kzalloc(sizeof(struct tps65910_platform_data), GFP_KERNEL); 212 init_data = devm_kzalloc(&i2c->dev, sizeof(*init_data), GFP_KERNEL);
106 if (init_data == NULL) 213 if (init_data == NULL)
107 return -ENOMEM; 214 return -ENOMEM;
108 215
109 tps65910 = kzalloc(sizeof(struct tps65910), GFP_KERNEL); 216 tps65910 = devm_kzalloc(&i2c->dev, sizeof(*tps65910), GFP_KERNEL);
110 if (tps65910 == NULL) { 217 if (tps65910 == NULL)
111 kfree(init_data);
112 return -ENOMEM; 218 return -ENOMEM;
113 }
114 219
220 tps65910->of_plat_data = of_pmic_plat_data;
115 i2c_set_clientdata(i2c, tps65910); 221 i2c_set_clientdata(i2c, tps65910);
116 tps65910->dev = &i2c->dev; 222 tps65910->dev = &i2c->dev;
117 tps65910->i2c_client = i2c; 223 tps65910->i2c_client = i2c;
118 tps65910->id = id->driver_data; 224 tps65910->id = chip_id;
119 tps65910->read = tps65910_i2c_read;
120 tps65910->write = tps65910_i2c_write;
121 mutex_init(&tps65910->io_mutex); 225 mutex_init(&tps65910->io_mutex);
122 226
123 tps65910->regmap = regmap_init_i2c(i2c, &tps65910_regmap_config); 227 tps65910->regmap = devm_regmap_init_i2c(i2c, &tps65910_regmap_config);
124 if (IS_ERR(tps65910->regmap)) { 228 if (IS_ERR(tps65910->regmap)) {
125 ret = PTR_ERR(tps65910->regmap); 229 ret = PTR_ERR(tps65910->regmap);
126 dev_err(&i2c->dev, "regmap initialization failed: %d\n", ret); 230 dev_err(&i2c->dev, "regmap initialization failed: %d\n", ret);
127 goto regmap_err; 231 return ret;
128 } 232 }
129 233
130 ret = mfd_add_devices(tps65910->dev, -1, 234 ret = mfd_add_devices(tps65910->dev, -1,
131 tps65910s, ARRAY_SIZE(tps65910s), 235 tps65910s, ARRAY_SIZE(tps65910s),
132 NULL, 0); 236 NULL, 0);
133 if (ret < 0) 237 if (ret < 0) {
134 goto err; 238 dev_err(&i2c->dev, "mfd_add_devices failed: %d\n", ret);
239 return ret;
240 }
135 241
136 init_data->irq = pmic_plat_data->irq; 242 init_data->irq = pmic_plat_data->irq;
137 init_data->irq_base = pmic_plat_data->irq_base; 243 init_data->irq_base = pmic_plat_data->irq_base;
138 244
139 tps65910_gpio_init(tps65910, pmic_plat_data->gpio_base);
140
141 tps65910_irq_init(tps65910, init_data->irq, init_data); 245 tps65910_irq_init(tps65910, init_data->irq, init_data);
142 246
143 kfree(init_data); 247 tps65910_sleepinit(tps65910, pmic_plat_data);
144 return ret;
145 248
146err:
147 regmap_exit(tps65910->regmap);
148regmap_err:
149 kfree(tps65910);
150 kfree(init_data);
151 return ret; 249 return ret;
152} 250}
153 251
154static int tps65910_i2c_remove(struct i2c_client *i2c) 252static __devexit int tps65910_i2c_remove(struct i2c_client *i2c)
155{ 253{
156 struct tps65910 *tps65910 = i2c_get_clientdata(i2c); 254 struct tps65910 *tps65910 = i2c_get_clientdata(i2c);
157 255
158 tps65910_irq_exit(tps65910); 256 tps65910_irq_exit(tps65910);
159 mfd_remove_devices(tps65910->dev); 257 mfd_remove_devices(tps65910->dev);
160 regmap_exit(tps65910->regmap);
161 kfree(tps65910);
162 258
163 return 0; 259 return 0;
164} 260}
@@ -175,9 +271,10 @@ static struct i2c_driver tps65910_i2c_driver = {
175 .driver = { 271 .driver = {
176 .name = "tps65910", 272 .name = "tps65910",
177 .owner = THIS_MODULE, 273 .owner = THIS_MODULE,
274 .of_match_table = of_match_ptr(tps65910_of_match),
178 }, 275 },
179 .probe = tps65910_i2c_probe, 276 .probe = tps65910_i2c_probe,
180 .remove = tps65910_i2c_remove, 277 .remove = __devexit_p(tps65910_i2c_remove),
181 .id_table = tps65910_i2c_id, 278 .id_table = tps65910_i2c_id,
182}; 279};
183 280
diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
index 5d656e814358..ad733d76207a 100644
--- a/drivers/mfd/twl4030-irq.c
+++ b/drivers/mfd/twl4030-irq.c
@@ -757,6 +757,7 @@ int twl4030_init_irq(struct device *dev, int irq_num)
757 dev_err(dev, "could not claim irq%d: %d\n", irq_num, status); 757 dev_err(dev, "could not claim irq%d: %d\n", irq_num, status);
758 goto fail_rqirq; 758 goto fail_rqirq;
759 } 759 }
760 enable_irq_wake(irq_num);
760 761
761 return irq_base; 762 return irq_base;
762fail_rqirq: 763fail_rqirq:
diff --git a/drivers/mfd/twl6040-core.c b/drivers/mfd/twl6040-core.c
index 2d6bedadca09..4ded9e7aa246 100644
--- a/drivers/mfd/twl6040-core.c
+++ b/drivers/mfd/twl6040-core.c
@@ -27,7 +27,12 @@
27#include <linux/types.h> 27#include <linux/types.h>
28#include <linux/slab.h> 28#include <linux/slab.h>
29#include <linux/kernel.h> 29#include <linux/kernel.h>
30#include <linux/err.h>
30#include <linux/platform_device.h> 31#include <linux/platform_device.h>
32#include <linux/of.h>
33#include <linux/of_irq.h>
34#include <linux/of_gpio.h>
35#include <linux/of_platform.h>
31#include <linux/gpio.h> 36#include <linux/gpio.h>
32#include <linux/delay.h> 37#include <linux/delay.h>
33#include <linux/i2c.h> 38#include <linux/i2c.h>
@@ -35,8 +40,24 @@
35#include <linux/err.h> 40#include <linux/err.h>
36#include <linux/mfd/core.h> 41#include <linux/mfd/core.h>
37#include <linux/mfd/twl6040.h> 42#include <linux/mfd/twl6040.h>
43#include <linux/regulator/consumer.h>
38 44
39#define VIBRACTRL_MEMBER(reg) ((reg == TWL6040_REG_VIBCTLL) ? 0 : 1) 45#define VIBRACTRL_MEMBER(reg) ((reg == TWL6040_REG_VIBCTLL) ? 0 : 1)
46#define TWL6040_NUM_SUPPLIES (2)
47
48static bool twl6040_has_vibra(struct twl6040_platform_data *pdata,
49 struct device_node *node)
50{
51 if (pdata && pdata->vibra)
52 return true;
53
54#ifdef CONFIG_OF
55 if (of_find_node_by_name(node, "vibra"))
56 return true;
57#endif
58
59 return false;
60}
40 61
41int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg) 62int twl6040_reg_read(struct twl6040 *twl6040, unsigned int reg)
42{ 63{
@@ -502,17 +523,18 @@ static int __devinit twl6040_probe(struct i2c_client *client,
502 const struct i2c_device_id *id) 523 const struct i2c_device_id *id)
503{ 524{
504 struct twl6040_platform_data *pdata = client->dev.platform_data; 525 struct twl6040_platform_data *pdata = client->dev.platform_data;
526 struct device_node *node = client->dev.of_node;
505 struct twl6040 *twl6040; 527 struct twl6040 *twl6040;
506 struct mfd_cell *cell = NULL; 528 struct mfd_cell *cell = NULL;
507 int ret, children = 0; 529 int irq, ret, children = 0;
508 530
509 if (!pdata) { 531 if (!pdata && !node) {
510 dev_err(&client->dev, "Platform data is missing\n"); 532 dev_err(&client->dev, "Platform data is missing\n");
511 return -EINVAL; 533 return -EINVAL;
512 } 534 }
513 535
514 /* In order to operate correctly we need valid interrupt config */ 536 /* In order to operate correctly we need valid interrupt config */
515 if (!client->irq || !pdata->irq_base) { 537 if (!client->irq) {
516 dev_err(&client->dev, "Invalid IRQ configuration\n"); 538 dev_err(&client->dev, "Invalid IRQ configuration\n");
517 return -EINVAL; 539 return -EINVAL;
518 } 540 }
@@ -524,7 +546,7 @@ static int __devinit twl6040_probe(struct i2c_client *client,
524 goto err; 546 goto err;
525 } 547 }
526 548
527 twl6040->regmap = regmap_init_i2c(client, &twl6040_regmap_config); 549 twl6040->regmap = devm_regmap_init_i2c(client, &twl6040_regmap_config);
528 if (IS_ERR(twl6040->regmap)) { 550 if (IS_ERR(twl6040->regmap)) {
529 ret = PTR_ERR(twl6040->regmap); 551 ret = PTR_ERR(twl6040->regmap);
530 goto err; 552 goto err;
@@ -532,9 +554,23 @@ static int __devinit twl6040_probe(struct i2c_client *client,
532 554
533 i2c_set_clientdata(client, twl6040); 555 i2c_set_clientdata(client, twl6040);
534 556
557 twl6040->supplies[0].supply = "vio";
558 twl6040->supplies[1].supply = "v2v1";
559 ret = regulator_bulk_get(&client->dev, TWL6040_NUM_SUPPLIES,
560 twl6040->supplies);
561 if (ret != 0) {
562 dev_err(&client->dev, "Failed to get supplies: %d\n", ret);
563 goto regulator_get_err;
564 }
565
566 ret = regulator_bulk_enable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
567 if (ret != 0) {
568 dev_err(&client->dev, "Failed to enable supplies: %d\n", ret);
569 goto power_err;
570 }
571
535 twl6040->dev = &client->dev; 572 twl6040->dev = &client->dev;
536 twl6040->irq = client->irq; 573 twl6040->irq = client->irq;
537 twl6040->irq_base = pdata->irq_base;
538 574
539 mutex_init(&twl6040->mutex); 575 mutex_init(&twl6040->mutex);
540 mutex_init(&twl6040->io_mutex); 576 mutex_init(&twl6040->io_mutex);
@@ -543,22 +579,26 @@ static int __devinit twl6040_probe(struct i2c_client *client,
543 twl6040->rev = twl6040_reg_read(twl6040, TWL6040_REG_ASICREV); 579 twl6040->rev = twl6040_reg_read(twl6040, TWL6040_REG_ASICREV);
544 580
545 /* ERRATA: Automatic power-up is not possible in ES1.0 */ 581 /* ERRATA: Automatic power-up is not possible in ES1.0 */
546 if (twl6040_get_revid(twl6040) > TWL6040_REV_ES1_0) 582 if (twl6040_get_revid(twl6040) > TWL6040_REV_ES1_0) {
547 twl6040->audpwron = pdata->audpwron_gpio; 583 if (pdata)
548 else 584 twl6040->audpwron = pdata->audpwron_gpio;
585 else
586 twl6040->audpwron = of_get_named_gpio(node,
587 "ti,audpwron-gpio", 0);
588 } else
549 twl6040->audpwron = -EINVAL; 589 twl6040->audpwron = -EINVAL;
550 590
551 if (gpio_is_valid(twl6040->audpwron)) { 591 if (gpio_is_valid(twl6040->audpwron)) {
552 ret = gpio_request_one(twl6040->audpwron, GPIOF_OUT_INIT_LOW, 592 ret = gpio_request_one(twl6040->audpwron, GPIOF_OUT_INIT_LOW,
553 "audpwron"); 593 "audpwron");
554 if (ret) 594 if (ret)
555 goto gpio1_err; 595 goto gpio_err;
556 } 596 }
557 597
558 /* codec interrupt */ 598 /* codec interrupt */
559 ret = twl6040_irq_init(twl6040); 599 ret = twl6040_irq_init(twl6040);
560 if (ret) 600 if (ret)
561 goto gpio2_err; 601 goto irq_init_err;
562 602
563 ret = request_threaded_irq(twl6040->irq_base + TWL6040_IRQ_READY, 603 ret = request_threaded_irq(twl6040->irq_base + TWL6040_IRQ_READY,
564 NULL, twl6040_naudint_handler, 0, 604 NULL, twl6040_naudint_handler, 0,
@@ -572,22 +612,27 @@ static int __devinit twl6040_probe(struct i2c_client *client,
572 /* dual-access registers controlled by I2C only */ 612 /* dual-access registers controlled by I2C only */
573 twl6040_set_bits(twl6040, TWL6040_REG_ACCCTL, TWL6040_I2CSEL); 613 twl6040_set_bits(twl6040, TWL6040_REG_ACCCTL, TWL6040_I2CSEL);
574 614
575 if (pdata->codec) { 615 /*
576 int irq = twl6040->irq_base + TWL6040_IRQ_PLUG; 616 * The main functionality of twl6040 to provide audio on OMAP4+ systems.
577 617 * We can add the ASoC codec child whenever this driver has been loaded.
578 cell = &twl6040->cells[children]; 618 * The ASoC codec can work without pdata, pass the platform_data only if
579 cell->name = "twl6040-codec"; 619 * it has been provided.
580 twl6040_codec_rsrc[0].start = irq; 620 */
581 twl6040_codec_rsrc[0].end = irq; 621 irq = twl6040->irq_base + TWL6040_IRQ_PLUG;
582 cell->resources = twl6040_codec_rsrc; 622 cell = &twl6040->cells[children];
583 cell->num_resources = ARRAY_SIZE(twl6040_codec_rsrc); 623 cell->name = "twl6040-codec";
624 twl6040_codec_rsrc[0].start = irq;
625 twl6040_codec_rsrc[0].end = irq;
626 cell->resources = twl6040_codec_rsrc;
627 cell->num_resources = ARRAY_SIZE(twl6040_codec_rsrc);
628 if (pdata && pdata->codec) {
584 cell->platform_data = pdata->codec; 629 cell->platform_data = pdata->codec;
585 cell->pdata_size = sizeof(*pdata->codec); 630 cell->pdata_size = sizeof(*pdata->codec);
586 children++;
587 } 631 }
632 children++;
588 633
589 if (pdata->vibra) { 634 if (twl6040_has_vibra(pdata, node)) {
590 int irq = twl6040->irq_base + TWL6040_IRQ_VIB; 635 irq = twl6040->irq_base + TWL6040_IRQ_VIB;
591 636
592 cell = &twl6040->cells[children]; 637 cell = &twl6040->cells[children];
593 cell->name = "twl6040-vibra"; 638 cell->name = "twl6040-vibra";
@@ -596,21 +641,17 @@ static int __devinit twl6040_probe(struct i2c_client *client,
596 cell->resources = twl6040_vibra_rsrc; 641 cell->resources = twl6040_vibra_rsrc;
597 cell->num_resources = ARRAY_SIZE(twl6040_vibra_rsrc); 642 cell->num_resources = ARRAY_SIZE(twl6040_vibra_rsrc);
598 643
599 cell->platform_data = pdata->vibra; 644 if (pdata && pdata->vibra) {
600 cell->pdata_size = sizeof(*pdata->vibra); 645 cell->platform_data = pdata->vibra;
646 cell->pdata_size = sizeof(*pdata->vibra);
647 }
601 children++; 648 children++;
602 } 649 }
603 650
604 if (children) { 651 ret = mfd_add_devices(&client->dev, -1, twl6040->cells, children,
605 ret = mfd_add_devices(&client->dev, -1, twl6040->cells, 652 NULL, 0);
606 children, NULL, 0); 653 if (ret)
607 if (ret)
608 goto mfd_err;
609 } else {
610 dev_err(&client->dev, "No platform data found for children\n");
611 ret = -ENODEV;
612 goto mfd_err; 654 goto mfd_err;
613 }
614 655
615 return 0; 656 return 0;
616 657
@@ -618,12 +659,15 @@ mfd_err:
618 free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040); 659 free_irq(twl6040->irq_base + TWL6040_IRQ_READY, twl6040);
619irq_err: 660irq_err:
620 twl6040_irq_exit(twl6040); 661 twl6040_irq_exit(twl6040);
621gpio2_err: 662irq_init_err:
622 if (gpio_is_valid(twl6040->audpwron)) 663 if (gpio_is_valid(twl6040->audpwron))
623 gpio_free(twl6040->audpwron); 664 gpio_free(twl6040->audpwron);
624gpio1_err: 665gpio_err:
666 regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
667power_err:
668 regulator_bulk_free(TWL6040_NUM_SUPPLIES, twl6040->supplies);
669regulator_get_err:
625 i2c_set_clientdata(client, NULL); 670 i2c_set_clientdata(client, NULL);
626 regmap_exit(twl6040->regmap);
627err: 671err:
628 return ret; 672 return ret;
629} 673}
@@ -643,7 +687,9 @@ static int __devexit twl6040_remove(struct i2c_client *client)
643 687
644 mfd_remove_devices(&client->dev); 688 mfd_remove_devices(&client->dev);
645 i2c_set_clientdata(client, NULL); 689 i2c_set_clientdata(client, NULL);
646 regmap_exit(twl6040->regmap); 690
691 regulator_bulk_disable(TWL6040_NUM_SUPPLIES, twl6040->supplies);
692 regulator_bulk_free(TWL6040_NUM_SUPPLIES, twl6040->supplies);
647 693
648 return 0; 694 return 0;
649} 695}
diff --git a/drivers/mfd/twl6040-irq.c b/drivers/mfd/twl6040-irq.c
index b3f8ddaa28a8..4b42543da228 100644
--- a/drivers/mfd/twl6040-irq.c
+++ b/drivers/mfd/twl6040-irq.c
@@ -23,7 +23,10 @@
23 23
24#include <linux/kernel.h> 24#include <linux/kernel.h>
25#include <linux/module.h> 25#include <linux/module.h>
26#include <linux/err.h>
26#include <linux/irq.h> 27#include <linux/irq.h>
28#include <linux/of.h>
29#include <linux/irqdomain.h>
27#include <linux/interrupt.h> 30#include <linux/interrupt.h>
28#include <linux/mfd/core.h> 31#include <linux/mfd/core.h>
29#include <linux/mfd/twl6040.h> 32#include <linux/mfd/twl6040.h>
@@ -138,7 +141,8 @@ static irqreturn_t twl6040_irq_thread(int irq, void *data)
138 141
139int twl6040_irq_init(struct twl6040 *twl6040) 142int twl6040_irq_init(struct twl6040 *twl6040)
140{ 143{
141 int cur_irq, ret; 144 struct device_node *node = twl6040->dev->of_node;
145 int i, nr_irqs, irq_base, ret;
142 u8 val; 146 u8 val;
143 147
144 mutex_init(&twl6040->irq_mutex); 148 mutex_init(&twl6040->irq_mutex);
@@ -148,21 +152,31 @@ int twl6040_irq_init(struct twl6040 *twl6040)
148 twl6040->irq_masks_cache = TWL6040_ALLINT_MSK; 152 twl6040->irq_masks_cache = TWL6040_ALLINT_MSK;
149 twl6040_reg_write(twl6040, TWL6040_REG_INTMR, TWL6040_ALLINT_MSK); 153 twl6040_reg_write(twl6040, TWL6040_REG_INTMR, TWL6040_ALLINT_MSK);
150 154
155 nr_irqs = ARRAY_SIZE(twl6040_irqs);
156
157 irq_base = irq_alloc_descs(-1, 0, nr_irqs, 0);
158 if (IS_ERR_VALUE(irq_base)) {
159 dev_err(twl6040->dev, "Fail to allocate IRQ descs\n");
160 return irq_base;
161 }
162 twl6040->irq_base = irq_base;
163
164 irq_domain_add_legacy(node, ARRAY_SIZE(twl6040_irqs), irq_base, 0,
165 &irq_domain_simple_ops, NULL);
166
151 /* Register them with genirq */ 167 /* Register them with genirq */
152 for (cur_irq = twl6040->irq_base; 168 for (i = irq_base; i < irq_base + nr_irqs; i++) {
153 cur_irq < twl6040->irq_base + ARRAY_SIZE(twl6040_irqs); 169 irq_set_chip_data(i, twl6040);
154 cur_irq++) { 170 irq_set_chip_and_handler(i, &twl6040_irq_chip,
155 irq_set_chip_data(cur_irq, twl6040);
156 irq_set_chip_and_handler(cur_irq, &twl6040_irq_chip,
157 handle_level_irq); 171 handle_level_irq);
158 irq_set_nested_thread(cur_irq, 1); 172 irq_set_nested_thread(i, 1);
159 173
160 /* ARM needs us to explicitly flag the IRQ as valid 174 /* ARM needs us to explicitly flag the IRQ as valid
161 * and will set them noprobe when we do so. */ 175 * and will set them noprobe when we do so. */
162#ifdef CONFIG_ARM 176#ifdef CONFIG_ARM
163 set_irq_flags(cur_irq, IRQF_VALID); 177 set_irq_flags(i, IRQF_VALID);
164#else 178#else
165 irq_set_noprobe(cur_irq); 179 irq_set_noprobe(i);
166#endif 180#endif
167 } 181 }
168 182
diff --git a/drivers/mfd/vx855.c b/drivers/mfd/vx855.c
index b73cc15e0081..872aff21e4be 100644
--- a/drivers/mfd/vx855.c
+++ b/drivers/mfd/vx855.c
@@ -131,17 +131,7 @@ static struct pci_driver vx855_pci_driver = {
131 .remove = __devexit_p(vx855_remove), 131 .remove = __devexit_p(vx855_remove),
132}; 132};
133 133
134static int vx855_init(void) 134module_pci_driver(vx855_pci_driver);
135{
136 return pci_register_driver(&vx855_pci_driver);
137}
138module_init(vx855_init);
139
140static void vx855_exit(void)
141{
142 pci_unregister_driver(&vx855_pci_driver);
143}
144module_exit(vx855_exit);
145 135
146MODULE_LICENSE("GPL"); 136MODULE_LICENSE("GPL");
147MODULE_AUTHOR("Harald Welte <HaraldWelte@viatech.com>"); 137MODULE_AUTHOR("Harald Welte <HaraldWelte@viatech.com>");
diff --git a/drivers/mfd/wm831x-auxadc.c b/drivers/mfd/wm831x-auxadc.c
index 87210954a066..6ee3018d8653 100644
--- a/drivers/mfd/wm831x-auxadc.c
+++ b/drivers/mfd/wm831x-auxadc.c
@@ -280,11 +280,11 @@ void wm831x_auxadc_init(struct wm831x *wm831x)
280 mutex_init(&wm831x->auxadc_lock); 280 mutex_init(&wm831x->auxadc_lock);
281 INIT_LIST_HEAD(&wm831x->auxadc_pending); 281 INIT_LIST_HEAD(&wm831x->auxadc_pending);
282 282
283 if (wm831x->irq && wm831x->irq_base) { 283 if (wm831x->irq) {
284 wm831x->auxadc_read = wm831x_auxadc_read_irq; 284 wm831x->auxadc_read = wm831x_auxadc_read_irq;
285 285
286 ret = request_threaded_irq(wm831x->irq_base + 286 ret = request_threaded_irq(wm831x_irq(wm831x,
287 WM831X_IRQ_AUXADC_DATA, 287 WM831X_IRQ_AUXADC_DATA),
288 NULL, wm831x_auxadc_irq, 0, 288 NULL, wm831x_auxadc_irq, 0,
289 "auxadc", wm831x); 289 "auxadc", wm831x);
290 if (ret < 0) { 290 if (ret < 0) {
diff --git a/drivers/mfd/wm831x-core.c b/drivers/mfd/wm831x-core.c
index 838056c3493a..946698fd2dc6 100644
--- a/drivers/mfd/wm831x-core.c
+++ b/drivers/mfd/wm831x-core.c
@@ -614,8 +614,15 @@ int wm831x_set_bits(struct wm831x *wm831x, unsigned short reg,
614} 614}
615EXPORT_SYMBOL_GPL(wm831x_set_bits); 615EXPORT_SYMBOL_GPL(wm831x_set_bits);
616 616
617static struct resource wm831x_io_parent = {
618 .start = 0,
619 .end = 0xffffffff,
620 .flags = IORESOURCE_IO,
621};
622
617static struct resource wm831x_dcdc1_resources[] = { 623static struct resource wm831x_dcdc1_resources[] = {
618 { 624 {
625 .parent = &wm831x_io_parent,
619 .start = WM831X_DC1_CONTROL_1, 626 .start = WM831X_DC1_CONTROL_1,
620 .end = WM831X_DC1_DVS_CONTROL, 627 .end = WM831X_DC1_DVS_CONTROL,
621 .flags = IORESOURCE_IO, 628 .flags = IORESOURCE_IO,
@@ -637,6 +644,7 @@ static struct resource wm831x_dcdc1_resources[] = {
637 644
638static struct resource wm831x_dcdc2_resources[] = { 645static struct resource wm831x_dcdc2_resources[] = {
639 { 646 {
647 .parent = &wm831x_io_parent,
640 .start = WM831X_DC2_CONTROL_1, 648 .start = WM831X_DC2_CONTROL_1,
641 .end = WM831X_DC2_DVS_CONTROL, 649 .end = WM831X_DC2_DVS_CONTROL,
642 .flags = IORESOURCE_IO, 650 .flags = IORESOURCE_IO,
@@ -657,6 +665,7 @@ static struct resource wm831x_dcdc2_resources[] = {
657 665
658static struct resource wm831x_dcdc3_resources[] = { 666static struct resource wm831x_dcdc3_resources[] = {
659 { 667 {
668 .parent = &wm831x_io_parent,
660 .start = WM831X_DC3_CONTROL_1, 669 .start = WM831X_DC3_CONTROL_1,
661 .end = WM831X_DC3_SLEEP_CONTROL, 670 .end = WM831X_DC3_SLEEP_CONTROL,
662 .flags = IORESOURCE_IO, 671 .flags = IORESOURCE_IO,
@@ -671,6 +680,7 @@ static struct resource wm831x_dcdc3_resources[] = {
671 680
672static struct resource wm831x_dcdc4_resources[] = { 681static struct resource wm831x_dcdc4_resources[] = {
673 { 682 {
683 .parent = &wm831x_io_parent,
674 .start = WM831X_DC4_CONTROL, 684 .start = WM831X_DC4_CONTROL,
675 .end = WM831X_DC4_SLEEP_CONTROL, 685 .end = WM831X_DC4_SLEEP_CONTROL,
676 .flags = IORESOURCE_IO, 686 .flags = IORESOURCE_IO,
@@ -685,6 +695,7 @@ static struct resource wm831x_dcdc4_resources[] = {
685 695
686static struct resource wm8320_dcdc4_buck_resources[] = { 696static struct resource wm8320_dcdc4_buck_resources[] = {
687 { 697 {
698 .parent = &wm831x_io_parent,
688 .start = WM831X_DC4_CONTROL, 699 .start = WM831X_DC4_CONTROL,
689 .end = WM832X_DC4_SLEEP_CONTROL, 700 .end = WM832X_DC4_SLEEP_CONTROL,
690 .flags = IORESOURCE_IO, 701 .flags = IORESOURCE_IO,
@@ -707,6 +718,7 @@ static struct resource wm831x_gpio_resources[] = {
707 718
708static struct resource wm831x_isink1_resources[] = { 719static struct resource wm831x_isink1_resources[] = {
709 { 720 {
721 .parent = &wm831x_io_parent,
710 .start = WM831X_CURRENT_SINK_1, 722 .start = WM831X_CURRENT_SINK_1,
711 .end = WM831X_CURRENT_SINK_1, 723 .end = WM831X_CURRENT_SINK_1,
712 .flags = IORESOURCE_IO, 724 .flags = IORESOURCE_IO,
@@ -720,6 +732,7 @@ static struct resource wm831x_isink1_resources[] = {
720 732
721static struct resource wm831x_isink2_resources[] = { 733static struct resource wm831x_isink2_resources[] = {
722 { 734 {
735 .parent = &wm831x_io_parent,
723 .start = WM831X_CURRENT_SINK_2, 736 .start = WM831X_CURRENT_SINK_2,
724 .end = WM831X_CURRENT_SINK_2, 737 .end = WM831X_CURRENT_SINK_2,
725 .flags = IORESOURCE_IO, 738 .flags = IORESOURCE_IO,
@@ -733,6 +746,7 @@ static struct resource wm831x_isink2_resources[] = {
733 746
734static struct resource wm831x_ldo1_resources[] = { 747static struct resource wm831x_ldo1_resources[] = {
735 { 748 {
749 .parent = &wm831x_io_parent,
736 .start = WM831X_LDO1_CONTROL, 750 .start = WM831X_LDO1_CONTROL,
737 .end = WM831X_LDO1_SLEEP_CONTROL, 751 .end = WM831X_LDO1_SLEEP_CONTROL,
738 .flags = IORESOURCE_IO, 752 .flags = IORESOURCE_IO,
@@ -747,6 +761,7 @@ static struct resource wm831x_ldo1_resources[] = {
747 761
748static struct resource wm831x_ldo2_resources[] = { 762static struct resource wm831x_ldo2_resources[] = {
749 { 763 {
764 .parent = &wm831x_io_parent,
750 .start = WM831X_LDO2_CONTROL, 765 .start = WM831X_LDO2_CONTROL,
751 .end = WM831X_LDO2_SLEEP_CONTROL, 766 .end = WM831X_LDO2_SLEEP_CONTROL,
752 .flags = IORESOURCE_IO, 767 .flags = IORESOURCE_IO,
@@ -761,6 +776,7 @@ static struct resource wm831x_ldo2_resources[] = {
761 776
762static struct resource wm831x_ldo3_resources[] = { 777static struct resource wm831x_ldo3_resources[] = {
763 { 778 {
779 .parent = &wm831x_io_parent,
764 .start = WM831X_LDO3_CONTROL, 780 .start = WM831X_LDO3_CONTROL,
765 .end = WM831X_LDO3_SLEEP_CONTROL, 781 .end = WM831X_LDO3_SLEEP_CONTROL,
766 .flags = IORESOURCE_IO, 782 .flags = IORESOURCE_IO,
@@ -775,6 +791,7 @@ static struct resource wm831x_ldo3_resources[] = {
775 791
776static struct resource wm831x_ldo4_resources[] = { 792static struct resource wm831x_ldo4_resources[] = {
777 { 793 {
794 .parent = &wm831x_io_parent,
778 .start = WM831X_LDO4_CONTROL, 795 .start = WM831X_LDO4_CONTROL,
779 .end = WM831X_LDO4_SLEEP_CONTROL, 796 .end = WM831X_LDO4_SLEEP_CONTROL,
780 .flags = IORESOURCE_IO, 797 .flags = IORESOURCE_IO,
@@ -789,6 +806,7 @@ static struct resource wm831x_ldo4_resources[] = {
789 806
790static struct resource wm831x_ldo5_resources[] = { 807static struct resource wm831x_ldo5_resources[] = {
791 { 808 {
809 .parent = &wm831x_io_parent,
792 .start = WM831X_LDO5_CONTROL, 810 .start = WM831X_LDO5_CONTROL,
793 .end = WM831X_LDO5_SLEEP_CONTROL, 811 .end = WM831X_LDO5_SLEEP_CONTROL,
794 .flags = IORESOURCE_IO, 812 .flags = IORESOURCE_IO,
@@ -803,6 +821,7 @@ static struct resource wm831x_ldo5_resources[] = {
803 821
804static struct resource wm831x_ldo6_resources[] = { 822static struct resource wm831x_ldo6_resources[] = {
805 { 823 {
824 .parent = &wm831x_io_parent,
806 .start = WM831X_LDO6_CONTROL, 825 .start = WM831X_LDO6_CONTROL,
807 .end = WM831X_LDO6_SLEEP_CONTROL, 826 .end = WM831X_LDO6_SLEEP_CONTROL,
808 .flags = IORESOURCE_IO, 827 .flags = IORESOURCE_IO,
@@ -817,6 +836,7 @@ static struct resource wm831x_ldo6_resources[] = {
817 836
818static struct resource wm831x_ldo7_resources[] = { 837static struct resource wm831x_ldo7_resources[] = {
819 { 838 {
839 .parent = &wm831x_io_parent,
820 .start = WM831X_LDO7_CONTROL, 840 .start = WM831X_LDO7_CONTROL,
821 .end = WM831X_LDO7_SLEEP_CONTROL, 841 .end = WM831X_LDO7_SLEEP_CONTROL,
822 .flags = IORESOURCE_IO, 842 .flags = IORESOURCE_IO,
@@ -831,6 +851,7 @@ static struct resource wm831x_ldo7_resources[] = {
831 851
832static struct resource wm831x_ldo8_resources[] = { 852static struct resource wm831x_ldo8_resources[] = {
833 { 853 {
854 .parent = &wm831x_io_parent,
834 .start = WM831X_LDO8_CONTROL, 855 .start = WM831X_LDO8_CONTROL,
835 .end = WM831X_LDO8_SLEEP_CONTROL, 856 .end = WM831X_LDO8_SLEEP_CONTROL,
836 .flags = IORESOURCE_IO, 857 .flags = IORESOURCE_IO,
@@ -845,6 +866,7 @@ static struct resource wm831x_ldo8_resources[] = {
845 866
846static struct resource wm831x_ldo9_resources[] = { 867static struct resource wm831x_ldo9_resources[] = {
847 { 868 {
869 .parent = &wm831x_io_parent,
848 .start = WM831X_LDO9_CONTROL, 870 .start = WM831X_LDO9_CONTROL,
849 .end = WM831X_LDO9_SLEEP_CONTROL, 871 .end = WM831X_LDO9_SLEEP_CONTROL,
850 .flags = IORESOURCE_IO, 872 .flags = IORESOURCE_IO,
@@ -859,6 +881,7 @@ static struct resource wm831x_ldo9_resources[] = {
859 881
860static struct resource wm831x_ldo10_resources[] = { 882static struct resource wm831x_ldo10_resources[] = {
861 { 883 {
884 .parent = &wm831x_io_parent,
862 .start = WM831X_LDO10_CONTROL, 885 .start = WM831X_LDO10_CONTROL,
863 .end = WM831X_LDO10_SLEEP_CONTROL, 886 .end = WM831X_LDO10_SLEEP_CONTROL,
864 .flags = IORESOURCE_IO, 887 .flags = IORESOURCE_IO,
@@ -873,6 +896,7 @@ static struct resource wm831x_ldo10_resources[] = {
873 896
874static struct resource wm831x_ldo11_resources[] = { 897static struct resource wm831x_ldo11_resources[] = {
875 { 898 {
899 .parent = &wm831x_io_parent,
876 .start = WM831X_LDO11_ON_CONTROL, 900 .start = WM831X_LDO11_ON_CONTROL,
877 .end = WM831X_LDO11_SLEEP_CONTROL, 901 .end = WM831X_LDO11_SLEEP_CONTROL,
878 .flags = IORESOURCE_IO, 902 .flags = IORESOURCE_IO,
@@ -974,6 +998,7 @@ static struct resource wm831x_rtc_resources[] = {
974 998
975static struct resource wm831x_status1_resources[] = { 999static struct resource wm831x_status1_resources[] = {
976 { 1000 {
1001 .parent = &wm831x_io_parent,
977 .start = WM831X_STATUS_LED_1, 1002 .start = WM831X_STATUS_LED_1,
978 .end = WM831X_STATUS_LED_1, 1003 .end = WM831X_STATUS_LED_1,
979 .flags = IORESOURCE_IO, 1004 .flags = IORESOURCE_IO,
@@ -982,6 +1007,7 @@ static struct resource wm831x_status1_resources[] = {
982 1007
983static struct resource wm831x_status2_resources[] = { 1008static struct resource wm831x_status2_resources[] = {
984 { 1009 {
1010 .parent = &wm831x_io_parent,
985 .start = WM831X_STATUS_LED_2, 1011 .start = WM831X_STATUS_LED_2,
986 .end = WM831X_STATUS_LED_2, 1012 .end = WM831X_STATUS_LED_2,
987 .flags = IORESOURCE_IO, 1013 .flags = IORESOURCE_IO,
@@ -1787,27 +1813,27 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
1787 case WM8310: 1813 case WM8310:
1788 ret = mfd_add_devices(wm831x->dev, wm831x_num, 1814 ret = mfd_add_devices(wm831x->dev, wm831x_num,
1789 wm8310_devs, ARRAY_SIZE(wm8310_devs), 1815 wm8310_devs, ARRAY_SIZE(wm8310_devs),
1790 NULL, wm831x->irq_base); 1816 NULL, 0);
1791 break; 1817 break;
1792 1818
1793 case WM8311: 1819 case WM8311:
1794 ret = mfd_add_devices(wm831x->dev, wm831x_num, 1820 ret = mfd_add_devices(wm831x->dev, wm831x_num,
1795 wm8311_devs, ARRAY_SIZE(wm8311_devs), 1821 wm8311_devs, ARRAY_SIZE(wm8311_devs),
1796 NULL, wm831x->irq_base); 1822 NULL, 0);
1797 if (!pdata || !pdata->disable_touch) 1823 if (!pdata || !pdata->disable_touch)
1798 mfd_add_devices(wm831x->dev, wm831x_num, 1824 mfd_add_devices(wm831x->dev, wm831x_num,
1799 touch_devs, ARRAY_SIZE(touch_devs), 1825 touch_devs, ARRAY_SIZE(touch_devs),
1800 NULL, wm831x->irq_base); 1826 NULL, 0);
1801 break; 1827 break;
1802 1828
1803 case WM8312: 1829 case WM8312:
1804 ret = mfd_add_devices(wm831x->dev, wm831x_num, 1830 ret = mfd_add_devices(wm831x->dev, wm831x_num,
1805 wm8312_devs, ARRAY_SIZE(wm8312_devs), 1831 wm8312_devs, ARRAY_SIZE(wm8312_devs),
1806 NULL, wm831x->irq_base); 1832 NULL, 0);
1807 if (!pdata || !pdata->disable_touch) 1833 if (!pdata || !pdata->disable_touch)
1808 mfd_add_devices(wm831x->dev, wm831x_num, 1834 mfd_add_devices(wm831x->dev, wm831x_num,
1809 touch_devs, ARRAY_SIZE(touch_devs), 1835 touch_devs, ARRAY_SIZE(touch_devs),
1810 NULL, wm831x->irq_base); 1836 NULL, 0);
1811 break; 1837 break;
1812 1838
1813 case WM8320: 1839 case WM8320:
@@ -1816,7 +1842,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
1816 case WM8326: 1842 case WM8326:
1817 ret = mfd_add_devices(wm831x->dev, wm831x_num, 1843 ret = mfd_add_devices(wm831x->dev, wm831x_num,
1818 wm8320_devs, ARRAY_SIZE(wm8320_devs), 1844 wm8320_devs, ARRAY_SIZE(wm8320_devs),
1819 NULL, wm831x->irq_base); 1845 NULL, 0);
1820 break; 1846 break;
1821 1847
1822 default: 1848 default:
@@ -1841,7 +1867,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
1841 if (ret & WM831X_XTAL_ENA) { 1867 if (ret & WM831X_XTAL_ENA) {
1842 ret = mfd_add_devices(wm831x->dev, wm831x_num, 1868 ret = mfd_add_devices(wm831x->dev, wm831x_num,
1843 rtc_devs, ARRAY_SIZE(rtc_devs), 1869 rtc_devs, ARRAY_SIZE(rtc_devs),
1844 NULL, wm831x->irq_base); 1870 NULL, 0);
1845 if (ret != 0) { 1871 if (ret != 0) {
1846 dev_err(wm831x->dev, "Failed to add RTC: %d\n", ret); 1872 dev_err(wm831x->dev, "Failed to add RTC: %d\n", ret);
1847 goto err_irq; 1873 goto err_irq;
@@ -1854,7 +1880,7 @@ int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq)
1854 /* Treat errors as non-critical */ 1880 /* Treat errors as non-critical */
1855 ret = mfd_add_devices(wm831x->dev, wm831x_num, backlight_devs, 1881 ret = mfd_add_devices(wm831x->dev, wm831x_num, backlight_devs,
1856 ARRAY_SIZE(backlight_devs), NULL, 1882 ARRAY_SIZE(backlight_devs), NULL,
1857 wm831x->irq_base); 1883 0);
1858 if (ret < 0) 1884 if (ret < 0)
1859 dev_err(wm831x->dev, "Failed to add backlight: %d\n", 1885 dev_err(wm831x->dev, "Failed to add backlight: %d\n",
1860 ret); 1886 ret);
@@ -1883,8 +1909,7 @@ void wm831x_device_exit(struct wm831x *wm831x)
1883{ 1909{
1884 wm831x_otp_exit(wm831x); 1910 wm831x_otp_exit(wm831x);
1885 mfd_remove_devices(wm831x->dev); 1911 mfd_remove_devices(wm831x->dev);
1886 if (wm831x->irq_base) 1912 free_irq(wm831x_irq(wm831x, WM831X_IRQ_AUXADC_DATA), wm831x);
1887 free_irq(wm831x->irq_base + WM831X_IRQ_AUXADC_DATA, wm831x);
1888 wm831x_irq_exit(wm831x); 1913 wm831x_irq_exit(wm831x);
1889} 1914}
1890 1915
diff --git a/drivers/mfd/wm831x-irq.c b/drivers/mfd/wm831x-irq.c
index bec4d0539160..804e56ec99eb 100644
--- a/drivers/mfd/wm831x-irq.c
+++ b/drivers/mfd/wm831x-irq.c
@@ -18,6 +18,7 @@
18#include <linux/irq.h> 18#include <linux/irq.h>
19#include <linux/mfd/core.h> 19#include <linux/mfd/core.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/irqdomain.h>
21 22
22#include <linux/mfd/wm831x/core.h> 23#include <linux/mfd/wm831x/core.h>
23#include <linux/mfd/wm831x/pdata.h> 24#include <linux/mfd/wm831x/pdata.h>
@@ -328,7 +329,7 @@ static inline int irq_data_to_status_reg(struct wm831x_irq_data *irq_data)
328static inline struct wm831x_irq_data *irq_to_wm831x_irq(struct wm831x *wm831x, 329static inline struct wm831x_irq_data *irq_to_wm831x_irq(struct wm831x *wm831x,
329 int irq) 330 int irq)
330{ 331{
331 return &wm831x_irqs[irq - wm831x->irq_base]; 332 return &wm831x_irqs[irq];
332} 333}
333 334
334static void wm831x_irq_lock(struct irq_data *data) 335static void wm831x_irq_lock(struct irq_data *data)
@@ -374,7 +375,7 @@ static void wm831x_irq_enable(struct irq_data *data)
374{ 375{
375 struct wm831x *wm831x = irq_data_get_irq_chip_data(data); 376 struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
376 struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, 377 struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
377 data->irq); 378 data->hwirq);
378 379
379 wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask; 380 wm831x->irq_masks_cur[irq_data->reg - 1] &= ~irq_data->mask;
380} 381}
@@ -383,7 +384,7 @@ static void wm831x_irq_disable(struct irq_data *data)
383{ 384{
384 struct wm831x *wm831x = irq_data_get_irq_chip_data(data); 385 struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
385 struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x, 386 struct wm831x_irq_data *irq_data = irq_to_wm831x_irq(wm831x,
386 data->irq); 387 data->hwirq);
387 388
388 wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask; 389 wm831x->irq_masks_cur[irq_data->reg - 1] |= irq_data->mask;
389} 390}
@@ -393,7 +394,7 @@ static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
393 struct wm831x *wm831x = irq_data_get_irq_chip_data(data); 394 struct wm831x *wm831x = irq_data_get_irq_chip_data(data);
394 int irq; 395 int irq;
395 396
396 irq = data->irq - wm831x->irq_base; 397 irq = data->hwirq;
397 398
398 if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) { 399 if (irq < WM831X_IRQ_GPIO_1 || irq > WM831X_IRQ_GPIO_11) {
399 /* Ignore internal-only IRQs */ 400 /* Ignore internal-only IRQs */
@@ -412,22 +413,25 @@ static int wm831x_irq_set_type(struct irq_data *data, unsigned int type)
412 * do the update here as we can be called with the bus lock 413 * do the update here as we can be called with the bus lock
413 * held. 414 * held.
414 */ 415 */
416 wm831x->gpio_level_low[irq] = false;
417 wm831x->gpio_level_high[irq] = false;
415 switch (type) { 418 switch (type) {
416 case IRQ_TYPE_EDGE_BOTH: 419 case IRQ_TYPE_EDGE_BOTH:
417 wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_INT_MODE; 420 wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_INT_MODE;
418 wm831x->gpio_level[irq] = false;
419 break; 421 break;
420 case IRQ_TYPE_EDGE_RISING: 422 case IRQ_TYPE_EDGE_RISING:
421 wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL; 423 wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL;
422 wm831x->gpio_level[irq] = false;
423 break; 424 break;
424 case IRQ_TYPE_EDGE_FALLING: 425 case IRQ_TYPE_EDGE_FALLING:
425 wm831x->gpio_update[irq] = 0x10000; 426 wm831x->gpio_update[irq] = 0x10000;
426 wm831x->gpio_level[irq] = false;
427 break; 427 break;
428 case IRQ_TYPE_LEVEL_HIGH: 428 case IRQ_TYPE_LEVEL_HIGH:
429 wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL; 429 wm831x->gpio_update[irq] = 0x10000 | WM831X_GPN_POL;
430 wm831x->gpio_level[irq] = true; 430 wm831x->gpio_level_high[irq] = true;
431 break;
432 case IRQ_TYPE_LEVEL_LOW:
433 wm831x->gpio_update[irq] = 0x10000;
434 wm831x->gpio_level_low[irq] = true;
431 break; 435 break;
432 default: 436 default:
433 return -EINVAL; 437 return -EINVAL;
@@ -469,9 +473,11 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
469 * descriptors. 473 * descriptors.
470 */ 474 */
471 if (primary & WM831X_TCHPD_INT) 475 if (primary & WM831X_TCHPD_INT)
472 handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHPD); 476 handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
477 WM831X_IRQ_TCHPD));
473 if (primary & WM831X_TCHDATA_INT) 478 if (primary & WM831X_TCHDATA_INT)
474 handle_nested_irq(wm831x->irq_base + WM831X_IRQ_TCHDATA); 479 handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
480 WM831X_IRQ_TCHDATA));
475 primary &= ~(WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT); 481 primary &= ~(WM831X_TCHDATA_EINT | WM831X_TCHPD_EINT);
476 482
477 for (i = 0; i < ARRAY_SIZE(wm831x_irqs); i++) { 483 for (i = 0; i < ARRAY_SIZE(wm831x_irqs); i++) {
@@ -507,16 +513,29 @@ static irqreturn_t wm831x_irq_thread(int irq, void *data)
507 } 513 }
508 514
509 if (*status & wm831x_irqs[i].mask) 515 if (*status & wm831x_irqs[i].mask)
510 handle_nested_irq(wm831x->irq_base + i); 516 handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
517 i));
511 518
512 /* Simulate an edge triggered IRQ by polling the input 519 /* Simulate an edge triggered IRQ by polling the input
513 * status. This is sucky but improves interoperability. 520 * status. This is sucky but improves interoperability.
514 */ 521 */
515 if (primary == WM831X_GP_INT && 522 if (primary == WM831X_GP_INT &&
516 wm831x->gpio_level[i - WM831X_IRQ_GPIO_1]) { 523 wm831x->gpio_level_high[i - WM831X_IRQ_GPIO_1]) {
517 ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL); 524 ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL);
518 while (ret & 1 << (i - WM831X_IRQ_GPIO_1)) { 525 while (ret & 1 << (i - WM831X_IRQ_GPIO_1)) {
519 handle_nested_irq(wm831x->irq_base + i); 526 handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
527 i));
528 ret = wm831x_reg_read(wm831x,
529 WM831X_GPIO_LEVEL);
530 }
531 }
532
533 if (primary == WM831X_GP_INT &&
534 wm831x->gpio_level_low[i - WM831X_IRQ_GPIO_1]) {
535 ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL);
536 while (!(ret & 1 << (i - WM831X_IRQ_GPIO_1))) {
537 handle_nested_irq(irq_find_mapping(wm831x->irq_domain,
538 i));
520 ret = wm831x_reg_read(wm831x, 539 ret = wm831x_reg_read(wm831x,
521 WM831X_GPIO_LEVEL); 540 WM831X_GPIO_LEVEL);
522 } 541 }
@@ -527,10 +546,34 @@ out:
527 return IRQ_HANDLED; 546 return IRQ_HANDLED;
528} 547}
529 548
549static int wm831x_irq_map(struct irq_domain *h, unsigned int virq,
550 irq_hw_number_t hw)
551{
552 irq_set_chip_data(virq, h->host_data);
553 irq_set_chip_and_handler(virq, &wm831x_irq_chip, handle_edge_irq);
554 irq_set_nested_thread(virq, 1);
555
556 /* ARM needs us to explicitly flag the IRQ as valid
557 * and will set them noprobe when we do so. */
558#ifdef CONFIG_ARM
559 set_irq_flags(virq, IRQF_VALID);
560#else
561 irq_set_noprobe(virq);
562#endif
563
564 return 0;
565}
566
567static struct irq_domain_ops wm831x_irq_domain_ops = {
568 .map = wm831x_irq_map,
569 .xlate = irq_domain_xlate_twocell,
570};
571
530int wm831x_irq_init(struct wm831x *wm831x, int irq) 572int wm831x_irq_init(struct wm831x *wm831x, int irq)
531{ 573{
532 struct wm831x_pdata *pdata = wm831x->dev->platform_data; 574 struct wm831x_pdata *pdata = wm831x->dev->platform_data;
533 int i, cur_irq, ret; 575 struct irq_domain *domain;
576 int i, ret, irq_base;
534 577
535 mutex_init(&wm831x->irq_lock); 578 mutex_init(&wm831x->irq_lock);
536 579
@@ -543,18 +586,33 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
543 } 586 }
544 587
545 /* Try to dynamically allocate IRQs if no base is specified */ 588 /* Try to dynamically allocate IRQs if no base is specified */
546 if (!pdata || !pdata->irq_base) 589 if (pdata && pdata->irq_base) {
547 wm831x->irq_base = -1; 590 irq_base = irq_alloc_descs(pdata->irq_base, 0,
591 WM831X_NUM_IRQS, 0);
592 if (irq_base < 0) {
593 dev_warn(wm831x->dev, "Failed to allocate IRQs: %d\n",
594 irq_base);
595 irq_base = 0;
596 }
597 } else {
598 irq_base = 0;
599 }
600
601 if (irq_base)
602 domain = irq_domain_add_legacy(wm831x->dev->of_node,
603 ARRAY_SIZE(wm831x_irqs),
604 irq_base, 0,
605 &wm831x_irq_domain_ops,
606 wm831x);
548 else 607 else
549 wm831x->irq_base = pdata->irq_base; 608 domain = irq_domain_add_linear(wm831x->dev->of_node,
609 ARRAY_SIZE(wm831x_irqs),
610 &wm831x_irq_domain_ops,
611 wm831x);
550 612
551 wm831x->irq_base = irq_alloc_descs(wm831x->irq_base, 0, 613 if (!domain) {
552 WM831X_NUM_IRQS, 0); 614 dev_warn(wm831x->dev, "Failed to allocate IRQ domain\n");
553 if (wm831x->irq_base < 0) { 615 return -EINVAL;
554 dev_warn(wm831x->dev, "Failed to allocate IRQs: %d\n",
555 wm831x->irq_base);
556 wm831x->irq_base = 0;
557 return 0;
558 } 616 }
559 617
560 if (pdata && pdata->irq_cmos) 618 if (pdata && pdata->irq_cmos)
@@ -565,38 +623,22 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq)
565 wm831x_set_bits(wm831x, WM831X_IRQ_CONFIG, 623 wm831x_set_bits(wm831x, WM831X_IRQ_CONFIG,
566 WM831X_IRQ_OD, i); 624 WM831X_IRQ_OD, i);
567 625
568 /* Try to flag /IRQ as a wake source; there are a number of
569 * unconditional wake sources in the PMIC so this isn't
570 * conditional but we don't actually care *too* much if it
571 * fails.
572 */
573 ret = enable_irq_wake(irq);
574 if (ret != 0) {
575 dev_warn(wm831x->dev, "Can't enable IRQ as wake source: %d\n",
576 ret);
577 }
578
579 wm831x->irq = irq; 626 wm831x->irq = irq;
580 627 wm831x->irq_domain = domain;
581 /* Register them with genirq */
582 for (cur_irq = wm831x->irq_base;
583 cur_irq < ARRAY_SIZE(wm831x_irqs) + wm831x->irq_base;
584 cur_irq++) {
585 irq_set_chip_data(cur_irq, wm831x);
586 irq_set_chip_and_handler(cur_irq, &wm831x_irq_chip,
587 handle_edge_irq);
588 irq_set_nested_thread(cur_irq, 1);
589
590 /* ARM needs us to explicitly flag the IRQ as valid
591 * and will set them noprobe when we do so. */
592#ifdef CONFIG_ARM
593 set_irq_flags(cur_irq, IRQF_VALID);
594#else
595 irq_set_noprobe(cur_irq);
596#endif
597 }
598 628
599 if (irq) { 629 if (irq) {
630 /* Try to flag /IRQ as a wake source; there are a number of
631 * unconditional wake sources in the PMIC so this isn't
632 * conditional but we don't actually care *too* much if it
633 * fails.
634 */
635 ret = enable_irq_wake(irq);
636 if (ret != 0) {
637 dev_warn(wm831x->dev,
638 "Can't enable IRQ as wake source: %d\n",
639 ret);
640 }
641
600 ret = request_threaded_irq(irq, NULL, wm831x_irq_thread, 642 ret = request_threaded_irq(irq, NULL, wm831x_irq_thread,
601 IRQF_TRIGGER_LOW | IRQF_ONESHOT, 643 IRQF_TRIGGER_LOW | IRQF_ONESHOT,
602 "wm831x", wm831x); 644 "wm831x", wm831x);
diff --git a/drivers/mfd/wm8350-core.c b/drivers/mfd/wm8350-core.c
index dd1caaac55e4..8a9b11ca076a 100644
--- a/drivers/mfd/wm8350-core.c
+++ b/drivers/mfd/wm8350-core.c
@@ -20,6 +20,7 @@
20#include <linux/device.h> 20#include <linux/device.h>
21#include <linux/delay.h> 21#include <linux/delay.h>
22#include <linux/interrupt.h> 22#include <linux/interrupt.h>
23#include <linux/regmap.h>
23#include <linux/workqueue.h> 24#include <linux/workqueue.h>
24 25
25#include <linux/mfd/wm8350/core.h> 26#include <linux/mfd/wm8350/core.h>
@@ -74,7 +75,7 @@ static int wm8350_phys_read(struct wm8350 *wm8350, u8 reg, int num_regs,
74 int bytes = num_regs * 2; 75 int bytes = num_regs * 2;
75 76
76 dev_dbg(wm8350->dev, "volatile read\n"); 77 dev_dbg(wm8350->dev, "volatile read\n");
77 ret = wm8350->read_dev(wm8350, reg, bytes, (char *)dest); 78 ret = regmap_raw_read(wm8350->regmap, reg, dest, bytes);
78 79
79 for (i = reg; i < reg + num_regs; i++) { 80 for (i = reg; i < reg + num_regs; i++) {
80 /* Cache is CPU endian */ 81 /* Cache is CPU endian */
@@ -96,9 +97,6 @@ static int wm8350_read(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *dest)
96 int ret = 0; 97 int ret = 0;
97 int bytes = num_regs * 2; 98 int bytes = num_regs * 2;
98 99
99 if (wm8350->read_dev == NULL)
100 return -ENODEV;
101
102 if ((reg + num_regs - 1) > WM8350_MAX_REGISTER) { 100 if ((reg + num_regs - 1) > WM8350_MAX_REGISTER) {
103 dev_err(wm8350->dev, "invalid reg %x\n", 101 dev_err(wm8350->dev, "invalid reg %x\n",
104 reg + num_regs - 1); 102 reg + num_regs - 1);
@@ -149,9 +147,6 @@ static int wm8350_write(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *src)
149 int end = reg + num_regs; 147 int end = reg + num_regs;
150 int bytes = num_regs * 2; 148 int bytes = num_regs * 2;
151 149
152 if (wm8350->write_dev == NULL)
153 return -ENODEV;
154
155 if ((reg + num_regs - 1) > WM8350_MAX_REGISTER) { 150 if ((reg + num_regs - 1) > WM8350_MAX_REGISTER) {
156 dev_err(wm8350->dev, "invalid reg %x\n", 151 dev_err(wm8350->dev, "invalid reg %x\n",
157 reg + num_regs - 1); 152 reg + num_regs - 1);
@@ -182,7 +177,7 @@ static int wm8350_write(struct wm8350 *wm8350, u8 reg, int num_regs, u16 *src)
182 } 177 }
183 178
184 /* Actually write it out */ 179 /* Actually write it out */
185 return wm8350->write_dev(wm8350, reg, bytes, (char *)src); 180 return regmap_raw_write(wm8350->regmap, reg, src, bytes);
186} 181}
187 182
188/* 183/*
@@ -515,9 +510,8 @@ static int wm8350_create_cache(struct wm8350 *wm8350, int type, int mode)
515 * a PMIC so the device many not be in a virgin state and we 510 * a PMIC so the device many not be in a virgin state and we
516 * can't rely on the silicon values. 511 * can't rely on the silicon values.
517 */ 512 */
518 ret = wm8350->read_dev(wm8350, 0, 513 ret = regmap_raw_read(wm8350->regmap, 0, wm8350->reg_cache,
519 sizeof(u16) * (WM8350_MAX_REGISTER + 1), 514 sizeof(u16) * (WM8350_MAX_REGISTER + 1));
520 wm8350->reg_cache);
521 if (ret < 0) { 515 if (ret < 0) {
522 dev_err(wm8350->dev, 516 dev_err(wm8350->dev,
523 "failed to read initial cache values\n"); 517 "failed to read initial cache values\n");
@@ -570,35 +564,30 @@ int wm8350_device_init(struct wm8350 *wm8350, int irq,
570 struct wm8350_platform_data *pdata) 564 struct wm8350_platform_data *pdata)
571{ 565{
572 int ret; 566 int ret;
573 u16 id1, id2, mask_rev; 567 unsigned int id1, id2, mask_rev;
574 u16 cust_id, mode, chip_rev; 568 unsigned int cust_id, mode, chip_rev;
575 569
576 dev_set_drvdata(wm8350->dev, wm8350); 570 dev_set_drvdata(wm8350->dev, wm8350);
577 571
578 /* get WM8350 revision and config mode */ 572 /* get WM8350 revision and config mode */
579 ret = wm8350->read_dev(wm8350, WM8350_RESET_ID, sizeof(id1), &id1); 573 ret = regmap_read(wm8350->regmap, WM8350_RESET_ID, &id1);
580 if (ret != 0) { 574 if (ret != 0) {
581 dev_err(wm8350->dev, "Failed to read ID: %d\n", ret); 575 dev_err(wm8350->dev, "Failed to read ID: %d\n", ret);
582 goto err; 576 goto err;
583 } 577 }
584 578
585 ret = wm8350->read_dev(wm8350, WM8350_ID, sizeof(id2), &id2); 579 ret = regmap_read(wm8350->regmap, WM8350_ID, &id2);
586 if (ret != 0) { 580 if (ret != 0) {
587 dev_err(wm8350->dev, "Failed to read ID: %d\n", ret); 581 dev_err(wm8350->dev, "Failed to read ID: %d\n", ret);
588 goto err; 582 goto err;
589 } 583 }
590 584
591 ret = wm8350->read_dev(wm8350, WM8350_REVISION, sizeof(mask_rev), 585 ret = regmap_read(wm8350->regmap, WM8350_REVISION, &mask_rev);
592 &mask_rev);
593 if (ret != 0) { 586 if (ret != 0) {
594 dev_err(wm8350->dev, "Failed to read revision: %d\n", ret); 587 dev_err(wm8350->dev, "Failed to read revision: %d\n", ret);
595 goto err; 588 goto err;
596 } 589 }
597 590
598 id1 = be16_to_cpu(id1);
599 id2 = be16_to_cpu(id2);
600 mask_rev = be16_to_cpu(mask_rev);
601
602 if (id1 != 0x6143) { 591 if (id1 != 0x6143) {
603 dev_err(wm8350->dev, 592 dev_err(wm8350->dev,
604 "Device with ID %x is not a WM8350\n", id1); 593 "Device with ID %x is not a WM8350\n", id1);
diff --git a/drivers/mfd/wm8350-i2c.c b/drivers/mfd/wm8350-i2c.c
index d955faaf27c4..a68aceb4e48c 100644
--- a/drivers/mfd/wm8350-i2c.c
+++ b/drivers/mfd/wm8350-i2c.c
@@ -15,47 +15,18 @@
15 15
16#include <linux/module.h> 16#include <linux/module.h>
17#include <linux/moduleparam.h> 17#include <linux/moduleparam.h>
18#include <linux/err.h>
18#include <linux/init.h> 19#include <linux/init.h>
19#include <linux/i2c.h> 20#include <linux/i2c.h>
20#include <linux/platform_device.h> 21#include <linux/platform_device.h>
21#include <linux/mfd/wm8350/core.h> 22#include <linux/mfd/wm8350/core.h>
23#include <linux/regmap.h>
22#include <linux/slab.h> 24#include <linux/slab.h>
23 25
24static int wm8350_i2c_read_device(struct wm8350 *wm8350, char reg, 26static const struct regmap_config wm8350_regmap = {
25 int bytes, void *dest) 27 .reg_bits = 8,
26{ 28 .val_bits = 16,
27 int ret; 29};
28
29 ret = i2c_master_send(wm8350->i2c_client, &reg, 1);
30 if (ret < 0)
31 return ret;
32 ret = i2c_master_recv(wm8350->i2c_client, dest, bytes);
33 if (ret < 0)
34 return ret;
35 if (ret != bytes)
36 return -EIO;
37 return 0;
38}
39
40static int wm8350_i2c_write_device(struct wm8350 *wm8350, char reg,
41 int bytes, void *src)
42{
43 /* we add 1 byte for device register */
44 u8 msg[(WM8350_MAX_REGISTER << 1) + 1];
45 int ret;
46
47 if (bytes > ((WM8350_MAX_REGISTER << 1) + 1))
48 return -EINVAL;
49
50 msg[0] = reg;
51 memcpy(&msg[1], src, bytes);
52 ret = i2c_master_send(wm8350->i2c_client, msg, bytes + 1);
53 if (ret < 0)
54 return ret;
55 if (ret != bytes + 1)
56 return -EIO;
57 return 0;
58}
59 30
60static int wm8350_i2c_probe(struct i2c_client *i2c, 31static int wm8350_i2c_probe(struct i2c_client *i2c,
61 const struct i2c_device_id *id) 32 const struct i2c_device_id *id)
@@ -67,20 +38,18 @@ static int wm8350_i2c_probe(struct i2c_client *i2c,
67 if (wm8350 == NULL) 38 if (wm8350 == NULL)
68 return -ENOMEM; 39 return -ENOMEM;
69 40
41 wm8350->regmap = devm_regmap_init_i2c(i2c, &wm8350_regmap);
42 if (IS_ERR(wm8350->regmap)) {
43 ret = PTR_ERR(wm8350->regmap);
44 dev_err(&i2c->dev, "Failed to allocate register map: %d\n",
45 ret);
46 return ret;
47 }
48
70 i2c_set_clientdata(i2c, wm8350); 49 i2c_set_clientdata(i2c, wm8350);
71 wm8350->dev = &i2c->dev; 50 wm8350->dev = &i2c->dev;
72 wm8350->i2c_client = i2c;
73 wm8350->read_dev = wm8350_i2c_read_device;
74 wm8350->write_dev = wm8350_i2c_write_device;
75
76 ret = wm8350_device_init(wm8350, i2c->irq, i2c->dev.platform_data);
77 if (ret < 0)
78 goto err;
79
80 return ret;
81 51
82err: 52 return wm8350_device_init(wm8350, i2c->irq, i2c->dev.platform_data);
83 return ret;
84} 53}
85 54
86static int wm8350_i2c_remove(struct i2c_client *i2c) 55static int wm8350_i2c_remove(struct i2c_client *i2c)
diff --git a/drivers/mfd/wm8400-core.c b/drivers/mfd/wm8400-core.c
index 1189a17f0f25..4b7d378551d5 100644
--- a/drivers/mfd/wm8400-core.c
+++ b/drivers/mfd/wm8400-core.c
@@ -23,136 +23,16 @@
23#include <linux/regmap.h> 23#include <linux/regmap.h>
24#include <linux/slab.h> 24#include <linux/slab.h>
25 25
26static struct { 26static bool wm8400_volatile(struct device *dev, unsigned int reg)
27 u16 readable; /* Mask of readable bits */
28 u16 writable; /* Mask of writable bits */
29 u16 vol; /* Mask of volatile bits */
30 int is_codec; /* Register controlled by codec reset */
31 u16 default_val; /* Value on reset */
32} reg_data[] = {
33 { 0xFFFF, 0xFFFF, 0x0000, 0, 0x6172 }, /* R0 */
34 { 0x7000, 0x0000, 0x8000, 0, 0x0000 }, /* R1 */
35 { 0xFF17, 0xFF17, 0x0000, 0, 0x0000 }, /* R2 */
36 { 0xEBF3, 0xEBF3, 0x0000, 1, 0x6000 }, /* R3 */
37 { 0x3CF3, 0x3CF3, 0x0000, 1, 0x0000 }, /* R4 */
38 { 0xF1F8, 0xF1F8, 0x0000, 1, 0x4050 }, /* R5 */
39 { 0xFC1F, 0xFC1F, 0x0000, 1, 0x4000 }, /* R6 */
40 { 0xDFDE, 0xDFDE, 0x0000, 1, 0x01C8 }, /* R7 */
41 { 0xFCFC, 0xFCFC, 0x0000, 1, 0x0000 }, /* R8 */
42 { 0xEFFF, 0xEFFF, 0x0000, 1, 0x0040 }, /* R9 */
43 { 0xEFFF, 0xEFFF, 0x0000, 1, 0x0040 }, /* R10 */
44 { 0x27F7, 0x27F7, 0x0000, 1, 0x0004 }, /* R11 */
45 { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R12 */
46 { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R13 */
47 { 0x1FEF, 0x1FEF, 0x0000, 1, 0x0000 }, /* R14 */
48 { 0x0163, 0x0163, 0x0000, 1, 0x0100 }, /* R15 */
49 { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R16 */
50 { 0x01FF, 0x01FF, 0x0000, 1, 0x00C0 }, /* R17 */
51 { 0x1FFF, 0x0FFF, 0x0000, 1, 0x0000 }, /* R18 */
52 { 0xFFFF, 0xFFFF, 0x0000, 1, 0x1000 }, /* R19 */
53 { 0xFFFF, 0xFFFF, 0x0000, 1, 0x1010 }, /* R20 */
54 { 0xFFFF, 0xFFFF, 0x0000, 1, 0x1010 }, /* R21 */
55 { 0x0FDD, 0x0FDD, 0x0000, 1, 0x8000 }, /* R22 */
56 { 0x1FFF, 0x1FFF, 0x0000, 1, 0x0800 }, /* R23 */
57 { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R24 */
58 { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R25 */
59 { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R26 */
60 { 0x0000, 0x01DF, 0x0000, 1, 0x008B }, /* R27 */
61 { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R28 */
62 { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R29 */
63 { 0x0000, 0x0077, 0x0000, 1, 0x0066 }, /* R30 */
64 { 0x0000, 0x0033, 0x0000, 1, 0x0022 }, /* R31 */
65 { 0x0000, 0x01FF, 0x0000, 1, 0x0079 }, /* R32 */
66 { 0x0000, 0x01FF, 0x0000, 1, 0x0079 }, /* R33 */
67 { 0x0000, 0x0003, 0x0000, 1, 0x0003 }, /* R34 */
68 { 0x0000, 0x01FF, 0x0000, 1, 0x0003 }, /* R35 */
69 { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R36 */
70 { 0x0000, 0x003F, 0x0000, 1, 0x0100 }, /* R37 */
71 { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R38 */
72 { 0x0000, 0x000F, 0x0000, 0, 0x0000 }, /* R39 */
73 { 0x0000, 0x00FF, 0x0000, 1, 0x0000 }, /* R40 */
74 { 0x0000, 0x01B7, 0x0000, 1, 0x0000 }, /* R41 */
75 { 0x0000, 0x01B7, 0x0000, 1, 0x0000 }, /* R42 */
76 { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R43 */
77 { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R44 */
78 { 0x0000, 0x00FD, 0x0000, 1, 0x0000 }, /* R45 */
79 { 0x0000, 0x00FD, 0x0000, 1, 0x0000 }, /* R46 */
80 { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R47 */
81 { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R48 */
82 { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R49 */
83 { 0x0000, 0x01FF, 0x0000, 1, 0x0000 }, /* R50 */
84 { 0x0000, 0x01B3, 0x0000, 1, 0x0180 }, /* R51 */
85 { 0x0000, 0x0077, 0x0000, 1, 0x0000 }, /* R52 */
86 { 0x0000, 0x0077, 0x0000, 1, 0x0000 }, /* R53 */
87 { 0x0000, 0x00FF, 0x0000, 1, 0x0000 }, /* R54 */
88 { 0x0000, 0x0001, 0x0000, 1, 0x0000 }, /* R55 */
89 { 0x0000, 0x003F, 0x0000, 1, 0x0000 }, /* R56 */
90 { 0x0000, 0x004F, 0x0000, 1, 0x0000 }, /* R57 */
91 { 0x0000, 0x00FD, 0x0000, 1, 0x0000 }, /* R58 */
92 { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R59 */
93 { 0x1FFF, 0x1FFF, 0x0000, 1, 0x0000 }, /* R60 */
94 { 0xFFFF, 0xFFFF, 0x0000, 1, 0x0000 }, /* R61 */
95 { 0x03FF, 0x03FF, 0x0000, 1, 0x0000 }, /* R62 */
96 { 0x007F, 0x007F, 0x0000, 1, 0x0000 }, /* R63 */
97 { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R64 */
98 { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R65 */
99 { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R66 */
100 { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R67 */
101 { 0xDFFF, 0xDFFF, 0x0000, 0, 0x0000 }, /* R68 */
102 { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R69 */
103 { 0xFFFF, 0xFFFF, 0x0000, 0, 0x4400 }, /* R70 */
104 { 0x23FF, 0x23FF, 0x0000, 0, 0x0000 }, /* R71 */
105 { 0xFFFF, 0xFFFF, 0x0000, 0, 0x4400 }, /* R72 */
106 { 0x23FF, 0x23FF, 0x0000, 0, 0x0000 }, /* R73 */
107 { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R74 */
108 { 0x000E, 0x000E, 0x0000, 0, 0x0008 }, /* R75 */
109 { 0xE00F, 0xE00F, 0x0000, 0, 0x0000 }, /* R76 */
110 { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R77 */
111 { 0x03C0, 0x03C0, 0x0000, 0, 0x02C0 }, /* R78 */
112 { 0xFFFF, 0x0000, 0xffff, 0, 0x0000 }, /* R79 */
113 { 0xFFFF, 0xFFFF, 0x0000, 0, 0x0000 }, /* R80 */
114 { 0xFFFF, 0x0000, 0xffff, 0, 0x0000 }, /* R81 */
115 { 0x2BFF, 0x0000, 0xffff, 0, 0x0000 }, /* R82 */
116 { 0x0000, 0x0000, 0x0000, 0, 0x0000 }, /* R83 */
117 { 0x80FF, 0x80FF, 0x0000, 0, 0x00ff }, /* R84 */
118};
119
120static int wm8400_read(struct wm8400 *wm8400, u8 reg, int num_regs, u16 *dest)
121{ 27{
122 int i, ret = 0; 28 switch (reg) {
123 29 case WM8400_INTERRUPT_STATUS_1:
124 BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache)); 30 case WM8400_INTERRUPT_LEVELS:
125 31 case WM8400_SHUTDOWN_REASON:
126 /* If there are any volatile reads then read back the entire block */ 32 return true;
127 for (i = reg; i < reg + num_regs; i++) 33 default:
128 if (reg_data[i].vol) { 34 return false;
129 ret = regmap_bulk_read(wm8400->regmap, reg, dest,
130 num_regs);
131 return ret;
132 }
133
134 /* Otherwise use the cache */
135 memcpy(dest, &wm8400->reg_cache[reg], num_regs * sizeof(u16));
136
137 return 0;
138}
139
140static int wm8400_write(struct wm8400 *wm8400, u8 reg, int num_regs,
141 u16 *src)
142{
143 int ret, i;
144
145 BUG_ON(reg + num_regs > ARRAY_SIZE(wm8400->reg_cache));
146
147 for (i = 0; i < num_regs; i++) {
148 BUG_ON(!reg_data[reg + i].writable);
149 wm8400->reg_cache[reg + i] = src[i];
150 ret = regmap_write(wm8400->regmap, reg, src[i]);
151 if (ret != 0)
152 return ret;
153 } 35 }
154
155 return 0;
156} 36}
157 37
158/** 38/**
@@ -165,13 +45,12 @@ static int wm8400_write(struct wm8400 *wm8400, u8 reg, int num_regs,
165 */ 45 */
166u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg) 46u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg)
167{ 47{
168 u16 val; 48 unsigned int val;
169 49 int ret;
170 mutex_lock(&wm8400->io_lock);
171
172 wm8400_read(wm8400, reg, 1, &val);
173 50
174 mutex_unlock(&wm8400->io_lock); 51 ret = regmap_read(wm8400->regmap, reg, &val);
52 if (ret < 0)
53 return ret;
175 54
176 return val; 55 return val;
177} 56}
@@ -179,63 +58,10 @@ EXPORT_SYMBOL_GPL(wm8400_reg_read);
179 58
180int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data) 59int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data)
181{ 60{
182 int ret; 61 return regmap_bulk_read(wm8400->regmap, reg, data, count);
183
184 mutex_lock(&wm8400->io_lock);
185
186 ret = wm8400_read(wm8400, reg, count, data);
187
188 mutex_unlock(&wm8400->io_lock);
189
190 return ret;
191} 62}
192EXPORT_SYMBOL_GPL(wm8400_block_read); 63EXPORT_SYMBOL_GPL(wm8400_block_read);
193 64
194/**
195 * wm8400_set_bits - Bitmask write
196 *
197 * @wm8400: Pointer to wm8400 control structure
198 * @reg: Register to access
199 * @mask: Mask of bits to change
200 * @val: Value to set for masked bits
201 */
202int wm8400_set_bits(struct wm8400 *wm8400, u8 reg, u16 mask, u16 val)
203{
204 u16 tmp;
205 int ret;
206
207 mutex_lock(&wm8400->io_lock);
208
209 ret = wm8400_read(wm8400, reg, 1, &tmp);
210 tmp = (tmp & ~mask) | val;
211 if (ret == 0)
212 ret = wm8400_write(wm8400, reg, 1, &tmp);
213
214 mutex_unlock(&wm8400->io_lock);
215
216 return ret;
217}
218EXPORT_SYMBOL_GPL(wm8400_set_bits);
219
220/**
221 * wm8400_reset_codec_reg_cache - Reset cached codec registers to
222 * their default values.
223 */
224void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400)
225{
226 int i;
227
228 mutex_lock(&wm8400->io_lock);
229
230 /* Reset all codec registers to their initial value */
231 for (i = 0; i < ARRAY_SIZE(wm8400->reg_cache); i++)
232 if (reg_data[i].is_codec)
233 wm8400->reg_cache[i] = reg_data[i].default_val;
234
235 mutex_unlock(&wm8400->io_lock);
236}
237EXPORT_SYMBOL_GPL(wm8400_reset_codec_reg_cache);
238
239static int wm8400_register_codec(struct wm8400 *wm8400) 65static int wm8400_register_codec(struct wm8400 *wm8400)
240{ 66{
241 struct mfd_cell cell = { 67 struct mfd_cell cell = {
@@ -257,44 +83,24 @@ static int wm8400_register_codec(struct wm8400 *wm8400)
257static int wm8400_init(struct wm8400 *wm8400, 83static int wm8400_init(struct wm8400 *wm8400,
258 struct wm8400_platform_data *pdata) 84 struct wm8400_platform_data *pdata)
259{ 85{
260 u16 reg; 86 unsigned int reg;
261 int ret, i; 87 int ret;
262
263 mutex_init(&wm8400->io_lock);
264 88
265 dev_set_drvdata(wm8400->dev, wm8400); 89 dev_set_drvdata(wm8400->dev, wm8400);
266 90
267 /* Check that this is actually a WM8400 */ 91 /* Check that this is actually a WM8400 */
268 ret = regmap_read(wm8400->regmap, WM8400_RESET_ID, &i); 92 ret = regmap_read(wm8400->regmap, WM8400_RESET_ID, &reg);
269 if (ret != 0) { 93 if (ret != 0) {
270 dev_err(wm8400->dev, "Chip ID register read failed\n"); 94 dev_err(wm8400->dev, "Chip ID register read failed\n");
271 return -EIO; 95 return -EIO;
272 } 96 }
273 if (i != reg_data[WM8400_RESET_ID].default_val) { 97 if (reg != 0x6172) {
274 dev_err(wm8400->dev, "Device is not a WM8400, ID is %x\n", i); 98 dev_err(wm8400->dev, "Device is not a WM8400, ID is %x\n",
99 reg);
275 return -ENODEV; 100 return -ENODEV;
276 } 101 }
277 102
278 /* We don't know what state the hardware is in and since this 103 ret = regmap_read(wm8400->regmap, WM8400_ID, &reg);
279 * is a PMIC we can't reset it safely so initialise the register
280 * cache from the hardware.
281 */
282 ret = regmap_raw_read(wm8400->regmap, 0, wm8400->reg_cache,
283 ARRAY_SIZE(wm8400->reg_cache));
284 if (ret != 0) {
285 dev_err(wm8400->dev, "Register cache read failed\n");
286 return -EIO;
287 }
288 for (i = 0; i < ARRAY_SIZE(wm8400->reg_cache); i++)
289 wm8400->reg_cache[i] = be16_to_cpu(wm8400->reg_cache[i]);
290
291 /* If the codec is in reset use hard coded values */
292 if (!(wm8400->reg_cache[WM8400_POWER_MANAGEMENT_1] & WM8400_CODEC_ENA))
293 for (i = 0; i < ARRAY_SIZE(wm8400->reg_cache); i++)
294 if (reg_data[i].is_codec)
295 wm8400->reg_cache[i] = reg_data[i].default_val;
296
297 ret = wm8400_read(wm8400, WM8400_ID, 1, &reg);
298 if (ret != 0) { 104 if (ret != 0) {
299 dev_err(wm8400->dev, "ID register read failed: %d\n", ret); 105 dev_err(wm8400->dev, "ID register read failed: %d\n", ret);
300 return ret; 106 return ret;
@@ -334,8 +140,22 @@ static const struct regmap_config wm8400_regmap_config = {
334 .reg_bits = 8, 140 .reg_bits = 8,
335 .val_bits = 16, 141 .val_bits = 16,
336 .max_register = WM8400_REGISTER_COUNT - 1, 142 .max_register = WM8400_REGISTER_COUNT - 1,
143
144 .volatile_reg = wm8400_volatile,
145
146 .cache_type = REGCACHE_RBTREE,
337}; 147};
338 148
149/**
150 * wm8400_reset_codec_reg_cache - Reset cached codec registers to
151 * their default values.
152 */
153void wm8400_reset_codec_reg_cache(struct wm8400 *wm8400)
154{
155 regmap_reinit_cache(wm8400->regmap, &wm8400_regmap_config);
156}
157EXPORT_SYMBOL_GPL(wm8400_reset_codec_reg_cache);
158
339#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE) 159#if defined(CONFIG_I2C) || defined(CONFIG_I2C_MODULE)
340static int wm8400_i2c_probe(struct i2c_client *i2c, 160static int wm8400_i2c_probe(struct i2c_client *i2c,
341 const struct i2c_device_id *id) 161 const struct i2c_device_id *id)
diff --git a/drivers/mfd/wm8994-core.c b/drivers/mfd/wm8994-core.c
index 9d7ca1e978fa..1e321d349777 100644
--- a/drivers/mfd/wm8994-core.c
+++ b/drivers/mfd/wm8994-core.c
@@ -500,7 +500,8 @@ static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq)
500 ret); 500 ret);
501 goto err_enable; 501 goto err_enable;
502 } 502 }
503 wm8994->revision = ret; 503 wm8994->revision = ret & WM8994_CHIP_REV_MASK;
504 wm8994->cust_id = (ret & WM8994_CUST_ID_MASK) >> WM8994_CUST_ID_SHIFT;
504 505
505 switch (wm8994->type) { 506 switch (wm8994->type) {
506 case WM8994: 507 case WM8994:
@@ -553,8 +554,8 @@ static __devinit int wm8994_device_init(struct wm8994 *wm8994, int irq)
553 break; 554 break;
554 } 555 }
555 556
556 dev_info(wm8994->dev, "%s revision %c\n", devname, 557 dev_info(wm8994->dev, "%s revision %c CUST_ID %02x\n", devname,
557 'A' + wm8994->revision); 558 'A' + wm8994->revision, wm8994->cust_id);
558 559
559 switch (wm8994->type) { 560 switch (wm8994->type) {
560 case WM1811: 561 case WM1811:
@@ -732,23 +733,7 @@ static struct i2c_driver wm8994_i2c_driver = {
732 .id_table = wm8994_i2c_id, 733 .id_table = wm8994_i2c_id,
733}; 734};
734 735
735static int __init wm8994_i2c_init(void) 736module_i2c_driver(wm8994_i2c_driver);
736{
737 int ret;
738
739 ret = i2c_add_driver(&wm8994_i2c_driver);
740 if (ret != 0)
741 pr_err("Failed to register wm8994 I2C driver: %d\n", ret);
742
743 return ret;
744}
745module_init(wm8994_i2c_init);
746
747static void __exit wm8994_i2c_exit(void)
748{
749 i2c_del_driver(&wm8994_i2c_driver);
750}
751module_exit(wm8994_i2c_exit);
752 737
753MODULE_DESCRIPTION("Core support for the WM8994 audio CODEC"); 738MODULE_DESCRIPTION("Core support for the WM8994 audio CODEC");
754MODULE_LICENSE("GPL"); 739MODULE_LICENSE("GPL");
diff --git a/drivers/mfd/wm8994-regmap.c b/drivers/mfd/wm8994-regmap.c
index bfd25af6ecb1..52e9e2944940 100644
--- a/drivers/mfd/wm8994-regmap.c
+++ b/drivers/mfd/wm8994-regmap.c
@@ -1122,7 +1122,6 @@ static bool wm8994_volatile_register(struct device *dev, unsigned int reg)
1122 case WM8994_RATE_STATUS: 1122 case WM8994_RATE_STATUS:
1123 case WM8958_MIC_DETECT_3: 1123 case WM8958_MIC_DETECT_3:
1124 case WM8994_DC_SERVO_4E: 1124 case WM8994_DC_SERVO_4E:
1125 case WM8994_CHIP_REVISION:
1126 case WM8994_INTERRUPT_STATUS_1: 1125 case WM8994_INTERRUPT_STATUS_1:
1127 case WM8994_INTERRUPT_STATUS_2: 1126 case WM8994_INTERRUPT_STATUS_2:
1128 return true; 1127 return true;
diff --git a/drivers/misc/ab8500-pwm.c b/drivers/misc/ab8500-pwm.c
index d7a9aa14e5d5..042a8fe4efaa 100644
--- a/drivers/misc/ab8500-pwm.c
+++ b/drivers/misc/ab8500-pwm.c
@@ -142,10 +142,16 @@ static int __devexit ab8500_pwm_remove(struct platform_device *pdev)
142 return 0; 142 return 0;
143} 143}
144 144
145static const struct of_device_id ab8500_pwm_match[] = {
146 { .compatible = "stericsson,ab8500-pwm", },
147 {}
148};
149
145static struct platform_driver ab8500_pwm_driver = { 150static struct platform_driver ab8500_pwm_driver = {
146 .driver = { 151 .driver = {
147 .name = "ab8500-pwm", 152 .name = "ab8500-pwm",
148 .owner = THIS_MODULE, 153 .owner = THIS_MODULE,
154 .of_match_table = ab8500_pwm_match,
149 }, 155 },
150 .probe = ab8500_pwm_probe, 156 .probe = ab8500_pwm_probe,
151 .remove = __devexit_p(ab8500_pwm_remove), 157 .remove = __devexit_p(ab8500_pwm_remove),
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index dabec556ebb8..dd2d374dcc7a 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -384,7 +384,7 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
384 md = mmc_blk_get(bdev->bd_disk); 384 md = mmc_blk_get(bdev->bd_disk);
385 if (!md) { 385 if (!md) {
386 err = -EINVAL; 386 err = -EINVAL;
387 goto cmd_done; 387 goto cmd_err;
388 } 388 }
389 389
390 card = md->queue.card; 390 card = md->queue.card;
@@ -483,6 +483,7 @@ cmd_rel_host:
483 483
484cmd_done: 484cmd_done:
485 mmc_blk_put(md); 485 mmc_blk_put(md);
486cmd_err:
486 kfree(idata->buf); 487 kfree(idata->buf);
487 kfree(idata); 488 kfree(idata);
488 return err; 489 return err;
@@ -1283,7 +1284,7 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
1283 int ret = 1, disable_multi = 0, retry = 0, type; 1284 int ret = 1, disable_multi = 0, retry = 0, type;
1284 enum mmc_blk_status status; 1285 enum mmc_blk_status status;
1285 struct mmc_queue_req *mq_rq; 1286 struct mmc_queue_req *mq_rq;
1286 struct request *req; 1287 struct request *req = rqc;
1287 struct mmc_async_req *areq; 1288 struct mmc_async_req *areq;
1288 1289
1289 if (!rqc && !mq->mqrq_prev->req) 1290 if (!rqc && !mq->mqrq_prev->req)
@@ -1291,6 +1292,16 @@ static int mmc_blk_issue_rw_rq(struct mmc_queue *mq, struct request *rqc)
1291 1292
1292 do { 1293 do {
1293 if (rqc) { 1294 if (rqc) {
1295 /*
1296 * When 4KB native sector is enabled, only 8 blocks
1297 * multiple read or write is allowed
1298 */
1299 if ((brq->data.blocks & 0x07) &&
1300 (card->ext_csd.data_sector_size == 4096)) {
1301 pr_err("%s: Transfer size is not 4KB sector size aligned\n",
1302 req->rq_disk->disk_name);
1303 goto cmd_abort;
1304 }
1294 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq); 1305 mmc_blk_rw_rq_prep(mq->mqrq_cur, card, 0, mq);
1295 areq = &mq->mqrq_cur->mmc_active; 1306 areq = &mq->mqrq_cur->mmc_active;
1296 } else 1307 } else
@@ -1538,7 +1549,12 @@ static struct mmc_blk_data *mmc_blk_alloc_req(struct mmc_card *card,
1538 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name), 1549 snprintf(md->disk->disk_name, sizeof(md->disk->disk_name),
1539 "mmcblk%d%s", md->name_idx, subname ? subname : ""); 1550 "mmcblk%d%s", md->name_idx, subname ? subname : "");
1540 1551
1541 blk_queue_logical_block_size(md->queue.queue, 512); 1552 if (mmc_card_mmc(card))
1553 blk_queue_logical_block_size(md->queue.queue,
1554 card->ext_csd.data_sector_size);
1555 else
1556 blk_queue_logical_block_size(md->queue.queue, 512);
1557
1542 set_capacity(md->disk, size); 1558 set_capacity(md->disk, size);
1543 1559
1544 if (mmc_host_cmd23(card->host)) { 1560 if (mmc_host_cmd23(card->host)) {
diff --git a/drivers/mmc/card/queue.c b/drivers/mmc/card/queue.c
index 996f8e36e23d..e360a979857d 100644
--- a/drivers/mmc/card/queue.c
+++ b/drivers/mmc/card/queue.c
@@ -96,7 +96,7 @@ static int mmc_queue_thread(void *d)
96 * on any queue on this host, and attempt to issue it. This may 96 * on any queue on this host, and attempt to issue it. This may
97 * not be the queue we were asked to process. 97 * not be the queue we were asked to process.
98 */ 98 */
99static void mmc_request(struct request_queue *q) 99static void mmc_request_fn(struct request_queue *q)
100{ 100{
101 struct mmc_queue *mq = q->queuedata; 101 struct mmc_queue *mq = q->queuedata;
102 struct request *req; 102 struct request *req;
@@ -171,12 +171,10 @@ int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card,
171 limit = *mmc_dev(host)->dma_mask; 171 limit = *mmc_dev(host)->dma_mask;
172 172
173 mq->card = card; 173 mq->card = card;
174 mq->queue = blk_init_queue(mmc_request, lock); 174 mq->queue = blk_init_queue(mmc_request_fn, lock);
175 if (!mq->queue) 175 if (!mq->queue)
176 return -ENOMEM; 176 return -ENOMEM;
177 177
178 memset(&mq->mqrq_cur, 0, sizeof(mq->mqrq_cur));
179 memset(&mq->mqrq_prev, 0, sizeof(mq->mqrq_prev));
180 mq->mqrq_cur = mqrq_cur; 178 mq->mqrq_cur = mqrq_cur;
181 mq->mqrq_prev = mqrq_prev; 179 mq->mqrq_prev = mqrq_prev;
182 mq->queue->queuedata = mq; 180 mq->queue->queuedata = mq;
diff --git a/drivers/mmc/core/bus.c b/drivers/mmc/core/bus.c
index c60cee92a2b2..9b68933f27e7 100644
--- a/drivers/mmc/core/bus.c
+++ b/drivers/mmc/core/bus.c
@@ -122,6 +122,7 @@ static int mmc_bus_remove(struct device *dev)
122 return 0; 122 return 0;
123} 123}
124 124
125#ifdef CONFIG_PM_SLEEP
125static int mmc_bus_suspend(struct device *dev) 126static int mmc_bus_suspend(struct device *dev)
126{ 127{
127 struct mmc_driver *drv = to_mmc_driver(dev->driver); 128 struct mmc_driver *drv = to_mmc_driver(dev->driver);
@@ -143,6 +144,7 @@ static int mmc_bus_resume(struct device *dev)
143 ret = drv->resume(card); 144 ret = drv->resume(card);
144 return ret; 145 return ret;
145} 146}
147#endif
146 148
147#ifdef CONFIG_PM_RUNTIME 149#ifdef CONFIG_PM_RUNTIME
148 150
diff --git a/drivers/mmc/core/cd-gpio.c b/drivers/mmc/core/cd-gpio.c
index 2c14be73254c..f13e38deceac 100644
--- a/drivers/mmc/core/cd-gpio.c
+++ b/drivers/mmc/core/cd-gpio.c
@@ -73,6 +73,9 @@ void mmc_cd_gpio_free(struct mmc_host *host)
73{ 73{
74 struct mmc_cd_gpio *cd = host->hotplug.handler_priv; 74 struct mmc_cd_gpio *cd = host->hotplug.handler_priv;
75 75
76 if (!cd)
77 return;
78
76 free_irq(host->hotplug.irq, host); 79 free_irq(host->hotplug.irq, host);
77 gpio_free(cd->gpio); 80 gpio_free(cd->gpio);
78 kfree(cd); 81 kfree(cd);
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index ba821fe70bca..0b6141d29dbd 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -42,6 +42,7 @@
42#include "sdio_ops.h" 42#include "sdio_ops.h"
43 43
44static struct workqueue_struct *workqueue; 44static struct workqueue_struct *workqueue;
45static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
45 46
46/* 47/*
47 * Enabling software CRCs on the data blocks can be a significant (30%) 48 * Enabling software CRCs on the data blocks can be a significant (30%)
@@ -1157,6 +1158,9 @@ static void mmc_power_up(struct mmc_host *host)
1157{ 1158{
1158 int bit; 1159 int bit;
1159 1160
1161 if (host->ios.power_mode == MMC_POWER_ON)
1162 return;
1163
1160 mmc_host_clk_hold(host); 1164 mmc_host_clk_hold(host);
1161 1165
1162 /* If ocr is set, we use it */ 1166 /* If ocr is set, we use it */
@@ -1199,6 +1203,10 @@ static void mmc_power_up(struct mmc_host *host)
1199void mmc_power_off(struct mmc_host *host) 1203void mmc_power_off(struct mmc_host *host)
1200{ 1204{
1201 int err = 0; 1205 int err = 0;
1206
1207 if (host->ios.power_mode == MMC_POWER_OFF)
1208 return;
1209
1202 mmc_host_clk_hold(host); 1210 mmc_host_clk_hold(host);
1203 1211
1204 host->ios.clock = 0; 1212 host->ios.clock = 0;
@@ -2005,7 +2013,6 @@ EXPORT_SYMBOL(mmc_detect_card_removed);
2005 2013
2006void mmc_rescan(struct work_struct *work) 2014void mmc_rescan(struct work_struct *work)
2007{ 2015{
2008 static const unsigned freqs[] = { 400000, 300000, 200000, 100000 };
2009 struct mmc_host *host = 2016 struct mmc_host *host =
2010 container_of(work, struct mmc_host, detect.work); 2017 container_of(work, struct mmc_host, detect.work);
2011 int i; 2018 int i;
@@ -2044,8 +2051,12 @@ void mmc_rescan(struct work_struct *work)
2044 */ 2051 */
2045 mmc_bus_put(host); 2052 mmc_bus_put(host);
2046 2053
2047 if (host->ops->get_cd && host->ops->get_cd(host) == 0) 2054 if (host->ops->get_cd && host->ops->get_cd(host) == 0) {
2055 mmc_claim_host(host);
2056 mmc_power_off(host);
2057 mmc_release_host(host);
2048 goto out; 2058 goto out;
2059 }
2049 2060
2050 mmc_claim_host(host); 2061 mmc_claim_host(host);
2051 for (i = 0; i < ARRAY_SIZE(freqs); i++) { 2062 for (i = 0; i < ARRAY_SIZE(freqs); i++) {
@@ -2063,7 +2074,8 @@ void mmc_rescan(struct work_struct *work)
2063 2074
2064void mmc_start_host(struct mmc_host *host) 2075void mmc_start_host(struct mmc_host *host)
2065{ 2076{
2066 mmc_power_off(host); 2077 host->f_init = max(freqs[0], host->f_min);
2078 mmc_power_up(host);
2067 mmc_detect_change(host, 0); 2079 mmc_detect_change(host, 0);
2068} 2080}
2069 2081
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 54df5adc0413..2d4a4b746750 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -235,6 +235,36 @@ static int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd)
235 return err; 235 return err;
236} 236}
237 237
238static void mmc_select_card_type(struct mmc_card *card)
239{
240 struct mmc_host *host = card->host;
241 u8 card_type = card->ext_csd.raw_card_type & EXT_CSD_CARD_TYPE_MASK;
242 unsigned int caps = host->caps, caps2 = host->caps2;
243 unsigned int hs_max_dtr = 0;
244
245 if (card_type & EXT_CSD_CARD_TYPE_26)
246 hs_max_dtr = MMC_HIGH_26_MAX_DTR;
247
248 if (caps & MMC_CAP_MMC_HIGHSPEED &&
249 card_type & EXT_CSD_CARD_TYPE_52)
250 hs_max_dtr = MMC_HIGH_52_MAX_DTR;
251
252 if ((caps & MMC_CAP_1_8V_DDR &&
253 card_type & EXT_CSD_CARD_TYPE_DDR_1_8V) ||
254 (caps & MMC_CAP_1_2V_DDR &&
255 card_type & EXT_CSD_CARD_TYPE_DDR_1_2V))
256 hs_max_dtr = MMC_HIGH_DDR_MAX_DTR;
257
258 if ((caps2 & MMC_CAP2_HS200_1_8V_SDR &&
259 card_type & EXT_CSD_CARD_TYPE_SDR_1_8V) ||
260 (caps2 & MMC_CAP2_HS200_1_2V_SDR &&
261 card_type & EXT_CSD_CARD_TYPE_SDR_1_2V))
262 hs_max_dtr = MMC_HS200_MAX_DTR;
263
264 card->ext_csd.hs_max_dtr = hs_max_dtr;
265 card->ext_csd.card_type = card_type;
266}
267
238/* 268/*
239 * Decode extended CSD. 269 * Decode extended CSD.
240 */ 270 */
@@ -284,56 +314,9 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
284 if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512) 314 if (card->ext_csd.sectors > (2u * 1024 * 1024 * 1024) / 512)
285 mmc_card_set_blockaddr(card); 315 mmc_card_set_blockaddr(card);
286 } 316 }
317
287 card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE]; 318 card->ext_csd.raw_card_type = ext_csd[EXT_CSD_CARD_TYPE];
288 switch (ext_csd[EXT_CSD_CARD_TYPE] & EXT_CSD_CARD_TYPE_MASK) { 319 mmc_select_card_type(card);
289 case EXT_CSD_CARD_TYPE_SDR_ALL:
290 case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V:
291 case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V:
292 case EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52:
293 card->ext_csd.hs_max_dtr = 200000000;
294 card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_200;
295 break;
296 case EXT_CSD_CARD_TYPE_SDR_1_2V_ALL:
297 case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V:
298 case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V:
299 case EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52:
300 card->ext_csd.hs_max_dtr = 200000000;
301 card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_2V;
302 break;
303 case EXT_CSD_CARD_TYPE_SDR_1_8V_ALL:
304 case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V:
305 case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V:
306 case EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52:
307 card->ext_csd.hs_max_dtr = 200000000;
308 card->ext_csd.card_type = EXT_CSD_CARD_TYPE_SDR_1_8V;
309 break;
310 case EXT_CSD_CARD_TYPE_DDR_52 | EXT_CSD_CARD_TYPE_52 |
311 EXT_CSD_CARD_TYPE_26:
312 card->ext_csd.hs_max_dtr = 52000000;
313 card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_52;
314 break;
315 case EXT_CSD_CARD_TYPE_DDR_1_2V | EXT_CSD_CARD_TYPE_52 |
316 EXT_CSD_CARD_TYPE_26:
317 card->ext_csd.hs_max_dtr = 52000000;
318 card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_2V;
319 break;
320 case EXT_CSD_CARD_TYPE_DDR_1_8V | EXT_CSD_CARD_TYPE_52 |
321 EXT_CSD_CARD_TYPE_26:
322 card->ext_csd.hs_max_dtr = 52000000;
323 card->ext_csd.card_type = EXT_CSD_CARD_TYPE_DDR_1_8V;
324 break;
325 case EXT_CSD_CARD_TYPE_52 | EXT_CSD_CARD_TYPE_26:
326 card->ext_csd.hs_max_dtr = 52000000;
327 break;
328 case EXT_CSD_CARD_TYPE_26:
329 card->ext_csd.hs_max_dtr = 26000000;
330 break;
331 default:
332 /* MMC v4 spec says this cannot happen */
333 pr_warning("%s: card is mmc v4 but doesn't "
334 "support any high-speed modes.\n",
335 mmc_hostname(card->host));
336 }
337 320
338 card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT]; 321 card->ext_csd.raw_s_a_timeout = ext_csd[EXT_CSD_S_A_TIMEOUT];
339 card->ext_csd.raw_erase_timeout_mult = 322 card->ext_csd.raw_erase_timeout_mult =
@@ -533,6 +516,8 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd)
533 } else { 516 } else {
534 card->ext_csd.data_tag_unit_size = 0; 517 card->ext_csd.data_tag_unit_size = 0;
535 } 518 }
519 } else {
520 card->ext_csd.data_sector_size = 512;
536 } 521 }
537 522
538out: 523out:
@@ -556,14 +541,10 @@ static int mmc_compare_ext_csds(struct mmc_card *card, unsigned bus_width)
556 err = mmc_get_ext_csd(card, &bw_ext_csd); 541 err = mmc_get_ext_csd(card, &bw_ext_csd);
557 542
558 if (err || bw_ext_csd == NULL) { 543 if (err || bw_ext_csd == NULL) {
559 if (bus_width != MMC_BUS_WIDTH_1) 544 err = -EINVAL;
560 err = -EINVAL;
561 goto out; 545 goto out;
562 } 546 }
563 547
564 if (bus_width == MMC_BUS_WIDTH_1)
565 goto out;
566
567 /* only compare read only fields */ 548 /* only compare read only fields */
568 err = !((card->ext_csd.raw_partition_support == 549 err = !((card->ext_csd.raw_partition_support ==
569 bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) && 550 bw_ext_csd[EXT_CSD_PARTITION_SUPPORT]) &&
@@ -736,6 +717,10 @@ static int mmc_select_powerclass(struct mmc_card *card,
736 card->ext_csd.generic_cmd6_time); 717 card->ext_csd.generic_cmd6_time);
737 } 718 }
738 719
720 if (err)
721 pr_err("%s: power class selection for ext_csd_bus_width %d"
722 " failed\n", mmc_hostname(card->host), bus_width);
723
739 return err; 724 return err;
740} 725}
741 726
@@ -745,7 +730,7 @@ static int mmc_select_powerclass(struct mmc_card *card,
745 */ 730 */
746static int mmc_select_hs200(struct mmc_card *card) 731static int mmc_select_hs200(struct mmc_card *card)
747{ 732{
748 int idx, err = 0; 733 int idx, err = -EINVAL;
749 struct mmc_host *host; 734 struct mmc_host *host;
750 static unsigned ext_csd_bits[] = { 735 static unsigned ext_csd_bits[] = {
751 EXT_CSD_BUS_WIDTH_4, 736 EXT_CSD_BUS_WIDTH_4,
@@ -761,10 +746,12 @@ static int mmc_select_hs200(struct mmc_card *card)
761 host = card->host; 746 host = card->host;
762 747
763 if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V && 748 if (card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_2V &&
764 host->caps2 & MMC_CAP2_HS200_1_2V_SDR) 749 host->caps2 & MMC_CAP2_HS200_1_2V_SDR)
765 if (mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0)) 750 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_120, 0);
766 err = mmc_set_signal_voltage(host, 751
767 MMC_SIGNAL_VOLTAGE_180, 0); 752 if (err && card->ext_csd.card_type & EXT_CSD_CARD_TYPE_SDR_1_8V &&
753 host->caps2 & MMC_CAP2_HS200_1_8V_SDR)
754 err = mmc_set_signal_voltage(host, MMC_SIGNAL_VOLTAGE_180, 0);
768 755
769 /* If fails try again during next card power cycle */ 756 /* If fails try again during next card power cycle */
770 if (err) 757 if (err)
@@ -1117,9 +1104,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1117 EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4; 1104 EXT_CSD_BUS_WIDTH_8 : EXT_CSD_BUS_WIDTH_4;
1118 err = mmc_select_powerclass(card, ext_csd_bits, ext_csd); 1105 err = mmc_select_powerclass(card, ext_csd_bits, ext_csd);
1119 if (err) 1106 if (err)
1120 pr_warning("%s: power class selection to bus width %d" 1107 goto err;
1121 " failed\n", mmc_hostname(card->host),
1122 1 << bus_width);
1123 } 1108 }
1124 1109
1125 /* 1110 /*
@@ -1151,10 +1136,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1151 err = mmc_select_powerclass(card, ext_csd_bits[idx][0], 1136 err = mmc_select_powerclass(card, ext_csd_bits[idx][0],
1152 ext_csd); 1137 ext_csd);
1153 if (err) 1138 if (err)
1154 pr_warning("%s: power class selection to " 1139 goto err;
1155 "bus width %d failed\n",
1156 mmc_hostname(card->host),
1157 1 << bus_width);
1158 1140
1159 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1141 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1160 EXT_CSD_BUS_WIDTH, 1142 EXT_CSD_BUS_WIDTH,
@@ -1182,10 +1164,7 @@ static int mmc_init_card(struct mmc_host *host, u32 ocr,
1182 err = mmc_select_powerclass(card, ext_csd_bits[idx][1], 1164 err = mmc_select_powerclass(card, ext_csd_bits[idx][1],
1183 ext_csd); 1165 ext_csd);
1184 if (err) 1166 if (err)
1185 pr_warning("%s: power class selection to " 1167 goto err;
1186 "bus width %d ddr %d failed\n",
1187 mmc_hostname(card->host),
1188 1 << bus_width, ddr);
1189 1168
1190 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL, 1169 err = mmc_switch(card, EXT_CSD_CMD_SET_NORMAL,
1191 EXT_CSD_BUS_WIDTH, 1170 EXT_CSD_BUS_WIDTH,
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 2c7c83f832d2..13d0e95380ab 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -947,7 +947,7 @@ static int mmc_sdio_resume(struct mmc_host *host)
947 } 947 }
948 948
949 if (!err && host->sdio_irqs) 949 if (!err && host->sdio_irqs)
950 mmc_signal_sdio_irq(host); 950 wake_up_process(host->sdio_irq_thread);
951 mmc_release_host(host); 951 mmc_release_host(host);
952 952
953 /* 953 /*
diff --git a/drivers/mmc/core/sdio_irq.c b/drivers/mmc/core/sdio_irq.c
index f573e7f9f740..3d8ceb4084de 100644
--- a/drivers/mmc/core/sdio_irq.c
+++ b/drivers/mmc/core/sdio_irq.c
@@ -28,18 +28,20 @@
28 28
29#include "sdio_ops.h" 29#include "sdio_ops.h"
30 30
31static int process_sdio_pending_irqs(struct mmc_card *card) 31static int process_sdio_pending_irqs(struct mmc_host *host)
32{ 32{
33 struct mmc_card *card = host->card;
33 int i, ret, count; 34 int i, ret, count;
34 unsigned char pending; 35 unsigned char pending;
35 struct sdio_func *func; 36 struct sdio_func *func;
36 37
37 /* 38 /*
38 * Optimization, if there is only 1 function interrupt registered 39 * Optimization, if there is only 1 function interrupt registered
39 * call irq handler directly 40 * and we know an IRQ was signaled then call irq handler directly.
41 * Otherwise do the full probe.
40 */ 42 */
41 func = card->sdio_single_irq; 43 func = card->sdio_single_irq;
42 if (func) { 44 if (func && host->sdio_irq_pending) {
43 func->irq_handler(func); 45 func->irq_handler(func);
44 return 1; 46 return 1;
45 } 47 }
@@ -116,7 +118,8 @@ static int sdio_irq_thread(void *_host)
116 ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort); 118 ret = __mmc_claim_host(host, &host->sdio_irq_thread_abort);
117 if (ret) 119 if (ret)
118 break; 120 break;
119 ret = process_sdio_pending_irqs(host->card); 121 ret = process_sdio_pending_irqs(host);
122 host->sdio_irq_pending = false;
120 mmc_release_host(host); 123 mmc_release_host(host);
121 124
122 /* 125 /*
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 2bc06e7344db..aa131b32e3b2 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -278,10 +278,13 @@ choice
278 Choose which driver to use for the Atmel MCI Silicon 278 Choose which driver to use for the Atmel MCI Silicon
279 279
280config MMC_AT91 280config MMC_AT91
281 tristate "AT91 SD/MMC Card Interface support" 281 tristate "AT91 SD/MMC Card Interface support (DEPRECATED)"
282 depends on ARCH_AT91 282 depends on ARCH_AT91
283 help 283 help
284 This selects the AT91 MCI controller. 284 This selects the AT91 MCI controller. This driver will
285 be removed soon (for more information have a look to
286 Documentation/feature-removal-schedule.txt). Please use
287 MMC_ATMEL_MCI.
285 288
286 If unsure, say N. 289 If unsure, say N.
287 290
@@ -307,16 +310,6 @@ config MMC_ATMELMCI_DMA
307 310
308 If unsure, say N. 311 If unsure, say N.
309 312
310config MMC_IMX
311 tristate "Motorola i.MX Multimedia Card Interface support"
312 depends on ARCH_MX1
313 help
314 This selects the Motorola i.MX Multimedia card Interface.
315 If you have a i.MX platform with a Multimedia Card slot,
316 say Y or M here.
317
318 If unsure, say N.
319
320config MMC_MSM 313config MMC_MSM
321 tristate "Qualcomm SDCC Controller Support" 314 tristate "Qualcomm SDCC Controller Support"
322 depends on MMC && ARCH_MSM 315 depends on MMC && ARCH_MSM
diff --git a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile
index 3e7e26d08073..8922b06be925 100644
--- a/drivers/mmc/host/Makefile
+++ b/drivers/mmc/host/Makefile
@@ -4,7 +4,6 @@
4 4
5obj-$(CONFIG_MMC_ARMMMCI) += mmci.o 5obj-$(CONFIG_MMC_ARMMMCI) += mmci.o
6obj-$(CONFIG_MMC_PXA) += pxamci.o 6obj-$(CONFIG_MMC_PXA) += pxamci.o
7obj-$(CONFIG_MMC_IMX) += imxmmc.o
8obj-$(CONFIG_MMC_MXC) += mxcmmc.o 7obj-$(CONFIG_MMC_MXC) += mxcmmc.o
9obj-$(CONFIG_MMC_MXS) += mxs-mmc.o 8obj-$(CONFIG_MMC_MXS) += mxs-mmc.o
10obj-$(CONFIG_MMC_SDHCI) += sdhci.o 9obj-$(CONFIG_MMC_SDHCI) += sdhci.o
diff --git a/drivers/mmc/host/atmel-mci.c b/drivers/mmc/host/atmel-mci.c
index e94476beca18..420aca642b14 100644
--- a/drivers/mmc/host/atmel-mci.c
+++ b/drivers/mmc/host/atmel-mci.c
@@ -45,19 +45,19 @@
45#define ATMCI_DMA_THRESHOLD 16 45#define ATMCI_DMA_THRESHOLD 16
46 46
47enum { 47enum {
48 EVENT_CMD_COMPLETE = 0, 48 EVENT_CMD_RDY = 0,
49 EVENT_XFER_COMPLETE, 49 EVENT_XFER_COMPLETE,
50 EVENT_DATA_COMPLETE, 50 EVENT_NOTBUSY,
51 EVENT_DATA_ERROR, 51 EVENT_DATA_ERROR,
52}; 52};
53 53
54enum atmel_mci_state { 54enum atmel_mci_state {
55 STATE_IDLE = 0, 55 STATE_IDLE = 0,
56 STATE_SENDING_CMD, 56 STATE_SENDING_CMD,
57 STATE_SENDING_DATA, 57 STATE_DATA_XFER,
58 STATE_DATA_BUSY, 58 STATE_WAITING_NOTBUSY,
59 STATE_SENDING_STOP, 59 STATE_SENDING_STOP,
60 STATE_DATA_ERROR, 60 STATE_END_REQUEST,
61}; 61};
62 62
63enum atmci_xfer_dir { 63enum atmci_xfer_dir {
@@ -78,6 +78,9 @@ struct atmel_mci_caps {
78 bool has_highspeed; 78 bool has_highspeed;
79 bool has_rwproof; 79 bool has_rwproof;
80 bool has_odd_clk_div; 80 bool has_odd_clk_div;
81 bool has_bad_data_ordering;
82 bool need_reset_after_xfer;
83 bool need_blksz_mul_4;
81}; 84};
82 85
83struct atmel_mci_dma { 86struct atmel_mci_dma {
@@ -91,6 +94,11 @@ struct atmel_mci_dma {
91 * @regs: Pointer to MMIO registers. 94 * @regs: Pointer to MMIO registers.
92 * @sg: Scatterlist entry currently being processed by PIO or PDC code. 95 * @sg: Scatterlist entry currently being processed by PIO or PDC code.
93 * @pio_offset: Offset into the current scatterlist entry. 96 * @pio_offset: Offset into the current scatterlist entry.
97 * @buffer: Buffer used if we don't have the r/w proof capability. We
98 * don't have the time to switch pdc buffers so we have to use only
99 * one buffer for the full transaction.
100 * @buf_size: size of the buffer.
101 * @phys_buf_addr: buffer address needed for pdc.
94 * @cur_slot: The slot which is currently using the controller. 102 * @cur_slot: The slot which is currently using the controller.
95 * @mrq: The request currently being processed on @cur_slot, 103 * @mrq: The request currently being processed on @cur_slot,
96 * or NULL if the controller is idle. 104 * or NULL if the controller is idle.
@@ -116,6 +124,7 @@ struct atmel_mci_dma {
116 * @queue: List of slots waiting for access to the controller. 124 * @queue: List of slots waiting for access to the controller.
117 * @need_clock_update: Update the clock rate before the next request. 125 * @need_clock_update: Update the clock rate before the next request.
118 * @need_reset: Reset controller before next request. 126 * @need_reset: Reset controller before next request.
127 * @timer: Timer to balance the data timeout error flag which cannot rise.
119 * @mode_reg: Value of the MR register. 128 * @mode_reg: Value of the MR register.
120 * @cfg_reg: Value of the CFG register. 129 * @cfg_reg: Value of the CFG register.
121 * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus 130 * @bus_hz: The rate of @mck in Hz. This forms the basis for MMC bus
@@ -166,6 +175,9 @@ struct atmel_mci {
166 175
167 struct scatterlist *sg; 176 struct scatterlist *sg;
168 unsigned int pio_offset; 177 unsigned int pio_offset;
178 unsigned int *buffer;
179 unsigned int buf_size;
180 dma_addr_t buf_phys_addr;
169 181
170 struct atmel_mci_slot *cur_slot; 182 struct atmel_mci_slot *cur_slot;
171 struct mmc_request *mrq; 183 struct mmc_request *mrq;
@@ -189,6 +201,7 @@ struct atmel_mci {
189 201
190 bool need_clock_update; 202 bool need_clock_update;
191 bool need_reset; 203 bool need_reset;
204 struct timer_list timer;
192 u32 mode_reg; 205 u32 mode_reg;
193 u32 cfg_reg; 206 u32 cfg_reg;
194 unsigned long bus_hz; 207 unsigned long bus_hz;
@@ -480,6 +493,32 @@ err:
480 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n"); 493 dev_err(&mmc->class_dev, "failed to initialize debugfs for slot\n");
481} 494}
482 495
496static inline unsigned int atmci_get_version(struct atmel_mci *host)
497{
498 return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
499}
500
501static void atmci_timeout_timer(unsigned long data)
502{
503 struct atmel_mci *host;
504
505 host = (struct atmel_mci *)data;
506
507 dev_dbg(&host->pdev->dev, "software timeout\n");
508
509 if (host->mrq->cmd->data) {
510 host->mrq->cmd->data->error = -ETIMEDOUT;
511 host->data = NULL;
512 } else {
513 host->mrq->cmd->error = -ETIMEDOUT;
514 host->cmd = NULL;
515 }
516 host->need_reset = 1;
517 host->state = STATE_END_REQUEST;
518 smp_wmb();
519 tasklet_schedule(&host->tasklet);
520}
521
483static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host, 522static inline unsigned int atmci_ns_to_clocks(struct atmel_mci *host,
484 unsigned int ns) 523 unsigned int ns)
485{ 524{
@@ -591,6 +630,7 @@ static void atmci_send_command(struct atmel_mci *host,
591 630
592static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data) 631static void atmci_send_stop_cmd(struct atmel_mci *host, struct mmc_data *data)
593{ 632{
633 dev_dbg(&host->pdev->dev, "send stop command\n");
594 atmci_send_command(host, data->stop, host->stop_cmdr); 634 atmci_send_command(host, data->stop, host->stop_cmdr);
595 atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY); 635 atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
596} 636}
@@ -603,6 +643,7 @@ static void atmci_pdc_set_single_buf(struct atmel_mci *host,
603 enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb) 643 enum atmci_xfer_dir dir, enum atmci_pdc_buf buf_nb)
604{ 644{
605 u32 pointer_reg, counter_reg; 645 u32 pointer_reg, counter_reg;
646 unsigned int buf_size;
606 647
607 if (dir == XFER_RECEIVE) { 648 if (dir == XFER_RECEIVE) {
608 pointer_reg = ATMEL_PDC_RPR; 649 pointer_reg = ATMEL_PDC_RPR;
@@ -617,8 +658,15 @@ static void atmci_pdc_set_single_buf(struct atmel_mci *host,
617 counter_reg += ATMEL_PDC_SCND_BUF_OFF; 658 counter_reg += ATMEL_PDC_SCND_BUF_OFF;
618 } 659 }
619 660
620 atmci_writel(host, pointer_reg, sg_dma_address(host->sg)); 661 if (!host->caps.has_rwproof) {
621 if (host->data_size <= sg_dma_len(host->sg)) { 662 buf_size = host->buf_size;
663 atmci_writel(host, pointer_reg, host->buf_phys_addr);
664 } else {
665 buf_size = sg_dma_len(host->sg);
666 atmci_writel(host, pointer_reg, sg_dma_address(host->sg));
667 }
668
669 if (host->data_size <= buf_size) {
622 if (host->data_size & 0x3) { 670 if (host->data_size & 0x3) {
623 /* If size is different from modulo 4, transfer bytes */ 671 /* If size is different from modulo 4, transfer bytes */
624 atmci_writel(host, counter_reg, host->data_size); 672 atmci_writel(host, counter_reg, host->data_size);
@@ -670,7 +718,20 @@ static void atmci_pdc_cleanup(struct atmel_mci *host)
670 */ 718 */
671static void atmci_pdc_complete(struct atmel_mci *host) 719static void atmci_pdc_complete(struct atmel_mci *host)
672{ 720{
721 int transfer_size = host->data->blocks * host->data->blksz;
722 int i;
723
673 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS); 724 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
725
726 if ((!host->caps.has_rwproof)
727 && (host->data->flags & MMC_DATA_READ)) {
728 if (host->caps.has_bad_data_ordering)
729 for (i = 0; i < transfer_size; i++)
730 host->buffer[i] = swab32(host->buffer[i]);
731 sg_copy_from_buffer(host->data->sg, host->data->sg_len,
732 host->buffer, transfer_size);
733 }
734
674 atmci_pdc_cleanup(host); 735 atmci_pdc_cleanup(host);
675 736
676 /* 737 /*
@@ -678,9 +739,10 @@ static void atmci_pdc_complete(struct atmel_mci *host)
678 * to send the stop command or waiting for NBUSY in this case. 739 * to send the stop command or waiting for NBUSY in this case.
679 */ 740 */
680 if (host->data) { 741 if (host->data) {
742 dev_dbg(&host->pdev->dev,
743 "(%s) set pending xfer complete\n", __func__);
681 atmci_set_pending(host, EVENT_XFER_COMPLETE); 744 atmci_set_pending(host, EVENT_XFER_COMPLETE);
682 tasklet_schedule(&host->tasklet); 745 tasklet_schedule(&host->tasklet);
683 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
684 } 746 }
685} 747}
686 748
@@ -716,6 +778,8 @@ static void atmci_dma_complete(void *arg)
716 * to send the stop command or waiting for NBUSY in this case. 778 * to send the stop command or waiting for NBUSY in this case.
717 */ 779 */
718 if (data) { 780 if (data) {
781 dev_dbg(&host->pdev->dev,
782 "(%s) set pending xfer complete\n", __func__);
719 atmci_set_pending(host, EVENT_XFER_COMPLETE); 783 atmci_set_pending(host, EVENT_XFER_COMPLETE);
720 tasklet_schedule(&host->tasklet); 784 tasklet_schedule(&host->tasklet);
721 785
@@ -791,6 +855,7 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
791 u32 iflags, tmp; 855 u32 iflags, tmp;
792 unsigned int sg_len; 856 unsigned int sg_len;
793 enum dma_data_direction dir; 857 enum dma_data_direction dir;
858 int i;
794 859
795 data->error = -EINPROGRESS; 860 data->error = -EINPROGRESS;
796 861
@@ -806,7 +871,7 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
806 iflags |= ATMCI_ENDRX | ATMCI_RXBUFF; 871 iflags |= ATMCI_ENDRX | ATMCI_RXBUFF;
807 } else { 872 } else {
808 dir = DMA_TO_DEVICE; 873 dir = DMA_TO_DEVICE;
809 iflags |= ATMCI_ENDTX | ATMCI_TXBUFE; 874 iflags |= ATMCI_ENDTX | ATMCI_TXBUFE | ATMCI_BLKE;
810 } 875 }
811 876
812 /* Set BLKLEN */ 877 /* Set BLKLEN */
@@ -818,6 +883,16 @@ atmci_prepare_data_pdc(struct atmel_mci *host, struct mmc_data *data)
818 /* Configure PDC */ 883 /* Configure PDC */
819 host->data_size = data->blocks * data->blksz; 884 host->data_size = data->blocks * data->blksz;
820 sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir); 885 sg_len = dma_map_sg(&host->pdev->dev, data->sg, data->sg_len, dir);
886
887 if ((!host->caps.has_rwproof)
888 && (host->data->flags & MMC_DATA_WRITE)) {
889 sg_copy_to_buffer(host->data->sg, host->data->sg_len,
890 host->buffer, host->data_size);
891 if (host->caps.has_bad_data_ordering)
892 for (i = 0; i < host->data_size; i++)
893 host->buffer[i] = swab32(host->buffer[i]);
894 }
895
821 if (host->data_size) 896 if (host->data_size)
822 atmci_pdc_set_both_buf(host, 897 atmci_pdc_set_both_buf(host,
823 ((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT)); 898 ((dir == DMA_FROM_DEVICE) ? XFER_RECEIVE : XFER_TRANSMIT));
@@ -931,6 +1006,8 @@ atmci_submit_data_dma(struct atmel_mci *host, struct mmc_data *data)
931 1006
932static void atmci_stop_transfer(struct atmel_mci *host) 1007static void atmci_stop_transfer(struct atmel_mci *host)
933{ 1008{
1009 dev_dbg(&host->pdev->dev,
1010 "(%s) set pending xfer complete\n", __func__);
934 atmci_set_pending(host, EVENT_XFER_COMPLETE); 1011 atmci_set_pending(host, EVENT_XFER_COMPLETE);
935 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); 1012 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
936} 1013}
@@ -940,8 +1017,7 @@ static void atmci_stop_transfer(struct atmel_mci *host)
940 */ 1017 */
941static void atmci_stop_transfer_pdc(struct atmel_mci *host) 1018static void atmci_stop_transfer_pdc(struct atmel_mci *host)
942{ 1019{
943 atmci_set_pending(host, EVENT_XFER_COMPLETE); 1020 atmci_writel(host, ATMEL_PDC_PTCR, ATMEL_PDC_RXTDIS | ATMEL_PDC_TXTDIS);
944 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
945} 1021}
946 1022
947static void atmci_stop_transfer_dma(struct atmel_mci *host) 1023static void atmci_stop_transfer_dma(struct atmel_mci *host)
@@ -953,6 +1029,8 @@ static void atmci_stop_transfer_dma(struct atmel_mci *host)
953 atmci_dma_cleanup(host); 1029 atmci_dma_cleanup(host);
954 } else { 1030 } else {
955 /* Data transfer was stopped by the interrupt handler */ 1031 /* Data transfer was stopped by the interrupt handler */
1032 dev_dbg(&host->pdev->dev,
1033 "(%s) set pending xfer complete\n", __func__);
956 atmci_set_pending(host, EVENT_XFER_COMPLETE); 1034 atmci_set_pending(host, EVENT_XFER_COMPLETE);
957 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY); 1035 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
958 } 1036 }
@@ -977,9 +1055,12 @@ static void atmci_start_request(struct atmel_mci *host,
977 1055
978 host->pending_events = 0; 1056 host->pending_events = 0;
979 host->completed_events = 0; 1057 host->completed_events = 0;
1058 host->cmd_status = 0;
980 host->data_status = 0; 1059 host->data_status = 0;
981 1060
982 if (host->need_reset) { 1061 dev_dbg(&host->pdev->dev, "start request: cmd %u\n", mrq->cmd->opcode);
1062
1063 if (host->need_reset || host->caps.need_reset_after_xfer) {
983 iflags = atmci_readl(host, ATMCI_IMR); 1064 iflags = atmci_readl(host, ATMCI_IMR);
984 iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB); 1065 iflags &= (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB);
985 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST); 1066 atmci_writel(host, ATMCI_CR, ATMCI_CR_SWRST);
@@ -994,7 +1075,7 @@ static void atmci_start_request(struct atmel_mci *host,
994 1075
995 iflags = atmci_readl(host, ATMCI_IMR); 1076 iflags = atmci_readl(host, ATMCI_IMR);
996 if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB)) 1077 if (iflags & ~(ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
997 dev_warn(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n", 1078 dev_dbg(&slot->mmc->class_dev, "WARNING: IMR=0x%08x\n",
998 iflags); 1079 iflags);
999 1080
1000 if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) { 1081 if (unlikely(test_and_clear_bit(ATMCI_CARD_NEED_INIT, &slot->flags))) {
@@ -1043,6 +1124,8 @@ static void atmci_start_request(struct atmel_mci *host,
1043 * prepared yet.) 1124 * prepared yet.)
1044 */ 1125 */
1045 atmci_writel(host, ATMCI_IER, iflags); 1126 atmci_writel(host, ATMCI_IER, iflags);
1127
1128 mod_timer(&host->timer, jiffies + msecs_to_jiffies(2000));
1046} 1129}
1047 1130
1048static void atmci_queue_request(struct atmel_mci *host, 1131static void atmci_queue_request(struct atmel_mci *host,
@@ -1057,6 +1140,7 @@ static void atmci_queue_request(struct atmel_mci *host,
1057 host->state = STATE_SENDING_CMD; 1140 host->state = STATE_SENDING_CMD;
1058 atmci_start_request(host, slot); 1141 atmci_start_request(host, slot);
1059 } else { 1142 } else {
1143 dev_dbg(&host->pdev->dev, "queue request\n");
1060 list_add_tail(&slot->queue_node, &host->queue); 1144 list_add_tail(&slot->queue_node, &host->queue);
1061 } 1145 }
1062 spin_unlock_bh(&host->lock); 1146 spin_unlock_bh(&host->lock);
@@ -1069,6 +1153,7 @@ static void atmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
1069 struct mmc_data *data; 1153 struct mmc_data *data;
1070 1154
1071 WARN_ON(slot->mrq); 1155 WARN_ON(slot->mrq);
1156 dev_dbg(&host->pdev->dev, "MRQ: cmd %u\n", mrq->cmd->opcode);
1072 1157
1073 /* 1158 /*
1074 * We may "know" the card is gone even though there's still an 1159 * We may "know" the card is gone even though there's still an
@@ -1308,6 +1393,8 @@ static void atmci_request_end(struct atmel_mci *host, struct mmc_request *mrq)
1308 host->state = STATE_IDLE; 1393 host->state = STATE_IDLE;
1309 } 1394 }
1310 1395
1396 del_timer(&host->timer);
1397
1311 spin_unlock(&host->lock); 1398 spin_unlock(&host->lock);
1312 mmc_request_done(prev_mmc, mrq); 1399 mmc_request_done(prev_mmc, mrq);
1313 spin_lock(&host->lock); 1400 spin_lock(&host->lock);
@@ -1330,21 +1417,13 @@ static void atmci_command_complete(struct atmel_mci *host,
1330 cmd->error = -EILSEQ; 1417 cmd->error = -EILSEQ;
1331 else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE)) 1418 else if (status & (ATMCI_RINDE | ATMCI_RDIRE | ATMCI_RENDE))
1332 cmd->error = -EIO; 1419 cmd->error = -EIO;
1333 else 1420 else if (host->mrq->data && (host->mrq->data->blksz & 3)) {
1334 cmd->error = 0; 1421 if (host->caps.need_blksz_mul_4) {
1335 1422 cmd->error = -EINVAL;
1336 if (cmd->error) { 1423 host->need_reset = 1;
1337 dev_dbg(&host->pdev->dev,
1338 "command error: status=0x%08x\n", status);
1339
1340 if (cmd->data) {
1341 host->stop_transfer(host);
1342 host->data = NULL;
1343 atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY
1344 | ATMCI_TXRDY | ATMCI_RXRDY
1345 | ATMCI_DATA_ERROR_FLAGS);
1346 } 1424 }
1347 } 1425 } else
1426 cmd->error = 0;
1348} 1427}
1349 1428
1350static void atmci_detect_change(unsigned long data) 1429static void atmci_detect_change(unsigned long data)
@@ -1407,23 +1486,21 @@ static void atmci_detect_change(unsigned long data)
1407 break; 1486 break;
1408 case STATE_SENDING_CMD: 1487 case STATE_SENDING_CMD:
1409 mrq->cmd->error = -ENOMEDIUM; 1488 mrq->cmd->error = -ENOMEDIUM;
1410 if (!mrq->data) 1489 if (mrq->data)
1411 break; 1490 host->stop_transfer(host);
1412 /* fall through */ 1491 break;
1413 case STATE_SENDING_DATA: 1492 case STATE_DATA_XFER:
1414 mrq->data->error = -ENOMEDIUM; 1493 mrq->data->error = -ENOMEDIUM;
1415 host->stop_transfer(host); 1494 host->stop_transfer(host);
1416 break; 1495 break;
1417 case STATE_DATA_BUSY: 1496 case STATE_WAITING_NOTBUSY:
1418 case STATE_DATA_ERROR: 1497 mrq->data->error = -ENOMEDIUM;
1419 if (mrq->data->error == -EINPROGRESS) 1498 break;
1420 mrq->data->error = -ENOMEDIUM;
1421 if (!mrq->stop)
1422 break;
1423 /* fall through */
1424 case STATE_SENDING_STOP: 1499 case STATE_SENDING_STOP:
1425 mrq->stop->error = -ENOMEDIUM; 1500 mrq->stop->error = -ENOMEDIUM;
1426 break; 1501 break;
1502 case STATE_END_REQUEST:
1503 break;
1427 } 1504 }
1428 1505
1429 atmci_request_end(host, mrq); 1506 atmci_request_end(host, mrq);
@@ -1451,7 +1528,6 @@ static void atmci_tasklet_func(unsigned long priv)
1451 struct atmel_mci *host = (struct atmel_mci *)priv; 1528 struct atmel_mci *host = (struct atmel_mci *)priv;
1452 struct mmc_request *mrq = host->mrq; 1529 struct mmc_request *mrq = host->mrq;
1453 struct mmc_data *data = host->data; 1530 struct mmc_data *data = host->data;
1454 struct mmc_command *cmd = host->cmd;
1455 enum atmel_mci_state state = host->state; 1531 enum atmel_mci_state state = host->state;
1456 enum atmel_mci_state prev_state; 1532 enum atmel_mci_state prev_state;
1457 u32 status; 1533 u32 status;
@@ -1467,107 +1543,186 @@ static void atmci_tasklet_func(unsigned long priv)
1467 1543
1468 do { 1544 do {
1469 prev_state = state; 1545 prev_state = state;
1546 dev_dbg(&host->pdev->dev, "FSM: state=%d\n", state);
1470 1547
1471 switch (state) { 1548 switch (state) {
1472 case STATE_IDLE: 1549 case STATE_IDLE:
1473 break; 1550 break;
1474 1551
1475 case STATE_SENDING_CMD: 1552 case STATE_SENDING_CMD:
1553 /*
1554 * Command has been sent, we are waiting for command
1555 * ready. Then we have three next states possible:
1556 * END_REQUEST by default, WAITING_NOTBUSY if it's a
1557 * command needing it or DATA_XFER if there is data.
1558 */
1559 dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
1476 if (!atmci_test_and_clear_pending(host, 1560 if (!atmci_test_and_clear_pending(host,
1477 EVENT_CMD_COMPLETE)) 1561 EVENT_CMD_RDY))
1478 break; 1562 break;
1479 1563
1564 dev_dbg(&host->pdev->dev, "set completed cmd ready\n");
1480 host->cmd = NULL; 1565 host->cmd = NULL;
1481 atmci_set_completed(host, EVENT_CMD_COMPLETE); 1566 atmci_set_completed(host, EVENT_CMD_RDY);
1482 atmci_command_complete(host, mrq->cmd); 1567 atmci_command_complete(host, mrq->cmd);
1483 if (!mrq->data || cmd->error) { 1568 if (mrq->data) {
1484 atmci_request_end(host, host->mrq); 1569 dev_dbg(&host->pdev->dev,
1485 goto unlock; 1570 "command with data transfer");
1486 } 1571 /*
1572 * If there is a command error don't start
1573 * data transfer.
1574 */
1575 if (mrq->cmd->error) {
1576 host->stop_transfer(host);
1577 host->data = NULL;
1578 atmci_writel(host, ATMCI_IDR,
1579 ATMCI_TXRDY | ATMCI_RXRDY
1580 | ATMCI_DATA_ERROR_FLAGS);
1581 state = STATE_END_REQUEST;
1582 } else
1583 state = STATE_DATA_XFER;
1584 } else if ((!mrq->data) && (mrq->cmd->flags & MMC_RSP_BUSY)) {
1585 dev_dbg(&host->pdev->dev,
1586 "command response need waiting notbusy");
1587 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1588 state = STATE_WAITING_NOTBUSY;
1589 } else
1590 state = STATE_END_REQUEST;
1487 1591
1488 prev_state = state = STATE_SENDING_DATA; 1592 break;
1489 /* fall through */
1490 1593
1491 case STATE_SENDING_DATA: 1594 case STATE_DATA_XFER:
1492 if (atmci_test_and_clear_pending(host, 1595 if (atmci_test_and_clear_pending(host,
1493 EVENT_DATA_ERROR)) { 1596 EVENT_DATA_ERROR)) {
1494 host->stop_transfer(host); 1597 dev_dbg(&host->pdev->dev, "set completed data error\n");
1495 if (data->stop) 1598 atmci_set_completed(host, EVENT_DATA_ERROR);
1496 atmci_send_stop_cmd(host, data); 1599 state = STATE_END_REQUEST;
1497 state = STATE_DATA_ERROR;
1498 break; 1600 break;
1499 } 1601 }
1500 1602
1603 /*
1604 * A data transfer is in progress. The event expected
1605 * to move to the next state depends of data transfer
1606 * type (PDC or DMA). Once transfer done we can move
1607 * to the next step which is WAITING_NOTBUSY in write
1608 * case and directly SENDING_STOP in read case.
1609 */
1610 dev_dbg(&host->pdev->dev, "FSM: xfer complete?\n");
1501 if (!atmci_test_and_clear_pending(host, 1611 if (!atmci_test_and_clear_pending(host,
1502 EVENT_XFER_COMPLETE)) 1612 EVENT_XFER_COMPLETE))
1503 break; 1613 break;
1504 1614
1615 dev_dbg(&host->pdev->dev,
1616 "(%s) set completed xfer complete\n",
1617 __func__);
1505 atmci_set_completed(host, EVENT_XFER_COMPLETE); 1618 atmci_set_completed(host, EVENT_XFER_COMPLETE);
1506 prev_state = state = STATE_DATA_BUSY;
1507 /* fall through */
1508 1619
1509 case STATE_DATA_BUSY: 1620 if (host->data->flags & MMC_DATA_WRITE) {
1510 if (!atmci_test_and_clear_pending(host, 1621 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1511 EVENT_DATA_COMPLETE)) 1622 state = STATE_WAITING_NOTBUSY;
1512 break; 1623 } else if (host->mrq->stop) {
1513 1624 atmci_writel(host, ATMCI_IER, ATMCI_CMDRDY);
1514 host->data = NULL; 1625 atmci_send_stop_cmd(host, data);
1515 atmci_set_completed(host, EVENT_DATA_COMPLETE); 1626 state = STATE_SENDING_STOP;
1516 status = host->data_status;
1517 if (unlikely(status & ATMCI_DATA_ERROR_FLAGS)) {
1518 if (status & ATMCI_DTOE) {
1519 dev_dbg(&host->pdev->dev,
1520 "data timeout error\n");
1521 data->error = -ETIMEDOUT;
1522 } else if (status & ATMCI_DCRCE) {
1523 dev_dbg(&host->pdev->dev,
1524 "data CRC error\n");
1525 data->error = -EILSEQ;
1526 } else {
1527 dev_dbg(&host->pdev->dev,
1528 "data FIFO error (status=%08x)\n",
1529 status);
1530 data->error = -EIO;
1531 }
1532 } else { 1627 } else {
1628 host->data = NULL;
1533 data->bytes_xfered = data->blocks * data->blksz; 1629 data->bytes_xfered = data->blocks * data->blksz;
1534 data->error = 0; 1630 data->error = 0;
1535 atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS); 1631 state = STATE_END_REQUEST;
1536 } 1632 }
1633 break;
1537 1634
1538 if (!data->stop) { 1635 case STATE_WAITING_NOTBUSY:
1539 atmci_request_end(host, host->mrq); 1636 /*
1540 goto unlock; 1637 * We can be in the state for two reasons: a command
1541 } 1638 * requiring waiting not busy signal (stop command
1639 * included) or a write operation. In the latest case,
1640 * we need to send a stop command.
1641 */
1642 dev_dbg(&host->pdev->dev, "FSM: not busy?\n");
1643 if (!atmci_test_and_clear_pending(host,
1644 EVENT_NOTBUSY))
1645 break;
1542 1646
1543 prev_state = state = STATE_SENDING_STOP; 1647 dev_dbg(&host->pdev->dev, "set completed not busy\n");
1544 if (!data->error) 1648 atmci_set_completed(host, EVENT_NOTBUSY);
1545 atmci_send_stop_cmd(host, data); 1649
1546 /* fall through */ 1650 if (host->data) {
1651 /*
1652 * For some commands such as CMD53, even if
1653 * there is data transfer, there is no stop
1654 * command to send.
1655 */
1656 if (host->mrq->stop) {
1657 atmci_writel(host, ATMCI_IER,
1658 ATMCI_CMDRDY);
1659 atmci_send_stop_cmd(host, data);
1660 state = STATE_SENDING_STOP;
1661 } else {
1662 host->data = NULL;
1663 data->bytes_xfered = data->blocks
1664 * data->blksz;
1665 data->error = 0;
1666 state = STATE_END_REQUEST;
1667 }
1668 } else
1669 state = STATE_END_REQUEST;
1670 break;
1547 1671
1548 case STATE_SENDING_STOP: 1672 case STATE_SENDING_STOP:
1673 /*
1674 * In this state, it is important to set host->data to
1675 * NULL (which is tested in the waiting notbusy state)
1676 * in order to go to the end request state instead of
1677 * sending stop again.
1678 */
1679 dev_dbg(&host->pdev->dev, "FSM: cmd ready?\n");
1549 if (!atmci_test_and_clear_pending(host, 1680 if (!atmci_test_and_clear_pending(host,
1550 EVENT_CMD_COMPLETE)) 1681 EVENT_CMD_RDY))
1551 break; 1682 break;
1552 1683
1684 dev_dbg(&host->pdev->dev, "FSM: cmd ready\n");
1553 host->cmd = NULL; 1685 host->cmd = NULL;
1686 host->data = NULL;
1687 data->bytes_xfered = data->blocks * data->blksz;
1688 data->error = 0;
1554 atmci_command_complete(host, mrq->stop); 1689 atmci_command_complete(host, mrq->stop);
1555 atmci_request_end(host, host->mrq); 1690 if (mrq->stop->error) {
1556 goto unlock; 1691 host->stop_transfer(host);
1692 atmci_writel(host, ATMCI_IDR,
1693 ATMCI_TXRDY | ATMCI_RXRDY
1694 | ATMCI_DATA_ERROR_FLAGS);
1695 state = STATE_END_REQUEST;
1696 } else {
1697 atmci_writel(host, ATMCI_IER, ATMCI_NOTBUSY);
1698 state = STATE_WAITING_NOTBUSY;
1699 }
1700 break;
1557 1701
1558 case STATE_DATA_ERROR: 1702 case STATE_END_REQUEST:
1559 if (!atmci_test_and_clear_pending(host, 1703 atmci_writel(host, ATMCI_IDR, ATMCI_TXRDY | ATMCI_RXRDY
1560 EVENT_XFER_COMPLETE)) 1704 | ATMCI_DATA_ERROR_FLAGS);
1561 break; 1705 status = host->data_status;
1706 if (unlikely(status)) {
1707 host->stop_transfer(host);
1708 host->data = NULL;
1709 if (status & ATMCI_DTOE) {
1710 data->error = -ETIMEDOUT;
1711 } else if (status & ATMCI_DCRCE) {
1712 data->error = -EILSEQ;
1713 } else {
1714 data->error = -EIO;
1715 }
1716 }
1562 1717
1563 state = STATE_DATA_BUSY; 1718 atmci_request_end(host, host->mrq);
1719 state = STATE_IDLE;
1564 break; 1720 break;
1565 } 1721 }
1566 } while (state != prev_state); 1722 } while (state != prev_state);
1567 1723
1568 host->state = state; 1724 host->state = state;
1569 1725
1570unlock:
1571 spin_unlock(&host->lock); 1726 spin_unlock(&host->lock);
1572} 1727}
1573 1728
@@ -1620,9 +1775,6 @@ static void atmci_read_data_pio(struct atmel_mci *host)
1620 | ATMCI_DATA_ERROR_FLAGS)); 1775 | ATMCI_DATA_ERROR_FLAGS));
1621 host->data_status = status; 1776 host->data_status = status;
1622 data->bytes_xfered += nbytes; 1777 data->bytes_xfered += nbytes;
1623 smp_wmb();
1624 atmci_set_pending(host, EVENT_DATA_ERROR);
1625 tasklet_schedule(&host->tasklet);
1626 return; 1778 return;
1627 } 1779 }
1628 } while (status & ATMCI_RXRDY); 1780 } while (status & ATMCI_RXRDY);
@@ -1691,9 +1843,6 @@ static void atmci_write_data_pio(struct atmel_mci *host)
1691 | ATMCI_DATA_ERROR_FLAGS)); 1843 | ATMCI_DATA_ERROR_FLAGS));
1692 host->data_status = status; 1844 host->data_status = status;
1693 data->bytes_xfered += nbytes; 1845 data->bytes_xfered += nbytes;
1694 smp_wmb();
1695 atmci_set_pending(host, EVENT_DATA_ERROR);
1696 tasklet_schedule(&host->tasklet);
1697 return; 1846 return;
1698 } 1847 }
1699 } while (status & ATMCI_TXRDY); 1848 } while (status & ATMCI_TXRDY);
@@ -1711,16 +1860,6 @@ done:
1711 atmci_set_pending(host, EVENT_XFER_COMPLETE); 1860 atmci_set_pending(host, EVENT_XFER_COMPLETE);
1712} 1861}
1713 1862
1714static void atmci_cmd_interrupt(struct atmel_mci *host, u32 status)
1715{
1716 atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
1717
1718 host->cmd_status = status;
1719 smp_wmb();
1720 atmci_set_pending(host, EVENT_CMD_COMPLETE);
1721 tasklet_schedule(&host->tasklet);
1722}
1723
1724static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status) 1863static void atmci_sdio_interrupt(struct atmel_mci *host, u32 status)
1725{ 1864{
1726 int i; 1865 int i;
@@ -1748,17 +1887,21 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1748 break; 1887 break;
1749 1888
1750 if (pending & ATMCI_DATA_ERROR_FLAGS) { 1889 if (pending & ATMCI_DATA_ERROR_FLAGS) {
1890 dev_dbg(&host->pdev->dev, "IRQ: data error\n");
1751 atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS 1891 atmci_writel(host, ATMCI_IDR, ATMCI_DATA_ERROR_FLAGS
1752 | ATMCI_RXRDY | ATMCI_TXRDY); 1892 | ATMCI_RXRDY | ATMCI_TXRDY
1753 pending &= atmci_readl(host, ATMCI_IMR); 1893 | ATMCI_ENDRX | ATMCI_ENDTX
1894 | ATMCI_RXBUFF | ATMCI_TXBUFE);
1754 1895
1755 host->data_status = status; 1896 host->data_status = status;
1897 dev_dbg(&host->pdev->dev, "set pending data error\n");
1756 smp_wmb(); 1898 smp_wmb();
1757 atmci_set_pending(host, EVENT_DATA_ERROR); 1899 atmci_set_pending(host, EVENT_DATA_ERROR);
1758 tasklet_schedule(&host->tasklet); 1900 tasklet_schedule(&host->tasklet);
1759 } 1901 }
1760 1902
1761 if (pending & ATMCI_TXBUFE) { 1903 if (pending & ATMCI_TXBUFE) {
1904 dev_dbg(&host->pdev->dev, "IRQ: tx buffer empty\n");
1762 atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE); 1905 atmci_writel(host, ATMCI_IDR, ATMCI_TXBUFE);
1763 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX); 1906 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
1764 /* 1907 /*
@@ -1774,6 +1917,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1774 atmci_pdc_complete(host); 1917 atmci_pdc_complete(host);
1775 } 1918 }
1776 } else if (pending & ATMCI_ENDTX) { 1919 } else if (pending & ATMCI_ENDTX) {
1920 dev_dbg(&host->pdev->dev, "IRQ: end of tx buffer\n");
1777 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX); 1921 atmci_writel(host, ATMCI_IDR, ATMCI_ENDTX);
1778 1922
1779 if (host->data_size) { 1923 if (host->data_size) {
@@ -1784,6 +1928,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1784 } 1928 }
1785 1929
1786 if (pending & ATMCI_RXBUFF) { 1930 if (pending & ATMCI_RXBUFF) {
1931 dev_dbg(&host->pdev->dev, "IRQ: rx buffer full\n");
1787 atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF); 1932 atmci_writel(host, ATMCI_IDR, ATMCI_RXBUFF);
1788 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX); 1933 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
1789 /* 1934 /*
@@ -1799,6 +1944,7 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1799 atmci_pdc_complete(host); 1944 atmci_pdc_complete(host);
1800 } 1945 }
1801 } else if (pending & ATMCI_ENDRX) { 1946 } else if (pending & ATMCI_ENDRX) {
1947 dev_dbg(&host->pdev->dev, "IRQ: end of rx buffer\n");
1802 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX); 1948 atmci_writel(host, ATMCI_IDR, ATMCI_ENDRX);
1803 1949
1804 if (host->data_size) { 1950 if (host->data_size) {
@@ -1808,23 +1954,44 @@ static irqreturn_t atmci_interrupt(int irq, void *dev_id)
1808 } 1954 }
1809 } 1955 }
1810 1956
1957 /*
1958 * First mci IPs, so mainly the ones having pdc, have some
1959 * issues with the notbusy signal. You can't get it after
1960 * data transmission if you have not sent a stop command.
1961 * The appropriate workaround is to use the BLKE signal.
1962 */
1963 if (pending & ATMCI_BLKE) {
1964 dev_dbg(&host->pdev->dev, "IRQ: blke\n");
1965 atmci_writel(host, ATMCI_IDR, ATMCI_BLKE);
1966 smp_wmb();
1967 dev_dbg(&host->pdev->dev, "set pending notbusy\n");
1968 atmci_set_pending(host, EVENT_NOTBUSY);
1969 tasklet_schedule(&host->tasklet);
1970 }
1811 1971
1812 if (pending & ATMCI_NOTBUSY) { 1972 if (pending & ATMCI_NOTBUSY) {
1813 atmci_writel(host, ATMCI_IDR, 1973 dev_dbg(&host->pdev->dev, "IRQ: not_busy\n");
1814 ATMCI_DATA_ERROR_FLAGS | ATMCI_NOTBUSY); 1974 atmci_writel(host, ATMCI_IDR, ATMCI_NOTBUSY);
1815 if (!host->data_status)
1816 host->data_status = status;
1817 smp_wmb(); 1975 smp_wmb();
1818 atmci_set_pending(host, EVENT_DATA_COMPLETE); 1976 dev_dbg(&host->pdev->dev, "set pending notbusy\n");
1977 atmci_set_pending(host, EVENT_NOTBUSY);
1819 tasklet_schedule(&host->tasklet); 1978 tasklet_schedule(&host->tasklet);
1820 } 1979 }
1980
1821 if (pending & ATMCI_RXRDY) 1981 if (pending & ATMCI_RXRDY)
1822 atmci_read_data_pio(host); 1982 atmci_read_data_pio(host);
1823 if (pending & ATMCI_TXRDY) 1983 if (pending & ATMCI_TXRDY)
1824 atmci_write_data_pio(host); 1984 atmci_write_data_pio(host);
1825 1985
1826 if (pending & ATMCI_CMDRDY) 1986 if (pending & ATMCI_CMDRDY) {
1827 atmci_cmd_interrupt(host, status); 1987 dev_dbg(&host->pdev->dev, "IRQ: cmd ready\n");
1988 atmci_writel(host, ATMCI_IDR, ATMCI_CMDRDY);
1989 host->cmd_status = status;
1990 smp_wmb();
1991 dev_dbg(&host->pdev->dev, "set pending cmd rdy\n");
1992 atmci_set_pending(host, EVENT_CMD_RDY);
1993 tasklet_schedule(&host->tasklet);
1994 }
1828 1995
1829 if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB)) 1996 if (pending & (ATMCI_SDIOIRQA | ATMCI_SDIOIRQB))
1830 atmci_sdio_interrupt(host, status); 1997 atmci_sdio_interrupt(host, status);
@@ -1877,13 +2044,26 @@ static int __init atmci_init_slot(struct atmel_mci *host,
1877 mmc->caps |= MMC_CAP_SDIO_IRQ; 2044 mmc->caps |= MMC_CAP_SDIO_IRQ;
1878 if (host->caps.has_highspeed) 2045 if (host->caps.has_highspeed)
1879 mmc->caps |= MMC_CAP_SD_HIGHSPEED; 2046 mmc->caps |= MMC_CAP_SD_HIGHSPEED;
1880 if (slot_data->bus_width >= 4) 2047 /*
2048 * Without the read/write proof capability, it is strongly suggested to
2049 * use only one bit for data to prevent fifo underruns and overruns
2050 * which will corrupt data.
2051 */
2052 if ((slot_data->bus_width >= 4) && host->caps.has_rwproof)
1881 mmc->caps |= MMC_CAP_4_BIT_DATA; 2053 mmc->caps |= MMC_CAP_4_BIT_DATA;
1882 2054
1883 mmc->max_segs = 64; 2055 if (atmci_get_version(host) < 0x200) {
1884 mmc->max_req_size = 32768 * 512; 2056 mmc->max_segs = 256;
1885 mmc->max_blk_size = 32768; 2057 mmc->max_blk_size = 4095;
1886 mmc->max_blk_count = 512; 2058 mmc->max_blk_count = 256;
2059 mmc->max_req_size = mmc->max_blk_size * mmc->max_blk_count;
2060 mmc->max_seg_size = mmc->max_blk_size * mmc->max_segs;
2061 } else {
2062 mmc->max_segs = 64;
2063 mmc->max_req_size = 32768 * 512;
2064 mmc->max_blk_size = 32768;
2065 mmc->max_blk_count = 512;
2066 }
1887 2067
1888 /* Assume card is present initially */ 2068 /* Assume card is present initially */
1889 set_bit(ATMCI_CARD_PRESENT, &slot->flags); 2069 set_bit(ATMCI_CARD_PRESENT, &slot->flags);
@@ -2007,11 +2187,6 @@ static bool atmci_configure_dma(struct atmel_mci *host)
2007 } 2187 }
2008} 2188}
2009 2189
2010static inline unsigned int atmci_get_version(struct atmel_mci *host)
2011{
2012 return atmci_readl(host, ATMCI_VERSION) & 0x00000fff;
2013}
2014
2015/* 2190/*
2016 * HSMCI (High Speed MCI) module is not fully compatible with MCI module. 2191 * HSMCI (High Speed MCI) module is not fully compatible with MCI module.
2017 * HSMCI provides DMA support and a new config register but no more supports 2192 * HSMCI provides DMA support and a new config register but no more supports
@@ -2032,6 +2207,9 @@ static void __init atmci_get_cap(struct atmel_mci *host)
2032 host->caps.has_highspeed = 0; 2207 host->caps.has_highspeed = 0;
2033 host->caps.has_rwproof = 0; 2208 host->caps.has_rwproof = 0;
2034 host->caps.has_odd_clk_div = 0; 2209 host->caps.has_odd_clk_div = 0;
2210 host->caps.has_bad_data_ordering = 1;
2211 host->caps.need_reset_after_xfer = 1;
2212 host->caps.need_blksz_mul_4 = 1;
2035 2213
2036 /* keep only major version number */ 2214 /* keep only major version number */
2037 switch (version & 0xf00) { 2215 switch (version & 0xf00) {
@@ -2051,7 +2229,11 @@ static void __init atmci_get_cap(struct atmel_mci *host)
2051 host->caps.has_highspeed = 1; 2229 host->caps.has_highspeed = 1;
2052 case 0x200: 2230 case 0x200:
2053 host->caps.has_rwproof = 1; 2231 host->caps.has_rwproof = 1;
2232 host->caps.need_blksz_mul_4 = 0;
2054 case 0x100: 2233 case 0x100:
2234 host->caps.has_bad_data_ordering = 0;
2235 host->caps.need_reset_after_xfer = 0;
2236 case 0x0:
2055 break; 2237 break;
2056 default: 2238 default:
2057 host->caps.has_pdc = 0; 2239 host->caps.has_pdc = 0;
@@ -2138,14 +2320,20 @@ static int __init atmci_probe(struct platform_device *pdev)
2138 if (pdata->slot[0].bus_width) { 2320 if (pdata->slot[0].bus_width) {
2139 ret = atmci_init_slot(host, &pdata->slot[0], 2321 ret = atmci_init_slot(host, &pdata->slot[0],
2140 0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA); 2322 0, ATMCI_SDCSEL_SLOT_A, ATMCI_SDIOIRQA);
2141 if (!ret) 2323 if (!ret) {
2142 nr_slots++; 2324 nr_slots++;
2325 host->buf_size = host->slot[0]->mmc->max_req_size;
2326 }
2143 } 2327 }
2144 if (pdata->slot[1].bus_width) { 2328 if (pdata->slot[1].bus_width) {
2145 ret = atmci_init_slot(host, &pdata->slot[1], 2329 ret = atmci_init_slot(host, &pdata->slot[1],
2146 1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB); 2330 1, ATMCI_SDCSEL_SLOT_B, ATMCI_SDIOIRQB);
2147 if (!ret) 2331 if (!ret) {
2148 nr_slots++; 2332 nr_slots++;
2333 if (host->slot[1]->mmc->max_req_size > host->buf_size)
2334 host->buf_size =
2335 host->slot[1]->mmc->max_req_size;
2336 }
2149 } 2337 }
2150 2338
2151 if (!nr_slots) { 2339 if (!nr_slots) {
@@ -2153,6 +2341,19 @@ static int __init atmci_probe(struct platform_device *pdev)
2153 goto err_init_slot; 2341 goto err_init_slot;
2154 } 2342 }
2155 2343
2344 if (!host->caps.has_rwproof) {
2345 host->buffer = dma_alloc_coherent(&pdev->dev, host->buf_size,
2346 &host->buf_phys_addr,
2347 GFP_KERNEL);
2348 if (!host->buffer) {
2349 ret = -ENOMEM;
2350 dev_err(&pdev->dev, "buffer allocation failed\n");
2351 goto err_init_slot;
2352 }
2353 }
2354
2355 setup_timer(&host->timer, atmci_timeout_timer, (unsigned long)host);
2356
2156 dev_info(&pdev->dev, 2357 dev_info(&pdev->dev,
2157 "Atmel MCI controller at 0x%08lx irq %d, %u slots\n", 2358 "Atmel MCI controller at 0x%08lx irq %d, %u slots\n",
2158 host->mapbase, irq, nr_slots); 2359 host->mapbase, irq, nr_slots);
@@ -2179,6 +2380,10 @@ static int __exit atmci_remove(struct platform_device *pdev)
2179 2380
2180 platform_set_drvdata(pdev, NULL); 2381 platform_set_drvdata(pdev, NULL);
2181 2382
2383 if (host->buffer)
2384 dma_free_coherent(&pdev->dev, host->buf_size,
2385 host->buffer, host->buf_phys_addr);
2386
2182 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) { 2387 for (i = 0; i < ATMCI_MAX_NR_SLOTS; i++) {
2183 if (host->slot[i]) 2388 if (host->slot[i])
2184 atmci_cleanup_slot(host->slot[i], i); 2389 atmci_cleanup_slot(host->slot[i], i);
diff --git a/drivers/mmc/host/davinci_mmc.c b/drivers/mmc/host/davinci_mmc.c
index c1f3673ae1ef..7cf6c624bf73 100644
--- a/drivers/mmc/host/davinci_mmc.c
+++ b/drivers/mmc/host/davinci_mmc.c
@@ -1533,4 +1533,5 @@ module_exit(davinci_mmcsd_exit);
1533MODULE_AUTHOR("Texas Instruments India"); 1533MODULE_AUTHOR("Texas Instruments India");
1534MODULE_LICENSE("GPL"); 1534MODULE_LICENSE("GPL");
1535MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller"); 1535MODULE_DESCRIPTION("MMC/SD driver for Davinci MMC controller");
1536MODULE_ALIAS("platform:davinci_mmc");
1536 1537
diff --git a/drivers/mmc/host/dw_mmc.c b/drivers/mmc/host/dw_mmc.c
index ab3fc4617107..9bbf45f8c538 100644
--- a/drivers/mmc/host/dw_mmc.c
+++ b/drivers/mmc/host/dw_mmc.c
@@ -100,8 +100,6 @@ struct dw_mci_slot {
100 int last_detect_state; 100 int last_detect_state;
101}; 101};
102 102
103static struct workqueue_struct *dw_mci_card_workqueue;
104
105#if defined(CONFIG_DEBUG_FS) 103#if defined(CONFIG_DEBUG_FS)
106static int dw_mci_req_show(struct seq_file *s, void *v) 104static int dw_mci_req_show(struct seq_file *s, void *v)
107{ 105{
@@ -859,10 +857,10 @@ static void dw_mci_enable_sdio_irq(struct mmc_host *mmc, int enb)
859 int_mask = mci_readl(host, INTMASK); 857 int_mask = mci_readl(host, INTMASK);
860 if (enb) { 858 if (enb) {
861 mci_writel(host, INTMASK, 859 mci_writel(host, INTMASK,
862 (int_mask | (1 << SDMMC_INT_SDIO(slot->id)))); 860 (int_mask | SDMMC_INT_SDIO(slot->id)));
863 } else { 861 } else {
864 mci_writel(host, INTMASK, 862 mci_writel(host, INTMASK,
865 (int_mask & ~(1 << SDMMC_INT_SDIO(slot->id)))); 863 (int_mask & ~SDMMC_INT_SDIO(slot->id)));
866 } 864 }
867} 865}
868 866
@@ -1605,7 +1603,7 @@ static irqreturn_t dw_mci_interrupt(int irq, void *dev_id)
1605 1603
1606 if (pending & SDMMC_INT_CD) { 1604 if (pending & SDMMC_INT_CD) {
1607 mci_writel(host, RINTSTS, SDMMC_INT_CD); 1605 mci_writel(host, RINTSTS, SDMMC_INT_CD);
1608 queue_work(dw_mci_card_workqueue, &host->card_work); 1606 queue_work(host->card_workqueue, &host->card_work);
1609 } 1607 }
1610 1608
1611 /* Handle SDIO Interrupts */ 1609 /* Handle SDIO Interrupts */
@@ -1844,7 +1842,7 @@ static int __init dw_mci_init_slot(struct dw_mci *host, unsigned int id)
1844 * Card may have been plugged in prior to boot so we 1842 * Card may have been plugged in prior to boot so we
1845 * need to run the detect tasklet 1843 * need to run the detect tasklet
1846 */ 1844 */
1847 queue_work(dw_mci_card_workqueue, &host->card_work); 1845 queue_work(host->card_workqueue, &host->card_work);
1848 1846
1849 return 0; 1847 return 0;
1850} 1848}
@@ -2021,9 +2019,9 @@ int dw_mci_probe(struct dw_mci *host)
2021 mci_writel(host, CLKSRC, 0); 2019 mci_writel(host, CLKSRC, 0);
2022 2020
2023 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host); 2021 tasklet_init(&host->tasklet, dw_mci_tasklet_func, (unsigned long)host);
2024 dw_mci_card_workqueue = alloc_workqueue("dw-mci-card", 2022 host->card_workqueue = alloc_workqueue("dw-mci-card",
2025 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1); 2023 WQ_MEM_RECLAIM | WQ_NON_REENTRANT, 1);
2026 if (!dw_mci_card_workqueue) 2024 if (!host->card_workqueue)
2027 goto err_dmaunmap; 2025 goto err_dmaunmap;
2028 INIT_WORK(&host->card_work, dw_mci_work_routine_card); 2026 INIT_WORK(&host->card_work, dw_mci_work_routine_card);
2029 ret = request_irq(host->irq, dw_mci_interrupt, host->irq_flags, "dw-mci", host); 2027 ret = request_irq(host->irq, dw_mci_interrupt, host->irq_flags, "dw-mci", host);
@@ -2085,7 +2083,7 @@ err_init_slot:
2085 free_irq(host->irq, host); 2083 free_irq(host->irq, host);
2086 2084
2087err_workqueue: 2085err_workqueue:
2088 destroy_workqueue(dw_mci_card_workqueue); 2086 destroy_workqueue(host->card_workqueue);
2089 2087
2090err_dmaunmap: 2088err_dmaunmap:
2091 if (host->use_dma && host->dma_ops->exit) 2089 if (host->use_dma && host->dma_ops->exit)
@@ -2119,7 +2117,7 @@ void dw_mci_remove(struct dw_mci *host)
2119 mci_writel(host, CLKSRC, 0); 2117 mci_writel(host, CLKSRC, 0);
2120 2118
2121 free_irq(host->irq, host); 2119 free_irq(host->irq, host);
2122 destroy_workqueue(dw_mci_card_workqueue); 2120 destroy_workqueue(host->card_workqueue);
2123 dma_free_coherent(&host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma); 2121 dma_free_coherent(&host->dev, PAGE_SIZE, host->sg_cpu, host->sg_dma);
2124 2122
2125 if (host->use_dma && host->dma_ops->exit) 2123 if (host->use_dma && host->dma_ops->exit)
diff --git a/drivers/mmc/host/imxmmc.c b/drivers/mmc/host/imxmmc.c
deleted file mode 100644
index ea0f3cedef21..000000000000
--- a/drivers/mmc/host/imxmmc.c
+++ /dev/null
@@ -1,1169 +0,0 @@
1/*
2 * linux/drivers/mmc/host/imxmmc.c - Motorola i.MX MMCI driver
3 *
4 * Copyright (C) 2004 Sascha Hauer, Pengutronix <sascha@saschahauer.de>
5 * Copyright (C) 2006 Pavel Pisa, PiKRON <ppisa@pikron.com>
6 *
7 * derived from pxamci.c by Russell King
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 */
14
15#include <linux/module.h>
16#include <linux/init.h>
17#include <linux/ioport.h>
18#include <linux/platform_device.h>
19#include <linux/interrupt.h>
20#include <linux/blkdev.h>
21#include <linux/dma-mapping.h>
22#include <linux/mmc/host.h>
23#include <linux/mmc/card.h>
24#include <linux/delay.h>
25#include <linux/clk.h>
26#include <linux/io.h>
27
28#include <asm/dma.h>
29#include <asm/irq.h>
30#include <asm/sizes.h>
31#include <mach/mmc.h>
32#include <mach/imx-dma.h>
33
34#include "imxmmc.h"
35
36#define DRIVER_NAME "imx-mmc"
37
38#define IMXMCI_INT_MASK_DEFAULT (INT_MASK_BUF_READY | INT_MASK_DATA_TRAN | \
39 INT_MASK_WRITE_OP_DONE | INT_MASK_END_CMD_RES | \
40 INT_MASK_AUTO_CARD_DETECT | INT_MASK_DAT0_EN | INT_MASK_SDIO)
41
/*
 * Per-controller driver state for the i.MX SDHC host.
 * Several fields are shared between the IRQ handler, the DMA callback,
 * the polling timer and the tasklet, coordinated through the atomic
 * @pending_events bit set; @imask updates are serialized by @lock.
 */
struct imxmci_host {
	struct mmc_host		*mmc;		/* owning MMC core host */
	spinlock_t		lock;		/* guards imask programming */
	struct resource		*res;		/* MMIO resource (physical base) */
	void __iomem		*base;		/* ioremapped register base */
	int			irq;
	imx_dmach_t		dma;		/* i.MX DMA channel handle */
	volatile unsigned int	imask;		/* cached MMC_REG_INT_MASK value */
	unsigned int		power_mode;	/* last MMC_POWER_* seen in set_ios */
	unsigned int		present;	/* card-detect state (0/1) */
	struct imxmmc_platform_data *pdata;

	struct mmc_request	*req;		/* in-flight request, NULL when idle */
	struct mmc_command	*cmd;		/* command awaiting response, or NULL */
	struct mmc_data		*data;		/* data phase in progress, or NULL */

	struct timer_list	timer;		/* 0.5 s card-detect/stuck poll */
	struct tasklet_struct	tasklet;	/* bottom half for IRQ/DMA events */
	unsigned int		status_reg;	/* STATUS snapshot taken at IRQ time */
	unsigned long		pending_events;	/* IMXMCI_PEND_* bit set */
	/* Next two fields are there for CPU driven transfers to overcome SDHC deficiencies */
	u16			*data_ptr;	/* current position in the PIO buffer */
	unsigned int		data_cnt;	/* bytes moved so far by PIO */
	atomic_t		stuck_timeout;	/* timer polls seen without progress */

	unsigned int		dma_nents;	/* entries returned by dma_map_sg() */
	unsigned int		dma_size;	/* total transfer size in bytes */
	unsigned int		dma_dir;	/* DMA_FROM_DEVICE / DMA_TO_DEVICE */
	int			dma_allocated;	/* DMA channel successfully requested */

	unsigned char		actual_bus_width;	/* MMC_BUS_WIDTH_1 or _4 */

	int			prev_cmd_code;	/* opcode of last finished command */

	struct clk		*clk;		/* "perclk2" input clock */
};
78
79#define IMXMCI_PEND_IRQ_b 0
80#define IMXMCI_PEND_DMA_END_b 1
81#define IMXMCI_PEND_DMA_ERR_b 2
82#define IMXMCI_PEND_WAIT_RESP_b 3
83#define IMXMCI_PEND_DMA_DATA_b 4
84#define IMXMCI_PEND_CPU_DATA_b 5
85#define IMXMCI_PEND_CARD_XCHG_b 6
86#define IMXMCI_PEND_SET_INIT_b 7
87#define IMXMCI_PEND_STARTED_b 8
88
89#define IMXMCI_PEND_IRQ_m (1 << IMXMCI_PEND_IRQ_b)
90#define IMXMCI_PEND_DMA_END_m (1 << IMXMCI_PEND_DMA_END_b)
91#define IMXMCI_PEND_DMA_ERR_m (1 << IMXMCI_PEND_DMA_ERR_b)
92#define IMXMCI_PEND_WAIT_RESP_m (1 << IMXMCI_PEND_WAIT_RESP_b)
93#define IMXMCI_PEND_DMA_DATA_m (1 << IMXMCI_PEND_DMA_DATA_b)
94#define IMXMCI_PEND_CPU_DATA_m (1 << IMXMCI_PEND_CPU_DATA_b)
95#define IMXMCI_PEND_CARD_XCHG_m (1 << IMXMCI_PEND_CARD_XCHG_b)
96#define IMXMCI_PEND_SET_INIT_m (1 << IMXMCI_PEND_SET_INIT_b)
97#define IMXMCI_PEND_STARTED_m (1 << IMXMCI_PEND_STARTED_b)
98
/*
 * Stop the SD/MMC bus clock and poll until the controller confirms it
 * (STATUS_CARD_BUS_CLK_RUN clear).  The STOP request is re-issued every
 * 128 iterations; gives up with only a debug message after 0x1000 polls.
 */
static void imxmci_stop_clock(struct imxmci_host *host)
{
	int i = 0;
	u16 reg;

	/* Clear any pending START request first so it cannot race the STOP. */
	reg = readw(host->base + MMC_REG_STR_STP_CLK);
	writew(reg & ~STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);
	while (i < 0x1000) {
		/* Re-issue STOP_CLK every 128 loops (i a multiple of 0x80). */
		if (!(i & 0x7f)) {
			reg = readw(host->base + MMC_REG_STR_STP_CLK);
			writew(reg | STR_STP_CLK_STOP_CLK,
			       host->base + MMC_REG_STR_STP_CLK);
		}

		reg = readw(host->base + MMC_REG_STATUS);
		if (!(reg & STATUS_CARD_BUS_CLK_RUN)) {
			/* Check twice before cut */
			reg = readw(host->base + MMC_REG_STATUS);
			if (!(reg & STATUS_CARD_BUS_CLK_RUN))
				return;
		}

		i++;
	}
	dev_dbg(mmc_dev(host->mmc), "imxmci_stop_clock blocked, no luck\n");
}
125
/*
 * Start the SD/MMC bus clock and wait until the controller reports it
 * running (STATUS_CARD_BUS_CLK_RUN) or the IRQ handler has already marked
 * the command as started.  The START request is retried up to 256 times
 * with a 128-iteration poll between attempts, because the SDHC may drop
 * the first request at low clock rates.
 *
 * Returns 0 on success, -1 when the clock never starts.
 */
static int imxmci_start_clock(struct imxmci_host *host)
{
	unsigned int trials = 0;
	unsigned int delay_limit = 128;
	unsigned long flags;
	u16 reg;

	/* Clear any pending STOP request before asking for START. */
	reg = readw(host->base + MMC_REG_STR_STP_CLK);
	writew(reg & ~STR_STP_CLK_STOP_CLK, host->base + MMC_REG_STR_STP_CLK);

	clear_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);

	/*
	 * Command start of the clock, this usually succeeds in less
	 * then 6 delay loops, but during card detection (low clockrate)
	 * it takes up to 5000 delay loops and sometimes fails for the first time
	 */
	reg = readw(host->base + MMC_REG_STR_STP_CLK);
	writew(reg | STR_STP_CLK_START_CLK, host->base + MMC_REG_STR_STP_CLK);

	do {
		unsigned int delay = delay_limit;

		while (delay--) {
			reg = readw(host->base + MMC_REG_STATUS);
			if (reg & STATUS_CARD_BUS_CLK_RUN) {
				/* Check twice before cut */
				reg = readw(host->base + MMC_REG_STATUS);
				if (reg & STATUS_CARD_BUS_CLK_RUN)
					return 0;
			}

			/* The IRQ handler saw the command start: clock is running. */
			if (test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events))
				return 0;
		}

		local_irq_save(flags);
		/*
		 * Ensure, that request is not doubled under all possible circumstances.
		 * It is possible, that clock running state is missed, because some other
		 * IRQ or schedule delays this function execution and the clocks has
		 * been already stopped by other means (response processing, SDHC HW)
		 */
		if (!test_bit(IMXMCI_PEND_STARTED_b, &host->pending_events)) {
			reg = readw(host->base + MMC_REG_STR_STP_CLK);
			writew(reg | STR_STP_CLK_START_CLK,
			       host->base + MMC_REG_STR_STP_CLK);
		}
		local_irq_restore(flags);

	} while (++trials < 256);

	dev_err(mmc_dev(host->mmc), "imxmci_start_clock blocked, no luck\n");

	return -1;
}
182
/*
 * Soft-reset the SDHC block, then program conservative defaults:
 * maximum response timeout, a 512-byte block length and a single block.
 * NOTE(review): the 0x08/0x0D/0x05 write sequence to STR_STP_CLK is a
 * magic reset recipe inherited from the original driver — confirm against
 * the i.MX reference manual before touching it.
 */
static void imxmci_softreset(struct imxmci_host *host)
{
	int i;

	/* reset sequence */
	writew(0x08, host->base + MMC_REG_STR_STP_CLK);
	writew(0x0D, host->base + MMC_REG_STR_STP_CLK);

	for (i = 0; i < 8; i++)
		writew(0x05, host->base + MMC_REG_STR_STP_CLK);

	writew(0xff, host->base + MMC_REG_RES_TO);
	writew(512, host->base + MMC_REG_BLK_LEN);
	writew(1, host->base + MMC_REG_NOB);
}
198
199static int imxmci_busy_wait_for_status(struct imxmci_host *host,
200 unsigned int *pstat, unsigned int stat_mask,
201 int timeout, const char *where)
202{
203 int loops = 0;
204
205 while (!(*pstat & stat_mask)) {
206 loops += 2;
207 if (loops >= timeout) {
208 dev_dbg(mmc_dev(host->mmc), "busy wait timeout in %s, STATUS = 0x%x (0x%x)\n",
209 where, *pstat, stat_mask);
210 return -1;
211 }
212 udelay(2);
213 *pstat |= readw(host->base + MMC_REG_STATUS);
214 }
215 if (!loops)
216 return 0;
217
218 /* The busy-wait is expected there for clock <8MHz due to SDHC hardware flaws */
219 if (!(stat_mask & STATUS_END_CMD_RESP) || (host->mmc->ios.clock >= 8000000))
220 dev_info(mmc_dev(host->mmc), "busy wait for %d usec in %s, STATUS = 0x%x (0x%x)\n",
221 loops, where, *pstat, stat_mask);
222 return loops;
223}
224
/*
 * Program the SDHC block registers for a data transfer and choose between
 * CPU-driven (PIO) and DMA operation.  Transfers shorter than 512 bytes
 * go through PIO (the hardware cannot handle partial FIFO fills); larger
 * ones are mapped with dma_map_sg() and handed to the i.MX DMA engine.
 * For reads the DMA channel is enabled immediately; for writes it is
 * started later from imxmci_cmd_done() once the response has arrived.
 */
static void imxmci_setup_data(struct imxmci_host *host, struct mmc_data *data)
{
	unsigned int nob = data->blocks;
	unsigned int blksz = data->blksz;
	unsigned int datasz = nob * blksz;
	int i;

	/* Stream transfers carry no block count: program the maximum. */
	if (data->flags & MMC_DATA_STREAM)
		nob = 0xffff;

	host->data = data;
	data->bytes_xfered = 0;

	writew(nob, host->base + MMC_REG_NOB);
	writew(blksz, host->base + MMC_REG_BLK_LEN);

	/*
	 * DMA cannot be used for small block sizes, we have to use CPU driven transfers otherwise.
	 * We are in big troubles for non-512 byte transfers according to note in the paragraph
	 * 20.6.7 of User Manual anyway, but we need to be able to transfer SCR at least.
	 * The situation is even more complex in reality. The SDHC in not able to handle wll
	 * partial FIFO fills and reads. The length has to be rounded up to burst size multiple.
	 * This is required for SCR read at least.
	 */
	if (datasz < 512) {
		host->dma_size = datasz;
		if (data->flags & MMC_DATA_READ) {
			host->dma_dir = DMA_FROM_DEVICE;

			/* Hack to enable read SCR */
			writew(1, host->base + MMC_REG_NOB);
			writew(512, host->base + MMC_REG_BLK_LEN);
		} else {
			host->dma_dir = DMA_TO_DEVICE;
		}

		/* Convert back to virtual address */
		host->data_ptr = (u16 *)sg_virt(data->sg);
		host->data_cnt = 0;

		/* Flag the transfer as CPU driven for the IRQ/tasklet paths. */
		clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
		set_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

		return;
	}

	if (data->flags & MMC_DATA_READ) {
		host->dma_dir = DMA_FROM_DEVICE;
		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
					     data->sg_len, host->dma_dir);

		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
				 host->res->start + MMC_REG_BUFFER_ACCESS,
				 DMA_MODE_READ);

		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_READ, IMX_DMA_WIDTH_16, CCR_REN);*/
		CCR(host->dma) = CCR_DMOD_LINEAR | CCR_DSIZ_32 | CCR_SMOD_FIFO | CCR_SSIZ_16 | CCR_REN;
	} else {
		host->dma_dir = DMA_TO_DEVICE;

		host->dma_nents = dma_map_sg(mmc_dev(host->mmc), data->sg,
					     data->sg_len, host->dma_dir);

		imx_dma_setup_sg(host->dma, data->sg, data->sg_len, datasz,
				 host->res->start + MMC_REG_BUFFER_ACCESS,
				 DMA_MODE_WRITE);

		/*imx_dma_setup_mem2dev_ccr(host->dma, DMA_MODE_WRITE, IMX_DMA_WIDTH_16, CCR_REN);*/
		CCR(host->dma) = CCR_SMOD_LINEAR | CCR_SSIZ_32 | CCR_DMOD_FIFO | CCR_DSIZ_16 | CCR_REN;
	}

#if 1	/* This code is there only for consistency checking and can be disabled in future */
	host->dma_size = 0;
	for (i = 0; i < host->dma_nents; i++)
		host->dma_size += data->sg[i].length;

	if (datasz > host->dma_size) {
		dev_err(mmc_dev(host->mmc), "imxmci_setup_data datasz 0x%x > 0x%x dm_size\n",
			datasz, host->dma_size);
	}
#endif

	host->dma_size = datasz;

	/* Make descriptor/state writes visible before flagging DMA mode. */
	wmb();

	set_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events);
	clear_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events);

	/* start DMA engine for read, write is delayed after initial response */
	if (host->dma_dir == DMA_FROM_DEVICE)
		imx_dma_enable(host->dma);
}
318
/*
 * Latch @cmd into the SDHC command registers and (re)start the bus clock.
 * @cmdat carries CMD_DAT_CONT_* flags accumulated by the caller; the
 * response-format bits are derived here from mmc_resp_type().  The
 * interrupt mask is opened for exactly the events this command can raise.
 */
static void imxmci_start_cmd(struct imxmci_host *host, struct mmc_command *cmd, unsigned int cmdat)
{
	unsigned long flags;
	u32 imask;

	WARN_ON(host->cmd != NULL);
	host->cmd = cmd;

	/* Ensure, that clock are stopped else command programming and start fails */
	imxmci_stop_clock(host);

	if (cmd->flags & MMC_RSP_BUSY)
		cmdat |= CMD_DAT_CONT_BUSY;

	/* Translate the MMC core response type into SDHC format bits. */
	switch (mmc_resp_type(cmd)) {
	case MMC_RSP_R1: /* short CRC, OPCODE */
	case MMC_RSP_R1B:/* short CRC, OPCODE, BUSY */
		cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R1;
		break;
	case MMC_RSP_R2: /* long 136 bit + CRC */
		cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R2;
		break;
	case MMC_RSP_R3: /* short */
		cmdat |= CMD_DAT_CONT_RESPONSE_FORMAT_R3;
		break;
	default:
		break;
	}

	/* One-shot flag queued by MMC_POWER_UP in imxmci_set_ios(). */
	if (test_and_clear_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events))
		cmdat |= CMD_DAT_CONT_INIT; /* This command needs init */

	if (host->actual_bus_width == MMC_BUS_WIDTH_4)
		cmdat |= CMD_DAT_CONT_BUS_WIDTH_4;

	writew(cmd->opcode, host->base + MMC_REG_CMD);
	writew(cmd->arg >> 16, host->base + MMC_REG_ARGH);
	writew(cmd->arg & 0xffff, host->base + MMC_REG_ARGL);
	writew(cmdat, host->base + MMC_REG_CMD_DAT_CONT);

	atomic_set(&host->stuck_timeout, 0);
	set_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events);

	/* Unmask only the interrupts this command legitimately produces. */
	imask = IMXMCI_INT_MASK_DEFAULT;
	imask &= ~INT_MASK_END_CMD_RES;
	if (cmdat & CMD_DAT_CONT_DATA_ENABLE) {
		/* imask &= ~INT_MASK_BUF_READY; */
		imask &= ~INT_MASK_DATA_TRAN;
		if (cmdat & CMD_DAT_CONT_WRITE)
			imask &= ~INT_MASK_WRITE_OP_DONE;
		if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
			imask &= ~INT_MASK_BUF_READY;
	}

	spin_lock_irqsave(&host->lock, flags);
	host->imask = imask;
	writew(host->imask, host->base + MMC_REG_INT_MASK);
	spin_unlock_irqrestore(&host->lock, flags);

	dev_dbg(mmc_dev(host->mmc), "CMD%02d (0x%02x) mask set to 0x%04x\n",
		cmd->opcode, cmd->opcode, imask);

	imxmci_start_clock(host);
}
384
385static void imxmci_finish_request(struct imxmci_host *host, struct mmc_request *req)
386{
387 unsigned long flags;
388
389 spin_lock_irqsave(&host->lock, flags);
390
391 host->pending_events &= ~(IMXMCI_PEND_WAIT_RESP_m | IMXMCI_PEND_DMA_END_m |
392 IMXMCI_PEND_DMA_DATA_m | IMXMCI_PEND_CPU_DATA_m);
393
394 host->imask = IMXMCI_INT_MASK_DEFAULT;
395 writew(host->imask, host->base + MMC_REG_INT_MASK);
396
397 spin_unlock_irqrestore(&host->lock, flags);
398
399 if (req && req->cmd)
400 host->prev_cmd_code = req->cmd->opcode;
401
402 host->req = NULL;
403 host->cmd = NULL;
404 host->data = NULL;
405 mmc_request_done(host->mmc, req);
406}
407
408static int imxmci_finish_data(struct imxmci_host *host, unsigned int stat)
409{
410 struct mmc_data *data = host->data;
411 int data_error;
412
413 if (test_and_clear_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
414 imx_dma_disable(host->dma);
415 dma_unmap_sg(mmc_dev(host->mmc), data->sg, host->dma_nents,
416 host->dma_dir);
417 }
418
419 if (stat & STATUS_ERR_MASK) {
420 dev_dbg(mmc_dev(host->mmc), "request failed. status: 0x%08x\n", stat);
421 if (stat & (STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR))
422 data->error = -EILSEQ;
423 else if (stat & STATUS_TIME_OUT_READ)
424 data->error = -ETIMEDOUT;
425 else
426 data->error = -EIO;
427 } else {
428 data->bytes_xfered = host->dma_size;
429 }
430
431 data_error = data->error;
432
433 host->data = NULL;
434
435 return data_error;
436}
437
/*
 * Handle end-of-command: record timeout/CRC errors, drain the response
 * FIFO, then either kick the deferred DMA write phase or finish the
 * request.  Returns 0 when no command was pending or the request was
 * aborted here, 1 otherwise.
 */
static int imxmci_cmd_done(struct imxmci_host *host, unsigned int stat)
{
	struct mmc_command *cmd = host->cmd;
	int i;
	u32 a, b, c;
	struct mmc_data *data = host->data;

	if (!cmd)
		return 0;

	host->cmd = NULL;

	if (stat & STATUS_TIME_OUT_RESP) {
		dev_dbg(mmc_dev(host->mmc), "CMD TIMEOUT\n");
		cmd->error = -ETIMEDOUT;
	} else if (stat & STATUS_RESP_CRC_ERR && cmd->flags & MMC_RSP_CRC) {
		dev_dbg(mmc_dev(host->mmc), "cmd crc error\n");
		cmd->error = -EILSEQ;
	}

	if (cmd->flags & MMC_RSP_PRESENT) {
		if (cmd->flags & MMC_RSP_136) {
			/* Long response: eight 16-bit FIFO reads fill resp[0..3]. */
			for (i = 0; i < 4; i++) {
				a = readw(host->base + MMC_REG_RES_FIFO);
				b = readw(host->base + MMC_REG_RES_FIFO);
				cmd->resp[i] = a << 16 | b;
			}
		} else {
			/* Short response: 48 bits spread over three FIFO words. */
			a = readw(host->base + MMC_REG_RES_FIFO);
			b = readw(host->base + MMC_REG_RES_FIFO);
			c = readw(host->base + MMC_REG_RES_FIFO);
			cmd->resp[0] = a << 24 | b << 8 | c >> 8;
		}
	}

	dev_dbg(mmc_dev(host->mmc), "RESP 0x%08x, 0x%08x, 0x%08x, 0x%08x, error %d\n",
		cmd->resp[0], cmd->resp[1], cmd->resp[2], cmd->resp[3], cmd->error);

	if (data && !cmd->error && !(stat & STATUS_ERR_MASK)) {
		if (host->req->data->flags & MMC_DATA_WRITE) {

			/* Wait for FIFO to be empty before starting DMA write */

			stat = readw(host->base + MMC_REG_STATUS);
			if (imxmci_busy_wait_for_status(host, &stat,
							STATUS_APPL_BUFF_FE,
							40, "imxmci_cmd_done DMA WR") < 0) {
				cmd->error = -EIO;
				imxmci_finish_data(host, stat);
				if (host->req)
					imxmci_finish_request(host, host->req);
				dev_warn(mmc_dev(host->mmc), "STATUS = 0x%04x\n",
					 stat);
				return 0;
			}

			/* DMA writes were deferred in imxmci_setup_data(); start now. */
			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
				imx_dma_enable(host->dma);
		}
	} else {
		/* Command failed, or there is no data phase: wind the request down. */
		struct mmc_request *req;
		imxmci_stop_clock(host);
		req = host->req;

		if (data)
			imxmci_finish_data(host, stat);

		if (req)
			imxmci_finish_request(host, req);
		else
			dev_warn(mmc_dev(host->mmc), "imxmci_cmd_done: no request to finish\n");
	}

	return 1;
}
513
514static int imxmci_data_done(struct imxmci_host *host, unsigned int stat)
515{
516 struct mmc_data *data = host->data;
517 int data_error;
518
519 if (!data)
520 return 0;
521
522 data_error = imxmci_finish_data(host, stat);
523
524 if (host->req->stop) {
525 imxmci_stop_clock(host);
526 imxmci_start_cmd(host, host->req->stop, 0);
527 } else {
528 struct mmc_request *req;
529 req = host->req;
530 if (req)
531 imxmci_finish_request(host, req);
532 else
533 dev_warn(mmc_dev(host->mmc), "imxmci_data_done: no request to finish\n");
534 }
535
536 return 1;
537}
538
/*
 * Move data between the SDHC FIFO and memory by CPU (PIO), used for
 * transfers < 512 bytes where DMA cannot be used.  Burst size is 16 bytes
 * on a 1-bit bus and 64 bytes on a 4-bit bus.  The udelay() calls work
 * around SDHC flakiness at clocks below 8 MHz (see inline comments).
 *
 * Returns 1 when the transfer completed, -1 on read timeout, 0 when more
 * FIFO service is still expected; *pstat is updated with the latest
 * accumulated STATUS bits.
 */
static int imxmci_cpu_driven_data(struct imxmci_host *host, unsigned int *pstat)
{
	int i;
	int burst_len;
	int trans_done = 0;
	unsigned int stat = *pstat;

	if (host->actual_bus_width != MMC_BUS_WIDTH_4)
		burst_len = 16;
	else
		burst_len = 64;

	/* This is unfortunately required */
	dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data running STATUS = 0x%x\n",
		stat);

	udelay(20);	/* required for clocks < 8MHz*/

	if (host->dma_dir == DMA_FROM_DEVICE) {
		imxmci_busy_wait_for_status(host, &stat,
					    STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE |
					    STATUS_TIME_OUT_READ,
					    50, "imxmci_cpu_driven_data read");

		/* Drain the FIFO; reads always cover a full 512-byte window. */
		while ((stat & (STATUS_APPL_BUFF_FF | STATUS_DATA_TRANS_DONE)) &&
		       !(stat & STATUS_TIME_OUT_READ) &&
		       (host->data_cnt < 512)) {

			udelay(20);	/* required for clocks < 8MHz*/

			for (i = burst_len; i >= 2 ; i -= 2) {
				u16 data;
				data = readw(host->base + MMC_REG_BUFFER_ACCESS);
				udelay(10);	/* required for clocks < 8MHz*/
				if (host->data_cnt+2 <= host->dma_size) {
					*(host->data_ptr++) = data;
				} else {
					/* Final odd byte of a non-even-sized buffer. */
					if (host->data_cnt < host->dma_size)
						*(u8 *)(host->data_ptr) = data;
				}
				host->data_cnt += 2;
			}

			stat = readw(host->base + MMC_REG_STATUS);

			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read %d burst %d STATUS = 0x%x\n",
				host->data_cnt, burst_len, stat);
		}

		if ((stat & STATUS_DATA_TRANS_DONE) && (host->data_cnt >= 512))
			trans_done = 1;

		/* Short reads are padded by the 512-byte hack; their CRC is bogus. */
		if (host->dma_size & 0x1ff)
			stat &= ~STATUS_CRC_READ_ERR;

		if (stat & STATUS_TIME_OUT_READ) {
			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data read timeout STATUS = 0x%x\n",
				stat);
			trans_done = -1;
		}

	} else {
		imxmci_busy_wait_for_status(host, &stat,
					    STATUS_APPL_BUFF_FE,
					    20, "imxmci_cpu_driven_data write");

		/* Feed the FIFO while it reports empty and data remains. */
		while ((stat & STATUS_APPL_BUFF_FE) &&
		       (host->data_cnt < host->dma_size)) {
			if (burst_len >= host->dma_size - host->data_cnt) {
				burst_len = host->dma_size - host->data_cnt;
				host->data_cnt = host->dma_size;
				trans_done = 1;
			} else {
				host->data_cnt += burst_len;
			}

			for (i = burst_len; i > 0 ; i -= 2)
				writew(*(host->data_ptr++), host->base + MMC_REG_BUFFER_ACCESS);

			stat = readw(host->base + MMC_REG_STATUS);

			dev_dbg(mmc_dev(host->mmc), "imxmci_cpu_driven_data write burst %d STATUS = 0x%x\n",
				burst_len, stat);
		}
	}

	*pstat = stat;

	return trans_done;
}
629
630static void imxmci_dma_irq(int dma, void *devid)
631{
632 struct imxmci_host *host = devid;
633 u32 stat = readw(host->base + MMC_REG_STATUS);
634
635 atomic_set(&host->stuck_timeout, 0);
636 host->status_reg = stat;
637 set_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
638 tasklet_schedule(&host->tasklet);
639}
640
641static irqreturn_t imxmci_irq(int irq, void *devid)
642{
643 struct imxmci_host *host = devid;
644 u32 stat = readw(host->base + MMC_REG_STATUS);
645 int handled = 1;
646
647 writew(host->imask | INT_MASK_SDIO | INT_MASK_AUTO_CARD_DETECT,
648 host->base + MMC_REG_INT_MASK);
649
650 atomic_set(&host->stuck_timeout, 0);
651 host->status_reg = stat;
652 set_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);
653 set_bit(IMXMCI_PEND_STARTED_b, &host->pending_events);
654 tasklet_schedule(&host->tasklet);
655
656 return IRQ_RETVAL(handled);
657}
658
/*
 * Bottom half: dispatches all deferred work based on the IMXMCI_PEND_*
 * bits — command completion, CPU-driven (PIO) FIFO service, DMA-end
 * handling and card-exchange cleanup.  Also implements the stuck-hardware
 * watchdog: after >4 polls without progress (counted by the status timer)
 * the whole request is failed with synthetic timeout/CRC status bits.
 */
static void imxmci_tasklet_fnc(unsigned long data)
{
	struct imxmci_host *host = (struct imxmci_host *)data;
	u32 stat;
	unsigned int data_dir_mask = 0;	/* STATUS_WR_CRC_ERROR_CODE_MASK */
	int timeout = 0;

	if (atomic_read(&host->stuck_timeout) > 4) {
		char *what;
		timeout = 1;
		stat = readw(host->base + MMC_REG_STATUS);
		host->status_reg = stat;
		/* Classify which phase got stuck, for the diagnostics below. */
		if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
				what = "RESP+DMA";
			else
				what = "RESP";
		else
			if (test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events))
				if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events))
					what = "DATA";
				else
					what = "DMA";
			else
				what = "???";

		dev_err(mmc_dev(host->mmc),
			"%s TIMEOUT, hardware stucked STATUS = 0x%04x IMASK = 0x%04x\n",
			what, stat,
			readw(host->base + MMC_REG_INT_MASK));
		dev_err(mmc_dev(host->mmc),
			"CMD_DAT_CONT = 0x%04x, MMC_BLK_LEN = 0x%04x, MMC_NOB = 0x%04x, DMA_CCR = 0x%08x\n",
			readw(host->base + MMC_REG_CMD_DAT_CONT),
			readw(host->base + MMC_REG_BLK_LEN),
			readw(host->base + MMC_REG_NOB),
			CCR(host->dma));
		dev_err(mmc_dev(host->mmc), "CMD%d, prevCMD%d, bus %d-bit, dma_size = 0x%x\n",
			host->cmd ? host->cmd->opcode : 0,
			host->prev_cmd_code,
			1 << host->actual_bus_width, host->dma_size);
	}

	/* Card gone or hardware stuck: fake error status to abort the request. */
	if (!host->present || timeout)
		host->status_reg = STATUS_TIME_OUT_RESP | STATUS_TIME_OUT_READ |
			STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR;

	if (test_bit(IMXMCI_PEND_IRQ_b, &host->pending_events) || timeout) {
		clear_bit(IMXMCI_PEND_IRQ_b, &host->pending_events);

		stat = readw(host->base + MMC_REG_STATUS);
		/*
		 * This is not required in theory, but there is chance to miss some flag
		 * which clears automatically by mask write, FreeScale original code keeps
		 * stat from IRQ time so do I
		 */
		stat |= host->status_reg;

		/* PIO reads of padded short transfers produce bogus read CRCs. */
		if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events))
			stat &= ~STATUS_CRC_READ_ERR;

		if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {
			imxmci_busy_wait_for_status(host, &stat,
						    STATUS_END_CMD_RESP | STATUS_ERR_MASK,
						    20, "imxmci_tasklet_fnc resp (ERRATUM #4)");
		}

		if (stat & (STATUS_END_CMD_RESP | STATUS_ERR_MASK)) {
			if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
				imxmci_cmd_done(host, stat);
			if (host->data && (stat & STATUS_ERR_MASK))
				imxmci_data_done(host, stat);
		}

		if (test_bit(IMXMCI_PEND_CPU_DATA_b, &host->pending_events)) {
			stat |= readw(host->base + MMC_REG_STATUS);
			/* Non-zero return: PIO transfer finished (or timed out). */
			if (imxmci_cpu_driven_data(host, &stat)) {
				if (test_and_clear_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events))
					imxmci_cmd_done(host, stat);
				atomic_clear_mask(IMXMCI_PEND_IRQ_m|IMXMCI_PEND_CPU_DATA_m,
						  &host->pending_events);
				imxmci_data_done(host, stat);
			}
		}
	}

	/* DMA end is only acted upon once the command response is in. */
	if (test_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events) &&
	    !test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events)) {

		stat = readw(host->base + MMC_REG_STATUS);
		/* Same as above */
		stat |= host->status_reg;

		if (host->dma_dir == DMA_TO_DEVICE)
			data_dir_mask = STATUS_WRITE_OP_DONE;
		else
			data_dir_mask = STATUS_DATA_TRANS_DONE;

		if (stat & data_dir_mask) {
			clear_bit(IMXMCI_PEND_DMA_END_b, &host->pending_events);
			imxmci_data_done(host, stat);
		}
	}

	/* Card insert/remove detected by the polling timer: abort and rescan. */
	if (test_and_clear_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events)) {

		if (host->cmd)
			imxmci_cmd_done(host, STATUS_TIME_OUT_RESP);

		if (host->data)
			imxmci_data_done(host, STATUS_TIME_OUT_READ |
					 STATUS_CRC_READ_ERR | STATUS_CRC_WRITE_ERR);

		if (host->req)
			imxmci_finish_request(host, host->req);

		mmc_detect_change(host->mmc, msecs_to_jiffies(100));

	}
}
778
779static void imxmci_request(struct mmc_host *mmc, struct mmc_request *req)
780{
781 struct imxmci_host *host = mmc_priv(mmc);
782 unsigned int cmdat;
783
784 WARN_ON(host->req != NULL);
785
786 host->req = req;
787
788 cmdat = 0;
789
790 if (req->data) {
791 imxmci_setup_data(host, req->data);
792
793 cmdat |= CMD_DAT_CONT_DATA_ENABLE;
794
795 if (req->data->flags & MMC_DATA_WRITE)
796 cmdat |= CMD_DAT_CONT_WRITE;
797
798 if (req->data->flags & MMC_DATA_STREAM)
799 cmdat |= CMD_DAT_CONT_STREAM_BLOCK;
800 }
801
802 imxmci_start_cmd(host, req->cmd, cmdat);
803}
804
805#define CLK_RATE 19200000
806
/*
 * mmc_host_ops .set_ios: apply bus width (switching DAT3 between SD
 * function and pulled-up GPIO, and adjusting the DMA burst length),
 * queue the one-shot INIT flag on MMC_POWER_UP, and program the clock
 * prescaler/divider.  A zero ios->clock stops the bus clock entirely.
 */
static void imxmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
{
	struct imxmci_host *host = mmc_priv(mmc);
	int prescaler;

	if (ios->bus_width == MMC_BUS_WIDTH_4) {
		host->actual_bus_width = MMC_BUS_WIDTH_4;
		imx_gpio_mode(PB11_PF_SD_DAT3);
		BLR(host->dma) = 0;	/* burst 64 byte read/write */
	} else {
		host->actual_bus_width = MMC_BUS_WIDTH_1;
		imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
		BLR(host->dma) = 16;	/* burst 16 byte read/write */
	}

	if (host->power_mode != ios->power_mode) {
		switch (ios->power_mode) {
		case MMC_POWER_OFF:
			break;
		case MMC_POWER_UP:
			/* Next command will carry CMD_DAT_CONT_INIT (80 init clocks). */
			set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
			break;
		case MMC_POWER_ON:
			break;
		}
		host->power_mode = ios->power_mode;
	}

	if (ios->clock) {
		unsigned int clk;
		u16 reg;

		/* The prescaler is 5 for PERCLK2 equal to 96MHz
		 * then 96MHz / 5 = 19.2 MHz
		 */
		clk = clk_get_rate(host->clk);
		/*
		 * NOTE(review): by C precedence this is clk + (CLK_RATE*7)/8,
		 * i.e. the ratio rounds up unless the remainder is below
		 * CLK_RATE/8 — confirm this matches the intended rounding.
		 */
		prescaler = (clk + (CLK_RATE * 7) / 8) / CLK_RATE;
		switch (prescaler) {
		case 0:
		case 1:	prescaler = 0;
			break;
		case 2:	prescaler = 1;
			break;
		case 3:	prescaler = 2;
			break;
		case 4:	prescaler = 4;
			break;
		default:
		case 5:	prescaler = 5;
			break;
		}

		/* NOTE(review): clk here is in Hz although the text says MHz. */
		dev_dbg(mmc_dev(host->mmc), "PERCLK2 %d MHz -> prescaler %d\n",
			clk, prescaler);

		/* Pick the smallest power-of-two divider not exceeding ios->clock. */
		for (clk = 0; clk < 8; clk++) {
			int x;
			x = CLK_RATE / (1 << clk);
			if (x <= ios->clock)
				break;
		}

		/* enable controller */
		reg = readw(host->base + MMC_REG_STR_STP_CLK);
		writew(reg | STR_STP_CLK_ENABLE,
		       host->base + MMC_REG_STR_STP_CLK);

		imxmci_stop_clock(host);
		writew((prescaler << 3) | clk, host->base + MMC_REG_CLK_RATE);
		/*
		 * Under my understanding, clock should not be started there, because it would
		 * initiate SDHC sequencer and send last or random command into card
		 */
		/* imxmci_start_clock(host); */

		dev_dbg(mmc_dev(host->mmc),
			"MMC_CLK_RATE: 0x%08x\n",
			readw(host->base + MMC_REG_CLK_RATE));
	} else {
		imxmci_stop_clock(host);
	}
}
889
890static int imxmci_get_ro(struct mmc_host *mmc)
891{
892 struct imxmci_host *host = mmc_priv(mmc);
893
894 if (host->pdata && host->pdata->get_ro)
895 return !!host->pdata->get_ro(mmc_dev(mmc));
896 /*
897 * Board doesn't support read only detection; let the mmc core
898 * decide what to do.
899 */
900 return -ENOSYS;
901}
902
903
/* Host controller operations handed to the MMC core at registration. */
static const struct mmc_host_ops imxmci_ops = {
	.request	= imxmci_request,
	.set_ios	= imxmci_set_ios,
	.get_ro		= imxmci_get_ro,
};
909
910static void imxmci_check_status(unsigned long data)
911{
912 struct imxmci_host *host = (struct imxmci_host *)data;
913
914 if (host->pdata && host->pdata->card_present &&
915 host->pdata->card_present(mmc_dev(host->mmc)) != host->present) {
916 host->present ^= 1;
917 dev_info(mmc_dev(host->mmc), "card %s\n",
918 host->present ? "inserted" : "removed");
919
920 set_bit(IMXMCI_PEND_CARD_XCHG_b, &host->pending_events);
921 tasklet_schedule(&host->tasklet);
922 }
923
924 if (test_bit(IMXMCI_PEND_WAIT_RESP_b, &host->pending_events) ||
925 test_bit(IMXMCI_PEND_DMA_DATA_b, &host->pending_events)) {
926 atomic_inc(&host->stuck_timeout);
927 if (atomic_read(&host->stuck_timeout) > 4)
928 tasklet_schedule(&host->tasklet);
929 } else {
930 atomic_set(&host->stuck_timeout, 0);
931
932 }
933
934 mod_timer(&host->timer, jiffies + (HZ>>1));
935}
936
937static int __init imxmci_probe(struct platform_device *pdev)
938{
939 struct mmc_host *mmc;
940 struct imxmci_host *host = NULL;
941 struct resource *r;
942 int ret = 0, irq;
943 u16 rev_no;
944
945 pr_info("i.MX mmc driver\n");
946
947 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
948 irq = platform_get_irq(pdev, 0);
949 if (!r || irq < 0)
950 return -ENXIO;
951
952 r = request_mem_region(r->start, resource_size(r), pdev->name);
953 if (!r)
954 return -EBUSY;
955
956 mmc = mmc_alloc_host(sizeof(struct imxmci_host), &pdev->dev);
957 if (!mmc) {
958 ret = -ENOMEM;
959 goto out;
960 }
961
962 mmc->ops = &imxmci_ops;
963 mmc->f_min = 150000;
964 mmc->f_max = CLK_RATE/2;
965 mmc->ocr_avail = MMC_VDD_32_33;
966 mmc->caps = MMC_CAP_4_BIT_DATA;
967
968 /* MMC core transfer sizes tunable parameters */
969 mmc->max_segs = 64;
970 mmc->max_seg_size = 64*512; /* default PAGE_CACHE_SIZE */
971 mmc->max_req_size = 64*512; /* default PAGE_CACHE_SIZE */
972 mmc->max_blk_size = 2048;
973 mmc->max_blk_count = 65535;
974
975 host = mmc_priv(mmc);
976 host->base = ioremap(r->start, resource_size(r));
977 if (!host->base) {
978 ret = -ENOMEM;
979 goto out;
980 }
981
982 host->mmc = mmc;
983 host->dma_allocated = 0;
984 host->pdata = pdev->dev.platform_data;
985 if (!host->pdata)
986 dev_warn(&pdev->dev, "No platform data provided!\n");
987
988 spin_lock_init(&host->lock);
989 host->res = r;
990 host->irq = irq;
991
992 host->clk = clk_get(&pdev->dev, "perclk2");
993 if (IS_ERR(host->clk)) {
994 ret = PTR_ERR(host->clk);
995 goto out;
996 }
997 clk_enable(host->clk);
998
999 imx_gpio_mode(PB8_PF_SD_DAT0);
1000 imx_gpio_mode(PB9_PF_SD_DAT1);
1001 imx_gpio_mode(PB10_PF_SD_DAT2);
1002 /* Configured as GPIO with pull-up to ensure right MCC card mode */
1003 /* Switched to PB11_PF_SD_DAT3 if 4 bit bus is configured */
1004 imx_gpio_mode(GPIO_PORTB | GPIO_IN | GPIO_PUEN | 11);
1005 /* imx_gpio_mode(PB11_PF_SD_DAT3); */
1006 imx_gpio_mode(PB12_PF_SD_CLK);
1007 imx_gpio_mode(PB13_PF_SD_CMD);
1008
1009 imxmci_softreset(host);
1010
1011 rev_no = readw(host->base + MMC_REG_REV_NO);
1012 if (rev_no != 0x390) {
1013 dev_err(mmc_dev(host->mmc), "wrong rev.no. 0x%08x. aborting.\n",
1014 readw(host->base + MMC_REG_REV_NO));
1015 goto out;
1016 }
1017
1018 /* recommended in data sheet */
1019 writew(0x2db4, host->base + MMC_REG_READ_TO);
1020
1021 host->imask = IMXMCI_INT_MASK_DEFAULT;
1022 writew(host->imask, host->base + MMC_REG_INT_MASK);
1023
1024 host->dma = imx_dma_request_by_prio(DRIVER_NAME, DMA_PRIO_LOW);
1025 if(host->dma < 0) {
1026 dev_err(mmc_dev(host->mmc), "imx_dma_request_by_prio failed\n");
1027 ret = -EBUSY;
1028 goto out;
1029 }
1030 host->dma_allocated = 1;
1031 imx_dma_setup_handlers(host->dma, imxmci_dma_irq, NULL, host);
1032 RSSR(host->dma) = DMA_REQ_SDHC;
1033
1034 tasklet_init(&host->tasklet, imxmci_tasklet_fnc, (unsigned long)host);
1035 host->status_reg=0;
1036 host->pending_events=0;
1037
1038 ret = request_irq(host->irq, imxmci_irq, 0, DRIVER_NAME, host);
1039 if (ret)
1040 goto out;
1041
1042 if (host->pdata && host->pdata->card_present)
1043 host->present = host->pdata->card_present(mmc_dev(mmc));
1044 else /* if there is no way to detect assume that card is present */
1045 host->present = 1;
1046
1047 init_timer(&host->timer);
1048 host->timer.data = (unsigned long)host;
1049 host->timer.function = imxmci_check_status;
1050 add_timer(&host->timer);
1051 mod_timer(&host->timer, jiffies + (HZ >> 1));
1052
1053 platform_set_drvdata(pdev, mmc);
1054
1055 mmc_add_host(mmc);
1056
1057 return 0;
1058
1059out:
1060 if (host) {
1061 if (host->dma_allocated) {
1062 imx_dma_free(host->dma);
1063 host->dma_allocated = 0;
1064 }
1065 if (host->clk) {
1066 clk_disable(host->clk);
1067 clk_put(host->clk);
1068 }
1069 if (host->base)
1070 iounmap(host->base);
1071 }
1072 if (mmc)
1073 mmc_free_host(mmc);
1074 release_mem_region(r->start, resource_size(r));
1075 return ret;
1076}
1077
1078static int __exit imxmci_remove(struct platform_device *pdev)
1079{
1080 struct mmc_host *mmc = platform_get_drvdata(pdev);
1081
1082 platform_set_drvdata(pdev, NULL);
1083
1084 if (mmc) {
1085 struct imxmci_host *host = mmc_priv(mmc);
1086
1087 tasklet_disable(&host->tasklet);
1088
1089 del_timer_sync(&host->timer);
1090 mmc_remove_host(mmc);
1091
1092 free_irq(host->irq, host);
1093 iounmap(host->base);
1094 if (host->dma_allocated) {
1095 imx_dma_free(host->dma);
1096 host->dma_allocated = 0;
1097 }
1098
1099 tasklet_kill(&host->tasklet);
1100
1101 clk_disable(host->clk);
1102 clk_put(host->clk);
1103
1104 release_mem_region(host->res->start, resource_size(host->res));
1105
1106 mmc_free_host(mmc);
1107 }
1108 return 0;
1109}
1110
1111#ifdef CONFIG_PM
1112static int imxmci_suspend(struct platform_device *dev, pm_message_t state)
1113{
1114 struct mmc_host *mmc = platform_get_drvdata(dev);
1115 int ret = 0;
1116
1117 if (mmc)
1118 ret = mmc_suspend_host(mmc);
1119
1120 return ret;
1121}
1122
1123static int imxmci_resume(struct platform_device *dev)
1124{
1125 struct mmc_host *mmc = platform_get_drvdata(dev);
1126 struct imxmci_host *host;
1127 int ret = 0;
1128
1129 if (mmc) {
1130 host = mmc_priv(mmc);
1131 if (host)
1132 set_bit(IMXMCI_PEND_SET_INIT_b, &host->pending_events);
1133 ret = mmc_resume_host(mmc);
1134 }
1135
1136 return ret;
1137}
1138#else
1139#define imxmci_suspend NULL
1140#define imxmci_resume NULL
1141#endif /* CONFIG_PM */
1142
1143static struct platform_driver imxmci_driver = {
1144 .remove = __exit_p(imxmci_remove),
1145 .suspend = imxmci_suspend,
1146 .resume = imxmci_resume,
1147 .driver = {
1148 .name = DRIVER_NAME,
1149 .owner = THIS_MODULE,
1150 }
1151};
1152
1153static int __init imxmci_init(void)
1154{
1155 return platform_driver_probe(&imxmci_driver, imxmci_probe);
1156}
1157
1158static void __exit imxmci_exit(void)
1159{
1160 platform_driver_unregister(&imxmci_driver);
1161}
1162
1163module_init(imxmci_init);
1164module_exit(imxmci_exit);
1165
1166MODULE_DESCRIPTION("i.MX Multimedia Card Interface Driver");
1167MODULE_AUTHOR("Sascha Hauer, Pengutronix");
1168MODULE_LICENSE("GPL");
1169MODULE_ALIAS("platform:imx-mmc");
diff --git a/drivers/mmc/host/imxmmc.h b/drivers/mmc/host/imxmmc.h
deleted file mode 100644
index 09d5d4ee3a77..000000000000
--- a/drivers/mmc/host/imxmmc.h
+++ /dev/null
@@ -1,64 +0,0 @@
1#define MMC_REG_STR_STP_CLK 0x00
2#define MMC_REG_STATUS 0x04
3#define MMC_REG_CLK_RATE 0x08
4#define MMC_REG_CMD_DAT_CONT 0x0C
5#define MMC_REG_RES_TO 0x10
6#define MMC_REG_READ_TO 0x14
7#define MMC_REG_BLK_LEN 0x18
8#define MMC_REG_NOB 0x1C
9#define MMC_REG_REV_NO 0x20
10#define MMC_REG_INT_MASK 0x24
11#define MMC_REG_CMD 0x28
12#define MMC_REG_ARGH 0x2C
13#define MMC_REG_ARGL 0x30
14#define MMC_REG_RES_FIFO 0x34
15#define MMC_REG_BUFFER_ACCESS 0x38
16
17#define STR_STP_CLK_IPG_CLK_GATE_DIS (1<<15)
18#define STR_STP_CLK_IPG_PERCLK_GATE_DIS (1<<14)
19#define STR_STP_CLK_ENDIAN (1<<5)
20#define STR_STP_CLK_RESET (1<<3)
21#define STR_STP_CLK_ENABLE (1<<2)
22#define STR_STP_CLK_START_CLK (1<<1)
23#define STR_STP_CLK_STOP_CLK (1<<0)
24#define STATUS_CARD_PRESENCE (1<<15)
25#define STATUS_SDIO_INT_ACTIVE (1<<14)
26#define STATUS_END_CMD_RESP (1<<13)
27#define STATUS_WRITE_OP_DONE (1<<12)
28#define STATUS_DATA_TRANS_DONE (1<<11)
29#define STATUS_WR_CRC_ERROR_CODE_MASK (3<<10)
30#define STATUS_CARD_BUS_CLK_RUN (1<<8)
31#define STATUS_APPL_BUFF_FF (1<<7)
32#define STATUS_APPL_BUFF_FE (1<<6)
33#define STATUS_RESP_CRC_ERR (1<<5)
34#define STATUS_CRC_READ_ERR (1<<3)
35#define STATUS_CRC_WRITE_ERR (1<<2)
36#define STATUS_TIME_OUT_RESP (1<<1)
37#define STATUS_TIME_OUT_READ (1<<0)
38#define STATUS_ERR_MASK 0x2f
39#define CLK_RATE_PRESCALER(x) ((x) & 0x7)
40#define CLK_RATE_CLK_RATE(x) (((x) & 0x7) << 3)
41#define CMD_DAT_CONT_CMD_RESP_LONG_OFF (1<<12)
42#define CMD_DAT_CONT_STOP_READWAIT (1<<11)
43#define CMD_DAT_CONT_START_READWAIT (1<<10)
44#define CMD_DAT_CONT_BUS_WIDTH_1 (0<<8)
45#define CMD_DAT_CONT_BUS_WIDTH_4 (2<<8)
46#define CMD_DAT_CONT_INIT (1<<7)
47#define CMD_DAT_CONT_BUSY (1<<6)
48#define CMD_DAT_CONT_STREAM_BLOCK (1<<5)
49#define CMD_DAT_CONT_WRITE (1<<4)
50#define CMD_DAT_CONT_DATA_ENABLE (1<<3)
51#define CMD_DAT_CONT_RESPONSE_FORMAT_R1 (1)
52#define CMD_DAT_CONT_RESPONSE_FORMAT_R2 (2)
53#define CMD_DAT_CONT_RESPONSE_FORMAT_R3 (3)
54#define CMD_DAT_CONT_RESPONSE_FORMAT_R4 (4)
55#define CMD_DAT_CONT_RESPONSE_FORMAT_R5 (5)
56#define CMD_DAT_CONT_RESPONSE_FORMAT_R6 (6)
57#define INT_MASK_AUTO_CARD_DETECT (1<<6)
58#define INT_MASK_DAT0_EN (1<<5)
59#define INT_MASK_SDIO (1<<4)
60#define INT_MASK_BUF_READY (1<<3)
61#define INT_MASK_END_CMD_RES (1<<2)
62#define INT_MASK_WRITE_OP_DONE (1<<1)
63#define INT_MASK_DATA_TRAN (1<<0)
64#define INT_ALL (0x7f)
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index b6f38421d541..f0fcce40cd8d 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -15,6 +15,7 @@
15#include <linux/device.h> 15#include <linux/device.h>
16#include <linux/interrupt.h> 16#include <linux/interrupt.h>
17#include <linux/kernel.h> 17#include <linux/kernel.h>
18#include <linux/slab.h>
18#include <linux/delay.h> 19#include <linux/delay.h>
19#include <linux/err.h> 20#include <linux/err.h>
20#include <linux/highmem.h> 21#include <linux/highmem.h>
@@ -25,6 +26,7 @@
25#include <linux/clk.h> 26#include <linux/clk.h>
26#include <linux/scatterlist.h> 27#include <linux/scatterlist.h>
27#include <linux/gpio.h> 28#include <linux/gpio.h>
29#include <linux/of_gpio.h>
28#include <linux/regulator/consumer.h> 30#include <linux/regulator/consumer.h>
29#include <linux/dmaengine.h> 31#include <linux/dmaengine.h>
30#include <linux/dma-mapping.h> 32#include <linux/dma-mapping.h>
@@ -1207,21 +1209,76 @@ static const struct mmc_host_ops mmci_ops = {
1207 .get_cd = mmci_get_cd, 1209 .get_cd = mmci_get_cd,
1208}; 1210};
1209 1211
1212#ifdef CONFIG_OF
1213static void mmci_dt_populate_generic_pdata(struct device_node *np,
1214 struct mmci_platform_data *pdata)
1215{
1216 int bus_width = 0;
1217
1218 pdata->gpio_wp = of_get_named_gpio(np, "wp-gpios", 0);
1219 if (!pdata->gpio_wp)
1220 pdata->gpio_wp = -1;
1221
1222 pdata->gpio_cd = of_get_named_gpio(np, "cd-gpios", 0);
1223 if (!pdata->gpio_cd)
1224 pdata->gpio_cd = -1;
1225
1226 if (of_get_property(np, "cd-inverted", NULL))
1227 pdata->cd_invert = true;
1228 else
1229 pdata->cd_invert = false;
1230
1231 of_property_read_u32(np, "max-frequency", &pdata->f_max);
1232 if (!pdata->f_max)
1233 pr_warn("%s has no 'max-frequency' property\n", np->full_name);
1234
1235 if (of_get_property(np, "mmc-cap-mmc-highspeed", NULL))
1236 pdata->capabilities |= MMC_CAP_MMC_HIGHSPEED;
1237 if (of_get_property(np, "mmc-cap-sd-highspeed", NULL))
1238 pdata->capabilities |= MMC_CAP_SD_HIGHSPEED;
1239
1240 of_property_read_u32(np, "bus-width", &bus_width);
1241 switch (bus_width) {
1242 case 0 :
1243 /* No bus-width supplied. */
1244 break;
1245 case 4 :
1246 pdata->capabilities |= MMC_CAP_4_BIT_DATA;
1247 break;
1248 case 8 :
1249 pdata->capabilities |= MMC_CAP_8_BIT_DATA;
1250 break;
1251 default :
1252 pr_warn("%s: Unsupported bus width\n", np->full_name);
1253 }
1254}
1255#else
1256static void mmci_dt_populate_generic_pdata(struct device_node *np,
1257 struct mmci_platform_data *pdata)
1258{
1259 return;
1260}
1261#endif
1262
1210static int __devinit mmci_probe(struct amba_device *dev, 1263static int __devinit mmci_probe(struct amba_device *dev,
1211 const struct amba_id *id) 1264 const struct amba_id *id)
1212{ 1265{
1213 struct mmci_platform_data *plat = dev->dev.platform_data; 1266 struct mmci_platform_data *plat = dev->dev.platform_data;
1267 struct device_node *np = dev->dev.of_node;
1214 struct variant_data *variant = id->data; 1268 struct variant_data *variant = id->data;
1215 struct mmci_host *host; 1269 struct mmci_host *host;
1216 struct mmc_host *mmc; 1270 struct mmc_host *mmc;
1217 int ret; 1271 int ret;
1218 1272
1219 /* must have platform data */ 1273 /* Must have platform data or Device Tree. */
1220 if (!plat) { 1274 if (!plat && !np) {
1221 ret = -EINVAL; 1275 dev_err(&dev->dev, "No plat data or DT found\n");
1222 goto out; 1276 return -EINVAL;
1223 } 1277 }
1224 1278
1279 if (np)
1280 mmci_dt_populate_generic_pdata(np, plat);
1281
1225 ret = amba_request_regions(dev, DRIVER_NAME); 1282 ret = amba_request_regions(dev, DRIVER_NAME);
1226 if (ret) 1283 if (ret)
1227 goto out; 1284 goto out;
diff --git a/drivers/mmc/host/mvsdio.c b/drivers/mmc/host/mvsdio.c
index eeb8cd125b0c..3b9136c1a475 100644
--- a/drivers/mmc/host/mvsdio.c
+++ b/drivers/mmc/host/mvsdio.c
@@ -19,6 +19,7 @@
19#include <linux/dma-mapping.h> 19#include <linux/dma-mapping.h>
20#include <linux/scatterlist.h> 20#include <linux/scatterlist.h>
21#include <linux/irq.h> 21#include <linux/irq.h>
22#include <linux/clk.h>
22#include <linux/gpio.h> 23#include <linux/gpio.h>
23#include <linux/mmc/host.h> 24#include <linux/mmc/host.h>
24 25
@@ -51,6 +52,7 @@ struct mvsd_host {
51 struct device *dev; 52 struct device *dev;
52 struct resource *res; 53 struct resource *res;
53 int irq; 54 int irq;
55 struct clk *clk;
54 int gpio_card_detect; 56 int gpio_card_detect;
55 int gpio_write_protect; 57 int gpio_write_protect;
56}; 58};
@@ -770,6 +772,13 @@ static int __init mvsd_probe(struct platform_device *pdev)
770 } else 772 } else
771 host->irq = irq; 773 host->irq = irq;
772 774
 775 /* Not all platforms can gate the clock, so it is not
 776 an error if the clock does not exist. */
777 host->clk = clk_get(&pdev->dev, NULL);
778 if (!IS_ERR(host->clk)) {
779 clk_prepare_enable(host->clk);
780 }
781
773 if (mvsd_data->gpio_card_detect) { 782 if (mvsd_data->gpio_card_detect) {
774 ret = gpio_request(mvsd_data->gpio_card_detect, 783 ret = gpio_request(mvsd_data->gpio_card_detect,
775 DRIVER_NAME " cd"); 784 DRIVER_NAME " cd");
@@ -854,6 +863,11 @@ static int __exit mvsd_remove(struct platform_device *pdev)
854 mvsd_power_down(host); 863 mvsd_power_down(host);
855 iounmap(host->base); 864 iounmap(host->base);
856 release_resource(host->res); 865 release_resource(host->res);
866
867 if (!IS_ERR(host->clk)) {
868 clk_disable_unprepare(host->clk);
869 clk_put(host->clk);
870 }
857 mmc_free_host(mmc); 871 mmc_free_host(mmc);
858 } 872 }
859 platform_set_drvdata(pdev, NULL); 873 platform_set_drvdata(pdev, NULL);
diff --git a/drivers/mmc/host/mxcmmc.c b/drivers/mmc/host/mxcmmc.c
index b2058b432320..28ed52d58f7f 100644
--- a/drivers/mmc/host/mxcmmc.c
+++ b/drivers/mmc/host/mxcmmc.c
@@ -136,7 +136,8 @@ struct mxcmci_host {
136 u16 rev_no; 136 u16 rev_no;
137 unsigned int cmdat; 137 unsigned int cmdat;
138 138
139 struct clk *clk; 139 struct clk *clk_ipg;
140 struct clk *clk_per;
140 141
141 int clock; 142 int clock;
142 143
@@ -672,7 +673,7 @@ static void mxcmci_set_clk_rate(struct mxcmci_host *host, unsigned int clk_ios)
672{ 673{
673 unsigned int divider; 674 unsigned int divider;
674 int prescaler = 0; 675 int prescaler = 0;
675 unsigned int clk_in = clk_get_rate(host->clk); 676 unsigned int clk_in = clk_get_rate(host->clk_per);
676 677
677 while (prescaler <= 0x800) { 678 while (prescaler <= 0x800) {
678 for (divider = 1; divider <= 0xF; divider++) { 679 for (divider = 1; divider <= 0xF; divider++) {
@@ -900,12 +901,20 @@ static int mxcmci_probe(struct platform_device *pdev)
900 host->res = r; 901 host->res = r;
901 host->irq = irq; 902 host->irq = irq;
902 903
903 host->clk = clk_get(&pdev->dev, NULL); 904 host->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
904 if (IS_ERR(host->clk)) { 905 if (IS_ERR(host->clk_ipg)) {
905 ret = PTR_ERR(host->clk); 906 ret = PTR_ERR(host->clk_ipg);
906 goto out_iounmap; 907 goto out_iounmap;
907 } 908 }
908 clk_enable(host->clk); 909
910 host->clk_per = devm_clk_get(&pdev->dev, "per");
911 if (IS_ERR(host->clk_per)) {
912 ret = PTR_ERR(host->clk_per);
913 goto out_iounmap;
914 }
915
916 clk_prepare_enable(host->clk_per);
917 clk_prepare_enable(host->clk_ipg);
909 918
910 mxcmci_softreset(host); 919 mxcmci_softreset(host);
911 920
@@ -917,8 +926,8 @@ static int mxcmci_probe(struct platform_device *pdev)
917 goto out_clk_put; 926 goto out_clk_put;
918 } 927 }
919 928
920 mmc->f_min = clk_get_rate(host->clk) >> 16; 929 mmc->f_min = clk_get_rate(host->clk_per) >> 16;
921 mmc->f_max = clk_get_rate(host->clk) >> 1; 930 mmc->f_max = clk_get_rate(host->clk_per) >> 1;
922 931
923 /* recommended in data sheet */ 932 /* recommended in data sheet */
924 writew(0x2db4, host->base + MMC_REG_READ_TO); 933 writew(0x2db4, host->base + MMC_REG_READ_TO);
@@ -967,8 +976,8 @@ out_free_dma:
967 if (host->dma) 976 if (host->dma)
968 dma_release_channel(host->dma); 977 dma_release_channel(host->dma);
969out_clk_put: 978out_clk_put:
970 clk_disable(host->clk); 979 clk_disable_unprepare(host->clk_per);
971 clk_put(host->clk); 980 clk_disable_unprepare(host->clk_ipg);
972out_iounmap: 981out_iounmap:
973 iounmap(host->base); 982 iounmap(host->base);
974out_free: 983out_free:
@@ -999,8 +1008,8 @@ static int mxcmci_remove(struct platform_device *pdev)
999 if (host->dma) 1008 if (host->dma)
1000 dma_release_channel(host->dma); 1009 dma_release_channel(host->dma);
1001 1010
1002 clk_disable(host->clk); 1011 clk_disable_unprepare(host->clk_per);
1003 clk_put(host->clk); 1012 clk_disable_unprepare(host->clk_ipg);
1004 1013
1005 release_mem_region(host->res->start, resource_size(host->res)); 1014 release_mem_region(host->res->start, resource_size(host->res));
1006 1015
@@ -1018,7 +1027,8 @@ static int mxcmci_suspend(struct device *dev)
1018 1027
1019 if (mmc) 1028 if (mmc)
1020 ret = mmc_suspend_host(mmc); 1029 ret = mmc_suspend_host(mmc);
1021 clk_disable(host->clk); 1030 clk_disable_unprepare(host->clk_per);
1031 clk_disable_unprepare(host->clk_ipg);
1022 1032
1023 return ret; 1033 return ret;
1024} 1034}
@@ -1029,7 +1039,8 @@ static int mxcmci_resume(struct device *dev)
1029 struct mxcmci_host *host = mmc_priv(mmc); 1039 struct mxcmci_host *host = mmc_priv(mmc);
1030 int ret = 0; 1040 int ret = 0;
1031 1041
1032 clk_enable(host->clk); 1042 clk_prepare_enable(host->clk_per);
1043 clk_prepare_enable(host->clk_ipg);
1033 if (mmc) 1044 if (mmc)
1034 ret = mmc_resume_host(mmc); 1045 ret = mmc_resume_host(mmc);
1035 1046
diff --git a/drivers/mmc/host/mxs-mmc.c b/drivers/mmc/host/mxs-mmc.c
index bb03ddda481d..34a90266ab11 100644
--- a/drivers/mmc/host/mxs-mmc.c
+++ b/drivers/mmc/host/mxs-mmc.c
@@ -23,6 +23,9 @@
23#include <linux/kernel.h> 23#include <linux/kernel.h>
24#include <linux/init.h> 24#include <linux/init.h>
25#include <linux/ioport.h> 25#include <linux/ioport.h>
26#include <linux/of.h>
27#include <linux/of_device.h>
28#include <linux/of_gpio.h>
26#include <linux/platform_device.h> 29#include <linux/platform_device.h>
27#include <linux/delay.h> 30#include <linux/delay.h>
28#include <linux/interrupt.h> 31#include <linux/interrupt.h>
@@ -40,18 +43,15 @@
40#include <linux/module.h> 43#include <linux/module.h>
41#include <linux/fsl/mxs-dma.h> 44#include <linux/fsl/mxs-dma.h>
42#include <linux/pinctrl/consumer.h> 45#include <linux/pinctrl/consumer.h>
43 46#include <linux/stmp_device.h>
44#include <mach/mxs.h> 47#include <linux/mmc/mxs-mmc.h>
45#include <mach/common.h>
46#include <mach/mmc.h>
47 48
48#define DRIVER_NAME "mxs-mmc" 49#define DRIVER_NAME "mxs-mmc"
49 50
50/* card detect polling timeout */ 51/* card detect polling timeout */
51#define MXS_MMC_DETECT_TIMEOUT (HZ/2) 52#define MXS_MMC_DETECT_TIMEOUT (HZ/2)
52 53
53#define SSP_VERSION_LATEST 4 54#define ssp_is_old(host) ((host)->devid == IMX23_MMC)
54#define ssp_is_old() (host->version < SSP_VERSION_LATEST)
55 55
56/* SSP registers */ 56/* SSP registers */
57#define HW_SSP_CTRL0 0x000 57#define HW_SSP_CTRL0 0x000
@@ -86,14 +86,14 @@
86#define BM_SSP_BLOCK_SIZE_BLOCK_COUNT (0xffffff << 4) 86#define BM_SSP_BLOCK_SIZE_BLOCK_COUNT (0xffffff << 4)
87#define BP_SSP_BLOCK_SIZE_BLOCK_SIZE (0) 87#define BP_SSP_BLOCK_SIZE_BLOCK_SIZE (0)
88#define BM_SSP_BLOCK_SIZE_BLOCK_SIZE (0xf) 88#define BM_SSP_BLOCK_SIZE_BLOCK_SIZE (0xf)
89#define HW_SSP_TIMING (ssp_is_old() ? 0x050 : 0x070) 89#define HW_SSP_TIMING(h) (ssp_is_old(h) ? 0x050 : 0x070)
90#define BP_SSP_TIMING_TIMEOUT (16) 90#define BP_SSP_TIMING_TIMEOUT (16)
91#define BM_SSP_TIMING_TIMEOUT (0xffff << 16) 91#define BM_SSP_TIMING_TIMEOUT (0xffff << 16)
92#define BP_SSP_TIMING_CLOCK_DIVIDE (8) 92#define BP_SSP_TIMING_CLOCK_DIVIDE (8)
93#define BM_SSP_TIMING_CLOCK_DIVIDE (0xff << 8) 93#define BM_SSP_TIMING_CLOCK_DIVIDE (0xff << 8)
94#define BP_SSP_TIMING_CLOCK_RATE (0) 94#define BP_SSP_TIMING_CLOCK_RATE (0)
95#define BM_SSP_TIMING_CLOCK_RATE (0xff) 95#define BM_SSP_TIMING_CLOCK_RATE (0xff)
96#define HW_SSP_CTRL1 (ssp_is_old() ? 0x060 : 0x080) 96#define HW_SSP_CTRL1(h) (ssp_is_old(h) ? 0x060 : 0x080)
97#define BM_SSP_CTRL1_SDIO_IRQ (1 << 31) 97#define BM_SSP_CTRL1_SDIO_IRQ (1 << 31)
98#define BM_SSP_CTRL1_SDIO_IRQ_EN (1 << 30) 98#define BM_SSP_CTRL1_SDIO_IRQ_EN (1 << 30)
99#define BM_SSP_CTRL1_RESP_ERR_IRQ (1 << 29) 99#define BM_SSP_CTRL1_RESP_ERR_IRQ (1 << 29)
@@ -116,15 +116,13 @@
116#define BM_SSP_CTRL1_WORD_LENGTH (0xf << 4) 116#define BM_SSP_CTRL1_WORD_LENGTH (0xf << 4)
117#define BP_SSP_CTRL1_SSP_MODE (0) 117#define BP_SSP_CTRL1_SSP_MODE (0)
118#define BM_SSP_CTRL1_SSP_MODE (0xf) 118#define BM_SSP_CTRL1_SSP_MODE (0xf)
119#define HW_SSP_SDRESP0 (ssp_is_old() ? 0x080 : 0x0a0) 119#define HW_SSP_SDRESP0(h) (ssp_is_old(h) ? 0x080 : 0x0a0)
120#define HW_SSP_SDRESP1 (ssp_is_old() ? 0x090 : 0x0b0) 120#define HW_SSP_SDRESP1(h) (ssp_is_old(h) ? 0x090 : 0x0b0)
121#define HW_SSP_SDRESP2 (ssp_is_old() ? 0x0a0 : 0x0c0) 121#define HW_SSP_SDRESP2(h) (ssp_is_old(h) ? 0x0a0 : 0x0c0)
122#define HW_SSP_SDRESP3 (ssp_is_old() ? 0x0b0 : 0x0d0) 122#define HW_SSP_SDRESP3(h) (ssp_is_old(h) ? 0x0b0 : 0x0d0)
123#define HW_SSP_STATUS (ssp_is_old() ? 0x0c0 : 0x100) 123#define HW_SSP_STATUS(h) (ssp_is_old(h) ? 0x0c0 : 0x100)
124#define BM_SSP_STATUS_CARD_DETECT (1 << 28) 124#define BM_SSP_STATUS_CARD_DETECT (1 << 28)
125#define BM_SSP_STATUS_SDIO_IRQ (1 << 17) 125#define BM_SSP_STATUS_SDIO_IRQ (1 << 17)
126#define HW_SSP_VERSION (cpu_is_mx23() ? 0x110 : 0x130)
127#define BP_SSP_VERSION_MAJOR (24)
128 126
129#define BF_SSP(value, field) (((value) << BP_SSP_##field) & BM_SSP_##field) 127#define BF_SSP(value, field) (((value) << BP_SSP_##field) & BM_SSP_##field)
130 128
@@ -139,6 +137,11 @@
139 137
140#define SSP_PIO_NUM 3 138#define SSP_PIO_NUM 3
141 139
140enum mxs_mmc_id {
141 IMX23_MMC,
142 IMX28_MMC,
143};
144
142struct mxs_mmc_host { 145struct mxs_mmc_host {
143 struct mmc_host *mmc; 146 struct mmc_host *mmc;
144 struct mmc_request *mrq; 147 struct mmc_request *mrq;
@@ -146,9 +149,7 @@ struct mxs_mmc_host {
146 struct mmc_data *data; 149 struct mmc_data *data;
147 150
148 void __iomem *base; 151 void __iomem *base;
149 int irq; 152 int dma_channel;
150 struct resource *res;
151 struct resource *dma_res;
152 struct clk *clk; 153 struct clk *clk;
153 unsigned int clk_rate; 154 unsigned int clk_rate;
154 155
@@ -158,32 +159,28 @@ struct mxs_mmc_host {
158 enum dma_transfer_direction slave_dirn; 159 enum dma_transfer_direction slave_dirn;
159 u32 ssp_pio_words[SSP_PIO_NUM]; 160 u32 ssp_pio_words[SSP_PIO_NUM];
160 161
161 unsigned int version; 162 enum mxs_mmc_id devid;
162 unsigned char bus_width; 163 unsigned char bus_width;
163 spinlock_t lock; 164 spinlock_t lock;
164 int sdio_irq_en; 165 int sdio_irq_en;
166 int wp_gpio;
165}; 167};
166 168
167static int mxs_mmc_get_ro(struct mmc_host *mmc) 169static int mxs_mmc_get_ro(struct mmc_host *mmc)
168{ 170{
169 struct mxs_mmc_host *host = mmc_priv(mmc); 171 struct mxs_mmc_host *host = mmc_priv(mmc);
170 struct mxs_mmc_platform_data *pdata =
171 mmc_dev(host->mmc)->platform_data;
172
173 if (!pdata)
174 return -EFAULT;
175 172
176 if (!gpio_is_valid(pdata->wp_gpio)) 173 if (!gpio_is_valid(host->wp_gpio))
177 return -EINVAL; 174 return -EINVAL;
178 175
179 return gpio_get_value(pdata->wp_gpio); 176 return gpio_get_value(host->wp_gpio);
180} 177}
181 178
182static int mxs_mmc_get_cd(struct mmc_host *mmc) 179static int mxs_mmc_get_cd(struct mmc_host *mmc)
183{ 180{
184 struct mxs_mmc_host *host = mmc_priv(mmc); 181 struct mxs_mmc_host *host = mmc_priv(mmc);
185 182
186 return !(readl(host->base + HW_SSP_STATUS) & 183 return !(readl(host->base + HW_SSP_STATUS(host)) &
187 BM_SSP_STATUS_CARD_DETECT); 184 BM_SSP_STATUS_CARD_DETECT);
188} 185}
189 186
@@ -191,7 +188,7 @@ static void mxs_mmc_reset(struct mxs_mmc_host *host)
191{ 188{
192 u32 ctrl0, ctrl1; 189 u32 ctrl0, ctrl1;
193 190
194 mxs_reset_block(host->base); 191 stmp_reset_block(host->base);
195 192
196 ctrl0 = BM_SSP_CTRL0_IGNORE_CRC; 193 ctrl0 = BM_SSP_CTRL0_IGNORE_CRC;
197 ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) | 194 ctrl1 = BF_SSP(0x3, CTRL1_SSP_MODE) |
@@ -207,7 +204,7 @@ static void mxs_mmc_reset(struct mxs_mmc_host *host)
207 writel(BF_SSP(0xffff, TIMING_TIMEOUT) | 204 writel(BF_SSP(0xffff, TIMING_TIMEOUT) |
208 BF_SSP(2, TIMING_CLOCK_DIVIDE) | 205 BF_SSP(2, TIMING_CLOCK_DIVIDE) |
209 BF_SSP(0, TIMING_CLOCK_RATE), 206 BF_SSP(0, TIMING_CLOCK_RATE),
210 host->base + HW_SSP_TIMING); 207 host->base + HW_SSP_TIMING(host));
211 208
212 if (host->sdio_irq_en) { 209 if (host->sdio_irq_en) {
213 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK; 210 ctrl0 |= BM_SSP_CTRL0_SDIO_IRQ_CHECK;
@@ -215,7 +212,7 @@ static void mxs_mmc_reset(struct mxs_mmc_host *host)
215 } 212 }
216 213
217 writel(ctrl0, host->base + HW_SSP_CTRL0); 214 writel(ctrl0, host->base + HW_SSP_CTRL0);
218 writel(ctrl1, host->base + HW_SSP_CTRL1); 215 writel(ctrl1, host->base + HW_SSP_CTRL1(host));
219} 216}
220 217
221static void mxs_mmc_start_cmd(struct mxs_mmc_host *host, 218static void mxs_mmc_start_cmd(struct mxs_mmc_host *host,
@@ -229,12 +226,12 @@ static void mxs_mmc_request_done(struct mxs_mmc_host *host)
229 226
230 if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) { 227 if (mmc_resp_type(cmd) & MMC_RSP_PRESENT) {
231 if (mmc_resp_type(cmd) & MMC_RSP_136) { 228 if (mmc_resp_type(cmd) & MMC_RSP_136) {
232 cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0); 229 cmd->resp[3] = readl(host->base + HW_SSP_SDRESP0(host));
233 cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1); 230 cmd->resp[2] = readl(host->base + HW_SSP_SDRESP1(host));
234 cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2); 231 cmd->resp[1] = readl(host->base + HW_SSP_SDRESP2(host));
235 cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3); 232 cmd->resp[0] = readl(host->base + HW_SSP_SDRESP3(host));
236 } else { 233 } else {
237 cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0); 234 cmd->resp[0] = readl(host->base + HW_SSP_SDRESP0(host));
238 } 235 }
239 } 236 }
240 237
@@ -277,9 +274,9 @@ static irqreturn_t mxs_mmc_irq_handler(int irq, void *dev_id)
277 274
278 spin_lock(&host->lock); 275 spin_lock(&host->lock);
279 276
280 stat = readl(host->base + HW_SSP_CTRL1); 277 stat = readl(host->base + HW_SSP_CTRL1(host));
281 writel(stat & MXS_MMC_IRQ_BITS, 278 writel(stat & MXS_MMC_IRQ_BITS,
282 host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR); 279 host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
283 280
284 if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN)) 281 if ((stat & BM_SSP_CTRL1_SDIO_IRQ) && (stat & BM_SSP_CTRL1_SDIO_IRQ_EN))
285 mmc_signal_sdio_irq(host->mmc); 282 mmc_signal_sdio_irq(host->mmc);
@@ -485,7 +482,7 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
485 blocks = 1; 482 blocks = 1;
486 483
487 /* xfer count, block size and count need to be set differently */ 484 /* xfer count, block size and count need to be set differently */
488 if (ssp_is_old()) { 485 if (ssp_is_old(host)) {
489 ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT); 486 ctrl0 |= BF_SSP(data_size, CTRL0_XFER_COUNT);
490 cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) | 487 cmd0 |= BF_SSP(log2_blksz, CMD0_BLOCK_SIZE) |
491 BF_SSP(blocks - 1, CMD0_BLOCK_COUNT); 488 BF_SSP(blocks - 1, CMD0_BLOCK_COUNT);
@@ -509,10 +506,10 @@ static void mxs_mmc_adtc(struct mxs_mmc_host *host)
509 506
510 /* set the timeout count */ 507 /* set the timeout count */
511 timeout = mxs_ns_to_ssp_ticks(host->clk_rate, data->timeout_ns); 508 timeout = mxs_ns_to_ssp_ticks(host->clk_rate, data->timeout_ns);
512 val = readl(host->base + HW_SSP_TIMING); 509 val = readl(host->base + HW_SSP_TIMING(host));
513 val &= ~(BM_SSP_TIMING_TIMEOUT); 510 val &= ~(BM_SSP_TIMING_TIMEOUT);
514 val |= BF_SSP(timeout, TIMING_TIMEOUT); 511 val |= BF_SSP(timeout, TIMING_TIMEOUT);
515 writel(val, host->base + HW_SSP_TIMING); 512 writel(val, host->base + HW_SSP_TIMING(host));
516 513
517 /* pio */ 514 /* pio */
518 host->ssp_pio_words[0] = ctrl0; 515 host->ssp_pio_words[0] = ctrl0;
@@ -598,11 +595,11 @@ static void mxs_mmc_set_clk_rate(struct mxs_mmc_host *host, unsigned int rate)
598 595
599 ssp_sck = ssp_clk / clock_divide / (1 + clock_rate); 596 ssp_sck = ssp_clk / clock_divide / (1 + clock_rate);
600 597
601 val = readl(host->base + HW_SSP_TIMING); 598 val = readl(host->base + HW_SSP_TIMING(host));
602 val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE); 599 val &= ~(BM_SSP_TIMING_CLOCK_DIVIDE | BM_SSP_TIMING_CLOCK_RATE);
603 val |= BF_SSP(clock_divide, TIMING_CLOCK_DIVIDE); 600 val |= BF_SSP(clock_divide, TIMING_CLOCK_DIVIDE);
604 val |= BF_SSP(clock_rate, TIMING_CLOCK_RATE); 601 val |= BF_SSP(clock_rate, TIMING_CLOCK_RATE);
605 writel(val, host->base + HW_SSP_TIMING); 602 writel(val, host->base + HW_SSP_TIMING(host));
606 603
607 host->clk_rate = ssp_sck; 604 host->clk_rate = ssp_sck;
608 605
@@ -637,18 +634,19 @@ static void mxs_mmc_enable_sdio_irq(struct mmc_host *mmc, int enable)
637 634
638 if (enable) { 635 if (enable) {
639 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, 636 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
640 host->base + HW_SSP_CTRL0 + MXS_SET_ADDR); 637 host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_SET);
641 writel(BM_SSP_CTRL1_SDIO_IRQ_EN, 638 writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
642 host->base + HW_SSP_CTRL1 + MXS_SET_ADDR); 639 host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_SET);
643 640
644 if (readl(host->base + HW_SSP_STATUS) & BM_SSP_STATUS_SDIO_IRQ) 641 if (readl(host->base + HW_SSP_STATUS(host)) &
642 BM_SSP_STATUS_SDIO_IRQ)
645 mmc_signal_sdio_irq(host->mmc); 643 mmc_signal_sdio_irq(host->mmc);
646 644
647 } else { 645 } else {
648 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK, 646 writel(BM_SSP_CTRL0_SDIO_IRQ_CHECK,
649 host->base + HW_SSP_CTRL0 + MXS_CLR_ADDR); 647 host->base + HW_SSP_CTRL0 + STMP_OFFSET_REG_CLR);
650 writel(BM_SSP_CTRL1_SDIO_IRQ_EN, 648 writel(BM_SSP_CTRL1_SDIO_IRQ_EN,
651 host->base + HW_SSP_CTRL1 + MXS_CLR_ADDR); 649 host->base + HW_SSP_CTRL1(host) + STMP_OFFSET_REG_CLR);
652 } 650 }
653 651
654 spin_unlock_irqrestore(&host->lock, flags); 652 spin_unlock_irqrestore(&host->lock, flags);
@@ -669,7 +667,7 @@ static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
669 if (!mxs_dma_is_apbh(chan)) 667 if (!mxs_dma_is_apbh(chan))
670 return false; 668 return false;
671 669
672 if (chan->chan_id != host->dma_res->start) 670 if (chan->chan_id != host->dma_channel)
673 return false; 671 return false;
674 672
675 chan->private = &host->dma_data; 673 chan->private = &host->dma_data;
@@ -677,11 +675,34 @@ static bool mxs_mmc_dma_filter(struct dma_chan *chan, void *param)
677 return true; 675 return true;
678} 676}
679 677
678static struct platform_device_id mxs_mmc_ids[] = {
679 {
680 .name = "imx23-mmc",
681 .driver_data = IMX23_MMC,
682 }, {
683 .name = "imx28-mmc",
684 .driver_data = IMX28_MMC,
685 }, {
686 /* sentinel */
687 }
688};
689MODULE_DEVICE_TABLE(platform, mxs_mmc_ids);
690
691static const struct of_device_id mxs_mmc_dt_ids[] = {
692 { .compatible = "fsl,imx23-mmc", .data = (void *) IMX23_MMC, },
693 { .compatible = "fsl,imx28-mmc", .data = (void *) IMX28_MMC, },
694 { /* sentinel */ }
695};
696MODULE_DEVICE_TABLE(of, mxs_mmc_dt_ids);
697
680static int mxs_mmc_probe(struct platform_device *pdev) 698static int mxs_mmc_probe(struct platform_device *pdev)
681{ 699{
700 const struct of_device_id *of_id =
701 of_match_device(mxs_mmc_dt_ids, &pdev->dev);
702 struct device_node *np = pdev->dev.of_node;
682 struct mxs_mmc_host *host; 703 struct mxs_mmc_host *host;
683 struct mmc_host *mmc; 704 struct mmc_host *mmc;
684 struct resource *iores, *dmares, *r; 705 struct resource *iores, *dmares;
685 struct mxs_mmc_platform_data *pdata; 706 struct mxs_mmc_platform_data *pdata;
686 struct pinctrl *pinctrl; 707 struct pinctrl *pinctrl;
687 int ret = 0, irq_err, irq_dma; 708 int ret = 0, irq_err, irq_dma;
@@ -691,46 +712,51 @@ static int mxs_mmc_probe(struct platform_device *pdev)
691 dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0); 712 dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
692 irq_err = platform_get_irq(pdev, 0); 713 irq_err = platform_get_irq(pdev, 0);
693 irq_dma = platform_get_irq(pdev, 1); 714 irq_dma = platform_get_irq(pdev, 1);
694 if (!iores || !dmares || irq_err < 0 || irq_dma < 0) 715 if (!iores || irq_err < 0 || irq_dma < 0)
695 return -EINVAL; 716 return -EINVAL;
696 717
697 r = request_mem_region(iores->start, resource_size(iores), pdev->name);
698 if (!r)
699 return -EBUSY;
700
701 mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev); 718 mmc = mmc_alloc_host(sizeof(struct mxs_mmc_host), &pdev->dev);
702 if (!mmc) { 719 if (!mmc)
703 ret = -ENOMEM; 720 return -ENOMEM;
704 goto out_release_mem;
705 }
706 721
707 host = mmc_priv(mmc); 722 host = mmc_priv(mmc);
708 host->base = ioremap(r->start, resource_size(r)); 723 host->base = devm_request_and_ioremap(&pdev->dev, iores);
709 if (!host->base) { 724 if (!host->base) {
710 ret = -ENOMEM; 725 ret = -EADDRNOTAVAIL;
711 goto out_mmc_free; 726 goto out_mmc_free;
712 } 727 }
713 728
714 /* only major verion does matter */ 729 if (np) {
715 host->version = readl(host->base + HW_SSP_VERSION) >> 730 host->devid = (enum mxs_mmc_id) of_id->data;
716 BP_SSP_VERSION_MAJOR; 731 /*
732 * TODO: This is a temporary solution and should be changed
733 * to use generic DMA binding later when the helpers get in.
734 */
735 ret = of_property_read_u32(np, "fsl,ssp-dma-channel",
736 &host->dma_channel);
737 if (ret) {
738 dev_err(mmc_dev(host->mmc),
739 "failed to get dma channel\n");
740 goto out_mmc_free;
741 }
742 } else {
743 host->devid = pdev->id_entry->driver_data;
744 host->dma_channel = dmares->start;
745 }
717 746
718 host->mmc = mmc; 747 host->mmc = mmc;
719 host->res = r;
720 host->dma_res = dmares;
721 host->irq = irq_err;
722 host->sdio_irq_en = 0; 748 host->sdio_irq_en = 0;
723 749
724 pinctrl = devm_pinctrl_get_select_default(&pdev->dev); 750 pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
725 if (IS_ERR(pinctrl)) { 751 if (IS_ERR(pinctrl)) {
726 ret = PTR_ERR(pinctrl); 752 ret = PTR_ERR(pinctrl);
727 goto out_iounmap; 753 goto out_mmc_free;
728 } 754 }
729 755
730 host->clk = clk_get(&pdev->dev, NULL); 756 host->clk = clk_get(&pdev->dev, NULL);
731 if (IS_ERR(host->clk)) { 757 if (IS_ERR(host->clk)) {
732 ret = PTR_ERR(host->clk); 758 ret = PTR_ERR(host->clk);
733 goto out_iounmap; 759 goto out_mmc_free;
734 } 760 }
735 clk_prepare_enable(host->clk); 761 clk_prepare_enable(host->clk);
736 762
@@ -752,11 +778,20 @@ static int mxs_mmc_probe(struct platform_device *pdev)
752 MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL; 778 MMC_CAP_SDIO_IRQ | MMC_CAP_NEEDS_POLL;
753 779
754 pdata = mmc_dev(host->mmc)->platform_data; 780 pdata = mmc_dev(host->mmc)->platform_data;
755 if (pdata) { 781 if (!pdata) {
782 u32 bus_width = 0;
783 of_property_read_u32(np, "bus-width", &bus_width);
784 if (bus_width == 4)
785 mmc->caps |= MMC_CAP_4_BIT_DATA;
786 else if (bus_width == 8)
787 mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
788 host->wp_gpio = of_get_named_gpio(np, "wp-gpios", 0);
789 } else {
756 if (pdata->flags & SLOTF_8_BIT_CAPABLE) 790 if (pdata->flags & SLOTF_8_BIT_CAPABLE)
757 mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA; 791 mmc->caps |= MMC_CAP_4_BIT_DATA | MMC_CAP_8_BIT_DATA;
758 if (pdata->flags & SLOTF_4_BIT_CAPABLE) 792 if (pdata->flags & SLOTF_4_BIT_CAPABLE)
759 mmc->caps |= MMC_CAP_4_BIT_DATA; 793 mmc->caps |= MMC_CAP_4_BIT_DATA;
794 host->wp_gpio = pdata->wp_gpio;
760 } 795 }
761 796
762 mmc->f_min = 400000; 797 mmc->f_min = 400000;
@@ -765,13 +800,14 @@ static int mxs_mmc_probe(struct platform_device *pdev)
765 800
766 mmc->max_segs = 52; 801 mmc->max_segs = 52;
767 mmc->max_blk_size = 1 << 0xf; 802 mmc->max_blk_size = 1 << 0xf;
768 mmc->max_blk_count = (ssp_is_old()) ? 0xff : 0xffffff; 803 mmc->max_blk_count = (ssp_is_old(host)) ? 0xff : 0xffffff;
769 mmc->max_req_size = (ssp_is_old()) ? 0xffff : 0xffffffff; 804 mmc->max_req_size = (ssp_is_old(host)) ? 0xffff : 0xffffffff;
770 mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev); 805 mmc->max_seg_size = dma_get_max_seg_size(host->dmach->device->dev);
771 806
772 platform_set_drvdata(pdev, mmc); 807 platform_set_drvdata(pdev, mmc);
773 808
774 ret = request_irq(host->irq, mxs_mmc_irq_handler, 0, DRIVER_NAME, host); 809 ret = devm_request_irq(&pdev->dev, irq_err, mxs_mmc_irq_handler, 0,
810 DRIVER_NAME, host);
775 if (ret) 811 if (ret)
776 goto out_free_dma; 812 goto out_free_dma;
777 813
@@ -779,26 +815,20 @@ static int mxs_mmc_probe(struct platform_device *pdev)
779 815
780 ret = mmc_add_host(mmc); 816 ret = mmc_add_host(mmc);
781 if (ret) 817 if (ret)
782 goto out_free_irq; 818 goto out_free_dma;
783 819
784 dev_info(mmc_dev(host->mmc), "initialized\n"); 820 dev_info(mmc_dev(host->mmc), "initialized\n");
785 821
786 return 0; 822 return 0;
787 823
788out_free_irq:
789 free_irq(host->irq, host);
790out_free_dma: 824out_free_dma:
791 if (host->dmach) 825 if (host->dmach)
792 dma_release_channel(host->dmach); 826 dma_release_channel(host->dmach);
793out_clk_put: 827out_clk_put:
794 clk_disable_unprepare(host->clk); 828 clk_disable_unprepare(host->clk);
795 clk_put(host->clk); 829 clk_put(host->clk);
796out_iounmap:
797 iounmap(host->base);
798out_mmc_free: 830out_mmc_free:
799 mmc_free_host(mmc); 831 mmc_free_host(mmc);
800out_release_mem:
801 release_mem_region(iores->start, resource_size(iores));
802 return ret; 832 return ret;
803} 833}
804 834
@@ -806,12 +836,9 @@ static int mxs_mmc_remove(struct platform_device *pdev)
806{ 836{
807 struct mmc_host *mmc = platform_get_drvdata(pdev); 837 struct mmc_host *mmc = platform_get_drvdata(pdev);
808 struct mxs_mmc_host *host = mmc_priv(mmc); 838 struct mxs_mmc_host *host = mmc_priv(mmc);
809 struct resource *res = host->res;
810 839
811 mmc_remove_host(mmc); 840 mmc_remove_host(mmc);
812 841
813 free_irq(host->irq, host);
814
815 platform_set_drvdata(pdev, NULL); 842 platform_set_drvdata(pdev, NULL);
816 843
817 if (host->dmach) 844 if (host->dmach)
@@ -820,12 +847,8 @@ static int mxs_mmc_remove(struct platform_device *pdev)
820 clk_disable_unprepare(host->clk); 847 clk_disable_unprepare(host->clk);
821 clk_put(host->clk); 848 clk_put(host->clk);
822 849
823 iounmap(host->base);
824
825 mmc_free_host(mmc); 850 mmc_free_host(mmc);
826 851
827 release_mem_region(res->start, resource_size(res));
828
829 return 0; 852 return 0;
830} 853}
831 854
@@ -865,11 +888,13 @@ static const struct dev_pm_ops mxs_mmc_pm_ops = {
865static struct platform_driver mxs_mmc_driver = { 888static struct platform_driver mxs_mmc_driver = {
866 .probe = mxs_mmc_probe, 889 .probe = mxs_mmc_probe,
867 .remove = mxs_mmc_remove, 890 .remove = mxs_mmc_remove,
891 .id_table = mxs_mmc_ids,
868 .driver = { 892 .driver = {
869 .name = DRIVER_NAME, 893 .name = DRIVER_NAME,
870 .owner = THIS_MODULE, 894 .owner = THIS_MODULE,
871#ifdef CONFIG_PM 895#ifdef CONFIG_PM
872 .pm = &mxs_mmc_pm_ops, 896 .pm = &mxs_mmc_pm_ops,
897 .of_match_table = mxs_mmc_dt_ids,
873#endif 898#endif
874 }, 899 },
875}; 900};
diff --git a/drivers/mmc/host/omap.c b/drivers/mmc/host/omap.c
index 887c0e598cf3..552196c764d4 100644
--- a/drivers/mmc/host/omap.c
+++ b/drivers/mmc/host/omap.c
@@ -169,11 +169,11 @@ struct mmc_omap_host {
169 struct timer_list clk_timer; 169 struct timer_list clk_timer;
170 spinlock_t clk_lock; /* for changing enabled state */ 170 spinlock_t clk_lock; /* for changing enabled state */
171 unsigned int fclk_enabled:1; 171 unsigned int fclk_enabled:1;
172 struct workqueue_struct *mmc_omap_wq;
172 173
173 struct omap_mmc_platform_data *pdata; 174 struct omap_mmc_platform_data *pdata;
174}; 175};
175 176
176static struct workqueue_struct *mmc_omap_wq;
177 177
178static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot) 178static void mmc_omap_fclk_offdelay(struct mmc_omap_slot *slot)
179{ 179{
@@ -291,7 +291,7 @@ static void mmc_omap_release_slot(struct mmc_omap_slot *slot, int clk_enabled)
291 host->next_slot = new_slot; 291 host->next_slot = new_slot;
292 host->mmc = new_slot->mmc; 292 host->mmc = new_slot->mmc;
293 spin_unlock_irqrestore(&host->slot_lock, flags); 293 spin_unlock_irqrestore(&host->slot_lock, flags);
294 queue_work(mmc_omap_wq, &host->slot_release_work); 294 queue_work(host->mmc_omap_wq, &host->slot_release_work);
295 return; 295 return;
296 } 296 }
297 297
@@ -459,7 +459,7 @@ mmc_omap_xfer_done(struct mmc_omap_host *host, struct mmc_data *data)
459 } 459 }
460 460
461 host->stop_data = data; 461 host->stop_data = data;
462 queue_work(mmc_omap_wq, &host->send_stop_work); 462 queue_work(host->mmc_omap_wq, &host->send_stop_work);
463} 463}
464 464
465static void 465static void
@@ -639,7 +639,7 @@ mmc_omap_cmd_timer(unsigned long data)
639 OMAP_MMC_WRITE(host, IE, 0); 639 OMAP_MMC_WRITE(host, IE, 0);
640 disable_irq(host->irq); 640 disable_irq(host->irq);
641 host->abort = 1; 641 host->abort = 1;
642 queue_work(mmc_omap_wq, &host->cmd_abort_work); 642 queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
643 } 643 }
644 spin_unlock_irqrestore(&host->slot_lock, flags); 644 spin_unlock_irqrestore(&host->slot_lock, flags);
645} 645}
@@ -828,7 +828,7 @@ static irqreturn_t mmc_omap_irq(int irq, void *dev_id)
828 host->abort = 1; 828 host->abort = 1;
829 OMAP_MMC_WRITE(host, IE, 0); 829 OMAP_MMC_WRITE(host, IE, 0);
830 disable_irq_nosync(host->irq); 830 disable_irq_nosync(host->irq);
831 queue_work(mmc_omap_wq, &host->cmd_abort_work); 831 queue_work(host->mmc_omap_wq, &host->cmd_abort_work);
832 return IRQ_HANDLED; 832 return IRQ_HANDLED;
833 } 833 }
834 834
@@ -1389,13 +1389,13 @@ static void mmc_omap_remove_slot(struct mmc_omap_slot *slot)
1389 1389
1390 tasklet_kill(&slot->cover_tasklet); 1390 tasklet_kill(&slot->cover_tasklet);
1391 del_timer_sync(&slot->cover_timer); 1391 del_timer_sync(&slot->cover_timer);
1392 flush_workqueue(mmc_omap_wq); 1392 flush_workqueue(slot->host->mmc_omap_wq);
1393 1393
1394 mmc_remove_host(mmc); 1394 mmc_remove_host(mmc);
1395 mmc_free_host(mmc); 1395 mmc_free_host(mmc);
1396} 1396}
1397 1397
1398static int __init mmc_omap_probe(struct platform_device *pdev) 1398static int __devinit mmc_omap_probe(struct platform_device *pdev)
1399{ 1399{
1400 struct omap_mmc_platform_data *pdata = pdev->dev.platform_data; 1400 struct omap_mmc_platform_data *pdata = pdev->dev.platform_data;
1401 struct mmc_omap_host *host = NULL; 1401 struct mmc_omap_host *host = NULL;
@@ -1497,6 +1497,10 @@ static int __init mmc_omap_probe(struct platform_device *pdev)
1497 1497
1498 host->reg_shift = (cpu_is_omap7xx() ? 1 : 2); 1498 host->reg_shift = (cpu_is_omap7xx() ? 1 : 2);
1499 1499
1500 host->mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
1501 if (!host->mmc_omap_wq)
1502 goto err_plat_cleanup;
1503
1500 return 0; 1504 return 0;
1501 1505
1502err_plat_cleanup: 1506err_plat_cleanup:
@@ -1518,7 +1522,7 @@ err_free_mem_region:
1518 return ret; 1522 return ret;
1519} 1523}
1520 1524
1521static int mmc_omap_remove(struct platform_device *pdev) 1525static int __devexit mmc_omap_remove(struct platform_device *pdev)
1522{ 1526{
1523 struct mmc_omap_host *host = platform_get_drvdata(pdev); 1527 struct mmc_omap_host *host = platform_get_drvdata(pdev);
1524 int i; 1528 int i;
@@ -1542,6 +1546,7 @@ static int mmc_omap_remove(struct platform_device *pdev)
1542 iounmap(host->virt_base); 1546 iounmap(host->virt_base);
1543 release_mem_region(pdev->resource[0].start, 1547 release_mem_region(pdev->resource[0].start,
1544 pdev->resource[0].end - pdev->resource[0].start + 1); 1548 pdev->resource[0].end - pdev->resource[0].start + 1);
1549 destroy_workqueue(host->mmc_omap_wq);
1545 1550
1546 kfree(host); 1551 kfree(host);
1547 1552
@@ -1599,7 +1604,8 @@ static int mmc_omap_resume(struct platform_device *pdev)
1599#endif 1604#endif
1600 1605
1601static struct platform_driver mmc_omap_driver = { 1606static struct platform_driver mmc_omap_driver = {
1602 .remove = mmc_omap_remove, 1607 .probe = mmc_omap_probe,
1608 .remove = __devexit_p(mmc_omap_remove),
1603 .suspend = mmc_omap_suspend, 1609 .suspend = mmc_omap_suspend,
1604 .resume = mmc_omap_resume, 1610 .resume = mmc_omap_resume,
1605 .driver = { 1611 .driver = {
@@ -1608,29 +1614,7 @@ static struct platform_driver mmc_omap_driver = {
1608 }, 1614 },
1609}; 1615};
1610 1616
1611static int __init mmc_omap_init(void) 1617module_platform_driver(mmc_omap_driver);
1612{
1613 int ret;
1614
1615 mmc_omap_wq = alloc_workqueue("mmc_omap", 0, 0);
1616 if (!mmc_omap_wq)
1617 return -ENOMEM;
1618
1619 ret = platform_driver_probe(&mmc_omap_driver, mmc_omap_probe);
1620 if (ret)
1621 destroy_workqueue(mmc_omap_wq);
1622 return ret;
1623}
1624
1625static void __exit mmc_omap_exit(void)
1626{
1627 platform_driver_unregister(&mmc_omap_driver);
1628 destroy_workqueue(mmc_omap_wq);
1629}
1630
1631module_init(mmc_omap_init);
1632module_exit(mmc_omap_exit);
1633
1634MODULE_DESCRIPTION("OMAP Multimedia Card driver"); 1618MODULE_DESCRIPTION("OMAP Multimedia Card driver");
1635MODULE_LICENSE("GPL"); 1619MODULE_LICENSE("GPL");
1636MODULE_ALIAS("platform:" DRIVER_NAME); 1620MODULE_ALIAS("platform:" DRIVER_NAME);
diff --git a/drivers/mmc/host/omap_hsmmc.c b/drivers/mmc/host/omap_hsmmc.c
index 56d4499d4388..9a7a60aeb19e 100644
--- a/drivers/mmc/host/omap_hsmmc.c
+++ b/drivers/mmc/host/omap_hsmmc.c
@@ -85,12 +85,14 @@
85#define BRR_ENABLE (1 << 5) 85#define BRR_ENABLE (1 << 5)
86#define DTO_ENABLE (1 << 20) 86#define DTO_ENABLE (1 << 20)
87#define INIT_STREAM (1 << 1) 87#define INIT_STREAM (1 << 1)
88#define ACEN_ACMD12 (1 << 2)
88#define DP_SELECT (1 << 21) 89#define DP_SELECT (1 << 21)
89#define DDIR (1 << 4) 90#define DDIR (1 << 4)
90#define DMA_EN 0x1 91#define DMA_EN 0x1
91#define MSBS (1 << 5) 92#define MSBS (1 << 5)
92#define BCE (1 << 1) 93#define BCE (1 << 1)
93#define FOUR_BIT (1 << 1) 94#define FOUR_BIT (1 << 1)
95#define DDR (1 << 19)
94#define DW8 (1 << 5) 96#define DW8 (1 << 5)
95#define CC 0x1 97#define CC 0x1
96#define TC 0x02 98#define TC 0x02
@@ -115,6 +117,7 @@
115#define OMAP_MMC_MAX_CLOCK 52000000 117#define OMAP_MMC_MAX_CLOCK 52000000
116#define DRIVER_NAME "omap_hsmmc" 118#define DRIVER_NAME "omap_hsmmc"
117 119
120#define AUTO_CMD12 (1 << 0) /* Auto CMD12 support */
118/* 121/*
119 * One controller can have multiple slots, like on some omap boards using 122 * One controller can have multiple slots, like on some omap boards using
120 * omap.c controller driver. Luckily this is not currently done on any known 123 * omap.c controller driver. Luckily this is not currently done on any known
@@ -167,7 +170,6 @@ struct omap_hsmmc_host {
167 int use_dma, dma_ch; 170 int use_dma, dma_ch;
168 int dma_line_tx, dma_line_rx; 171 int dma_line_tx, dma_line_rx;
169 int slot_id; 172 int slot_id;
170 int got_dbclk;
171 int response_busy; 173 int response_busy;
172 int context_loss; 174 int context_loss;
173 int vdd; 175 int vdd;
@@ -175,6 +177,7 @@ struct omap_hsmmc_host {
175 int reqs_blocked; 177 int reqs_blocked;
176 int use_reg; 178 int use_reg;
177 int req_in_progress; 179 int req_in_progress;
180 unsigned int flags;
178 struct omap_hsmmc_next next_data; 181 struct omap_hsmmc_next next_data;
179 182
180 struct omap_mmc_platform_data *pdata; 183 struct omap_mmc_platform_data *pdata;
@@ -520,6 +523,10 @@ static void omap_hsmmc_set_bus_width(struct omap_hsmmc_host *host)
520 u32 con; 523 u32 con;
521 524
522 con = OMAP_HSMMC_READ(host->base, CON); 525 con = OMAP_HSMMC_READ(host->base, CON);
526 if (ios->timing == MMC_TIMING_UHS_DDR50)
527 con |= DDR; /* configure in DDR mode */
528 else
529 con &= ~DDR;
523 switch (ios->bus_width) { 530 switch (ios->bus_width) {
524 case MMC_BUS_WIDTH_8: 531 case MMC_BUS_WIDTH_8:
525 OMAP_HSMMC_WRITE(host->base, CON, con | DW8); 532 OMAP_HSMMC_WRITE(host->base, CON, con | DW8);
@@ -766,6 +773,8 @@ omap_hsmmc_start_command(struct omap_hsmmc_host *host, struct mmc_command *cmd,
766 cmdtype = 0x3; 773 cmdtype = 0x3;
767 774
768 cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22); 775 cmdreg = (cmd->opcode << 24) | (resptype << 16) | (cmdtype << 22);
776 if ((host->flags & AUTO_CMD12) && mmc_op_multi(cmd->opcode))
777 cmdreg |= ACEN_ACMD12;
769 778
770 if (data) { 779 if (data) {
771 cmdreg |= DP_SELECT | MSBS | BCE; 780 cmdreg |= DP_SELECT | MSBS | BCE;
@@ -796,11 +805,12 @@ omap_hsmmc_get_dma_dir(struct omap_hsmmc_host *host, struct mmc_data *data)
796static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq) 805static void omap_hsmmc_request_done(struct omap_hsmmc_host *host, struct mmc_request *mrq)
797{ 806{
798 int dma_ch; 807 int dma_ch;
808 unsigned long flags;
799 809
800 spin_lock(&host->irq_lock); 810 spin_lock_irqsave(&host->irq_lock, flags);
801 host->req_in_progress = 0; 811 host->req_in_progress = 0;
802 dma_ch = host->dma_ch; 812 dma_ch = host->dma_ch;
803 spin_unlock(&host->irq_lock); 813 spin_unlock_irqrestore(&host->irq_lock, flags);
804 814
805 omap_hsmmc_disable_irq(host); 815 omap_hsmmc_disable_irq(host);
806 /* Do not complete the request if DMA is still in progress */ 816 /* Do not complete the request if DMA is still in progress */
@@ -837,11 +847,14 @@ omap_hsmmc_xfer_done(struct omap_hsmmc_host *host, struct mmc_data *data)
837 else 847 else
838 data->bytes_xfered = 0; 848 data->bytes_xfered = 0;
839 849
840 if (!data->stop) { 850 if (data->stop && ((!(host->flags & AUTO_CMD12)) || data->error)) {
851 omap_hsmmc_start_command(host, data->stop, NULL);
852 } else {
853 if (data->stop)
854 data->stop->resp[0] = OMAP_HSMMC_READ(host->base,
855 RSP76);
841 omap_hsmmc_request_done(host, data->mrq); 856 omap_hsmmc_request_done(host, data->mrq);
842 return;
843 } 857 }
844 omap_hsmmc_start_command(host, data->stop, NULL);
845} 858}
846 859
847/* 860/*
@@ -874,13 +887,14 @@ omap_hsmmc_cmd_done(struct omap_hsmmc_host *host, struct mmc_command *cmd)
874static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno) 887static void omap_hsmmc_dma_cleanup(struct omap_hsmmc_host *host, int errno)
875{ 888{
876 int dma_ch; 889 int dma_ch;
890 unsigned long flags;
877 891
878 host->data->error = errno; 892 host->data->error = errno;
879 893
880 spin_lock(&host->irq_lock); 894 spin_lock_irqsave(&host->irq_lock, flags);
881 dma_ch = host->dma_ch; 895 dma_ch = host->dma_ch;
882 host->dma_ch = -1; 896 host->dma_ch = -1;
883 spin_unlock(&host->irq_lock); 897 spin_unlock_irqrestore(&host->irq_lock, flags);
884 898
885 if (host->use_dma && dma_ch != -1) { 899 if (host->use_dma && dma_ch != -1) {
886 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg, 900 dma_unmap_sg(mmc_dev(host->mmc), host->data->sg,
@@ -1082,7 +1096,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
1082 1096
1083 /* Disable the clocks */ 1097 /* Disable the clocks */
1084 pm_runtime_put_sync(host->dev); 1098 pm_runtime_put_sync(host->dev);
1085 if (host->got_dbclk) 1099 if (host->dbclk)
1086 clk_disable(host->dbclk); 1100 clk_disable(host->dbclk);
1087 1101
1088 /* Turn the power off */ 1102 /* Turn the power off */
@@ -1093,7 +1107,7 @@ static int omap_hsmmc_switch_opcond(struct omap_hsmmc_host *host, int vdd)
1093 ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1, 1107 ret = mmc_slot(host).set_power(host->dev, host->slot_id, 1,
1094 vdd); 1108 vdd);
1095 pm_runtime_get_sync(host->dev); 1109 pm_runtime_get_sync(host->dev);
1096 if (host->got_dbclk) 1110 if (host->dbclk)
1097 clk_enable(host->dbclk); 1111 clk_enable(host->dbclk);
1098 1112
1099 if (ret != 0) 1113 if (ret != 0)
@@ -1234,6 +1248,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1234 struct omap_hsmmc_host *host = cb_data; 1248 struct omap_hsmmc_host *host = cb_data;
1235 struct mmc_data *data; 1249 struct mmc_data *data;
1236 int dma_ch, req_in_progress; 1250 int dma_ch, req_in_progress;
1251 unsigned long flags;
1237 1252
1238 if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) { 1253 if (!(ch_status & OMAP_DMA_BLOCK_IRQ)) {
1239 dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n", 1254 dev_warn(mmc_dev(host->mmc), "unexpected dma status %x\n",
@@ -1241,9 +1256,9 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1241 return; 1256 return;
1242 } 1257 }
1243 1258
1244 spin_lock(&host->irq_lock); 1259 spin_lock_irqsave(&host->irq_lock, flags);
1245 if (host->dma_ch < 0) { 1260 if (host->dma_ch < 0) {
1246 spin_unlock(&host->irq_lock); 1261 spin_unlock_irqrestore(&host->irq_lock, flags);
1247 return; 1262 return;
1248 } 1263 }
1249 1264
@@ -1253,7 +1268,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1253 /* Fire up the next transfer. */ 1268 /* Fire up the next transfer. */
1254 omap_hsmmc_config_dma_params(host, data, 1269 omap_hsmmc_config_dma_params(host, data,
1255 data->sg + host->dma_sg_idx); 1270 data->sg + host->dma_sg_idx);
1256 spin_unlock(&host->irq_lock); 1271 spin_unlock_irqrestore(&host->irq_lock, flags);
1257 return; 1272 return;
1258 } 1273 }
1259 1274
@@ -1264,7 +1279,7 @@ static void omap_hsmmc_dma_cb(int lch, u16 ch_status, void *cb_data)
1264 req_in_progress = host->req_in_progress; 1279 req_in_progress = host->req_in_progress;
1265 dma_ch = host->dma_ch; 1280 dma_ch = host->dma_ch;
1266 host->dma_ch = -1; 1281 host->dma_ch = -1;
1267 spin_unlock(&host->irq_lock); 1282 spin_unlock_irqrestore(&host->irq_lock, flags);
1268 1283
1269 omap_free_dma(dma_ch); 1284 omap_free_dma(dma_ch);
1270 1285
@@ -1766,7 +1781,7 @@ static struct omap_mmc_platform_data *of_get_hsmmc_pdata(struct device *dev)
1766 pdata->slots[0].nonremovable = true; 1781 pdata->slots[0].nonremovable = true;
1767 pdata->slots[0].no_regulator_off_init = true; 1782 pdata->slots[0].no_regulator_off_init = true;
1768 } 1783 }
1769 of_property_read_u32(np, "ti,bus-width", &bus_width); 1784 of_property_read_u32(np, "bus-width", &bus_width);
1770 if (bus_width == 4) 1785 if (bus_width == 4)
1771 pdata->slots[0].caps |= MMC_CAP_4_BIT_DATA; 1786 pdata->slots[0].caps |= MMC_CAP_4_BIT_DATA;
1772 else if (bus_width == 8) 1787 else if (bus_width == 8)
@@ -1844,6 +1859,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
1844 host->mapbase = res->start + pdata->reg_offset; 1859 host->mapbase = res->start + pdata->reg_offset;
1845 host->base = ioremap(host->mapbase, SZ_4K); 1860 host->base = ioremap(host->mapbase, SZ_4K);
1846 host->power_mode = MMC_POWER_OFF; 1861 host->power_mode = MMC_POWER_OFF;
1862 host->flags = AUTO_CMD12;
1847 host->next_data.cookie = 1; 1863 host->next_data.cookie = 1;
1848 1864
1849 platform_set_drvdata(pdev, host); 1865 platform_set_drvdata(pdev, host);
@@ -1885,21 +1901,17 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
1885 1901
1886 omap_hsmmc_context_save(host); 1902 omap_hsmmc_context_save(host);
1887 1903
1888 if (cpu_is_omap2430()) { 1904 host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck");
1889 host->dbclk = clk_get(&pdev->dev, "mmchsdb_fck"); 1905 /*
1890 /* 1906 * MMC can still work without debounce clock.
1891 * MMC can still work without debounce clock. 1907 */
1892 */ 1908 if (IS_ERR(host->dbclk)) {
1893 if (IS_ERR(host->dbclk)) 1909 dev_warn(mmc_dev(host->mmc), "Failed to get debounce clk\n");
1894 dev_warn(mmc_dev(host->mmc), 1910 host->dbclk = NULL;
1895 "Failed to get debounce clock\n"); 1911 } else if (clk_enable(host->dbclk) != 0) {
1896 else 1912 dev_warn(mmc_dev(host->mmc), "Failed to enable debounce clk\n");
1897 host->got_dbclk = 1; 1913 clk_put(host->dbclk);
1898 1914 host->dbclk = NULL;
1899 if (host->got_dbclk)
1900 if (clk_enable(host->dbclk) != 0)
1901 dev_dbg(mmc_dev(host->mmc), "Enabling debounce"
1902 " clk failed\n");
1903 } 1915 }
1904 1916
1905 /* Since we do only SG emulation, we can have as many segs 1917 /* Since we do only SG emulation, we can have as many segs
@@ -1969,7 +1981,7 @@ static int __devinit omap_hsmmc_probe(struct platform_device *pdev)
1969 ret = request_threaded_irq(mmc_slot(host).card_detect_irq, 1981 ret = request_threaded_irq(mmc_slot(host).card_detect_irq,
1970 NULL, 1982 NULL,
1971 omap_hsmmc_detect, 1983 omap_hsmmc_detect,
1972 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING, 1984 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_ONESHOT,
1973 mmc_hostname(mmc), host); 1985 mmc_hostname(mmc), host);
1974 if (ret) { 1986 if (ret) {
1975 dev_dbg(mmc_dev(host->mmc), 1987 dev_dbg(mmc_dev(host->mmc),
@@ -2019,7 +2031,7 @@ err_irq:
2019 pm_runtime_put_sync(host->dev); 2031 pm_runtime_put_sync(host->dev);
2020 pm_runtime_disable(host->dev); 2032 pm_runtime_disable(host->dev);
2021 clk_put(host->fclk); 2033 clk_put(host->fclk);
2022 if (host->got_dbclk) { 2034 if (host->dbclk) {
2023 clk_disable(host->dbclk); 2035 clk_disable(host->dbclk);
2024 clk_put(host->dbclk); 2036 clk_put(host->dbclk);
2025 } 2037 }
@@ -2030,7 +2042,9 @@ err1:
2030err_alloc: 2042err_alloc:
2031 omap_hsmmc_gpio_free(pdata); 2043 omap_hsmmc_gpio_free(pdata);
2032err: 2044err:
2033 release_mem_region(res->start, resource_size(res)); 2045 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
2046 if (res)
2047 release_mem_region(res->start, resource_size(res));
2034 return ret; 2048 return ret;
2035} 2049}
2036 2050
@@ -2052,7 +2066,7 @@ static int __devexit omap_hsmmc_remove(struct platform_device *pdev)
2052 pm_runtime_put_sync(host->dev); 2066 pm_runtime_put_sync(host->dev);
2053 pm_runtime_disable(host->dev); 2067 pm_runtime_disable(host->dev);
2054 clk_put(host->fclk); 2068 clk_put(host->fclk);
2055 if (host->got_dbclk) { 2069 if (host->dbclk) {
2056 clk_disable(host->dbclk); 2070 clk_disable(host->dbclk);
2057 clk_put(host->dbclk); 2071 clk_put(host->dbclk);
2058 } 2072 }
@@ -2110,7 +2124,7 @@ static int omap_hsmmc_suspend(struct device *dev)
2110 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP); 2124 OMAP_HSMMC_READ(host->base, HCTL) & ~SDBP);
2111 } 2125 }
2112 2126
2113 if (host->got_dbclk) 2127 if (host->dbclk)
2114 clk_disable(host->dbclk); 2128 clk_disable(host->dbclk);
2115err: 2129err:
2116 pm_runtime_put_sync(host->dev); 2130 pm_runtime_put_sync(host->dev);
@@ -2131,7 +2145,7 @@ static int omap_hsmmc_resume(struct device *dev)
2131 2145
2132 pm_runtime_get_sync(host->dev); 2146 pm_runtime_get_sync(host->dev);
2133 2147
2134 if (host->got_dbclk) 2148 if (host->dbclk)
2135 clk_enable(host->dbclk); 2149 clk_enable(host->dbclk);
2136 2150
2137 if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER)) 2151 if (!(host->mmc->pm_flags & MMC_PM_KEEP_POWER))
diff --git a/drivers/mmc/host/sdhci-esdhc-imx.c b/drivers/mmc/host/sdhci-esdhc-imx.c
index d190d04636a7..ebbe984e5d00 100644
--- a/drivers/mmc/host/sdhci-esdhc-imx.c
+++ b/drivers/mmc/host/sdhci-esdhc-imx.c
@@ -71,6 +71,9 @@ struct pltfm_imx_data {
71 enum imx_esdhc_type devtype; 71 enum imx_esdhc_type devtype;
72 struct pinctrl *pinctrl; 72 struct pinctrl *pinctrl;
73 struct esdhc_platform_data boarddata; 73 struct esdhc_platform_data boarddata;
74 struct clk *clk_ipg;
75 struct clk *clk_ahb;
76 struct clk *clk_per;
74}; 77};
75 78
76static struct platform_device_id imx_esdhc_devtype[] = { 79static struct platform_device_id imx_esdhc_devtype[] = {
@@ -404,7 +407,7 @@ sdhci_esdhc_imx_probe_dt(struct platform_device *pdev,
404 if (!np) 407 if (!np)
405 return -ENODEV; 408 return -ENODEV;
406 409
407 if (of_get_property(np, "fsl,card-wired", NULL)) 410 if (of_get_property(np, "non-removable", NULL))
408 boarddata->cd_type = ESDHC_CD_PERMANENT; 411 boarddata->cd_type = ESDHC_CD_PERMANENT;
409 412
410 if (of_get_property(np, "fsl,cd-controller", NULL)) 413 if (of_get_property(np, "fsl,cd-controller", NULL))
@@ -439,7 +442,6 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
439 struct sdhci_pltfm_host *pltfm_host; 442 struct sdhci_pltfm_host *pltfm_host;
440 struct sdhci_host *host; 443 struct sdhci_host *host;
441 struct esdhc_platform_data *boarddata; 444 struct esdhc_platform_data *boarddata;
442 struct clk *clk;
443 int err; 445 int err;
444 struct pltfm_imx_data *imx_data; 446 struct pltfm_imx_data *imx_data;
445 447
@@ -460,14 +462,29 @@ static int __devinit sdhci_esdhc_imx_probe(struct platform_device *pdev)
460 imx_data->devtype = pdev->id_entry->driver_data; 462 imx_data->devtype = pdev->id_entry->driver_data;
461 pltfm_host->priv = imx_data; 463 pltfm_host->priv = imx_data;
462 464
463 clk = clk_get(mmc_dev(host->mmc), NULL); 465 imx_data->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
464 if (IS_ERR(clk)) { 466 if (IS_ERR(imx_data->clk_ipg)) {
465 dev_err(mmc_dev(host->mmc), "clk err\n"); 467 err = PTR_ERR(imx_data->clk_ipg);
466 err = PTR_ERR(clk);
467 goto err_clk_get; 468 goto err_clk_get;
468 } 469 }
469 clk_prepare_enable(clk); 470
470 pltfm_host->clk = clk; 471 imx_data->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
472 if (IS_ERR(imx_data->clk_ahb)) {
473 err = PTR_ERR(imx_data->clk_ahb);
474 goto err_clk_get;
475 }
476
477 imx_data->clk_per = devm_clk_get(&pdev->dev, "per");
478 if (IS_ERR(imx_data->clk_per)) {
479 err = PTR_ERR(imx_data->clk_per);
480 goto err_clk_get;
481 }
482
483 pltfm_host->clk = imx_data->clk_per;
484
485 clk_prepare_enable(imx_data->clk_per);
486 clk_prepare_enable(imx_data->clk_ipg);
487 clk_prepare_enable(imx_data->clk_ahb);
471 488
472 imx_data->pinctrl = devm_pinctrl_get_select_default(&pdev->dev); 489 imx_data->pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
473 if (IS_ERR(imx_data->pinctrl)) { 490 if (IS_ERR(imx_data->pinctrl)) {
@@ -567,8 +584,9 @@ no_card_detect_irq:
567no_card_detect_pin: 584no_card_detect_pin:
568no_board_data: 585no_board_data:
569pin_err: 586pin_err:
570 clk_disable_unprepare(pltfm_host->clk); 587 clk_disable_unprepare(imx_data->clk_per);
571 clk_put(pltfm_host->clk); 588 clk_disable_unprepare(imx_data->clk_ipg);
589 clk_disable_unprepare(imx_data->clk_ahb);
572err_clk_get: 590err_clk_get:
573 kfree(imx_data); 591 kfree(imx_data);
574err_imx_data: 592err_imx_data:
@@ -594,8 +612,10 @@ static int __devexit sdhci_esdhc_imx_remove(struct platform_device *pdev)
594 gpio_free(boarddata->cd_gpio); 612 gpio_free(boarddata->cd_gpio);
595 } 613 }
596 614
597 clk_disable_unprepare(pltfm_host->clk); 615 clk_disable_unprepare(imx_data->clk_per);
598 clk_put(pltfm_host->clk); 616 clk_disable_unprepare(imx_data->clk_ipg);
617 clk_disable_unprepare(imx_data->clk_ahb);
618
599 kfree(imx_data); 619 kfree(imx_data);
600 620
601 sdhci_pltfm_free(pdev); 621 sdhci_pltfm_free(pdev);
diff --git a/drivers/mmc/host/sdhci-pltfm.c b/drivers/mmc/host/sdhci-pltfm.c
index c5c2a48bdd94..d9a4ef4f1ed0 100644
--- a/drivers/mmc/host/sdhci-pltfm.c
+++ b/drivers/mmc/host/sdhci-pltfm.c
@@ -42,7 +42,8 @@ static struct sdhci_ops sdhci_pltfm_ops = {
42#ifdef CONFIG_OF 42#ifdef CONFIG_OF
43static bool sdhci_of_wp_inverted(struct device_node *np) 43static bool sdhci_of_wp_inverted(struct device_node *np)
44{ 44{
45 if (of_get_property(np, "sdhci,wp-inverted", NULL)) 45 if (of_get_property(np, "sdhci,wp-inverted", NULL) ||
46 of_get_property(np, "wp-inverted", NULL))
46 return true; 47 return true;
47 48
48 /* Old device trees don't have the wp-inverted property. */ 49 /* Old device trees don't have the wp-inverted property. */
@@ -59,13 +60,16 @@ void sdhci_get_of_property(struct platform_device *pdev)
59 struct sdhci_host *host = platform_get_drvdata(pdev); 60 struct sdhci_host *host = platform_get_drvdata(pdev);
60 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 61 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
61 const __be32 *clk; 62 const __be32 *clk;
63 u32 bus_width;
62 int size; 64 int size;
63 65
64 if (of_device_is_available(np)) { 66 if (of_device_is_available(np)) {
65 if (of_get_property(np, "sdhci,auto-cmd12", NULL)) 67 if (of_get_property(np, "sdhci,auto-cmd12", NULL))
66 host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; 68 host->quirks |= SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12;
67 69
68 if (of_get_property(np, "sdhci,1-bit-only", NULL)) 70 if (of_get_property(np, "sdhci,1-bit-only", NULL) ||
71 (of_property_read_u32(np, "bus-width", &bus_width) == 0 &&
72 bus_width == 1))
69 host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA; 73 host->quirks |= SDHCI_QUIRK_FORCE_1_BIT_DATA;
70 74
71 if (sdhci_of_wp_inverted(np)) 75 if (sdhci_of_wp_inverted(np))
diff --git a/drivers/mmc/host/sdhci-spear.c b/drivers/mmc/host/sdhci-spear.c
index 6dfa82e03c7e..1fe32dfa7cd4 100644
--- a/drivers/mmc/host/sdhci-spear.c
+++ b/drivers/mmc/host/sdhci-spear.c
@@ -75,8 +75,6 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
75 struct spear_sdhci *sdhci; 75 struct spear_sdhci *sdhci;
76 int ret; 76 int ret;
77 77
78 BUG_ON(pdev == NULL);
79
80 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 78 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
81 if (!iomem) { 79 if (!iomem) {
82 ret = -ENOMEM; 80 ret = -ENOMEM;
@@ -84,18 +82,18 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
84 goto err; 82 goto err;
85 } 83 }
86 84
87 if (!request_mem_region(iomem->start, resource_size(iomem), 85 if (!devm_request_mem_region(&pdev->dev, iomem->start,
88 "spear-sdhci")) { 86 resource_size(iomem), "spear-sdhci")) {
89 ret = -EBUSY; 87 ret = -EBUSY;
90 dev_dbg(&pdev->dev, "cannot request region\n"); 88 dev_dbg(&pdev->dev, "cannot request region\n");
91 goto err; 89 goto err;
92 } 90 }
93 91
94 sdhci = kzalloc(sizeof(*sdhci), GFP_KERNEL); 92 sdhci = devm_kzalloc(&pdev->dev, sizeof(*sdhci), GFP_KERNEL);
95 if (!sdhci) { 93 if (!sdhci) {
96 ret = -ENOMEM; 94 ret = -ENOMEM;
97 dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n"); 95 dev_dbg(&pdev->dev, "cannot allocate memory for sdhci\n");
98 goto err_kzalloc; 96 goto err;
99 } 97 }
100 98
101 /* clk enable */ 99 /* clk enable */
@@ -103,13 +101,13 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
103 if (IS_ERR(sdhci->clk)) { 101 if (IS_ERR(sdhci->clk)) {
104 ret = PTR_ERR(sdhci->clk); 102 ret = PTR_ERR(sdhci->clk);
105 dev_dbg(&pdev->dev, "Error getting clock\n"); 103 dev_dbg(&pdev->dev, "Error getting clock\n");
106 goto err_clk_get; 104 goto err;
107 } 105 }
108 106
109 ret = clk_enable(sdhci->clk); 107 ret = clk_enable(sdhci->clk);
110 if (ret) { 108 if (ret) {
111 dev_dbg(&pdev->dev, "Error enabling clock\n"); 109 dev_dbg(&pdev->dev, "Error enabling clock\n");
112 goto err_clk_enb; 110 goto put_clk;
113 } 111 }
114 112
115 /* overwrite platform_data */ 113 /* overwrite platform_data */
@@ -124,7 +122,7 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
124 if (IS_ERR(host)) { 122 if (IS_ERR(host)) {
125 ret = PTR_ERR(host); 123 ret = PTR_ERR(host);
126 dev_dbg(&pdev->dev, "error allocating host\n"); 124 dev_dbg(&pdev->dev, "error allocating host\n");
127 goto err_alloc_host; 125 goto disable_clk;
128 } 126 }
129 127
130 host->hw_name = "sdhci"; 128 host->hw_name = "sdhci";
@@ -132,17 +130,18 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
132 host->irq = platform_get_irq(pdev, 0); 130 host->irq = platform_get_irq(pdev, 0);
133 host->quirks = SDHCI_QUIRK_BROKEN_ADMA; 131 host->quirks = SDHCI_QUIRK_BROKEN_ADMA;
134 132
135 host->ioaddr = ioremap(iomem->start, resource_size(iomem)); 133 host->ioaddr = devm_ioremap(&pdev->dev, iomem->start,
134 resource_size(iomem));
136 if (!host->ioaddr) { 135 if (!host->ioaddr) {
137 ret = -ENOMEM; 136 ret = -ENOMEM;
138 dev_dbg(&pdev->dev, "failed to remap registers\n"); 137 dev_dbg(&pdev->dev, "failed to remap registers\n");
139 goto err_ioremap; 138 goto free_host;
140 } 139 }
141 140
142 ret = sdhci_add_host(host); 141 ret = sdhci_add_host(host);
143 if (ret) { 142 if (ret) {
144 dev_dbg(&pdev->dev, "error adding host\n"); 143 dev_dbg(&pdev->dev, "error adding host\n");
145 goto err_add_host; 144 goto free_host;
146 } 145 }
147 146
148 platform_set_drvdata(pdev, host); 147 platform_set_drvdata(pdev, host);
@@ -161,11 +160,12 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
161 if (sdhci->data->card_power_gpio >= 0) { 160 if (sdhci->data->card_power_gpio >= 0) {
162 int val = 0; 161 int val = 0;
163 162
164 ret = gpio_request(sdhci->data->card_power_gpio, "sdhci"); 163 ret = devm_gpio_request(&pdev->dev,
164 sdhci->data->card_power_gpio, "sdhci");
165 if (ret < 0) { 165 if (ret < 0) {
166 dev_dbg(&pdev->dev, "gpio request fail: %d\n", 166 dev_dbg(&pdev->dev, "gpio request fail: %d\n",
167 sdhci->data->card_power_gpio); 167 sdhci->data->card_power_gpio);
168 goto err_pgpio_request; 168 goto set_drvdata;
169 } 169 }
170 170
171 if (sdhci->data->power_always_enb) 171 if (sdhci->data->power_always_enb)
@@ -177,60 +177,48 @@ static int __devinit sdhci_probe(struct platform_device *pdev)
177 if (ret) { 177 if (ret) {
178 dev_dbg(&pdev->dev, "gpio set direction fail: %d\n", 178 dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
179 sdhci->data->card_power_gpio); 179 sdhci->data->card_power_gpio);
180 goto err_pgpio_direction; 180 goto set_drvdata;
181 } 181 }
182 } 182 }
183 183
184 if (sdhci->data->card_int_gpio >= 0) { 184 if (sdhci->data->card_int_gpio >= 0) {
185 ret = gpio_request(sdhci->data->card_int_gpio, "sdhci"); 185 ret = devm_gpio_request(&pdev->dev, sdhci->data->card_int_gpio,
186 "sdhci");
186 if (ret < 0) { 187 if (ret < 0) {
187 dev_dbg(&pdev->dev, "gpio request fail: %d\n", 188 dev_dbg(&pdev->dev, "gpio request fail: %d\n",
188 sdhci->data->card_int_gpio); 189 sdhci->data->card_int_gpio);
189 goto err_igpio_request; 190 goto set_drvdata;
190 } 191 }
191 192
192 ret = gpio_direction_input(sdhci->data->card_int_gpio); 193 ret = gpio_direction_input(sdhci->data->card_int_gpio);
193 if (ret) { 194 if (ret) {
194 dev_dbg(&pdev->dev, "gpio set direction fail: %d\n", 195 dev_dbg(&pdev->dev, "gpio set direction fail: %d\n",
195 sdhci->data->card_int_gpio); 196 sdhci->data->card_int_gpio);
196 goto err_igpio_direction; 197 goto set_drvdata;
197 } 198 }
198 ret = request_irq(gpio_to_irq(sdhci->data->card_int_gpio), 199 ret = devm_request_irq(&pdev->dev,
200 gpio_to_irq(sdhci->data->card_int_gpio),
199 sdhci_gpio_irq, IRQF_TRIGGER_LOW, 201 sdhci_gpio_irq, IRQF_TRIGGER_LOW,
200 mmc_hostname(host->mmc), pdev); 202 mmc_hostname(host->mmc), pdev);
201 if (ret) { 203 if (ret) {
202 dev_dbg(&pdev->dev, "gpio request irq fail: %d\n", 204 dev_dbg(&pdev->dev, "gpio request irq fail: %d\n",
203 sdhci->data->card_int_gpio); 205 sdhci->data->card_int_gpio);
204 goto err_igpio_request_irq; 206 goto set_drvdata;
205 } 207 }
206 208
207 } 209 }
208 210
209 return 0; 211 return 0;
210 212
211err_igpio_request_irq: 213set_drvdata:
212err_igpio_direction:
213 if (sdhci->data->card_int_gpio >= 0)
214 gpio_free(sdhci->data->card_int_gpio);
215err_igpio_request:
216err_pgpio_direction:
217 if (sdhci->data->card_power_gpio >= 0)
218 gpio_free(sdhci->data->card_power_gpio);
219err_pgpio_request:
220 platform_set_drvdata(pdev, NULL); 214 platform_set_drvdata(pdev, NULL);
221 sdhci_remove_host(host, 1); 215 sdhci_remove_host(host, 1);
222err_add_host: 216free_host:
223 iounmap(host->ioaddr);
224err_ioremap:
225 sdhci_free_host(host); 217 sdhci_free_host(host);
226err_alloc_host: 218disable_clk:
227 clk_disable(sdhci->clk); 219 clk_disable(sdhci->clk);
228err_clk_enb: 220put_clk:
229 clk_put(sdhci->clk); 221 clk_put(sdhci->clk);
230err_clk_get:
231 kfree(sdhci);
232err_kzalloc:
233 release_mem_region(iomem->start, resource_size(iomem));
234err: 222err:
235 dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret); 223 dev_err(&pdev->dev, "spear-sdhci probe failed: %d\n", ret);
236 return ret; 224 return ret;
@@ -239,35 +227,19 @@ err:
239static int __devexit sdhci_remove(struct platform_device *pdev) 227static int __devexit sdhci_remove(struct platform_device *pdev)
240{ 228{
241 struct sdhci_host *host = platform_get_drvdata(pdev); 229 struct sdhci_host *host = platform_get_drvdata(pdev);
242 struct resource *iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
243 struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev); 230 struct spear_sdhci *sdhci = dev_get_platdata(&pdev->dev);
244 int dead; 231 int dead = 0;
245 u32 scratch; 232 u32 scratch;
246 233
247 if (sdhci->data) {
248 if (sdhci->data->card_int_gpio >= 0) {
249 free_irq(gpio_to_irq(sdhci->data->card_int_gpio), pdev);
250 gpio_free(sdhci->data->card_int_gpio);
251 }
252
253 if (sdhci->data->card_power_gpio >= 0)
254 gpio_free(sdhci->data->card_power_gpio);
255 }
256
257 platform_set_drvdata(pdev, NULL); 234 platform_set_drvdata(pdev, NULL);
258 dead = 0;
259 scratch = readl(host->ioaddr + SDHCI_INT_STATUS); 235 scratch = readl(host->ioaddr + SDHCI_INT_STATUS);
260 if (scratch == (u32)-1) 236 if (scratch == (u32)-1)
261 dead = 1; 237 dead = 1;
262 238
263 sdhci_remove_host(host, dead); 239 sdhci_remove_host(host, dead);
264 iounmap(host->ioaddr);
265 sdhci_free_host(host); 240 sdhci_free_host(host);
266 clk_disable(sdhci->clk); 241 clk_disable(sdhci->clk);
267 clk_put(sdhci->clk); 242 clk_put(sdhci->clk);
268 kfree(sdhci);
269 if (iomem)
270 release_mem_region(iomem->start, resource_size(iomem));
271 243
272 return 0; 244 return 0;
273} 245}
diff --git a/drivers/mmc/host/sdhci-tegra.c b/drivers/mmc/host/sdhci-tegra.c
index ff5a16991939..b38d8a78f6a0 100644
--- a/drivers/mmc/host/sdhci-tegra.c
+++ b/drivers/mmc/host/sdhci-tegra.c
@@ -32,8 +32,13 @@
32 32
33#include "sdhci-pltfm.h" 33#include "sdhci-pltfm.h"
34 34
35/* Tegra SDHOST controller vendor register definitions */
36#define SDHCI_TEGRA_VENDOR_MISC_CTRL 0x120
37#define SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300 0x20
38
35#define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0) 39#define NVQUIRK_FORCE_SDHCI_SPEC_200 BIT(0)
36#define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1) 40#define NVQUIRK_ENABLE_BLOCK_GAP_DET BIT(1)
41#define NVQUIRK_ENABLE_SDHCI_SPEC_300 BIT(2)
37 42
38struct sdhci_tegra_soc_data { 43struct sdhci_tegra_soc_data {
39 struct sdhci_pltfm_data *pdata; 44 struct sdhci_pltfm_data *pdata;
@@ -120,6 +125,25 @@ static irqreturn_t carddetect_irq(int irq, void *data)
120 return IRQ_HANDLED; 125 return IRQ_HANDLED;
121}; 126};
122 127
128static void tegra_sdhci_reset_exit(struct sdhci_host *host, u8 mask)
129{
130 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
131 struct sdhci_tegra *tegra_host = pltfm_host->priv;
132 const struct sdhci_tegra_soc_data *soc_data = tegra_host->soc_data;
133
134 if (!(mask & SDHCI_RESET_ALL))
135 return;
136
137 /* Erratum: Enable SDHCI spec v3.00 support */
138 if (soc_data->nvquirks & NVQUIRK_ENABLE_SDHCI_SPEC_300) {
139 u32 misc_ctrl;
140
141 misc_ctrl = sdhci_readb(host, SDHCI_TEGRA_VENDOR_MISC_CTRL);
142 misc_ctrl |= SDHCI_MISC_CTRL_ENABLE_SDHCI_SPEC_300;
143 sdhci_writeb(host, misc_ctrl, SDHCI_TEGRA_VENDOR_MISC_CTRL);
144 }
145}
146
123static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width) 147static int tegra_sdhci_8bit(struct sdhci_host *host, int bus_width)
124{ 148{
125 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); 149 struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host);
@@ -148,6 +172,7 @@ static struct sdhci_ops tegra_sdhci_ops = {
148 .read_w = tegra_sdhci_readw, 172 .read_w = tegra_sdhci_readw,
149 .write_l = tegra_sdhci_writel, 173 .write_l = tegra_sdhci_writel,
150 .platform_8bit_width = tegra_sdhci_8bit, 174 .platform_8bit_width = tegra_sdhci_8bit,
175 .platform_reset_exit = tegra_sdhci_reset_exit,
151}; 176};
152 177
153#ifdef CONFIG_ARCH_TEGRA_2x_SOC 178#ifdef CONFIG_ARCH_TEGRA_2x_SOC
@@ -178,6 +203,7 @@ static struct sdhci_pltfm_data sdhci_tegra30_pdata = {
178 203
179static struct sdhci_tegra_soc_data soc_data_tegra30 = { 204static struct sdhci_tegra_soc_data soc_data_tegra30 = {
180 .pdata = &sdhci_tegra30_pdata, 205 .pdata = &sdhci_tegra30_pdata,
206 .nvquirks = NVQUIRK_ENABLE_SDHCI_SPEC_300,
181}; 207};
182#endif 208#endif
183 209
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index ccefdebeff14..e626732aff77 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -680,8 +680,8 @@ static u8 sdhci_calc_timeout(struct sdhci_host *host, struct mmc_command *cmd)
680 } 680 }
681 681
682 if (count >= 0xF) { 682 if (count >= 0xF) {
683 pr_warning("%s: Too large timeout requested for CMD%d!\n", 683 pr_warning("%s: Too large timeout 0x%x requested for CMD%d!\n",
684 mmc_hostname(host->mmc), cmd->opcode); 684 mmc_hostname(host->mmc), count, cmd->opcode);
685 count = 0xE; 685 count = 0xE;
686 } 686 }
687 687
diff --git a/drivers/mtd/maps/lantiq-flash.c b/drivers/mtd/maps/lantiq-flash.c
index b5401e355745..c03456f17004 100644
--- a/drivers/mtd/maps/lantiq-flash.c
+++ b/drivers/mtd/maps/lantiq-flash.c
@@ -19,9 +19,9 @@
19#include <linux/mtd/cfi.h> 19#include <linux/mtd/cfi.h>
20#include <linux/platform_device.h> 20#include <linux/platform_device.h>
21#include <linux/mtd/physmap.h> 21#include <linux/mtd/physmap.h>
22#include <linux/of.h>
22 23
23#include <lantiq_soc.h> 24#include <lantiq_soc.h>
24#include <lantiq_platform.h>
25 25
26/* 26/*
27 * The NOR flash is connected to the same external bus unit (EBU) as PCI. 27 * The NOR flash is connected to the same external bus unit (EBU) as PCI.
@@ -44,8 +44,9 @@ struct ltq_mtd {
44 struct map_info *map; 44 struct map_info *map;
45}; 45};
46 46
47static char ltq_map_name[] = "ltq_nor"; 47static const char ltq_map_name[] = "ltq_nor";
48static const char *ltq_probe_types[] __devinitconst = { "cmdlinepart", NULL }; 48static const char *ltq_probe_types[] __devinitconst = {
49 "cmdlinepart", "ofpart", NULL };
49 50
50static map_word 51static map_word
51ltq_read16(struct map_info *map, unsigned long adr) 52ltq_read16(struct map_info *map, unsigned long adr)
@@ -108,42 +109,38 @@ ltq_copy_to(struct map_info *map, unsigned long to,
108 spin_unlock_irqrestore(&ebu_lock, flags); 109 spin_unlock_irqrestore(&ebu_lock, flags);
109} 110}
110 111
111static int __init 112static int __devinit
112ltq_mtd_probe(struct platform_device *pdev) 113ltq_mtd_probe(struct platform_device *pdev)
113{ 114{
114 struct physmap_flash_data *ltq_mtd_data = dev_get_platdata(&pdev->dev); 115 struct mtd_part_parser_data ppdata;
115 struct ltq_mtd *ltq_mtd; 116 struct ltq_mtd *ltq_mtd;
116 struct resource *res;
117 struct cfi_private *cfi; 117 struct cfi_private *cfi;
118 int err; 118 int err;
119 119
120 if (of_machine_is_compatible("lantiq,falcon") &&
121 (ltq_boot_select() != BS_FLASH)) {
122 dev_err(&pdev->dev, "invalid bootstrap options\n");
123 return -ENODEV;
124 }
125
120 ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL); 126 ltq_mtd = kzalloc(sizeof(struct ltq_mtd), GFP_KERNEL);
121 platform_set_drvdata(pdev, ltq_mtd); 127 platform_set_drvdata(pdev, ltq_mtd);
122 128
123 ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 129 ltq_mtd->res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
124 if (!ltq_mtd->res) { 130 if (!ltq_mtd->res) {
125 dev_err(&pdev->dev, "failed to get memory resource"); 131 dev_err(&pdev->dev, "failed to get memory resource\n");
126 err = -ENOENT; 132 err = -ENOENT;
127 goto err_out; 133 goto err_out;
128 } 134 }
129 135
130 res = devm_request_mem_region(&pdev->dev, ltq_mtd->res->start,
131 resource_size(ltq_mtd->res), dev_name(&pdev->dev));
132 if (!ltq_mtd->res) {
133 dev_err(&pdev->dev, "failed to request mem resource");
134 err = -EBUSY;
135 goto err_out;
136 }
137
138 ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL); 136 ltq_mtd->map = kzalloc(sizeof(struct map_info), GFP_KERNEL);
139 ltq_mtd->map->phys = res->start; 137 ltq_mtd->map->phys = ltq_mtd->res->start;
140 ltq_mtd->map->size = resource_size(res); 138 ltq_mtd->map->size = resource_size(ltq_mtd->res);
141 ltq_mtd->map->virt = devm_ioremap_nocache(&pdev->dev, 139 ltq_mtd->map->virt = devm_request_and_ioremap(&pdev->dev, ltq_mtd->res);
142 ltq_mtd->map->phys, ltq_mtd->map->size);
143 if (!ltq_mtd->map->virt) { 140 if (!ltq_mtd->map->virt) {
144 dev_err(&pdev->dev, "failed to ioremap!\n"); 141 dev_err(&pdev->dev, "failed to remap mem resource\n");
145 err = -ENOMEM; 142 err = -EBUSY;
146 goto err_free; 143 goto err_out;
147 } 144 }
148 145
149 ltq_mtd->map->name = ltq_map_name; 146 ltq_mtd->map->name = ltq_map_name;
@@ -169,9 +166,9 @@ ltq_mtd_probe(struct platform_device *pdev)
169 cfi->addr_unlock1 ^= 1; 166 cfi->addr_unlock1 ^= 1;
170 cfi->addr_unlock2 ^= 1; 167 cfi->addr_unlock2 ^= 1;
171 168
172 err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types, NULL, 169 ppdata.of_node = pdev->dev.of_node;
173 ltq_mtd_data->parts, 170 err = mtd_device_parse_register(ltq_mtd->mtd, ltq_probe_types,
174 ltq_mtd_data->nr_parts); 171 &ppdata, NULL, 0);
175 if (err) { 172 if (err) {
176 dev_err(&pdev->dev, "failed to add partitions\n"); 173 dev_err(&pdev->dev, "failed to add partitions\n");
177 goto err_destroy; 174 goto err_destroy;
@@ -204,32 +201,23 @@ ltq_mtd_remove(struct platform_device *pdev)
204 return 0; 201 return 0;
205} 202}
206 203
204static const struct of_device_id ltq_mtd_match[] = {
205 { .compatible = "lantiq,nor" },
206 {},
207};
208MODULE_DEVICE_TABLE(of, ltq_mtd_match);
209
207static struct platform_driver ltq_mtd_driver = { 210static struct platform_driver ltq_mtd_driver = {
211 .probe = ltq_mtd_probe,
208 .remove = __devexit_p(ltq_mtd_remove), 212 .remove = __devexit_p(ltq_mtd_remove),
209 .driver = { 213 .driver = {
210 .name = "ltq_nor", 214 .name = "ltq-nor",
211 .owner = THIS_MODULE, 215 .owner = THIS_MODULE,
216 .of_match_table = ltq_mtd_match,
212 }, 217 },
213}; 218};
214 219
215static int __init 220module_platform_driver(ltq_mtd_driver);
216init_ltq_mtd(void)
217{
218 int ret = platform_driver_probe(&ltq_mtd_driver, ltq_mtd_probe);
219
220 if (ret)
221 pr_err("ltq_nor: error registering platform driver");
222 return ret;
223}
224
225static void __exit
226exit_ltq_mtd(void)
227{
228 platform_driver_unregister(&ltq_mtd_driver);
229}
230
231module_init(init_ltq_mtd);
232module_exit(exit_ltq_mtd);
233 221
234MODULE_LICENSE("GPL"); 222MODULE_LICENSE("GPL");
235MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); 223MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
diff --git a/drivers/mtd/nand/mxc_nand.c b/drivers/mtd/nand/mxc_nand.c
index cc0678a967c1..9e374e9bd296 100644
--- a/drivers/mtd/nand/mxc_nand.c
+++ b/drivers/mtd/nand/mxc_nand.c
@@ -690,7 +690,7 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
690 if (chip == -1) { 690 if (chip == -1) {
691 /* Disable the NFC clock */ 691 /* Disable the NFC clock */
692 if (host->clk_act) { 692 if (host->clk_act) {
693 clk_disable(host->clk); 693 clk_disable_unprepare(host->clk);
694 host->clk_act = 0; 694 host->clk_act = 0;
695 } 695 }
696 return; 696 return;
@@ -698,7 +698,7 @@ static void mxc_nand_select_chip(struct mtd_info *mtd, int chip)
698 698
699 if (!host->clk_act) { 699 if (!host->clk_act) {
700 /* Enable the NFC clock */ 700 /* Enable the NFC clock */
701 clk_enable(host->clk); 701 clk_prepare_enable(host->clk);
702 host->clk_act = 1; 702 host->clk_act = 1;
703 } 703 }
704 704
@@ -1078,7 +1078,7 @@ static int __init mxcnd_probe(struct platform_device *pdev)
1078 goto eclk; 1078 goto eclk;
1079 } 1079 }
1080 1080
1081 clk_enable(host->clk); 1081 clk_prepare_enable(host->clk);
1082 host->clk_act = 1; 1082 host->clk_act = 1;
1083 1083
1084 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1084 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/mtd/nand/orion_nand.c b/drivers/mtd/nand/orion_nand.c
index 0f50ef38b87b..513dc88a05ca 100644
--- a/drivers/mtd/nand/orion_nand.c
+++ b/drivers/mtd/nand/orion_nand.c
@@ -17,6 +17,8 @@
17#include <linux/mtd/mtd.h> 17#include <linux/mtd/mtd.h>
18#include <linux/mtd/nand.h> 18#include <linux/mtd/nand.h>
19#include <linux/mtd/partitions.h> 19#include <linux/mtd/partitions.h>
20#include <linux/clk.h>
21#include <linux/err.h>
20#include <asm/io.h> 22#include <asm/io.h>
21#include <asm/sizes.h> 23#include <asm/sizes.h>
22#include <mach/hardware.h> 24#include <mach/hardware.h>
@@ -79,6 +81,7 @@ static int __init orion_nand_probe(struct platform_device *pdev)
79 struct nand_chip *nc; 81 struct nand_chip *nc;
80 struct orion_nand_data *board; 82 struct orion_nand_data *board;
81 struct resource *res; 83 struct resource *res;
84 struct clk *clk;
82 void __iomem *io_base; 85 void __iomem *io_base;
83 int ret = 0; 86 int ret = 0;
84 u32 val = 0; 87 u32 val = 0;
@@ -155,6 +158,14 @@ static int __init orion_nand_probe(struct platform_device *pdev)
155 158
156 platform_set_drvdata(pdev, mtd); 159 platform_set_drvdata(pdev, mtd);
157 160
161 /* Not all platforms can gate the clock, so it is not
162 an error if the clock does not exists. */
163 clk = clk_get(&pdev->dev, NULL);
164 if (!IS_ERR(clk)) {
165 clk_prepare_enable(clk);
166 clk_put(clk);
167 }
168
158 if (nand_scan(mtd, 1)) { 169 if (nand_scan(mtd, 1)) {
159 ret = -ENXIO; 170 ret = -ENXIO;
160 goto no_dev; 171 goto no_dev;
@@ -184,6 +195,7 @@ static int __devexit orion_nand_remove(struct platform_device *pdev)
184{ 195{
185 struct mtd_info *mtd = platform_get_drvdata(pdev); 196 struct mtd_info *mtd = platform_get_drvdata(pdev);
186 struct nand_chip *nc = mtd->priv; 197 struct nand_chip *nc = mtd->priv;
198 struct clk *clk;
187 199
188 nand_release(mtd); 200 nand_release(mtd);
189 201
@@ -191,6 +203,12 @@ static int __devexit orion_nand_remove(struct platform_device *pdev)
191 203
192 kfree(nc); 204 kfree(nc);
193 205
206 clk = clk_get(&pdev->dev, NULL);
207 if (!IS_ERR(clk)) {
208 clk_disable_unprepare(clk);
209 clk_put(clk);
210 }
211
194 return 0; 212 return 0;
195} 213}
196 214
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index ec03b401620a..9c755db6b16d 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -1131,7 +1131,6 @@ static irqreturn_t
1131e100rxtx_interrupt(int irq, void *dev_id) 1131e100rxtx_interrupt(int irq, void *dev_id)
1132{ 1132{
1133 struct net_device *dev = (struct net_device *)dev_id; 1133 struct net_device *dev = (struct net_device *)dev_id;
1134 struct net_local *np = netdev_priv(dev);
1135 unsigned long irqbits; 1134 unsigned long irqbits;
1136 1135
1137 /* 1136 /*
diff --git a/drivers/net/ethernet/freescale/fec.c b/drivers/net/ethernet/freescale/fec.c
index 8f2cf8c09e2d..ff7f4c5115a1 100644
--- a/drivers/net/ethernet/freescale/fec.c
+++ b/drivers/net/ethernet/freescale/fec.c
@@ -207,7 +207,8 @@ struct fec_enet_private {
207 207
208 struct net_device *netdev; 208 struct net_device *netdev;
209 209
210 struct clk *clk; 210 struct clk *clk_ipg;
211 struct clk *clk_ahb;
211 212
212 /* The saved address of a sent-in-place packet/buffer, for skfree(). */ 213 /* The saved address of a sent-in-place packet/buffer, for skfree(). */
213 unsigned char *tx_bounce[TX_RING_SIZE]; 214 unsigned char *tx_bounce[TX_RING_SIZE];
@@ -1065,7 +1066,7 @@ static int fec_enet_mii_init(struct platform_device *pdev)
1065 * Reference Manual has an error on this, and gets fixed on i.MX6Q 1066 * Reference Manual has an error on this, and gets fixed on i.MX6Q
1066 * document. 1067 * document.
1067 */ 1068 */
1068 fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk), 5000000); 1069 fep->phy_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ahb), 5000000);
1069 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC) 1070 if (id_entry->driver_data & FEC_QUIRK_ENET_MAC)
1070 fep->phy_speed--; 1071 fep->phy_speed--;
1071 fep->phy_speed <<= 1; 1072 fep->phy_speed <<= 1;
@@ -1618,12 +1619,20 @@ fec_probe(struct platform_device *pdev)
1618 goto failed_pin; 1619 goto failed_pin;
1619 } 1620 }
1620 1621
1621 fep->clk = clk_get(&pdev->dev, NULL); 1622 fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1622 if (IS_ERR(fep->clk)) { 1623 if (IS_ERR(fep->clk_ipg)) {
1623 ret = PTR_ERR(fep->clk); 1624 ret = PTR_ERR(fep->clk_ipg);
1624 goto failed_clk; 1625 goto failed_clk;
1625 } 1626 }
1626 clk_prepare_enable(fep->clk); 1627
1628 fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
1629 if (IS_ERR(fep->clk_ahb)) {
1630 ret = PTR_ERR(fep->clk_ahb);
1631 goto failed_clk;
1632 }
1633
1634 clk_prepare_enable(fep->clk_ahb);
1635 clk_prepare_enable(fep->clk_ipg);
1627 1636
1628 ret = fec_enet_init(ndev); 1637 ret = fec_enet_init(ndev);
1629 if (ret) 1638 if (ret)
@@ -1646,8 +1655,8 @@ failed_register:
1646 fec_enet_mii_remove(fep); 1655 fec_enet_mii_remove(fep);
1647failed_mii_init: 1656failed_mii_init:
1648failed_init: 1657failed_init:
1649 clk_disable_unprepare(fep->clk); 1658 clk_disable_unprepare(fep->clk_ahb);
1650 clk_put(fep->clk); 1659 clk_disable_unprepare(fep->clk_ipg);
1651failed_pin: 1660failed_pin:
1652failed_clk: 1661failed_clk:
1653 for (i = 0; i < FEC_IRQ_NUM; i++) { 1662 for (i = 0; i < FEC_IRQ_NUM; i++) {
@@ -1680,8 +1689,8 @@ fec_drv_remove(struct platform_device *pdev)
1680 if (irq > 0) 1689 if (irq > 0)
1681 free_irq(irq, ndev); 1690 free_irq(irq, ndev);
1682 } 1691 }
1683 clk_disable_unprepare(fep->clk); 1692 clk_disable_unprepare(fep->clk_ahb);
1684 clk_put(fep->clk); 1693 clk_disable_unprepare(fep->clk_ipg);
1685 iounmap(fep->hwp); 1694 iounmap(fep->hwp);
1686 free_netdev(ndev); 1695 free_netdev(ndev);
1687 1696
@@ -1705,7 +1714,8 @@ fec_suspend(struct device *dev)
1705 fec_stop(ndev); 1714 fec_stop(ndev);
1706 netif_device_detach(ndev); 1715 netif_device_detach(ndev);
1707 } 1716 }
1708 clk_disable_unprepare(fep->clk); 1717 clk_disable_unprepare(fep->clk_ahb);
1718 clk_disable_unprepare(fep->clk_ipg);
1709 1719
1710 return 0; 1720 return 0;
1711} 1721}
@@ -1716,7 +1726,8 @@ fec_resume(struct device *dev)
1716 struct net_device *ndev = dev_get_drvdata(dev); 1726 struct net_device *ndev = dev_get_drvdata(dev);
1717 struct fec_enet_private *fep = netdev_priv(ndev); 1727 struct fec_enet_private *fep = netdev_priv(ndev);
1718 1728
1719 clk_prepare_enable(fep->clk); 1729 clk_prepare_enable(fep->clk_ahb);
1730 clk_prepare_enable(fep->clk_ipg);
1720 if (netif_running(ndev)) { 1731 if (netif_running(ndev)) {
1721 fec_restart(ndev, fep->full_duplex); 1732 fec_restart(ndev, fep->full_duplex);
1722 netif_device_attach(ndev); 1733 netif_device_attach(ndev);
diff --git a/drivers/net/ethernet/marvell/mv643xx_eth.c b/drivers/net/ethernet/marvell/mv643xx_eth.c
index c8950da60e6b..04d901d0ff63 100644
--- a/drivers/net/ethernet/marvell/mv643xx_eth.c
+++ b/drivers/net/ethernet/marvell/mv643xx_eth.c
@@ -57,6 +57,7 @@
57#include <linux/types.h> 57#include <linux/types.h>
58#include <linux/inet_lro.h> 58#include <linux/inet_lro.h>
59#include <linux/slab.h> 59#include <linux/slab.h>
60#include <linux/clk.h>
60 61
61static char mv643xx_eth_driver_name[] = "mv643xx_eth"; 62static char mv643xx_eth_driver_name[] = "mv643xx_eth";
62static char mv643xx_eth_driver_version[] = "1.4"; 63static char mv643xx_eth_driver_version[] = "1.4";
@@ -289,10 +290,10 @@ struct mv643xx_eth_shared_private {
289 /* 290 /*
290 * Hardware-specific parameters. 291 * Hardware-specific parameters.
291 */ 292 */
292 unsigned int t_clk;
293 int extended_rx_coal_limit; 293 int extended_rx_coal_limit;
294 int tx_bw_control; 294 int tx_bw_control;
295 int tx_csum_limit; 295 int tx_csum_limit;
296
296}; 297};
297 298
298#define TX_BW_CONTROL_ABSENT 0 299#define TX_BW_CONTROL_ABSENT 0
@@ -431,6 +432,12 @@ struct mv643xx_eth_private {
431 int tx_desc_sram_size; 432 int tx_desc_sram_size;
432 int txq_count; 433 int txq_count;
433 struct tx_queue txq[8]; 434 struct tx_queue txq[8];
435
436 /*
437 * Hardware-specific parameters.
438 */
439 struct clk *clk;
440 unsigned int t_clk;
434}; 441};
435 442
436 443
@@ -1010,7 +1017,7 @@ static void tx_set_rate(struct mv643xx_eth_private *mp, int rate, int burst)
1010 int mtu; 1017 int mtu;
1011 int bucket_size; 1018 int bucket_size;
1012 1019
1013 token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000); 1020 token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
1014 if (token_rate > 1023) 1021 if (token_rate > 1023)
1015 token_rate = 1023; 1022 token_rate = 1023;
1016 1023
@@ -1042,7 +1049,7 @@ static void txq_set_rate(struct tx_queue *txq, int rate, int burst)
1042 int token_rate; 1049 int token_rate;
1043 int bucket_size; 1050 int bucket_size;
1044 1051
1045 token_rate = ((rate / 1000) * 64) / (mp->shared->t_clk / 1000); 1052 token_rate = ((rate / 1000) * 64) / (mp->t_clk / 1000);
1046 if (token_rate > 1023) 1053 if (token_rate > 1023)
1047 token_rate = 1023; 1054 token_rate = 1023;
1048 1055
@@ -1309,7 +1316,7 @@ static unsigned int get_rx_coal(struct mv643xx_eth_private *mp)
1309 temp = (val & 0x003fff00) >> 8; 1316 temp = (val & 0x003fff00) >> 8;
1310 1317
1311 temp *= 64000000; 1318 temp *= 64000000;
1312 do_div(temp, mp->shared->t_clk); 1319 do_div(temp, mp->t_clk);
1313 1320
1314 return (unsigned int)temp; 1321 return (unsigned int)temp;
1315} 1322}
@@ -1319,7 +1326,7 @@ static void set_rx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
1319 u64 temp; 1326 u64 temp;
1320 u32 val; 1327 u32 val;
1321 1328
1322 temp = (u64)usec * mp->shared->t_clk; 1329 temp = (u64)usec * mp->t_clk;
1323 temp += 31999999; 1330 temp += 31999999;
1324 do_div(temp, 64000000); 1331 do_div(temp, 64000000);
1325 1332
@@ -1345,7 +1352,7 @@ static unsigned int get_tx_coal(struct mv643xx_eth_private *mp)
1345 1352
1346 temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4; 1353 temp = (rdlp(mp, TX_FIFO_URGENT_THRESHOLD) & 0x3fff0) >> 4;
1347 temp *= 64000000; 1354 temp *= 64000000;
1348 do_div(temp, mp->shared->t_clk); 1355 do_div(temp, mp->t_clk);
1349 1356
1350 return (unsigned int)temp; 1357 return (unsigned int)temp;
1351} 1358}
@@ -1354,7 +1361,7 @@ static void set_tx_coal(struct mv643xx_eth_private *mp, unsigned int usec)
1354{ 1361{
1355 u64 temp; 1362 u64 temp;
1356 1363
1357 temp = (u64)usec * mp->shared->t_clk; 1364 temp = (u64)usec * mp->t_clk;
1358 temp += 31999999; 1365 temp += 31999999;
1359 do_div(temp, 64000000); 1366 do_div(temp, 64000000);
1360 1367
@@ -2663,10 +2670,6 @@ static int mv643xx_eth_shared_probe(struct platform_device *pdev)
2663 if (dram) 2670 if (dram)
2664 mv643xx_eth_conf_mbus_windows(msp, dram); 2671 mv643xx_eth_conf_mbus_windows(msp, dram);
2665 2672
2666 /*
2667 * Detect hardware parameters.
2668 */
2669 msp->t_clk = (pd != NULL && pd->t_clk != 0) ? pd->t_clk : 133000000;
2670 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ? 2673 msp->tx_csum_limit = (pd != NULL && pd->tx_csum_limit) ?
2671 pd->tx_csum_limit : 9 * 1024; 2674 pd->tx_csum_limit : 9 * 1024;
2672 infer_hw_params(msp); 2675 infer_hw_params(msp);
@@ -2891,6 +2894,18 @@ static int mv643xx_eth_probe(struct platform_device *pdev)
2891 2894
2892 mp->dev = dev; 2895 mp->dev = dev;
2893 2896
2897 /*
2898 * Get the clk rate, if there is one, otherwise use the default.
2899 */
2900 mp->clk = clk_get(&pdev->dev, (pdev->id ? "1" : "0"));
2901 if (!IS_ERR(mp->clk)) {
2902 clk_prepare_enable(mp->clk);
2903 mp->t_clk = clk_get_rate(mp->clk);
2904 } else {
2905 mp->t_clk = 133000000;
2906 printk(KERN_WARNING "Unable to get clock");
2907 }
2908
2894 set_params(mp, pd); 2909 set_params(mp, pd);
2895 netif_set_real_num_tx_queues(dev, mp->txq_count); 2910 netif_set_real_num_tx_queues(dev, mp->txq_count);
2896 netif_set_real_num_rx_queues(dev, mp->rxq_count); 2911 netif_set_real_num_rx_queues(dev, mp->rxq_count);
@@ -2979,6 +2994,11 @@ static int mv643xx_eth_remove(struct platform_device *pdev)
2979 if (mp->phy != NULL) 2994 if (mp->phy != NULL)
2980 phy_detach(mp->phy); 2995 phy_detach(mp->phy);
2981 cancel_work_sync(&mp->tx_timeout_task); 2996 cancel_work_sync(&mp->tx_timeout_task);
2997
2998 if (!IS_ERR(mp->clk)) {
2999 clk_disable_unprepare(mp->clk);
3000 clk_put(mp->clk);
3001 }
2982 free_netdev(mp->dev); 3002 free_netdev(mp->dev);
2983 3003
2984 platform_set_drvdata(pdev, NULL); 3004 platform_set_drvdata(pdev, NULL);
diff --git a/drivers/net/ethernet/ti/Kconfig b/drivers/net/ethernet/ti/Kconfig
index b42252c4bec8..1b173a6145d6 100644
--- a/drivers/net/ethernet/ti/Kconfig
+++ b/drivers/net/ethernet/ti/Kconfig
@@ -51,7 +51,7 @@ config TI_DAVINCI_CPDMA
51 51
52config TI_CPSW 52config TI_CPSW
53 tristate "TI CPSW Switch Support" 53 tristate "TI CPSW Switch Support"
54 depends on ARM && (ARCH_DAVINCI || SOC_OMAPAM33XX) 54 depends on ARM && (ARCH_DAVINCI || SOC_AM33XX)
55 select TI_DAVINCI_CPDMA 55 select TI_DAVINCI_CPDMA
56 select TI_DAVINCI_MDIO 56 select TI_DAVINCI_MDIO
57 ---help--- 57 ---help---
diff --git a/drivers/of/of_i2c.c b/drivers/of/of_i2c.c
index f37fbeb66a44..1e173f357674 100644
--- a/drivers/of/of_i2c.c
+++ b/drivers/of/of_i2c.c
@@ -90,8 +90,22 @@ struct i2c_client *of_find_i2c_device_by_node(struct device_node *node)
90 if (!dev) 90 if (!dev)
91 return NULL; 91 return NULL;
92 92
93 return to_i2c_client(dev); 93 return i2c_verify_client(dev);
94} 94}
95EXPORT_SYMBOL(of_find_i2c_device_by_node); 95EXPORT_SYMBOL(of_find_i2c_device_by_node);
96 96
97/* must call put_device() when done with returned i2c_adapter device */
98struct i2c_adapter *of_find_i2c_adapter_by_node(struct device_node *node)
99{
100 struct device *dev;
101
102 dev = bus_find_device(&i2c_bus_type, NULL, node,
103 of_dev_node_match);
104 if (!dev)
105 return NULL;
106
107 return i2c_verify_adapter(dev);
108}
109EXPORT_SYMBOL(of_find_i2c_adapter_by_node);
110
97MODULE_LICENSE("GPL"); 111MODULE_LICENSE("GPL");
diff --git a/drivers/of/of_pci_irq.c b/drivers/of/of_pci_irq.c
index 93125163dea2..677053813211 100644
--- a/drivers/of/of_pci_irq.c
+++ b/drivers/of/of_pci_irq.c
@@ -15,7 +15,7 @@
15 * PCI tree until an device-node is found, at which point it will finish 15 * PCI tree until an device-node is found, at which point it will finish
16 * resolving using the OF tree walking. 16 * resolving using the OF tree walking.
17 */ 17 */
18int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq) 18int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq)
19{ 19{
20 struct device_node *dn, *ppnode; 20 struct device_node *dn, *ppnode;
21 struct pci_dev *ppdev; 21 struct pci_dev *ppdev;
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 8f169002dc7e..447e83472c01 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2370,7 +2370,7 @@ void pci_enable_acs(struct pci_dev *dev)
2370 * number is always 0 (see the Implementation Note in section 2.2.8.1 of 2370 * number is always 0 (see the Implementation Note in section 2.2.8.1 of
2371 * the PCI Express Base Specification, Revision 2.1) 2371 * the PCI Express Base Specification, Revision 2.1)
2372 */ 2372 */
2373u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin) 2373u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin)
2374{ 2374{
2375 int slot; 2375 int slot;
2376 2376
diff --git a/drivers/pinctrl/spear/Kconfig b/drivers/pinctrl/spear/Kconfig
index 6a2596b4f359..91558791e766 100644
--- a/drivers/pinctrl/spear/Kconfig
+++ b/drivers/pinctrl/spear/Kconfig
@@ -31,4 +31,14 @@ config PINCTRL_SPEAR320
31 depends on MACH_SPEAR320 31 depends on MACH_SPEAR320
32 select PINCTRL_SPEAR3XX 32 select PINCTRL_SPEAR3XX
33 33
34config PINCTRL_SPEAR1310
35 bool "ST Microelectronics SPEAr1310 SoC pin controller driver"
36 depends on MACH_SPEAR1310
37 select PINCTRL_SPEAR
38
39config PINCTRL_SPEAR1340
40 bool "ST Microelectronics SPEAr1340 SoC pin controller driver"
41 depends on MACH_SPEAR1340
42 select PINCTRL_SPEAR
43
34endif 44endif
diff --git a/drivers/pinctrl/spear/Makefile b/drivers/pinctrl/spear/Makefile
index 15dcb85da22d..b28a7ba22443 100644
--- a/drivers/pinctrl/spear/Makefile
+++ b/drivers/pinctrl/spear/Makefile
@@ -5,3 +5,5 @@ obj-$(CONFIG_PINCTRL_SPEAR3XX) += pinctrl-spear3xx.o
5obj-$(CONFIG_PINCTRL_SPEAR300) += pinctrl-spear300.o 5obj-$(CONFIG_PINCTRL_SPEAR300) += pinctrl-spear300.o
6obj-$(CONFIG_PINCTRL_SPEAR310) += pinctrl-spear310.o 6obj-$(CONFIG_PINCTRL_SPEAR310) += pinctrl-spear310.o
7obj-$(CONFIG_PINCTRL_SPEAR320) += pinctrl-spear320.o 7obj-$(CONFIG_PINCTRL_SPEAR320) += pinctrl-spear320.o
8obj-$(CONFIG_PINCTRL_SPEAR1310) += pinctrl-spear1310.o
9obj-$(CONFIG_PINCTRL_SPEAR1340) += pinctrl-spear1340.o
diff --git a/drivers/pinctrl/spear/pinctrl-spear.h b/drivers/pinctrl/spear/pinctrl-spear.h
index 47a6b5b72f90..9155783bb47f 100644
--- a/drivers/pinctrl/spear/pinctrl-spear.h
+++ b/drivers/pinctrl/spear/pinctrl-spear.h
@@ -139,4 +139,255 @@ void __devinit pmx_init_addr(struct spear_pinctrl_machdata *machdata, u16 reg);
139int __devinit spear_pinctrl_probe(struct platform_device *pdev, 139int __devinit spear_pinctrl_probe(struct platform_device *pdev,
140 struct spear_pinctrl_machdata *machdata); 140 struct spear_pinctrl_machdata *machdata);
141int __devexit spear_pinctrl_remove(struct platform_device *pdev); 141int __devexit spear_pinctrl_remove(struct platform_device *pdev);
142
143#define SPEAR_PIN_0_TO_101 \
144 PINCTRL_PIN(0, "PLGPIO0"), \
145 PINCTRL_PIN(1, "PLGPIO1"), \
146 PINCTRL_PIN(2, "PLGPIO2"), \
147 PINCTRL_PIN(3, "PLGPIO3"), \
148 PINCTRL_PIN(4, "PLGPIO4"), \
149 PINCTRL_PIN(5, "PLGPIO5"), \
150 PINCTRL_PIN(6, "PLGPIO6"), \
151 PINCTRL_PIN(7, "PLGPIO7"), \
152 PINCTRL_PIN(8, "PLGPIO8"), \
153 PINCTRL_PIN(9, "PLGPIO9"), \
154 PINCTRL_PIN(10, "PLGPIO10"), \
155 PINCTRL_PIN(11, "PLGPIO11"), \
156 PINCTRL_PIN(12, "PLGPIO12"), \
157 PINCTRL_PIN(13, "PLGPIO13"), \
158 PINCTRL_PIN(14, "PLGPIO14"), \
159 PINCTRL_PIN(15, "PLGPIO15"), \
160 PINCTRL_PIN(16, "PLGPIO16"), \
161 PINCTRL_PIN(17, "PLGPIO17"), \
162 PINCTRL_PIN(18, "PLGPIO18"), \
163 PINCTRL_PIN(19, "PLGPIO19"), \
164 PINCTRL_PIN(20, "PLGPIO20"), \
165 PINCTRL_PIN(21, "PLGPIO21"), \
166 PINCTRL_PIN(22, "PLGPIO22"), \
167 PINCTRL_PIN(23, "PLGPIO23"), \
168 PINCTRL_PIN(24, "PLGPIO24"), \
169 PINCTRL_PIN(25, "PLGPIO25"), \
170 PINCTRL_PIN(26, "PLGPIO26"), \
171 PINCTRL_PIN(27, "PLGPIO27"), \
172 PINCTRL_PIN(28, "PLGPIO28"), \
173 PINCTRL_PIN(29, "PLGPIO29"), \
174 PINCTRL_PIN(30, "PLGPIO30"), \
175 PINCTRL_PIN(31, "PLGPIO31"), \
176 PINCTRL_PIN(32, "PLGPIO32"), \
177 PINCTRL_PIN(33, "PLGPIO33"), \
178 PINCTRL_PIN(34, "PLGPIO34"), \
179 PINCTRL_PIN(35, "PLGPIO35"), \
180 PINCTRL_PIN(36, "PLGPIO36"), \
181 PINCTRL_PIN(37, "PLGPIO37"), \
182 PINCTRL_PIN(38, "PLGPIO38"), \
183 PINCTRL_PIN(39, "PLGPIO39"), \
184 PINCTRL_PIN(40, "PLGPIO40"), \
185 PINCTRL_PIN(41, "PLGPIO41"), \
186 PINCTRL_PIN(42, "PLGPIO42"), \
187 PINCTRL_PIN(43, "PLGPIO43"), \
188 PINCTRL_PIN(44, "PLGPIO44"), \
189 PINCTRL_PIN(45, "PLGPIO45"), \
190 PINCTRL_PIN(46, "PLGPIO46"), \
191 PINCTRL_PIN(47, "PLGPIO47"), \
192 PINCTRL_PIN(48, "PLGPIO48"), \
193 PINCTRL_PIN(49, "PLGPIO49"), \
194 PINCTRL_PIN(50, "PLGPIO50"), \
195 PINCTRL_PIN(51, "PLGPIO51"), \
196 PINCTRL_PIN(52, "PLGPIO52"), \
197 PINCTRL_PIN(53, "PLGPIO53"), \
198 PINCTRL_PIN(54, "PLGPIO54"), \
199 PINCTRL_PIN(55, "PLGPIO55"), \
200 PINCTRL_PIN(56, "PLGPIO56"), \
201 PINCTRL_PIN(57, "PLGPIO57"), \
202 PINCTRL_PIN(58, "PLGPIO58"), \
203 PINCTRL_PIN(59, "PLGPIO59"), \
204 PINCTRL_PIN(60, "PLGPIO60"), \
205 PINCTRL_PIN(61, "PLGPIO61"), \
206 PINCTRL_PIN(62, "PLGPIO62"), \
207 PINCTRL_PIN(63, "PLGPIO63"), \
208 PINCTRL_PIN(64, "PLGPIO64"), \
209 PINCTRL_PIN(65, "PLGPIO65"), \
210 PINCTRL_PIN(66, "PLGPIO66"), \
211 PINCTRL_PIN(67, "PLGPIO67"), \
212 PINCTRL_PIN(68, "PLGPIO68"), \
213 PINCTRL_PIN(69, "PLGPIO69"), \
214 PINCTRL_PIN(70, "PLGPIO70"), \
215 PINCTRL_PIN(71, "PLGPIO71"), \
216 PINCTRL_PIN(72, "PLGPIO72"), \
217 PINCTRL_PIN(73, "PLGPIO73"), \
218 PINCTRL_PIN(74, "PLGPIO74"), \
219 PINCTRL_PIN(75, "PLGPIO75"), \
220 PINCTRL_PIN(76, "PLGPIO76"), \
221 PINCTRL_PIN(77, "PLGPIO77"), \
222 PINCTRL_PIN(78, "PLGPIO78"), \
223 PINCTRL_PIN(79, "PLGPIO79"), \
224 PINCTRL_PIN(80, "PLGPIO80"), \
225 PINCTRL_PIN(81, "PLGPIO81"), \
226 PINCTRL_PIN(82, "PLGPIO82"), \
227 PINCTRL_PIN(83, "PLGPIO83"), \
228 PINCTRL_PIN(84, "PLGPIO84"), \
229 PINCTRL_PIN(85, "PLGPIO85"), \
230 PINCTRL_PIN(86, "PLGPIO86"), \
231 PINCTRL_PIN(87, "PLGPIO87"), \
232 PINCTRL_PIN(88, "PLGPIO88"), \
233 PINCTRL_PIN(89, "PLGPIO89"), \
234 PINCTRL_PIN(90, "PLGPIO90"), \
235 PINCTRL_PIN(91, "PLGPIO91"), \
236 PINCTRL_PIN(92, "PLGPIO92"), \
237 PINCTRL_PIN(93, "PLGPIO93"), \
238 PINCTRL_PIN(94, "PLGPIO94"), \
239 PINCTRL_PIN(95, "PLGPIO95"), \
240 PINCTRL_PIN(96, "PLGPIO96"), \
241 PINCTRL_PIN(97, "PLGPIO97"), \
242 PINCTRL_PIN(98, "PLGPIO98"), \
243 PINCTRL_PIN(99, "PLGPIO99"), \
244 PINCTRL_PIN(100, "PLGPIO100"), \
245 PINCTRL_PIN(101, "PLGPIO101")
246
247#define SPEAR_PIN_102_TO_245 \
248 PINCTRL_PIN(102, "PLGPIO102"), \
249 PINCTRL_PIN(103, "PLGPIO103"), \
250 PINCTRL_PIN(104, "PLGPIO104"), \
251 PINCTRL_PIN(105, "PLGPIO105"), \
252 PINCTRL_PIN(106, "PLGPIO106"), \
253 PINCTRL_PIN(107, "PLGPIO107"), \
254 PINCTRL_PIN(108, "PLGPIO108"), \
255 PINCTRL_PIN(109, "PLGPIO109"), \
256 PINCTRL_PIN(110, "PLGPIO110"), \
257 PINCTRL_PIN(111, "PLGPIO111"), \
258 PINCTRL_PIN(112, "PLGPIO112"), \
259 PINCTRL_PIN(113, "PLGPIO113"), \
260 PINCTRL_PIN(114, "PLGPIO114"), \
261 PINCTRL_PIN(115, "PLGPIO115"), \
262 PINCTRL_PIN(116, "PLGPIO116"), \
263 PINCTRL_PIN(117, "PLGPIO117"), \
264 PINCTRL_PIN(118, "PLGPIO118"), \
265 PINCTRL_PIN(119, "PLGPIO119"), \
266 PINCTRL_PIN(120, "PLGPIO120"), \
267 PINCTRL_PIN(121, "PLGPIO121"), \
268 PINCTRL_PIN(122, "PLGPIO122"), \
269 PINCTRL_PIN(123, "PLGPIO123"), \
270 PINCTRL_PIN(124, "PLGPIO124"), \
271 PINCTRL_PIN(125, "PLGPIO125"), \
272 PINCTRL_PIN(126, "PLGPIO126"), \
273 PINCTRL_PIN(127, "PLGPIO127"), \
274 PINCTRL_PIN(128, "PLGPIO128"), \
275 PINCTRL_PIN(129, "PLGPIO129"), \
276 PINCTRL_PIN(130, "PLGPIO130"), \
277 PINCTRL_PIN(131, "PLGPIO131"), \
278 PINCTRL_PIN(132, "PLGPIO132"), \
279 PINCTRL_PIN(133, "PLGPIO133"), \
280 PINCTRL_PIN(134, "PLGPIO134"), \
281 PINCTRL_PIN(135, "PLGPIO135"), \
282 PINCTRL_PIN(136, "PLGPIO136"), \
283 PINCTRL_PIN(137, "PLGPIO137"), \
284 PINCTRL_PIN(138, "PLGPIO138"), \
285 PINCTRL_PIN(139, "PLGPIO139"), \
286 PINCTRL_PIN(140, "PLGPIO140"), \
287 PINCTRL_PIN(141, "PLGPIO141"), \
288 PINCTRL_PIN(142, "PLGPIO142"), \
289 PINCTRL_PIN(143, "PLGPIO143"), \
290 PINCTRL_PIN(144, "PLGPIO144"), \
291 PINCTRL_PIN(145, "PLGPIO145"), \
292 PINCTRL_PIN(146, "PLGPIO146"), \
293 PINCTRL_PIN(147, "PLGPIO147"), \
294 PINCTRL_PIN(148, "PLGPIO148"), \
295 PINCTRL_PIN(149, "PLGPIO149"), \
296 PINCTRL_PIN(150, "PLGPIO150"), \
297 PINCTRL_PIN(151, "PLGPIO151"), \
298 PINCTRL_PIN(152, "PLGPIO152"), \
299 PINCTRL_PIN(153, "PLGPIO153"), \
300 PINCTRL_PIN(154, "PLGPIO154"), \
301 PINCTRL_PIN(155, "PLGPIO155"), \
302 PINCTRL_PIN(156, "PLGPIO156"), \
303 PINCTRL_PIN(157, "PLGPIO157"), \
304 PINCTRL_PIN(158, "PLGPIO158"), \
305 PINCTRL_PIN(159, "PLGPIO159"), \
306 PINCTRL_PIN(160, "PLGPIO160"), \
307 PINCTRL_PIN(161, "PLGPIO161"), \
308 PINCTRL_PIN(162, "PLGPIO162"), \
309 PINCTRL_PIN(163, "PLGPIO163"), \
310 PINCTRL_PIN(164, "PLGPIO164"), \
311 PINCTRL_PIN(165, "PLGPIO165"), \
312 PINCTRL_PIN(166, "PLGPIO166"), \
313 PINCTRL_PIN(167, "PLGPIO167"), \
314 PINCTRL_PIN(168, "PLGPIO168"), \
315 PINCTRL_PIN(169, "PLGPIO169"), \
316 PINCTRL_PIN(170, "PLGPIO170"), \
317 PINCTRL_PIN(171, "PLGPIO171"), \
318 PINCTRL_PIN(172, "PLGPIO172"), \
319 PINCTRL_PIN(173, "PLGPIO173"), \
320 PINCTRL_PIN(174, "PLGPIO174"), \
321 PINCTRL_PIN(175, "PLGPIO175"), \
322 PINCTRL_PIN(176, "PLGPIO176"), \
323 PINCTRL_PIN(177, "PLGPIO177"), \
324 PINCTRL_PIN(178, "PLGPIO178"), \
325 PINCTRL_PIN(179, "PLGPIO179"), \
326 PINCTRL_PIN(180, "PLGPIO180"), \
327 PINCTRL_PIN(181, "PLGPIO181"), \
328 PINCTRL_PIN(182, "PLGPIO182"), \
329 PINCTRL_PIN(183, "PLGPIO183"), \
330 PINCTRL_PIN(184, "PLGPIO184"), \
331 PINCTRL_PIN(185, "PLGPIO185"), \
332 PINCTRL_PIN(186, "PLGPIO186"), \
333 PINCTRL_PIN(187, "PLGPIO187"), \
334 PINCTRL_PIN(188, "PLGPIO188"), \
335 PINCTRL_PIN(189, "PLGPIO189"), \
336 PINCTRL_PIN(190, "PLGPIO190"), \
337 PINCTRL_PIN(191, "PLGPIO191"), \
338 PINCTRL_PIN(192, "PLGPIO192"), \
339 PINCTRL_PIN(193, "PLGPIO193"), \
340 PINCTRL_PIN(194, "PLGPIO194"), \
341 PINCTRL_PIN(195, "PLGPIO195"), \
342 PINCTRL_PIN(196, "PLGPIO196"), \
343 PINCTRL_PIN(197, "PLGPIO197"), \
344 PINCTRL_PIN(198, "PLGPIO198"), \
345 PINCTRL_PIN(199, "PLGPIO199"), \
346 PINCTRL_PIN(200, "PLGPIO200"), \
347 PINCTRL_PIN(201, "PLGPIO201"), \
348 PINCTRL_PIN(202, "PLGPIO202"), \
349 PINCTRL_PIN(203, "PLGPIO203"), \
350 PINCTRL_PIN(204, "PLGPIO204"), \
351 PINCTRL_PIN(205, "PLGPIO205"), \
352 PINCTRL_PIN(206, "PLGPIO206"), \
353 PINCTRL_PIN(207, "PLGPIO207"), \
354 PINCTRL_PIN(208, "PLGPIO208"), \
355 PINCTRL_PIN(209, "PLGPIO209"), \
356 PINCTRL_PIN(210, "PLGPIO210"), \
357 PINCTRL_PIN(211, "PLGPIO211"), \
358 PINCTRL_PIN(212, "PLGPIO212"), \
359 PINCTRL_PIN(213, "PLGPIO213"), \
360 PINCTRL_PIN(214, "PLGPIO214"), \
361 PINCTRL_PIN(215, "PLGPIO215"), \
362 PINCTRL_PIN(216, "PLGPIO216"), \
363 PINCTRL_PIN(217, "PLGPIO217"), \
364 PINCTRL_PIN(218, "PLGPIO218"), \
365 PINCTRL_PIN(219, "PLGPIO219"), \
366 PINCTRL_PIN(220, "PLGPIO220"), \
367 PINCTRL_PIN(221, "PLGPIO221"), \
368 PINCTRL_PIN(222, "PLGPIO222"), \
369 PINCTRL_PIN(223, "PLGPIO223"), \
370 PINCTRL_PIN(224, "PLGPIO224"), \
371 PINCTRL_PIN(225, "PLGPIO225"), \
372 PINCTRL_PIN(226, "PLGPIO226"), \
373 PINCTRL_PIN(227, "PLGPIO227"), \
374 PINCTRL_PIN(228, "PLGPIO228"), \
375 PINCTRL_PIN(229, "PLGPIO229"), \
376 PINCTRL_PIN(230, "PLGPIO230"), \
377 PINCTRL_PIN(231, "PLGPIO231"), \
378 PINCTRL_PIN(232, "PLGPIO232"), \
379 PINCTRL_PIN(233, "PLGPIO233"), \
380 PINCTRL_PIN(234, "PLGPIO234"), \
381 PINCTRL_PIN(235, "PLGPIO235"), \
382 PINCTRL_PIN(236, "PLGPIO236"), \
383 PINCTRL_PIN(237, "PLGPIO237"), \
384 PINCTRL_PIN(238, "PLGPIO238"), \
385 PINCTRL_PIN(239, "PLGPIO239"), \
386 PINCTRL_PIN(240, "PLGPIO240"), \
387 PINCTRL_PIN(241, "PLGPIO241"), \
388 PINCTRL_PIN(242, "PLGPIO242"), \
389 PINCTRL_PIN(243, "PLGPIO243"), \
390 PINCTRL_PIN(244, "PLGPIO244"), \
391 PINCTRL_PIN(245, "PLGPIO245")
392
142#endif /* __PINMUX_SPEAR_H__ */ 393#endif /* __PINMUX_SPEAR_H__ */
diff --git a/drivers/pinctrl/spear/pinctrl-spear1310.c b/drivers/pinctrl/spear/pinctrl-spear1310.c
new file mode 100644
index 000000000000..fff168be7f00
--- /dev/null
+++ b/drivers/pinctrl/spear/pinctrl-spear1310.c
@@ -0,0 +1,2198 @@
1/*
2 * Driver for the ST Microelectronics SPEAr1310 pinmux
3 *
4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/err.h>
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/of_device.h>
16#include <linux/platform_device.h>
17#include "pinctrl-spear.h"
18
19#define DRIVER_NAME "spear1310-pinmux"
20
21/* pins */
22static const struct pinctrl_pin_desc spear1310_pins[] = {
23 SPEAR_PIN_0_TO_101,
24 SPEAR_PIN_102_TO_245,
25};
26
27/* registers */
28#define PERIP_CFG 0x32C
29 #define MCIF_SEL_SHIFT 3
30 #define MCIF_SEL_SD (0x1 << MCIF_SEL_SHIFT)
31 #define MCIF_SEL_CF (0x2 << MCIF_SEL_SHIFT)
32 #define MCIF_SEL_XD (0x3 << MCIF_SEL_SHIFT)
33 #define MCIF_SEL_MASK (0x3 << MCIF_SEL_SHIFT)
34
35#define PCIE_SATA_CFG 0x3A4
36 #define PCIE_SATA2_SEL_PCIE (0 << 31)
37 #define PCIE_SATA1_SEL_PCIE (0 << 30)
38 #define PCIE_SATA0_SEL_PCIE (0 << 29)
39 #define PCIE_SATA2_SEL_SATA (1 << 31)
40 #define PCIE_SATA1_SEL_SATA (1 << 30)
41 #define PCIE_SATA0_SEL_SATA (1 << 29)
42 #define SATA2_CFG_TX_CLK_EN (1 << 27)
43 #define SATA2_CFG_RX_CLK_EN (1 << 26)
44 #define SATA2_CFG_POWERUP_RESET (1 << 25)
45 #define SATA2_CFG_PM_CLK_EN (1 << 24)
46 #define SATA1_CFG_TX_CLK_EN (1 << 23)
47 #define SATA1_CFG_RX_CLK_EN (1 << 22)
48 #define SATA1_CFG_POWERUP_RESET (1 << 21)
49 #define SATA1_CFG_PM_CLK_EN (1 << 20)
50 #define SATA0_CFG_TX_CLK_EN (1 << 19)
51 #define SATA0_CFG_RX_CLK_EN (1 << 18)
52 #define SATA0_CFG_POWERUP_RESET (1 << 17)
53 #define SATA0_CFG_PM_CLK_EN (1 << 16)
54 #define PCIE2_CFG_DEVICE_PRESENT (1 << 11)
55 #define PCIE2_CFG_POWERUP_RESET (1 << 10)
56 #define PCIE2_CFG_CORE_CLK_EN (1 << 9)
57 #define PCIE2_CFG_AUX_CLK_EN (1 << 8)
58 #define PCIE1_CFG_DEVICE_PRESENT (1 << 7)
59 #define PCIE1_CFG_POWERUP_RESET (1 << 6)
60 #define PCIE1_CFG_CORE_CLK_EN (1 << 5)
61 #define PCIE1_CFG_AUX_CLK_EN (1 << 4)
62 #define PCIE0_CFG_DEVICE_PRESENT (1 << 3)
63 #define PCIE0_CFG_POWERUP_RESET (1 << 2)
64 #define PCIE0_CFG_CORE_CLK_EN (1 << 1)
65 #define PCIE0_CFG_AUX_CLK_EN (1 << 0)
66
67#define PAD_FUNCTION_EN_0 0x650
68 #define PMX_UART0_MASK (1 << 1)
69 #define PMX_I2C0_MASK (1 << 2)
70 #define PMX_I2S0_MASK (1 << 3)
71 #define PMX_SSP0_MASK (1 << 4)
72 #define PMX_CLCD1_MASK (1 << 5)
73 #define PMX_EGPIO00_MASK (1 << 6)
74 #define PMX_EGPIO01_MASK (1 << 7)
75 #define PMX_EGPIO02_MASK (1 << 8)
76 #define PMX_EGPIO03_MASK (1 << 9)
77 #define PMX_EGPIO04_MASK (1 << 10)
78 #define PMX_EGPIO05_MASK (1 << 11)
79 #define PMX_EGPIO06_MASK (1 << 12)
80 #define PMX_EGPIO07_MASK (1 << 13)
81 #define PMX_EGPIO08_MASK (1 << 14)
82 #define PMX_EGPIO09_MASK (1 << 15)
83 #define PMX_SMI_MASK (1 << 16)
84 #define PMX_NAND8_MASK (1 << 17)
85 #define PMX_GMIICLK_MASK (1 << 18)
86 #define PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK (1 << 19)
87 #define PMX_RXCLK_RDV_TXEN_D03_MASK (1 << 20)
88 #define PMX_GMIID47_MASK (1 << 21)
89 #define PMX_MDC_MDIO_MASK (1 << 22)
90 #define PMX_MCI_DATA8_15_MASK (1 << 23)
91 #define PMX_NFAD23_MASK (1 << 24)
92 #define PMX_NFAD24_MASK (1 << 25)
93 #define PMX_NFAD25_MASK (1 << 26)
94 #define PMX_NFCE3_MASK (1 << 27)
95 #define PMX_NFWPRT3_MASK (1 << 28)
96 #define PMX_NFRSTPWDWN0_MASK (1 << 29)
97 #define PMX_NFRSTPWDWN1_MASK (1 << 30)
98 #define PMX_NFRSTPWDWN2_MASK (1 << 31)
99
100#define PAD_FUNCTION_EN_1 0x654
101 #define PMX_NFRSTPWDWN3_MASK (1 << 0)
102 #define PMX_SMINCS2_MASK (1 << 1)
103 #define PMX_SMINCS3_MASK (1 << 2)
104 #define PMX_CLCD2_MASK (1 << 3)
105 #define PMX_KBD_ROWCOL68_MASK (1 << 4)
106 #define PMX_EGPIO10_MASK (1 << 5)
107 #define PMX_EGPIO11_MASK (1 << 6)
108 #define PMX_EGPIO12_MASK (1 << 7)
109 #define PMX_EGPIO13_MASK (1 << 8)
110 #define PMX_EGPIO14_MASK (1 << 9)
111 #define PMX_EGPIO15_MASK (1 << 10)
112 #define PMX_UART0_MODEM_MASK (1 << 11)
113 #define PMX_GPT0_TMR0_MASK (1 << 12)
114 #define PMX_GPT0_TMR1_MASK (1 << 13)
115 #define PMX_GPT1_TMR0_MASK (1 << 14)
116 #define PMX_GPT1_TMR1_MASK (1 << 15)
117 #define PMX_I2S1_MASK (1 << 16)
118 #define PMX_KBD_ROWCOL25_MASK (1 << 17)
119 #define PMX_NFIO8_15_MASK (1 << 18)
120 #define PMX_KBD_COL1_MASK (1 << 19)
121 #define PMX_NFCE1_MASK (1 << 20)
122 #define PMX_KBD_COL0_MASK (1 << 21)
123 #define PMX_NFCE2_MASK (1 << 22)
124 #define PMX_KBD_ROW1_MASK (1 << 23)
125 #define PMX_NFWPRT1_MASK (1 << 24)
126 #define PMX_KBD_ROW0_MASK (1 << 25)
127 #define PMX_NFWPRT2_MASK (1 << 26)
128 #define PMX_MCIDATA0_MASK (1 << 27)
129 #define PMX_MCIDATA1_MASK (1 << 28)
130 #define PMX_MCIDATA2_MASK (1 << 29)
131 #define PMX_MCIDATA3_MASK (1 << 30)
132 #define PMX_MCIDATA4_MASK (1 << 31)
133
134#define PAD_FUNCTION_EN_2 0x658
135 #define PMX_MCIDATA5_MASK (1 << 0)
136 #define PMX_MCIDATA6_MASK (1 << 1)
137 #define PMX_MCIDATA7_MASK (1 << 2)
138 #define PMX_MCIDATA1SD_MASK (1 << 3)
139 #define PMX_MCIDATA2SD_MASK (1 << 4)
140 #define PMX_MCIDATA3SD_MASK (1 << 5)
141 #define PMX_MCIADDR0ALE_MASK (1 << 6)
142 #define PMX_MCIADDR1CLECLK_MASK (1 << 7)
143 #define PMX_MCIADDR2_MASK (1 << 8)
144 #define PMX_MCICECF_MASK (1 << 9)
145 #define PMX_MCICEXD_MASK (1 << 10)
146 #define PMX_MCICESDMMC_MASK (1 << 11)
147 #define PMX_MCICDCF1_MASK (1 << 12)
148 #define PMX_MCICDCF2_MASK (1 << 13)
149 #define PMX_MCICDXD_MASK (1 << 14)
150 #define PMX_MCICDSDMMC_MASK (1 << 15)
151 #define PMX_MCIDATADIR_MASK (1 << 16)
152 #define PMX_MCIDMARQWP_MASK (1 << 17)
153 #define PMX_MCIIORDRE_MASK (1 << 18)
154 #define PMX_MCIIOWRWE_MASK (1 << 19)
155 #define PMX_MCIRESETCF_MASK (1 << 20)
156 #define PMX_MCICS0CE_MASK (1 << 21)
157 #define PMX_MCICFINTR_MASK (1 << 22)
158 #define PMX_MCIIORDY_MASK (1 << 23)
159 #define PMX_MCICS1_MASK (1 << 24)
160 #define PMX_MCIDMAACK_MASK (1 << 25)
161 #define PMX_MCISDCMD_MASK (1 << 26)
162 #define PMX_MCILEDS_MASK (1 << 27)
163 #define PMX_TOUCH_XY_MASK (1 << 28)
164 #define PMX_SSP0_CS0_MASK (1 << 29)
165 #define PMX_SSP0_CS1_2_MASK (1 << 30)
166
167/* combined macros */
168#define PMX_GMII_MASK (PMX_GMIICLK_MASK | \
169 PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK | \
170 PMX_RXCLK_RDV_TXEN_D03_MASK | \
171 PMX_GMIID47_MASK | PMX_MDC_MDIO_MASK)
172
173#define PMX_EGPIO_0_GRP_MASK (PMX_EGPIO00_MASK | PMX_EGPIO01_MASK | \
174 PMX_EGPIO02_MASK | \
175 PMX_EGPIO03_MASK | PMX_EGPIO04_MASK | \
176 PMX_EGPIO05_MASK | PMX_EGPIO06_MASK | \
177 PMX_EGPIO07_MASK | PMX_EGPIO08_MASK | \
178 PMX_EGPIO09_MASK)
179#define PMX_EGPIO_1_GRP_MASK (PMX_EGPIO10_MASK | PMX_EGPIO11_MASK | \
180 PMX_EGPIO12_MASK | PMX_EGPIO13_MASK | \
181 PMX_EGPIO14_MASK | PMX_EGPIO15_MASK)
182
183#define PMX_KEYBOARD_6X6_MASK (PMX_KBD_ROW0_MASK | PMX_KBD_ROW1_MASK | \
184 PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL0_MASK | \
185 PMX_KBD_COL1_MASK)
186
187#define PMX_NAND8BIT_0_MASK (PMX_NAND8_MASK | PMX_NFAD23_MASK | \
188 PMX_NFAD24_MASK | PMX_NFAD25_MASK | \
189 PMX_NFWPRT3_MASK | PMX_NFRSTPWDWN0_MASK | \
190 PMX_NFRSTPWDWN1_MASK | PMX_NFRSTPWDWN2_MASK | \
191 PMX_NFCE3_MASK)
192#define PMX_NAND8BIT_1_MASK PMX_NFRSTPWDWN3_MASK
193
194#define PMX_NAND16BIT_1_MASK (PMX_KBD_ROWCOL25_MASK | PMX_NFIO8_15_MASK)
195#define PMX_NAND_4CHIPS_MASK (PMX_NFCE1_MASK | PMX_NFCE2_MASK | \
196 PMX_NFWPRT1_MASK | PMX_NFWPRT2_MASK | \
197 PMX_KBD_ROW0_MASK | PMX_KBD_ROW1_MASK | \
198 PMX_KBD_COL0_MASK | PMX_KBD_COL1_MASK)
199
200#define PMX_MCIFALL_1_MASK 0xF8000000
201#define PMX_MCIFALL_2_MASK 0x0FFFFFFF
202
203#define PMX_PCI_REG1_MASK (PMX_SMINCS2_MASK | PMX_SMINCS3_MASK | \
204 PMX_CLCD2_MASK | PMX_KBD_ROWCOL68_MASK | \
205 PMX_EGPIO_1_GRP_MASK | PMX_GPT0_TMR0_MASK | \
206 PMX_GPT0_TMR1_MASK | PMX_GPT1_TMR0_MASK | \
207 PMX_GPT1_TMR1_MASK | PMX_I2S1_MASK | \
208 PMX_NFCE2_MASK)
209#define PMX_PCI_REG2_MASK (PMX_TOUCH_XY_MASK | PMX_SSP0_CS0_MASK | \
210 PMX_SSP0_CS1_2_MASK)
211
212#define PMX_SMII_0_1_2_MASK (PMX_CLCD2_MASK | PMX_KBD_ROWCOL68_MASK)
213#define PMX_RGMII_REG0_MASK (PMX_MCI_DATA8_15_MASK | \
214 PMX_GMIICOL_CRS_XFERER_MIITXCLK_MASK | \
215 PMX_GMIID47_MASK)
216#define PMX_RGMII_REG1_MASK (PMX_KBD_ROWCOL68_MASK | PMX_EGPIO_1_GRP_MASK |\
217 PMX_KBD_ROW1_MASK | PMX_NFWPRT1_MASK | \
218 PMX_KBD_ROW0_MASK | PMX_NFWPRT2_MASK)
219#define PMX_RGMII_REG2_MASK (PMX_TOUCH_XY_MASK | PMX_SSP0_CS0_MASK | \
220 PMX_SSP0_CS1_2_MASK)
221
222#define PCIE_CFG_VAL(x) (PCIE_SATA##x##_SEL_PCIE | \
223 PCIE##x##_CFG_AUX_CLK_EN | \
224 PCIE##x##_CFG_CORE_CLK_EN | \
225 PCIE##x##_CFG_POWERUP_RESET | \
226 PCIE##x##_CFG_DEVICE_PRESENT)
227#define SATA_CFG_VAL(x) (PCIE_SATA##x##_SEL_SATA | \
228 SATA##x##_CFG_PM_CLK_EN | \
229 SATA##x##_CFG_POWERUP_RESET | \
230 SATA##x##_CFG_RX_CLK_EN | \
231 SATA##x##_CFG_TX_CLK_EN)
232
233/* Pad multiplexing for i2c0 device */
234static const unsigned i2c0_pins[] = { 102, 103 };
235static struct spear_muxreg i2c0_muxreg[] = {
236 {
237 .reg = PAD_FUNCTION_EN_0,
238 .mask = PMX_I2C0_MASK,
239 .val = PMX_I2C0_MASK,
240 },
241};
242
243static struct spear_modemux i2c0_modemux[] = {
244 {
245 .muxregs = i2c0_muxreg,
246 .nmuxregs = ARRAY_SIZE(i2c0_muxreg),
247 },
248};
249
250static struct spear_pingroup i2c0_pingroup = {
251 .name = "i2c0_grp",
252 .pins = i2c0_pins,
253 .npins = ARRAY_SIZE(i2c0_pins),
254 .modemuxs = i2c0_modemux,
255 .nmodemuxs = ARRAY_SIZE(i2c0_modemux),
256};
257
258static const char *const i2c0_grps[] = { "i2c0_grp" };
259static struct spear_function i2c0_function = {
260 .name = "i2c0",
261 .groups = i2c0_grps,
262 .ngroups = ARRAY_SIZE(i2c0_grps),
263};
264
265/* Pad multiplexing for ssp0 device */
266static const unsigned ssp0_pins[] = { 109, 110, 111, 112 };
267static struct spear_muxreg ssp0_muxreg[] = {
268 {
269 .reg = PAD_FUNCTION_EN_0,
270 .mask = PMX_SSP0_MASK,
271 .val = PMX_SSP0_MASK,
272 },
273};
274
275static struct spear_modemux ssp0_modemux[] = {
276 {
277 .muxregs = ssp0_muxreg,
278 .nmuxregs = ARRAY_SIZE(ssp0_muxreg),
279 },
280};
281
282static struct spear_pingroup ssp0_pingroup = {
283 .name = "ssp0_grp",
284 .pins = ssp0_pins,
285 .npins = ARRAY_SIZE(ssp0_pins),
286 .modemuxs = ssp0_modemux,
287 .nmodemuxs = ARRAY_SIZE(ssp0_modemux),
288};
289
290/* Pad multiplexing for ssp0_cs0 device */
291static const unsigned ssp0_cs0_pins[] = { 96 };
292static struct spear_muxreg ssp0_cs0_muxreg[] = {
293 {
294 .reg = PAD_FUNCTION_EN_2,
295 .mask = PMX_SSP0_CS0_MASK,
296 .val = PMX_SSP0_CS0_MASK,
297 },
298};
299
300static struct spear_modemux ssp0_cs0_modemux[] = {
301 {
302 .muxregs = ssp0_cs0_muxreg,
303 .nmuxregs = ARRAY_SIZE(ssp0_cs0_muxreg),
304 },
305};
306
307static struct spear_pingroup ssp0_cs0_pingroup = {
308 .name = "ssp0_cs0_grp",
309 .pins = ssp0_cs0_pins,
310 .npins = ARRAY_SIZE(ssp0_cs0_pins),
311 .modemuxs = ssp0_cs0_modemux,
312 .nmodemuxs = ARRAY_SIZE(ssp0_cs0_modemux),
313};
314
315/* ssp0_cs1_2 device */
316static const unsigned ssp0_cs1_2_pins[] = { 94, 95 };
317static struct spear_muxreg ssp0_cs1_2_muxreg[] = {
318 {
319 .reg = PAD_FUNCTION_EN_2,
320 .mask = PMX_SSP0_CS1_2_MASK,
321 .val = PMX_SSP0_CS1_2_MASK,
322 },
323};
324
325static struct spear_modemux ssp0_cs1_2_modemux[] = {
326 {
327 .muxregs = ssp0_cs1_2_muxreg,
328 .nmuxregs = ARRAY_SIZE(ssp0_cs1_2_muxreg),
329 },
330};
331
332static struct spear_pingroup ssp0_cs1_2_pingroup = {
333 .name = "ssp0_cs1_2_grp",
334 .pins = ssp0_cs1_2_pins,
335 .npins = ARRAY_SIZE(ssp0_cs1_2_pins),
336 .modemuxs = ssp0_cs1_2_modemux,
337 .nmodemuxs = ARRAY_SIZE(ssp0_cs1_2_modemux),
338};
339
340static const char *const ssp0_grps[] = { "ssp0_grp", "ssp0_cs0_grp",
341 "ssp0_cs1_2_grp" };
342static struct spear_function ssp0_function = {
343 .name = "ssp0",
344 .groups = ssp0_grps,
345 .ngroups = ARRAY_SIZE(ssp0_grps),
346};
347
348/* Pad multiplexing for i2s0 device */
349static const unsigned i2s0_pins[] = { 104, 105, 106, 107, 108 };
350static struct spear_muxreg i2s0_muxreg[] = {
351 {
352 .reg = PAD_FUNCTION_EN_0,
353 .mask = PMX_I2S0_MASK,
354 .val = PMX_I2S0_MASK,
355 },
356};
357
358static struct spear_modemux i2s0_modemux[] = {
359 {
360 .muxregs = i2s0_muxreg,
361 .nmuxregs = ARRAY_SIZE(i2s0_muxreg),
362 },
363};
364
365static struct spear_pingroup i2s0_pingroup = {
366 .name = "i2s0_grp",
367 .pins = i2s0_pins,
368 .npins = ARRAY_SIZE(i2s0_pins),
369 .modemuxs = i2s0_modemux,
370 .nmodemuxs = ARRAY_SIZE(i2s0_modemux),
371};
372
373static const char *const i2s0_grps[] = { "i2s0_grp" };
374static struct spear_function i2s0_function = {
375 .name = "i2s0",
376 .groups = i2s0_grps,
377 .ngroups = ARRAY_SIZE(i2s0_grps),
378};
379
380/* Pad multiplexing for i2s1 device */
381static const unsigned i2s1_pins[] = { 0, 1, 2, 3 };
382static struct spear_muxreg i2s1_muxreg[] = {
383 {
384 .reg = PAD_FUNCTION_EN_1,
385 .mask = PMX_I2S1_MASK,
386 .val = PMX_I2S1_MASK,
387 },
388};
389
390static struct spear_modemux i2s1_modemux[] = {
391 {
392 .muxregs = i2s1_muxreg,
393 .nmuxregs = ARRAY_SIZE(i2s1_muxreg),
394 },
395};
396
397static struct spear_pingroup i2s1_pingroup = {
398 .name = "i2s1_grp",
399 .pins = i2s1_pins,
400 .npins = ARRAY_SIZE(i2s1_pins),
401 .modemuxs = i2s1_modemux,
402 .nmodemuxs = ARRAY_SIZE(i2s1_modemux),
403};
404
405static const char *const i2s1_grps[] = { "i2s1_grp" };
406static struct spear_function i2s1_function = {
407 .name = "i2s1",
408 .groups = i2s1_grps,
409 .ngroups = ARRAY_SIZE(i2s1_grps),
410};
411
412/* Pad multiplexing for clcd device */
413static const unsigned clcd_pins[] = { 113, 114, 115, 116, 117, 118, 119, 120,
414 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134,
415 135, 136, 137, 138, 139, 140, 141, 142 };
416static struct spear_muxreg clcd_muxreg[] = {
417 {
418 .reg = PAD_FUNCTION_EN_0,
419 .mask = PMX_CLCD1_MASK,
420 .val = PMX_CLCD1_MASK,
421 },
422};
423
424static struct spear_modemux clcd_modemux[] = {
425 {
426 .muxregs = clcd_muxreg,
427 .nmuxregs = ARRAY_SIZE(clcd_muxreg),
428 },
429};
430
431static struct spear_pingroup clcd_pingroup = {
432 .name = "clcd_grp",
433 .pins = clcd_pins,
434 .npins = ARRAY_SIZE(clcd_pins),
435 .modemuxs = clcd_modemux,
436 .nmodemuxs = ARRAY_SIZE(clcd_modemux),
437};
438
439static const unsigned clcd_high_res_pins[] = { 30, 31, 32, 33, 34, 35, 36, 37,
440 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53 };
441static struct spear_muxreg clcd_high_res_muxreg[] = {
442 {
443 .reg = PAD_FUNCTION_EN_1,
444 .mask = PMX_CLCD2_MASK,
445 .val = PMX_CLCD2_MASK,
446 },
447};
448
449static struct spear_modemux clcd_high_res_modemux[] = {
450 {
451 .muxregs = clcd_high_res_muxreg,
452 .nmuxregs = ARRAY_SIZE(clcd_high_res_muxreg),
453 },
454};
455
456static struct spear_pingroup clcd_high_res_pingroup = {
457 .name = "clcd_high_res_grp",
458 .pins = clcd_high_res_pins,
459 .npins = ARRAY_SIZE(clcd_high_res_pins),
460 .modemuxs = clcd_high_res_modemux,
461 .nmodemuxs = ARRAY_SIZE(clcd_high_res_modemux),
462};
463
464static const char *const clcd_grps[] = { "clcd_grp", "clcd_high_res" };
465static struct spear_function clcd_function = {
466 .name = "clcd",
467 .groups = clcd_grps,
468 .ngroups = ARRAY_SIZE(clcd_grps),
469};
470
471static const unsigned arm_gpio_pins[] = { 18, 19, 20, 21, 22, 23, 143, 144, 145,
472 146, 147, 148, 149, 150, 151, 152 };
473static struct spear_muxreg arm_gpio_muxreg[] = {
474 {
475 .reg = PAD_FUNCTION_EN_0,
476 .mask = PMX_EGPIO_0_GRP_MASK,
477 .val = PMX_EGPIO_0_GRP_MASK,
478 }, {
479 .reg = PAD_FUNCTION_EN_1,
480 .mask = PMX_EGPIO_1_GRP_MASK,
481 .val = PMX_EGPIO_1_GRP_MASK,
482 },
483};
484
485static struct spear_modemux arm_gpio_modemux[] = {
486 {
487 .muxregs = arm_gpio_muxreg,
488 .nmuxregs = ARRAY_SIZE(arm_gpio_muxreg),
489 },
490};
491
492static struct spear_pingroup arm_gpio_pingroup = {
493 .name = "arm_gpio_grp",
494 .pins = arm_gpio_pins,
495 .npins = ARRAY_SIZE(arm_gpio_pins),
496 .modemuxs = arm_gpio_modemux,
497 .nmodemuxs = ARRAY_SIZE(arm_gpio_modemux),
498};
499
500static const char *const arm_gpio_grps[] = { "arm_gpio_grp" };
501static struct spear_function arm_gpio_function = {
502 .name = "arm_gpio",
503 .groups = arm_gpio_grps,
504 .ngroups = ARRAY_SIZE(arm_gpio_grps),
505};
506
507/* Pad multiplexing for smi 2 chips device */
508static const unsigned smi_2_chips_pins[] = { 153, 154, 155, 156, 157 };
509static struct spear_muxreg smi_2_chips_muxreg[] = {
510 {
511 .reg = PAD_FUNCTION_EN_0,
512 .mask = PMX_SMI_MASK,
513 .val = PMX_SMI_MASK,
514 },
515};
516
517static struct spear_modemux smi_2_chips_modemux[] = {
518 {
519 .muxregs = smi_2_chips_muxreg,
520 .nmuxregs = ARRAY_SIZE(smi_2_chips_muxreg),
521 },
522};
523
524static struct spear_pingroup smi_2_chips_pingroup = {
525 .name = "smi_2_chips_grp",
526 .pins = smi_2_chips_pins,
527 .npins = ARRAY_SIZE(smi_2_chips_pins),
528 .modemuxs = smi_2_chips_modemux,
529 .nmodemuxs = ARRAY_SIZE(smi_2_chips_modemux),
530};
531
532static const unsigned smi_4_chips_pins[] = { 54, 55 };
533static struct spear_muxreg smi_4_chips_muxreg[] = {
534 {
535 .reg = PAD_FUNCTION_EN_0,
536 .mask = PMX_SMI_MASK,
537 .val = PMX_SMI_MASK,
538 }, {
539 .reg = PAD_FUNCTION_EN_1,
540 .mask = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
541 .val = PMX_SMINCS2_MASK | PMX_SMINCS3_MASK,
542 },
543};
544
545static struct spear_modemux smi_4_chips_modemux[] = {
546 {
547 .muxregs = smi_4_chips_muxreg,
548 .nmuxregs = ARRAY_SIZE(smi_4_chips_muxreg),
549 },
550};
551
552static struct spear_pingroup smi_4_chips_pingroup = {
553 .name = "smi_4_chips_grp",
554 .pins = smi_4_chips_pins,
555 .npins = ARRAY_SIZE(smi_4_chips_pins),
556 .modemuxs = smi_4_chips_modemux,
557 .nmodemuxs = ARRAY_SIZE(smi_4_chips_modemux),
558};
559
560static const char *const smi_grps[] = { "smi_2_chips_grp", "smi_4_chips_grp" };
561static struct spear_function smi_function = {
562 .name = "smi",
563 .groups = smi_grps,
564 .ngroups = ARRAY_SIZE(smi_grps),
565};
566
/* Pad multiplexing for gmii device (gigabit MII ethernet interface) */
static const unsigned gmii_pins[] = { 173, 174, 175, 176, 177, 178, 179, 180,
	181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194,
	195, 196, 197, 198, 199, 200 };
static struct spear_muxreg gmii_muxreg[] = {
	{
		/* a single enable bit-field covers all GMII pads */
		.reg = PAD_FUNCTION_EN_0,
		.mask = PMX_GMII_MASK,
		.val = PMX_GMII_MASK,
	},
};

static struct spear_modemux gmii_modemux[] = {
	{
		.muxregs = gmii_muxreg,
		.nmuxregs = ARRAY_SIZE(gmii_muxreg),
	},
};

static struct spear_pingroup gmii_pingroup = {
	.name = "gmii_grp",
	.pins = gmii_pins,
	.npins = ARRAY_SIZE(gmii_pins),
	.modemuxs = gmii_modemux,
	.nmodemuxs = ARRAY_SIZE(gmii_modemux),
};

static const char *const gmii_grps[] = { "gmii_grp" };
static struct spear_function gmii_function = {
	.name = "gmii",
	.groups = gmii_grps,
	.ngroups = ARRAY_SIZE(gmii_grps),
};
600
/*
 * Pad multiplexing for rgmii device (reduced gigabit MII).
 * NOTE(review): unlike gmii above, all three registers are written with
 * val == 0 — presumably RGMII is selected by *clearing* these bits;
 * confirm against the SoC pad-mux register documentation.
 */
static const unsigned rgmii_pins[] = { 18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
	28, 29, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 175,
	180, 181, 182, 183, 185, 188, 193, 194, 195, 196, 197, 198, 211, 212 };
static struct spear_muxreg rgmii_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_0,
		.mask = PMX_RGMII_REG0_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = PMX_RGMII_REG1_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_2,
		.mask = PMX_RGMII_REG2_MASK,
		.val = 0,
	},
};

static struct spear_modemux rgmii_modemux[] = {
	{
		.muxregs = rgmii_muxreg,
		.nmuxregs = ARRAY_SIZE(rgmii_muxreg),
	},
};

static struct spear_pingroup rgmii_pingroup = {
	.name = "rgmii_grp",
	.pins = rgmii_pins,
	.npins = ARRAY_SIZE(rgmii_pins),
	.modemuxs = rgmii_modemux,
	.nmodemuxs = ARRAY_SIZE(rgmii_modemux),
};

static const char *const rgmii_grps[] = { "rgmii_grp" };
static struct spear_function rgmii_function = {
	.name = "rgmii",
	.groups = rgmii_grps,
	.ngroups = ARRAY_SIZE(rgmii_grps),
};
642
/* Pad multiplexing for smii_0_1_2 device (serial MII, ports 0-2) */
static const unsigned smii_0_1_2_pins[] = { 24, 25, 26, 27, 28, 29, 30, 31, 32,
	33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
	51, 52, 53, 54, 55 };
static struct spear_muxreg smii_0_1_2_muxreg[] = {
	{
		/* cleared (val 0) to select SMII on these pads */
		.reg = PAD_FUNCTION_EN_1,
		.mask = PMX_SMII_0_1_2_MASK,
		.val = 0,
	},
};

static struct spear_modemux smii_0_1_2_modemux[] = {
	{
		.muxregs = smii_0_1_2_muxreg,
		.nmuxregs = ARRAY_SIZE(smii_0_1_2_muxreg),
	},
};

static struct spear_pingroup smii_0_1_2_pingroup = {
	.name = "smii_0_1_2_grp",
	.pins = smii_0_1_2_pins,
	.npins = ARRAY_SIZE(smii_0_1_2_pins),
	.modemuxs = smii_0_1_2_modemux,
	.nmodemuxs = ARRAY_SIZE(smii_0_1_2_modemux),
};

static const char *const smii_0_1_2_grps[] = { "smii_0_1_2_grp" };
static struct spear_function smii_0_1_2_function = {
	.name = "smii_0_1_2",
	.groups = smii_0_1_2_grps,
	.ngroups = ARRAY_SIZE(smii_0_1_2_grps),
};
676
/*
 * Pad multiplexing for ras_mii_txclk device.
 * Clearing the NAND CE2 enable bit frees pads 98/99 for the MII TX clock.
 */
static const unsigned ras_mii_txclk_pins[] = { 98, 99 };
static struct spear_muxreg ras_mii_txclk_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,
		.mask = PMX_NFCE2_MASK,
		.val = 0,
	},
};

static struct spear_modemux ras_mii_txclk_modemux[] = {
	{
		.muxregs = ras_mii_txclk_muxreg,
		.nmuxregs = ARRAY_SIZE(ras_mii_txclk_muxreg),
	},
};

static struct spear_pingroup ras_mii_txclk_pingroup = {
	.name = "ras_mii_txclk_grp",
	.pins = ras_mii_txclk_pins,
	.npins = ARRAY_SIZE(ras_mii_txclk_pins),
	.modemuxs = ras_mii_txclk_modemux,
	.nmodemuxs = ARRAY_SIZE(ras_mii_txclk_modemux),
};

static const char *const ras_mii_txclk_grps[] = { "ras_mii_txclk_grp" };
static struct spear_function ras_mii_txclk_function = {
	.name = "ras_mii_txclk",
	.groups = ras_mii_txclk_grps,
	.ngroups = ARRAY_SIZE(ras_mii_txclk_grps),
};
708
/* Pad multiplexing for nand 8bit device (cs0 only) */
static const unsigned nand_8bit_pins[] = { 56, 57, 58, 59, 60, 61, 62, 63, 64,
	65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82,
	83, 84, 85, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
	170, 171, 172, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211,
	212 };
static struct spear_muxreg nand_8bit_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_0,
		.mask = PMX_NAND8BIT_0_MASK,
		.val = PMX_NAND8BIT_0_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = PMX_NAND8BIT_1_MASK,
		.val = PMX_NAND8BIT_1_MASK,
	},
};

static struct spear_modemux nand_8bit_modemux[] = {
	{
		.muxregs = nand_8bit_muxreg,
		.nmuxregs = ARRAY_SIZE(nand_8bit_muxreg),
	},
};

static struct spear_pingroup nand_8bit_pingroup = {
	.name = "nand_8bit_grp",
	.pins = nand_8bit_pins,
	.npins = ARRAY_SIZE(nand_8bit_pins),
	.modemuxs = nand_8bit_modemux,
	.nmodemuxs = ARRAY_SIZE(nand_8bit_modemux),
};

/* Pad multiplexing for nand 16bit device (extends the 8bit group above) */
static const unsigned nand_16bit_pins[] = { 201, 202, 203, 204, 207, 208, 209,
	210 };
static struct spear_muxreg nand_16bit_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,
		.mask = PMX_NAND16BIT_1_MASK,
		.val = PMX_NAND16BIT_1_MASK,
	},
};

static struct spear_modemux nand_16bit_modemux[] = {
	{
		.muxregs = nand_16bit_muxreg,
		.nmuxregs = ARRAY_SIZE(nand_16bit_muxreg),
	},
};

static struct spear_pingroup nand_16bit_pingroup = {
	.name = "nand_16bit_grp",
	.pins = nand_16bit_pins,
	.npins = ARRAY_SIZE(nand_16bit_pins),
	.modemuxs = nand_16bit_modemux,
	.nmodemuxs = ARRAY_SIZE(nand_16bit_modemux),
};

/* Pad multiplexing for nand 4 chips (extra chip-select pads) */
static const unsigned nand_4_chips_pins[] = { 205, 206, 211, 212 };
static struct spear_muxreg nand_4_chips_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,
		.mask = PMX_NAND_4CHIPS_MASK,
		.val = PMX_NAND_4CHIPS_MASK,
	},
};

static struct spear_modemux nand_4_chips_modemux[] = {
	{
		.muxregs = nand_4_chips_muxreg,
		.nmuxregs = ARRAY_SIZE(nand_4_chips_muxreg),
	},
};

static struct spear_pingroup nand_4_chips_pingroup = {
	.name = "nand_4_chips_grp",
	.pins = nand_4_chips_pins,
	.npins = ARRAY_SIZE(nand_4_chips_pins),
	.modemuxs = nand_4_chips_modemux,
	.nmodemuxs = ARRAY_SIZE(nand_4_chips_modemux),
};

/* "nand" function: base 8bit group plus optional 16bit / 4-chip add-ons */
static const char *const nand_grps[] = { "nand_8bit_grp", "nand_16bit_grp",
	"nand_4_chips_grp" };
static struct spear_function nand_function = {
	.name = "nand",
	.groups = nand_grps,
	.ngroups = ARRAY_SIZE(nand_grps),
};
800
/*
 * Pad multiplexing for keyboard_6x6 device.
 * The keyboard matrix shares pads 201-212 with the NAND controller, so the
 * mask also clears the conflicting NAND IO/CE/WP enables while setting the
 * keyboard enable bit.
 */
static const unsigned keyboard_6x6_pins[] = { 201, 202, 203, 204, 205, 206, 207,
	208, 209, 210, 211, 212 };
static struct spear_muxreg keyboard_6x6_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,
		.mask = PMX_KEYBOARD_6X6_MASK | PMX_NFIO8_15_MASK |
			PMX_NFCE1_MASK | PMX_NFCE2_MASK | PMX_NFWPRT1_MASK |
			PMX_NFWPRT2_MASK,
		.val = PMX_KEYBOARD_6X6_MASK,
	},
};

static struct spear_modemux keyboard_6x6_modemux[] = {
	{
		.muxregs = keyboard_6x6_muxreg,
		.nmuxregs = ARRAY_SIZE(keyboard_6x6_muxreg),
	},
};

static struct spear_pingroup keyboard_6x6_pingroup = {
	.name = "keyboard_6x6_grp",
	.pins = keyboard_6x6_pins,
	.npins = ARRAY_SIZE(keyboard_6x6_pins),
	.modemuxs = keyboard_6x6_modemux,
	.nmodemuxs = ARRAY_SIZE(keyboard_6x6_modemux),
};

/* Pad multiplexing for keyboard_rowcol6_8 device (rows/columns 6..8) */
static const unsigned keyboard_rowcol6_8_pins[] = { 24, 25, 26, 27, 28, 29 };
static struct spear_muxreg keyboard_rowcol6_8_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,
		.mask = PMX_KBD_ROWCOL68_MASK,
		.val = PMX_KBD_ROWCOL68_MASK,
	},
};

static struct spear_modemux keyboard_rowcol6_8_modemux[] = {
	{
		.muxregs = keyboard_rowcol6_8_muxreg,
		.nmuxregs = ARRAY_SIZE(keyboard_rowcol6_8_muxreg),
	},
};

static struct spear_pingroup keyboard_rowcol6_8_pingroup = {
	.name = "keyboard_rowcol6_8_grp",
	.pins = keyboard_rowcol6_8_pins,
	.npins = ARRAY_SIZE(keyboard_rowcol6_8_pins),
	.modemuxs = keyboard_rowcol6_8_modemux,
	.nmodemuxs = ARRAY_SIZE(keyboard_rowcol6_8_modemux),
};

static const char *const keyboard_grps[] = { "keyboard_6x6_grp",
	"keyboard_rowcol6_8_grp" };
static struct spear_function keyboard_function = {
	.name = "keyboard",
	.groups = keyboard_grps,
	.ngroups = ARRAY_SIZE(keyboard_grps),
};
861
/* Pad multiplexing for uart0 device (basic RX/TX pair) */
static const unsigned uart0_pins[] = { 100, 101 };
static struct spear_muxreg uart0_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_0,
		.mask = PMX_UART0_MASK,
		.val = PMX_UART0_MASK,
	},
};

static struct spear_modemux uart0_modemux[] = {
	{
		.muxregs = uart0_muxreg,
		.nmuxregs = ARRAY_SIZE(uart0_muxreg),
	},
};

static struct spear_pingroup uart0_pingroup = {
	.name = "uart0_grp",
	.pins = uart0_pins,
	.npins = ARRAY_SIZE(uart0_pins),
	.modemuxs = uart0_modemux,
	.nmodemuxs = ARRAY_SIZE(uart0_modemux),
};

/* Pad multiplexing for uart0_modem device (modem-control lines) */
static const unsigned uart0_modem_pins[] = { 12, 13, 14, 15, 16, 17 };
static struct spear_muxreg uart0_modem_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,
		.mask = PMX_UART0_MODEM_MASK,
		.val = PMX_UART0_MODEM_MASK,
	},
};

static struct spear_modemux uart0_modem_modemux[] = {
	{
		.muxregs = uart0_modem_muxreg,
		.nmuxregs = ARRAY_SIZE(uart0_modem_muxreg),
	},
};

static struct spear_pingroup uart0_modem_pingroup = {
	.name = "uart0_modem_grp",
	.pins = uart0_modem_pins,
	.npins = ARRAY_SIZE(uart0_modem_pins),
	.modemuxs = uart0_modem_modemux,
	.nmodemuxs = ARRAY_SIZE(uart0_modem_modemux),
};

static const char *const uart0_grps[] = { "uart0_grp", "uart0_modem_grp" };
static struct spear_function uart0_function = {
	.name = "uart0",
	.groups = uart0_grps,
	.ngroups = ARRAY_SIZE(uart0_grps),
};
918
/* Pad multiplexing for gpt0_tmr0 device (general purpose timer 0, channel 0) */
static const unsigned gpt0_tmr0_pins[] = { 10, 11 };
static struct spear_muxreg gpt0_tmr0_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,
		.mask = PMX_GPT0_TMR0_MASK,
		.val = PMX_GPT0_TMR0_MASK,
	},
};

static struct spear_modemux gpt0_tmr0_modemux[] = {
	{
		.muxregs = gpt0_tmr0_muxreg,
		.nmuxregs = ARRAY_SIZE(gpt0_tmr0_muxreg),
	},
};

static struct spear_pingroup gpt0_tmr0_pingroup = {
	.name = "gpt0_tmr0_grp",
	.pins = gpt0_tmr0_pins,
	.npins = ARRAY_SIZE(gpt0_tmr0_pins),
	.modemuxs = gpt0_tmr0_modemux,
	.nmodemuxs = ARRAY_SIZE(gpt0_tmr0_modemux),
};

/* Pad multiplexing for gpt0_tmr1 device (general purpose timer 0, channel 1) */
static const unsigned gpt0_tmr1_pins[] = { 8, 9 };
static struct spear_muxreg gpt0_tmr1_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,
		.mask = PMX_GPT0_TMR1_MASK,
		.val = PMX_GPT0_TMR1_MASK,
	},
};

static struct spear_modemux gpt0_tmr1_modemux[] = {
	{
		.muxregs = gpt0_tmr1_muxreg,
		.nmuxregs = ARRAY_SIZE(gpt0_tmr1_muxreg),
	},
};

static struct spear_pingroup gpt0_tmr1_pingroup = {
	.name = "gpt0_tmr1_grp",
	.pins = gpt0_tmr1_pins,
	.npins = ARRAY_SIZE(gpt0_tmr1_pins),
	.modemuxs = gpt0_tmr1_modemux,
	.nmodemuxs = ARRAY_SIZE(gpt0_tmr1_modemux),
};

static const char *const gpt0_grps[] = { "gpt0_tmr0_grp", "gpt0_tmr1_grp" };
static struct spear_function gpt0_function = {
	.name = "gpt0",
	.groups = gpt0_grps,
	.ngroups = ARRAY_SIZE(gpt0_grps),
};
975
/* Pad multiplexing for gpt1_tmr0 device (general purpose timer 1, channel 0) */
static const unsigned gpt1_tmr0_pins[] = { 6, 7 };
static struct spear_muxreg gpt1_tmr0_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,
		.mask = PMX_GPT1_TMR0_MASK,
		.val = PMX_GPT1_TMR0_MASK,
	},
};

static struct spear_modemux gpt1_tmr0_modemux[] = {
	{
		.muxregs = gpt1_tmr0_muxreg,
		.nmuxregs = ARRAY_SIZE(gpt1_tmr0_muxreg),
	},
};

static struct spear_pingroup gpt1_tmr0_pingroup = {
	.name = "gpt1_tmr0_grp",
	.pins = gpt1_tmr0_pins,
	.npins = ARRAY_SIZE(gpt1_tmr0_pins),
	.modemuxs = gpt1_tmr0_modemux,
	.nmodemuxs = ARRAY_SIZE(gpt1_tmr0_modemux),
};

/* Pad multiplexing for gpt1_tmr1 device (general purpose timer 1, channel 1) */
static const unsigned gpt1_tmr1_pins[] = { 4, 5 };
static struct spear_muxreg gpt1_tmr1_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,
		.mask = PMX_GPT1_TMR1_MASK,
		.val = PMX_GPT1_TMR1_MASK,
	},
};

static struct spear_modemux gpt1_tmr1_modemux[] = {
	{
		.muxregs = gpt1_tmr1_muxreg,
		.nmuxregs = ARRAY_SIZE(gpt1_tmr1_muxreg),
	},
};

static struct spear_pingroup gpt1_tmr1_pingroup = {
	.name = "gpt1_tmr1_grp",
	.pins = gpt1_tmr1_pins,
	.npins = ARRAY_SIZE(gpt1_tmr1_pins),
	.modemuxs = gpt1_tmr1_modemux,
	.nmodemuxs = ARRAY_SIZE(gpt1_tmr1_modemux),
};

/* NOTE(review): group order (tmr1 before tmr0) differs from gpt0 above;
 * harmless, but could be reordered for consistency. */
static const char *const gpt1_grps[] = { "gpt1_tmr1_grp", "gpt1_tmr0_grp" };
static struct spear_function gpt1_function = {
	.name = "gpt1",
	.groups = gpt1_grps,
	.ngroups = ARRAY_SIZE(gpt1_grps),
};
1032
/*
 * Pad multiplexing for mcif device (multimedia card interface).
 * One physical pad set is shared by three mutually exclusive back-ends
 * (SD/MMC, CompactFlash, XD); PERIP_CFG's MCIF_SEL field picks which one.
 */
static const unsigned mcif_pins[] = { 86, 87, 88, 89, 90, 91, 92, 93, 213, 214,
	215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
	229, 230, 231, 232, 233, 234, 235, 236, 237, 238, 239, 240, 241, 242,
	243, 244, 245 };
/* Common pad-enable writes shared by the sdhci/cf/xd muxreg tables below */
#define MCIF_MUXREG \
	{ \
		.reg = PAD_FUNCTION_EN_0, \
		.mask = PMX_MCI_DATA8_15_MASK, \
		.val = PMX_MCI_DATA8_15_MASK, \
	}, { \
		.reg = PAD_FUNCTION_EN_1, \
		.mask = PMX_MCIFALL_1_MASK | PMX_NFWPRT1_MASK | \
			PMX_NFWPRT2_MASK, \
		.val = PMX_MCIFALL_1_MASK, \
	}, { \
		.reg = PAD_FUNCTION_EN_2, \
		.mask = PMX_MCIFALL_2_MASK, \
		.val = PMX_MCIFALL_2_MASK, \
	}

/* sdhci device: MCIF pads with the SD/MMC back-end selected */
static struct spear_muxreg sdhci_muxreg[] = {
	MCIF_MUXREG,
	{
		.reg = PERIP_CFG,
		.mask = MCIF_SEL_MASK,
		.val = MCIF_SEL_SD,
	},
};

static struct spear_modemux sdhci_modemux[] = {
	{
		.muxregs = sdhci_muxreg,
		.nmuxregs = ARRAY_SIZE(sdhci_muxreg),
	},
};

static struct spear_pingroup sdhci_pingroup = {
	.name = "sdhci_grp",
	.pins = mcif_pins,
	.npins = ARRAY_SIZE(mcif_pins),
	.modemuxs = sdhci_modemux,
	.nmodemuxs = ARRAY_SIZE(sdhci_modemux),
};

static const char *const sdhci_grps[] = { "sdhci_grp" };
static struct spear_function sdhci_function = {
	.name = "sdhci",
	.groups = sdhci_grps,
	.ngroups = ARRAY_SIZE(sdhci_grps),
};

/* cf device: MCIF pads with the CompactFlash back-end selected */
static struct spear_muxreg cf_muxreg[] = {
	MCIF_MUXREG,
	{
		.reg = PERIP_CFG,
		.mask = MCIF_SEL_MASK,
		.val = MCIF_SEL_CF,
	},
};

static struct spear_modemux cf_modemux[] = {
	{
		.muxregs = cf_muxreg,
		.nmuxregs = ARRAY_SIZE(cf_muxreg),
	},
};

static struct spear_pingroup cf_pingroup = {
	.name = "cf_grp",
	.pins = mcif_pins,
	.npins = ARRAY_SIZE(mcif_pins),
	.modemuxs = cf_modemux,
	.nmodemuxs = ARRAY_SIZE(cf_modemux),
};

static const char *const cf_grps[] = { "cf_grp" };
static struct spear_function cf_function = {
	.name = "cf",
	.groups = cf_grps,
	.ngroups = ARRAY_SIZE(cf_grps),
};

/* xd device: MCIF pads with the XD-card back-end selected */
static struct spear_muxreg xd_muxreg[] = {
	MCIF_MUXREG,
	{
		.reg = PERIP_CFG,
		.mask = MCIF_SEL_MASK,
		.val = MCIF_SEL_XD,
	},
};

static struct spear_modemux xd_modemux[] = {
	{
		.muxregs = xd_muxreg,
		.nmuxregs = ARRAY_SIZE(xd_muxreg),
	},
};

static struct spear_pingroup xd_pingroup = {
	.name = "xd_grp",
	.pins = mcif_pins,
	.npins = ARRAY_SIZE(mcif_pins),
	.modemuxs = xd_modemux,
	.nmodemuxs = ARRAY_SIZE(xd_modemux),
};

static const char *const xd_grps[] = { "xd_grp" };
static struct spear_function xd_function = {
	.name = "xd",
	.groups = xd_grps,
	.ngroups = ARRAY_SIZE(xd_grps),
};
1149
/* Pad multiplexing for touch_xy device (resistive touchscreen X/Y lines) */
static const unsigned touch_xy_pins[] = { 97 };
static struct spear_muxreg touch_xy_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_2,
		.mask = PMX_TOUCH_XY_MASK,
		.val = PMX_TOUCH_XY_MASK,
	},
};

static struct spear_modemux touch_xy_modemux[] = {
	{
		.muxregs = touch_xy_muxreg,
		.nmuxregs = ARRAY_SIZE(touch_xy_muxreg),
	},
};

static struct spear_pingroup touch_xy_pingroup = {
	.name = "touch_xy_grp",
	.pins = touch_xy_pins,
	.npins = ARRAY_SIZE(touch_xy_pins),
	.modemuxs = touch_xy_modemux,
	.nmodemuxs = ARRAY_SIZE(touch_xy_modemux),
};

static const char *const touch_xy_grps[] = { "touch_xy_grp" };
static struct spear_function touch_xy_function = {
	/* function is exposed as "touchscreen", not "touch_xy" */
	.name = "touchscreen",
	.groups = touch_xy_grps,
	.ngroups = ARRAY_SIZE(touch_xy_grps),
};
1181
/* Pad multiplexing for uart1 device */
/* Muxed with I2C: clearing the I2C0 enable frees pads 102/103 for uart1 */
static const unsigned uart1_dis_i2c_pins[] = { 102, 103 };
static struct spear_muxreg uart1_dis_i2c_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_0,
		.mask = PMX_I2C0_MASK,
		.val = 0,
	},
};

static struct spear_modemux uart1_dis_i2c_modemux[] = {
	{
		.muxregs = uart1_dis_i2c_muxreg,
		.nmuxregs = ARRAY_SIZE(uart1_dis_i2c_muxreg),
	},
};

static struct spear_pingroup uart_1_dis_i2c_pingroup = {
	.name = "uart1_disable_i2c_grp",
	.pins = uart1_dis_i2c_pins,
	.npins = ARRAY_SIZE(uart1_dis_i2c_pins),
	.modemuxs = uart1_dis_i2c_modemux,
	.nmodemuxs = ARRAY_SIZE(uart1_dis_i2c_modemux),
};

/* Muxed with SD/MMC: clearing the MCI data1/2 enables frees pads 214/215 */
static const unsigned uart1_dis_sd_pins[] = { 214, 215 };
static struct spear_muxreg uart1_dis_sd_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,
		.mask = PMX_MCIDATA1_MASK |
			PMX_MCIDATA2_MASK,
		.val = 0,
	},
};

static struct spear_modemux uart1_dis_sd_modemux[] = {
	{
		.muxregs = uart1_dis_sd_muxreg,
		.nmuxregs = ARRAY_SIZE(uart1_dis_sd_muxreg),
	},
};

static struct spear_pingroup uart_1_dis_sd_pingroup = {
	.name = "uart1_disable_sd_grp",
	.pins = uart1_dis_sd_pins,
	.npins = ARRAY_SIZE(uart1_dis_sd_pins),
	.modemuxs = uart1_dis_sd_modemux,
	.nmodemuxs = ARRAY_SIZE(uart1_dis_sd_modemux),
};

static const char *const uart1_grps[] = { "uart1_disable_i2c_grp",
	"uart1_disable_sd_grp" };
static struct spear_function uart1_function = {
	.name = "uart1",
	.groups = uart1_grps,
	.ngroups = ARRAY_SIZE(uart1_grps),
};
1241
/* Pad multiplexing for uart2_3 device (frees I2S0 pads) */
static const unsigned uart2_3_pins[] = { 104, 105, 106, 107 };
static struct spear_muxreg uart2_3_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_0,
		.mask = PMX_I2S0_MASK,
		.val = 0,
	},
};

static struct spear_modemux uart2_3_modemux[] = {
	{
		.muxregs = uart2_3_muxreg,
		.nmuxregs = ARRAY_SIZE(uart2_3_muxreg),
	},
};

static struct spear_pingroup uart_2_3_pingroup = {
	.name = "uart2_3_grp",
	.pins = uart2_3_pins,
	.npins = ARRAY_SIZE(uart2_3_pins),
	.modemuxs = uart2_3_modemux,
	.nmodemuxs = ARRAY_SIZE(uart2_3_modemux),
};

static const char *const uart2_3_grps[] = { "uart2_3_grp" };
static struct spear_function uart2_3_function = {
	.name = "uart2_3",
	.groups = uart2_3_grps,
	.ngroups = ARRAY_SIZE(uart2_3_grps),
};

/* Pad multiplexing for uart4 device (frees I2S0 and CLCD1 pads) */
static const unsigned uart4_pins[] = { 108, 113 };
static struct spear_muxreg uart4_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_0,
		.mask = PMX_I2S0_MASK | PMX_CLCD1_MASK,
		.val = 0,
	},
};

static struct spear_modemux uart4_modemux[] = {
	{
		.muxregs = uart4_muxreg,
		.nmuxregs = ARRAY_SIZE(uart4_muxreg),
	},
};

static struct spear_pingroup uart_4_pingroup = {
	.name = "uart4_grp",
	.pins = uart4_pins,
	.npins = ARRAY_SIZE(uart4_pins),
	.modemuxs = uart4_modemux,
	.nmodemuxs = ARRAY_SIZE(uart4_modemux),
};

static const char *const uart4_grps[] = { "uart4_grp" };
static struct spear_function uart4_function = {
	.name = "uart4",
	.groups = uart4_grps,
	.ngroups = ARRAY_SIZE(uart4_grps),
};

/* Pad multiplexing for uart5 device (frees CLCD1 pads) */
static const unsigned uart5_pins[] = { 114, 115 };
static struct spear_muxreg uart5_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_0,
		.mask = PMX_CLCD1_MASK,
		.val = 0,
	},
};

static struct spear_modemux uart5_modemux[] = {
	{
		.muxregs = uart5_muxreg,
		.nmuxregs = ARRAY_SIZE(uart5_muxreg),
	},
};

static struct spear_pingroup uart_5_pingroup = {
	.name = "uart5_grp",
	.pins = uart5_pins,
	.npins = ARRAY_SIZE(uart5_pins),
	.modemuxs = uart5_modemux,
	.nmodemuxs = ARRAY_SIZE(uart5_modemux),
};

static const char *const uart5_grps[] = { "uart5_grp" };
static struct spear_function uart5_function = {
	.name = "uart5",
	.groups = uart5_grps,
	.ngroups = ARRAY_SIZE(uart5_grps),
};
1337
/* Pad multiplexing for rs485_0_1_tdm_0_1 device (frees CLCD1 pads) */
static const unsigned rs485_0_1_tdm_0_1_pins[] = { 116, 117, 118, 119, 120, 121,
	122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135,
	136, 137 };
static struct spear_muxreg rs485_0_1_tdm_0_1_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_0,
		.mask = PMX_CLCD1_MASK,
		.val = 0,
	},
};

static struct spear_modemux rs485_0_1_tdm_0_1_modemux[] = {
	{
		.muxregs = rs485_0_1_tdm_0_1_muxreg,
		.nmuxregs = ARRAY_SIZE(rs485_0_1_tdm_0_1_muxreg),
	},
};

static struct spear_pingroup rs485_0_1_tdm_0_1_pingroup = {
	.name = "rs485_0_1_tdm_0_1_grp",
	.pins = rs485_0_1_tdm_0_1_pins,
	.npins = ARRAY_SIZE(rs485_0_1_tdm_0_1_pins),
	.modemuxs = rs485_0_1_tdm_0_1_modemux,
	.nmodemuxs = ARRAY_SIZE(rs485_0_1_tdm_0_1_modemux),
};

static const char *const rs485_0_1_tdm_0_1_grps[] = { "rs485_0_1_tdm_0_1_grp" };
static struct spear_function rs485_0_1_tdm_0_1_function = {
	.name = "rs485_0_1_tdm_0_1",
	.groups = rs485_0_1_tdm_0_1_grps,
	.ngroups = ARRAY_SIZE(rs485_0_1_tdm_0_1_grps),
};
1371
/* Pad multiplexing for i2c_1_2 device (frees CLCD1 pads) */
static const unsigned i2c_1_2_pins[] = { 138, 139, 140, 141 };
static struct spear_muxreg i2c_1_2_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_0,
		.mask = PMX_CLCD1_MASK,
		.val = 0,
	},
};

static struct spear_modemux i2c_1_2_modemux[] = {
	{
		.muxregs = i2c_1_2_muxreg,
		.nmuxregs = ARRAY_SIZE(i2c_1_2_muxreg),
	},
};

static struct spear_pingroup i2c_1_2_pingroup = {
	.name = "i2c_1_2_grp",
	.pins = i2c_1_2_pins,
	.npins = ARRAY_SIZE(i2c_1_2_pins),
	.modemuxs = i2c_1_2_modemux,
	.nmodemuxs = ARRAY_SIZE(i2c_1_2_modemux),
};

static const char *const i2c_1_2_grps[] = { "i2c_1_2_grp" };
static struct spear_function i2c_1_2_function = {
	.name = "i2c_1_2",
	.groups = i2c_1_2_grps,
	.ngroups = ARRAY_SIZE(i2c_1_2_grps),
};
1403
/* Pad multiplexing for i2c3_dis_smi_clcd device */
/* Muxed with SMI & CLCD: clearing both enables frees pads 142/153 */
static const unsigned i2c3_dis_smi_clcd_pins[] = { 142, 153 };
static struct spear_muxreg i2c3_dis_smi_clcd_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_0,
		.mask = PMX_CLCD1_MASK | PMX_SMI_MASK,
		.val = 0,
	},
};

static struct spear_modemux i2c3_dis_smi_clcd_modemux[] = {
	{
		.muxregs = i2c3_dis_smi_clcd_muxreg,
		.nmuxregs = ARRAY_SIZE(i2c3_dis_smi_clcd_muxreg),
	},
};

static struct spear_pingroup i2c3_dis_smi_clcd_pingroup = {
	.name = "i2c3_dis_smi_clcd_grp",
	.pins = i2c3_dis_smi_clcd_pins,
	.npins = ARRAY_SIZE(i2c3_dis_smi_clcd_pins),
	.modemuxs = i2c3_dis_smi_clcd_modemux,
	.nmodemuxs = ARRAY_SIZE(i2c3_dis_smi_clcd_modemux),
};

/* Pad multiplexing for i2c3_dis_sd_i2s0 device */
/* Muxed with SD/MMC & I2S1 */
static const unsigned i2c3_dis_sd_i2s0_pins[] = { 0, 216 };
static struct spear_muxreg i2c3_dis_sd_i2s0_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,
		.mask = PMX_I2S1_MASK | PMX_MCIDATA3_MASK,
		.val = 0,
	},
};

static struct spear_modemux i2c3_dis_sd_i2s0_modemux[] = {
	{
		.muxregs = i2c3_dis_sd_i2s0_muxreg,
		.nmuxregs = ARRAY_SIZE(i2c3_dis_sd_i2s0_muxreg),
	},
};

static struct spear_pingroup i2c3_dis_sd_i2s0_pingroup = {
	.name = "i2c3_dis_sd_i2s0_grp",
	.pins = i2c3_dis_sd_i2s0_pins,
	.npins = ARRAY_SIZE(i2c3_dis_sd_i2s0_pins),
	.modemuxs = i2c3_dis_sd_i2s0_modemux,
	.nmodemuxs = ARRAY_SIZE(i2c3_dis_sd_i2s0_modemux),
};

static const char *const i2c3_grps[] = { "i2c3_dis_smi_clcd_grp",
	"i2c3_dis_sd_i2s0_grp" };
/*
 * NOTE(review): "i2c3_unction" looks like a typo for "i2c3_function".
 * It is static and (presumably) referenced from the driver's function
 * list later in this file, so renaming it requires updating that
 * reference as well — left as-is here.
 */
static struct spear_function i2c3_unction = {
	.name = "i2c3_i2s1",
	.groups = i2c3_grps,
	.ngroups = ARRAY_SIZE(i2c3_grps),
};
1463
/* Pad multiplexing for i2c_4_5_dis_smi device */
/* Muxed with SMI: clearing the SMI enable frees pads 154-157 */
static const unsigned i2c_4_5_dis_smi_pins[] = { 154, 155, 156, 157 };
static struct spear_muxreg i2c_4_5_dis_smi_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_0,
		.mask = PMX_SMI_MASK,
		.val = 0,
	},
};

static struct spear_modemux i2c_4_5_dis_smi_modemux[] = {
	{
		.muxregs = i2c_4_5_dis_smi_muxreg,
		.nmuxregs = ARRAY_SIZE(i2c_4_5_dis_smi_muxreg),
	},
};

static struct spear_pingroup i2c_4_5_dis_smi_pingroup = {
	.name = "i2c_4_5_dis_smi_grp",
	.pins = i2c_4_5_dis_smi_pins,
	.npins = ARRAY_SIZE(i2c_4_5_dis_smi_pins),
	.modemuxs = i2c_4_5_dis_smi_modemux,
	.nmodemuxs = ARRAY_SIZE(i2c_4_5_dis_smi_modemux),
};

/* Pad multiplexing for i2c4_dis_sd device */
/* Muxed with SD/MMC: MCI data4/data5 enables live in different registers */
static const unsigned i2c4_dis_sd_pins[] = { 217, 218 };
static struct spear_muxreg i2c4_dis_sd_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,
		.mask = PMX_MCIDATA4_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_2,
		.mask = PMX_MCIDATA5_MASK,
		.val = 0,
	},
};

static struct spear_modemux i2c4_dis_sd_modemux[] = {
	{
		.muxregs = i2c4_dis_sd_muxreg,
		.nmuxregs = ARRAY_SIZE(i2c4_dis_sd_muxreg),
	},
};

static struct spear_pingroup i2c4_dis_sd_pingroup = {
	.name = "i2c4_dis_sd_grp",
	.pins = i2c4_dis_sd_pins,
	.npins = ARRAY_SIZE(i2c4_dis_sd_pins),
	.modemuxs = i2c4_dis_sd_modemux,
	.nmodemuxs = ARRAY_SIZE(i2c4_dis_sd_modemux),
};

/* Pad multiplexing for i2c5_dis_sd device */
/* Muxed with SD/MMC */
static const unsigned i2c5_dis_sd_pins[] = { 219, 220 };
static struct spear_muxreg i2c5_dis_sd_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_2,
		.mask = PMX_MCIDATA6_MASK |
			PMX_MCIDATA7_MASK,
		.val = 0,
	},
};

static struct spear_modemux i2c5_dis_sd_modemux[] = {
	{
		.muxregs = i2c5_dis_sd_muxreg,
		.nmuxregs = ARRAY_SIZE(i2c5_dis_sd_muxreg),
	},
};

static struct spear_pingroup i2c5_dis_sd_pingroup = {
	.name = "i2c5_dis_sd_grp",
	.pins = i2c5_dis_sd_pins,
	.npins = ARRAY_SIZE(i2c5_dis_sd_pins),
	.modemuxs = i2c5_dis_sd_modemux,
	.nmodemuxs = ARRAY_SIZE(i2c5_dis_sd_modemux),
};

static const char *const i2c_4_5_grps[] = { "i2c5_dis_sd_grp",
	"i2c4_dis_sd_grp", "i2c_4_5_dis_smi_grp" };
static struct spear_function i2c_4_5_function = {
	.name = "i2c_4_5",
	.groups = i2c_4_5_grps,
	.ngroups = ARRAY_SIZE(i2c_4_5_grps),
};
1554
/* Pad multiplexing for i2c_6_7_dis_kbd device */
/* Muxed with KBD: clearing the keyboard row/col 2-5 enable frees the pads */
static const unsigned i2c_6_7_dis_kbd_pins[] = { 207, 208, 209, 210 };
static struct spear_muxreg i2c_6_7_dis_kbd_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,
		.mask = PMX_KBD_ROWCOL25_MASK,
		.val = 0,
	},
};

static struct spear_modemux i2c_6_7_dis_kbd_modemux[] = {
	{
		.muxregs = i2c_6_7_dis_kbd_muxreg,
		.nmuxregs = ARRAY_SIZE(i2c_6_7_dis_kbd_muxreg),
	},
};

static struct spear_pingroup i2c_6_7_dis_kbd_pingroup = {
	.name = "i2c_6_7_dis_kbd_grp",
	.pins = i2c_6_7_dis_kbd_pins,
	.npins = ARRAY_SIZE(i2c_6_7_dis_kbd_pins),
	.modemuxs = i2c_6_7_dis_kbd_modemux,
	.nmodemuxs = ARRAY_SIZE(i2c_6_7_dis_kbd_modemux),
};

/* Pad multiplexing for i2c6_dis_sd device */
/* Muxed with SD/MMC: clearing the MCI IORD/IOWR enables frees the pads */
static const unsigned i2c6_dis_sd_pins[] = { 236, 237 };
static struct spear_muxreg i2c6_dis_sd_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_2,
		.mask = PMX_MCIIORDRE_MASK |
			PMX_MCIIOWRWE_MASK,
		.val = 0,
	},
};

static struct spear_modemux i2c6_dis_sd_modemux[] = {
	{
		.muxregs = i2c6_dis_sd_muxreg,
		.nmuxregs = ARRAY_SIZE(i2c6_dis_sd_muxreg),
	},
};

static struct spear_pingroup i2c6_dis_sd_pingroup = {
	.name = "i2c6_dis_sd_grp",
	.pins = i2c6_dis_sd_pins,
	.npins = ARRAY_SIZE(i2c6_dis_sd_pins),
	.modemuxs = i2c6_dis_sd_modemux,
	.nmodemuxs = ARRAY_SIZE(i2c6_dis_sd_modemux),
};

/* Pad multiplexing for i2c7_dis_sd device (muxed with SD/MMC) */
static const unsigned i2c7_dis_sd_pins[] = { 238, 239 };
static struct spear_muxreg i2c7_dis_sd_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_2,
		.mask = PMX_MCIRESETCF_MASK |
			PMX_MCICS0CE_MASK,
		.val = 0,
	},
};

static struct spear_modemux i2c7_dis_sd_modemux[] = {
	{
		.muxregs = i2c7_dis_sd_muxreg,
		.nmuxregs = ARRAY_SIZE(i2c7_dis_sd_muxreg),
	},
};

static struct spear_pingroup i2c7_dis_sd_pingroup = {
	.name = "i2c7_dis_sd_grp",
	.pins = i2c7_dis_sd_pins,
	.npins = ARRAY_SIZE(i2c7_dis_sd_pins),
	.modemuxs = i2c7_dis_sd_modemux,
	.nmodemuxs = ARRAY_SIZE(i2c7_dis_sd_modemux),
};

static const char *const i2c_6_7_grps[] = { "i2c6_dis_sd_grp",
	"i2c7_dis_sd_grp", "i2c_6_7_dis_kbd_grp" };
static struct spear_function i2c_6_7_function = {
	.name = "i2c_6_7",
	.groups = i2c_6_7_grps,
	.ngroups = ARRAY_SIZE(i2c_6_7_grps),
};
1641
/* Pad multiplexing for can0_dis_nor device */
/* Muxed with NOR: the two NFRSTPWDWN enables live in different registers */
static const unsigned can0_dis_nor_pins[] = { 56, 57 };
static struct spear_muxreg can0_dis_nor_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_0,
		.mask = PMX_NFRSTPWDWN2_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = PMX_NFRSTPWDWN3_MASK,
		.val = 0,
	},
};

static struct spear_modemux can0_dis_nor_modemux[] = {
	{
		.muxregs = can0_dis_nor_muxreg,
		.nmuxregs = ARRAY_SIZE(can0_dis_nor_muxreg),
	},
};

static struct spear_pingroup can0_dis_nor_pingroup = {
	.name = "can0_dis_nor_grp",
	.pins = can0_dis_nor_pins,
	.npins = ARRAY_SIZE(can0_dis_nor_pins),
	.modemuxs = can0_dis_nor_modemux,
	.nmodemuxs = ARRAY_SIZE(can0_dis_nor_modemux),
};

/* Pad multiplexing for can0_dis_sd device */
/* Muxed with SD/MMC */
static const unsigned can0_dis_sd_pins[] = { 240, 241 };
static struct spear_muxreg can0_dis_sd_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_2,
		.mask = PMX_MCICFINTR_MASK | PMX_MCIIORDY_MASK,
		.val = 0,
	},
};

static struct spear_modemux can0_dis_sd_modemux[] = {
	{
		.muxregs = can0_dis_sd_muxreg,
		.nmuxregs = ARRAY_SIZE(can0_dis_sd_muxreg),
	},
};

static struct spear_pingroup can0_dis_sd_pingroup = {
	.name = "can0_dis_sd_grp",
	.pins = can0_dis_sd_pins,
	.npins = ARRAY_SIZE(can0_dis_sd_pins),
	.modemuxs = can0_dis_sd_modemux,
	.nmodemuxs = ARRAY_SIZE(can0_dis_sd_modemux),
};

static const char *const can0_grps[] = { "can0_dis_nor_grp", "can0_dis_sd_grp"
};
static struct spear_function can0_function = {
	.name = "can0",
	.groups = can0_grps,
	.ngroups = ARRAY_SIZE(can0_grps),
};
1705
/* Pad multiplexing for can1_dis_sd device */
/* Muxed with SD/MMC */
static const unsigned can1_dis_sd_pins[] = { 242, 243 };
static struct spear_muxreg can1_dis_sd_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_2,
		.mask = PMX_MCICS1_MASK | PMX_MCIDMAACK_MASK,
		.val = 0,
	},
};

static struct spear_modemux can1_dis_sd_modemux[] = {
	{
		.muxregs = can1_dis_sd_muxreg,
		.nmuxregs = ARRAY_SIZE(can1_dis_sd_muxreg),
	},
};

static struct spear_pingroup can1_dis_sd_pingroup = {
	.name = "can1_dis_sd_grp",
	.pins = can1_dis_sd_pins,
	.npins = ARRAY_SIZE(can1_dis_sd_pins),
	.modemuxs = can1_dis_sd_modemux,
	.nmodemuxs = ARRAY_SIZE(can1_dis_sd_modemux),
};

/* Pad multiplexing for can1_dis_kbd device */
/* Muxed with KBD */
static const unsigned can1_dis_kbd_pins[] = { 201, 202 };
static struct spear_muxreg can1_dis_kbd_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,
		.mask = PMX_KBD_ROWCOL25_MASK,
		.val = 0,
	},
};

static struct spear_modemux can1_dis_kbd_modemux[] = {
	{
		.muxregs = can1_dis_kbd_muxreg,
		.nmuxregs = ARRAY_SIZE(can1_dis_kbd_muxreg),
	},
};

static struct spear_pingroup can1_dis_kbd_pingroup = {
	.name = "can1_dis_kbd_grp",
	.pins = can1_dis_kbd_pins,
	.npins = ARRAY_SIZE(can1_dis_kbd_pins),
	.modemuxs = can1_dis_kbd_modemux,
	.nmodemuxs = ARRAY_SIZE(can1_dis_kbd_modemux),
};

static const char *const can1_grps[] = { "can1_dis_sd_grp", "can1_dis_kbd_grp"
};
static struct spear_function can1_function = {
	.name = "can1",
	.groups = can1_grps,
	.ngroups = ARRAY_SIZE(can1_grps),
};
1765
/* Pad multiplexing for pci device */
/* PCIe and SATA share the same physical pads, hence one common pin list */
static const unsigned pci_sata_pins[] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 18,
	19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
	37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
	55, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99 };
/*
 * Common leading entries for every pcie*/sata* muxreg table below: clear the
 * MCI data and PCI register function bits (val = 0) on the shared pads.
 */
#define PCI_SATA_MUXREG				\
	{					\
		.reg = PAD_FUNCTION_EN_0,	\
		.mask = PMX_MCI_DATA8_15_MASK,	\
		.val = 0,			\
	}, {					\
		.reg = PAD_FUNCTION_EN_1,	\
		.mask = PMX_PCI_REG1_MASK,	\
		.val = 0,			\
	}, {					\
		.reg = PAD_FUNCTION_EN_2,	\
		.mask = PMX_PCI_REG2_MASK,	\
		.val = 0,			\
	}
1785
/* pad multiplexing for pcie0 device */
/* PCIE_CFG_VAL(x) selects PCIe (not SATA) operation for controller x */
static struct spear_muxreg pcie0_muxreg[] = {
	PCI_SATA_MUXREG,
	{
		.reg = PCIE_SATA_CFG,
		.mask = PCIE_CFG_VAL(0),
		.val = PCIE_CFG_VAL(0),
	},
};

static struct spear_modemux pcie0_modemux[] = {
	{
		.muxregs = pcie0_muxreg,
		.nmuxregs = ARRAY_SIZE(pcie0_muxreg),
	},
};

static struct spear_pingroup pcie0_pingroup = {
	.name = "pcie0_grp",
	.pins = pci_sata_pins,		/* pads shared with SATA */
	.npins = ARRAY_SIZE(pci_sata_pins),
	.modemuxs = pcie0_modemux,
	.nmodemuxs = ARRAY_SIZE(pcie0_modemux),
};

/* pad multiplexing for pcie1 device */
static struct spear_muxreg pcie1_muxreg[] = {
	PCI_SATA_MUXREG,
	{
		.reg = PCIE_SATA_CFG,
		.mask = PCIE_CFG_VAL(1),
		.val = PCIE_CFG_VAL(1),
	},
};

static struct spear_modemux pcie1_modemux[] = {
	{
		.muxregs = pcie1_muxreg,
		.nmuxregs = ARRAY_SIZE(pcie1_muxreg),
	},
};

static struct spear_pingroup pcie1_pingroup = {
	.name = "pcie1_grp",
	.pins = pci_sata_pins,
	.npins = ARRAY_SIZE(pci_sata_pins),
	.modemuxs = pcie1_modemux,
	.nmodemuxs = ARRAY_SIZE(pcie1_modemux),
};

/* pad multiplexing for pcie2 device */
static struct spear_muxreg pcie2_muxreg[] = {
	PCI_SATA_MUXREG,
	{
		.reg = PCIE_SATA_CFG,
		.mask = PCIE_CFG_VAL(2),
		.val = PCIE_CFG_VAL(2),
	},
};

static struct spear_modemux pcie2_modemux[] = {
	{
		.muxregs = pcie2_muxreg,
		.nmuxregs = ARRAY_SIZE(pcie2_muxreg),
	},
};

static struct spear_pingroup pcie2_pingroup = {
	.name = "pcie2_grp",
	.pins = pci_sata_pins,
	.npins = ARRAY_SIZE(pci_sata_pins),
	.modemuxs = pcie2_modemux,
	.nmodemuxs = ARRAY_SIZE(pcie2_modemux),
};

/* "pci" function: one group per PCIe controller instance */
static const char *const pci_grps[] = { "pcie0_grp", "pcie1_grp", "pcie2_grp" };
static struct spear_function pci_function = {
	.name = "pci",
	.groups = pci_grps,
	.ngroups = ARRAY_SIZE(pci_grps),
};
1867
/* pad multiplexing for sata0 device */
/* SATA_CFG_VAL(x) selects SATA (not PCIe) operation for controller x */
static struct spear_muxreg sata0_muxreg[] = {
	PCI_SATA_MUXREG,
	{
		.reg = PCIE_SATA_CFG,
		.mask = SATA_CFG_VAL(0),
		.val = SATA_CFG_VAL(0),
	},
};

static struct spear_modemux sata0_modemux[] = {
	{
		.muxregs = sata0_muxreg,
		.nmuxregs = ARRAY_SIZE(sata0_muxreg),
	},
};

static struct spear_pingroup sata0_pingroup = {
	.name = "sata0_grp",
	.pins = pci_sata_pins,		/* pads shared with PCIe */
	.npins = ARRAY_SIZE(pci_sata_pins),
	.modemuxs = sata0_modemux,
	.nmodemuxs = ARRAY_SIZE(sata0_modemux),
};

/* pad multiplexing for sata1 device */
static struct spear_muxreg sata1_muxreg[] = {
	PCI_SATA_MUXREG,
	{
		.reg = PCIE_SATA_CFG,
		.mask = SATA_CFG_VAL(1),
		.val = SATA_CFG_VAL(1),
	},
};

static struct spear_modemux sata1_modemux[] = {
	{
		.muxregs = sata1_muxreg,
		.nmuxregs = ARRAY_SIZE(sata1_muxreg),
	},
};

static struct spear_pingroup sata1_pingroup = {
	.name = "sata1_grp",
	.pins = pci_sata_pins,
	.npins = ARRAY_SIZE(pci_sata_pins),
	.modemuxs = sata1_modemux,
	.nmodemuxs = ARRAY_SIZE(sata1_modemux),
};

/* pad multiplexing for sata2 device */
static struct spear_muxreg sata2_muxreg[] = {
	PCI_SATA_MUXREG,
	{
		.reg = PCIE_SATA_CFG,
		.mask = SATA_CFG_VAL(2),
		.val = SATA_CFG_VAL(2),
	},
};

static struct spear_modemux sata2_modemux[] = {
	{
		.muxregs = sata2_muxreg,
		.nmuxregs = ARRAY_SIZE(sata2_muxreg),
	},
};

static struct spear_pingroup sata2_pingroup = {
	.name = "sata2_grp",
	.pins = pci_sata_pins,
	.npins = ARRAY_SIZE(pci_sata_pins),
	.modemuxs = sata2_modemux,
	.nmodemuxs = ARRAY_SIZE(sata2_modemux),
};

/* "sata" function: one group per SATA controller instance */
static const char *const sata_grps[] = { "sata0_grp", "sata1_grp", "sata2_grp"
};
static struct spear_function sata_function = {
	.name = "sata",
	.groups = sata_grps,
	.ngroups = ARRAY_SIZE(sata_grps),
};
1950
/* Pad multiplexing for ssp1_dis_kbd device */
/* Muxed with KBD: clearing these bits (val = 0) releases the pads for SSP1 */
static const unsigned ssp1_dis_kbd_pins[] = { 203, 204, 205, 206 };
static struct spear_muxreg ssp1_dis_kbd_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,
		.mask = PMX_KBD_ROWCOL25_MASK | PMX_KBD_COL1_MASK |
			PMX_KBD_COL0_MASK | PMX_NFIO8_15_MASK | PMX_NFCE1_MASK |
			PMX_NFCE2_MASK,
		.val = 0,
	},
};

static struct spear_modemux ssp1_dis_kbd_modemux[] = {
	{
		.muxregs = ssp1_dis_kbd_muxreg,
		.nmuxregs = ARRAY_SIZE(ssp1_dis_kbd_muxreg),
	},
};

static struct spear_pingroup ssp1_dis_kbd_pingroup = {
	.name = "ssp1_dis_kbd_grp",
	.pins = ssp1_dis_kbd_pins,
	.npins = ARRAY_SIZE(ssp1_dis_kbd_pins),
	.modemuxs = ssp1_dis_kbd_modemux,
	.nmodemuxs = ARRAY_SIZE(ssp1_dis_kbd_modemux),
};

/* Pad multiplexing for ssp1_dis_sd device */
/* Muxed with SD/MMC: clearing these bits releases the pads for SSP1 */
static const unsigned ssp1_dis_sd_pins[] = { 224, 226, 227, 228 };
static struct spear_muxreg ssp1_dis_sd_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_2,
		.mask = PMX_MCIADDR0ALE_MASK | PMX_MCIADDR2_MASK |
			PMX_MCICECF_MASK | PMX_MCICEXD_MASK,
		.val = 0,
	},
};

static struct spear_modemux ssp1_dis_sd_modemux[] = {
	{
		.muxregs = ssp1_dis_sd_muxreg,
		.nmuxregs = ARRAY_SIZE(ssp1_dis_sd_muxreg),
	},
};

static struct spear_pingroup ssp1_dis_sd_pingroup = {
	.name = "ssp1_dis_sd_grp",
	.pins = ssp1_dis_sd_pins,
	.npins = ARRAY_SIZE(ssp1_dis_sd_pins),
	.modemuxs = ssp1_dis_sd_modemux,
	.nmodemuxs = ARRAY_SIZE(ssp1_dis_sd_modemux),
};

/* "ssp1" function: pick either the disable-KBD or the disable-SD group */
static const char *const ssp1_grps[] = { "ssp1_dis_kbd_grp",
	"ssp1_dis_sd_grp" };
static struct spear_function ssp1_function = {
	.name = "ssp1",
	.groups = ssp1_grps,
	.ngroups = ARRAY_SIZE(ssp1_grps),
};
2011
/* Pad multiplexing for gpt64 device */
/* Clearing the MCI bits (val = 0) releases the SD/MMC pads for the timer */
static const unsigned gpt64_pins[] = { 230, 231, 232, 245 };
static struct spear_muxreg gpt64_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_2,
		.mask = PMX_MCICDCF1_MASK | PMX_MCICDCF2_MASK | PMX_MCICDXD_MASK
			| PMX_MCILEDS_MASK,
		.val = 0,
	},
};

static struct spear_modemux gpt64_modemux[] = {
	{
		.muxregs = gpt64_muxreg,
		.nmuxregs = ARRAY_SIZE(gpt64_muxreg),
	},
};

static struct spear_pingroup gpt64_pingroup = {
	.name = "gpt64_grp",
	.pins = gpt64_pins,
	.npins = ARRAY_SIZE(gpt64_pins),
	.modemuxs = gpt64_modemux,
	.nmodemuxs = ARRAY_SIZE(gpt64_modemux),
};

static const char *const gpt64_grps[] = { "gpt64_grp" };
static struct spear_function gpt64_function = {
	.name = "gpt64",
	.groups = gpt64_grps,
	.ngroups = ARRAY_SIZE(gpt64_grps),
};
2044
/* pingroups */
/* Master list of all pingroups, handed to the core via spear1310_machdata */
static struct spear_pingroup *spear1310_pingroups[] = {
	&i2c0_pingroup,
	&ssp0_pingroup,
	&i2s0_pingroup,
	&i2s1_pingroup,
	&clcd_pingroup,
	&clcd_high_res_pingroup,
	&arm_gpio_pingroup,
	&smi_2_chips_pingroup,
	&smi_4_chips_pingroup,
	&gmii_pingroup,
	&rgmii_pingroup,
	&smii_0_1_2_pingroup,
	&ras_mii_txclk_pingroup,
	&nand_8bit_pingroup,
	&nand_16bit_pingroup,
	&nand_4_chips_pingroup,
	&keyboard_6x6_pingroup,
	&keyboard_rowcol6_8_pingroup,
	&uart0_pingroup,
	&uart0_modem_pingroup,
	&gpt0_tmr0_pingroup,
	&gpt0_tmr1_pingroup,
	&gpt1_tmr0_pingroup,
	&gpt1_tmr1_pingroup,
	&sdhci_pingroup,
	&cf_pingroup,
	&xd_pingroup,
	&touch_xy_pingroup,
	&ssp0_cs0_pingroup,
	&ssp0_cs1_2_pingroup,
	&uart_1_dis_i2c_pingroup,
	&uart_1_dis_sd_pingroup,
	&uart_2_3_pingroup,
	&uart_4_pingroup,
	&uart_5_pingroup,
	&rs485_0_1_tdm_0_1_pingroup,
	&i2c_1_2_pingroup,
	&i2c3_dis_smi_clcd_pingroup,
	&i2c3_dis_sd_i2s0_pingroup,
	&i2c_4_5_dis_smi_pingroup,
	&i2c4_dis_sd_pingroup,
	&i2c5_dis_sd_pingroup,
	&i2c_6_7_dis_kbd_pingroup,
	&i2c6_dis_sd_pingroup,
	&i2c7_dis_sd_pingroup,
	&can0_dis_nor_pingroup,
	&can0_dis_sd_pingroup,
	&can1_dis_sd_pingroup,
	&can1_dis_kbd_pingroup,
	&pcie0_pingroup,
	&pcie1_pingroup,
	&pcie2_pingroup,
	&sata0_pingroup,
	&sata1_pingroup,
	&sata2_pingroup,
	&ssp1_dis_kbd_pingroup,
	&ssp1_dis_sd_pingroup,
	&gpt64_pingroup,
};
2106
/* functions */
/* Master list of all selectable functions, handed to the core via machdata */
static struct spear_function *spear1310_functions[] = {
	&i2c0_function,
	&ssp0_function,
	&i2s0_function,
	&i2s1_function,
	&clcd_function,
	&arm_gpio_function,
	&smi_function,
	&gmii_function,
	&rgmii_function,
	&smii_0_1_2_function,
	&ras_mii_txclk_function,
	&nand_function,
	&keyboard_function,
	&uart0_function,
	&gpt0_function,
	&gpt1_function,
	&sdhci_function,
	&cf_function,
	&xd_function,
	&touch_xy_function,
	&uart1_function,
	&uart2_3_function,
	&uart4_function,
	&uart5_function,
	&rs485_0_1_tdm_0_1_function,
	&i2c_1_2_function,
	/*
	 * NOTE(review): "i2c3_unction" looks like a typo for "i2c3_function".
	 * It presumably matches an equally misspelled definition earlier in
	 * this file (outside this hunk), so any rename must touch both the
	 * definition and this reference in one patch.
	 */
	&i2c3_unction,
	&i2c_4_5_function,
	&i2c_6_7_function,
	&can0_function,
	&can1_function,
	&pci_function,
	&sata_function,
	&ssp1_function,
	&gpt64_function,
};
2145
/*
 * Machine data consumed by the shared SPEAr pinctrl core: pin descriptors,
 * pingroups and functions. SPEAr1310 does not use the legacy "modes" scheme.
 */
static struct spear_pinctrl_machdata spear1310_machdata = {
	.pins = spear1310_pins,
	.npins = ARRAY_SIZE(spear1310_pins),
	.groups = spear1310_pingroups,
	.ngroups = ARRAY_SIZE(spear1310_pingroups),
	.functions = spear1310_functions,
	.nfunctions = ARRAY_SIZE(spear1310_functions),
	.modes_supported = false,
};
2155
/*
 * Device-tree match table.
 * NOTE(review): the table is tagged __devinitdata but is also exported via
 * MODULE_DEVICE_TABLE() below — verify it is not discarded when built as a
 * module with hotplug; __devinitconst/plain data may be more appropriate.
 */
static struct of_device_id spear1310_pinctrl_of_match[] __devinitdata = {
	{
		.compatible = "st,spear1310-pinmux",
	},
	{},
};
2162
2163static int __devinit spear1310_pinctrl_probe(struct platform_device *pdev)
2164{
2165 return spear_pinctrl_probe(pdev, &spear1310_machdata);
2166}
2167
2168static int __devexit spear1310_pinctrl_remove(struct platform_device *pdev)
2169{
2170 return spear_pinctrl_remove(pdev);
2171}
2172
/* Platform driver glue; matched against the DT table above */
static struct platform_driver spear1310_pinctrl_driver = {
	.driver = {
		.name = DRIVER_NAME,
		.owner = THIS_MODULE,
		.of_match_table = spear1310_pinctrl_of_match,
	},
	.probe = spear1310_pinctrl_probe,
	.remove = __devexit_p(spear1310_pinctrl_remove),
};
2182
/*
 * Registered at arch_initcall level (earlier than module/device initcalls)
 * so pin muxing is available before dependent device drivers probe.
 */
static int __init spear1310_pinctrl_init(void)
{
	return platform_driver_register(&spear1310_pinctrl_driver);
}
arch_initcall(spear1310_pinctrl_init);
2188
/* Module unload hook: unregister the platform driver */
static void __exit spear1310_pinctrl_exit(void)
{
	platform_driver_unregister(&spear1310_pinctrl_driver);
}
module_exit(spear1310_pinctrl_exit);

MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
MODULE_DESCRIPTION("ST Microelectronics SPEAr1310 pinctrl driver");
MODULE_LICENSE("GPL v2");
MODULE_DEVICE_TABLE(of, spear1310_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear1340.c b/drivers/pinctrl/spear/pinctrl-spear1340.c
new file mode 100644
index 000000000000..a8ab2a6f51bf
--- /dev/null
+++ b/drivers/pinctrl/spear/pinctrl-spear1340.c
@@ -0,0 +1,1989 @@
1/*
2 * Driver for the ST Microelectronics SPEAr1340 pinmux
3 *
4 * Copyright (C) 2012 ST Microelectronics
5 * Viresh Kumar <viresh.kumar@st.com>
6 *
7 * This file is licensed under the terms of the GNU General Public
8 * License version 2. This program is licensed "as is" without any
9 * warranty of any kind, whether express or implied.
10 */
11
12#include <linux/err.h>
13#include <linux/init.h>
14#include <linux/module.h>
15#include <linux/of_device.h>
16#include <linux/platform_device.h>
17#include "pinctrl-spear.h"
18
19#define DRIVER_NAME "spear1340-pinmux"
20
/* pins */
/* Pads 0-245 come from the shared SPEAr descriptors; 246-251 are 1340-only */
static const struct pinctrl_pin_desc spear1340_pins[] = {
	SPEAR_PIN_0_TO_101,
	SPEAR_PIN_102_TO_245,
	PINCTRL_PIN(246, "PLGPIO246"),
	PINCTRL_PIN(247, "PLGPIO247"),
	PINCTRL_PIN(248, "PLGPIO248"),
	PINCTRL_PIN(249, "PLGPIO249"),
	PINCTRL_PIN(250, "PLGPIO250"),
	PINCTRL_PIN(251, "PLGPIO251"),
};
32
/* In SPEAr1340 there are two levels of pad muxing */
/* - pads as gpio OR peripherals */
#define PAD_FUNCTION_EN_1			0x668
#define PAD_FUNCTION_EN_2			0x66C
#define PAD_FUNCTION_EN_3			0x670
#define PAD_FUNCTION_EN_4			0x674
#define PAD_FUNCTION_EN_5			0x690
#define PAD_FUNCTION_EN_6			0x694
#define PAD_FUNCTION_EN_7			0x698
#define PAD_FUNCTION_EN_8			0x69C

/* - If peripherals, then primary OR alternate peripheral */
#define PAD_SHARED_IP_EN_1			0x6A0
#define PAD_SHARED_IP_EN_2			0x6A4

/*
 * Macros for the first level of pmx - pads as gpio OR peripherals. There are 8
 * registers with 32 bits each for handling gpio pads, register 8 has only 26
 * relevant bits.
 */
/* masks for configuring pads as GPIOs */
#define PADS_AS_GPIO_REG0_MASK			0xFFFFFFFE
#define PADS_AS_GPIO_REGS_MASK			0xFFFFFFFF
#define PADS_AS_GPIO_REG7_MASK			0x07FFFFFF

/* masks for configuring pads as peripherals (REGn = PAD_FUNCTION_EN_n+1) */
#define FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK	0x00000FFE
#define UART0_ENH_AND_GPT_REG0_MASK		0x0003F000
#define PWM1_AND_KBD_COL5_REG0_MASK		0x00040000
#define I2C1_REG0_MASK				0x01080000
#define SPDIF_IN_REG0_MASK			0x00100000
#define PWM2_AND_GPT0_TMR0_CPT_REG0_MASK	0x00400000
#define PWM3_AND_GPT0_TMR1_CLK_REG0_MASK	0x00800000
#define PWM0_AND_SSP0_CS1_REG0_MASK		0x02000000
#define VIP_AND_CAM3_REG0_MASK			0xFC200000
#define VIP_AND_CAM3_REG1_MASK			0x0000000F
#define VIP_REG1_MASK				0x00001EF0
#define VIP_AND_CAM2_REG1_MASK			0x007FE100
#define VIP_AND_CAM1_REG1_MASK			0xFF800000
#define VIP_AND_CAM1_REG2_MASK			0x00000003
#define VIP_AND_CAM0_REG2_MASK			0x00001FFC
#define SMI_REG2_MASK				0x0021E000
#define SSP0_REG2_MASK				0x001E0000
#define TS_AND_SSP0_CS2_REG2_MASK		0x00400000
#define UART0_REG2_MASK				0x01800000
#define UART1_REG2_MASK				0x06000000
#define I2S_IN_REG2_MASK			0xF8000000
#define DEVS_GRP_AND_MIPHY_DBG_REG3_MASK	0x000001FE
#define I2S_OUT_REG3_MASK			0x000001EF
#define I2S_IN_REG3_MASK			0x00000010
#define GMAC_REG3_MASK				0xFFFFFE00
#define GMAC_REG4_MASK				0x0000001F
#define DEVS_GRP_AND_MIPHY_DBG_REG4_MASK	0x7FFFFF20
#define SSP0_CS3_REG4_MASK			0x00000020
#define I2C0_REG4_MASK				0x000000C0
#define CEC0_REG4_MASK				0x00000100
#define CEC1_REG4_MASK				0x00000200
#define SPDIF_OUT_REG4_MASK			0x00000400
#define CLCD_REG4_MASK				0x7FFFF800
#define CLCD_AND_ARM_TRACE_REG4_MASK		0x80000000
#define CLCD_AND_ARM_TRACE_REG5_MASK		0xFFFFFFFF
#define CLCD_AND_ARM_TRACE_REG6_MASK		0x00000001
#define FSMC_PNOR_AND_MCIF_REG6_MASK		0x073FFFFE
#define MCIF_REG6_MASK				0xF8C00000
#define MCIF_REG7_MASK				0x000043FF
#define FSMC_8BIT_REG7_MASK			0x07FFBC00
/* other registers */
#define PERIP_CFG					0x42C
	/* PERIP_CFG register masks */
	#define SSP_CS_CTL_HW			0
	#define SSP_CS_CTL_SW			1
	#define SSP_CS_CTL_MASK			1
	#define SSP_CS_CTL_SHIFT		21
	#define SSP_CS_VAL_MASK			1
	#define SSP_CS_VAL_SHIFT		20
	#define SSP_CS_SEL_CS0			0
	#define SSP_CS_SEL_CS1			1
	#define SSP_CS_SEL_CS2			2
	#define SSP_CS_SEL_MASK			3
	#define SSP_CS_SEL_SHIFT		18

	#define I2S_CHNL_2_0			(0)
	#define I2S_CHNL_3_1			(1)
	#define I2S_CHNL_5_1			(2)
	#define I2S_CHNL_7_1			(3)
	#define I2S_CHNL_PLAY_SHIFT		(4)
	#define I2S_CHNL_PLAY_MASK		(3 << 4)
	#define I2S_CHNL_REC_SHIFT		(6)
	#define I2S_CHNL_REC_MASK		(3 << 6)

	#define SPDIF_OUT_ENB_MASK		(1 << 2)
	#define SPDIF_OUT_ENB_SHIFT		2

	#define MCIF_SEL_SD			1
	#define MCIF_SEL_CF			2
	#define MCIF_SEL_XD			3
	#define MCIF_SEL_MASK			3
	#define MCIF_SEL_SHIFT			0

#define GMAC_CLK_CFG				0x248
	/* GMAC_CLK_CFG register masks */
	#define GMAC_PHY_IF_GMII_VAL		(0 << 3)
	#define GMAC_PHY_IF_RGMII_VAL		(1 << 3)
	#define GMAC_PHY_IF_SGMII_VAL		(2 << 3)
	#define GMAC_PHY_IF_RMII_VAL		(4 << 3)
	#define GMAC_PHY_IF_SEL_MASK		(7 << 3)
	#define GMAC_PHY_INPUT_ENB_VAL		0
	#define GMAC_PHY_SYNT_ENB_VAL		1
	#define GMAC_PHY_CLK_MASK		1
	#define GMAC_PHY_CLK_SHIFT		2
	#define GMAC_PHY_125M_PAD_VAL		0
	#define GMAC_PHY_PLL2_VAL		1
	#define GMAC_PHY_OSC3_VAL		2
	#define GMAC_PHY_INPUT_CLK_MASK		3
	#define GMAC_PHY_INPUT_CLK_SHIFT	0

#define PCIE_SATA_CFG				0x424
	/* PCIE_SATA_CFG register masks */
	#define PCIE_CFG_DEVICE_PRESENT		(1 << 11)
	#define PCIE_CFG_POWERUP_RESET		(1 << 10)
	#define PCIE_CFG_CORE_CLK_EN		(1 << 9)
	#define PCIE_CFG_AUX_CLK_EN		(1 << 8)
	#define SATA_CFG_TX_CLK_EN		(1 << 4)
	#define SATA_CFG_RX_CLK_EN		(1 << 3)
	#define SATA_CFG_POWERUP_RESET		(1 << 2)
	#define SATA_CFG_PM_CLK_EN		(1 << 1)
	#define PCIE_SATA_SEL_PCIE		(0)
	#define PCIE_SATA_SEL_SATA		(1)
	#define SATA_PCIE_CFG_MASK		0xF1F
	#define PCIE_CFG_VAL		(PCIE_SATA_SEL_PCIE | PCIE_CFG_AUX_CLK_EN | \
					PCIE_CFG_CORE_CLK_EN | PCIE_CFG_POWERUP_RESET |\
					PCIE_CFG_DEVICE_PRESENT)
	#define SATA_CFG_VAL		(PCIE_SATA_SEL_SATA | SATA_CFG_PM_CLK_EN | \
					SATA_CFG_POWERUP_RESET | SATA_CFG_RX_CLK_EN | \
					SATA_CFG_TX_CLK_EN)
168
/* Macros for the second level of pmx - pads as primary OR alternate peripheral */
/* Each bit in PAD_SHARED_IP_EN_1: write 1 = primary IP, write 0 = alternate */
/* Write 0 to enable FSMC_16_BIT */
#define KBD_ROW_COL_MASK			(1 << 0)

/* Write 0 to enable UART0_ENH */
#define GPT_MASK				(1 << 1) /* Only clk & cpt */

/* Write 0 to enable PWM1 */
#define KBD_COL5_MASK				(1 << 2)

/* Write 0 to enable PWM2 */
#define GPT0_TMR0_CPT_MASK			(1 << 3) /* Only clk & cpt */

/* Write 0 to enable PWM3 */
#define GPT0_TMR1_CLK_MASK			(1 << 4) /* Only clk & cpt */

/* Write 0 to enable PWM0 */
#define SSP0_CS1_MASK				(1 << 5)

/* Write 0 to enable VIP */
#define CAM3_MASK				(1 << 6)

/* Write 0 to enable VIP */
#define CAM2_MASK				(1 << 7)

/* Write 0 to enable VIP */
#define CAM1_MASK				(1 << 8)

/* Write 0 to enable VIP */
#define CAM0_MASK				(1 << 9)

/* Write 0 to enable TS */
#define SSP0_CS2_MASK				(1 << 10)

/* Write 0 to enable FSMC PNOR */
#define MCIF_MASK				(1 << 11)

/* Write 0 to enable CLCD */
#define ARM_TRACE_MASK				(1 << 12)

/* Write 0 to enable I2S, SSP0_CS2, CEC0, 1, SPDIF out, CLCD */
#define MIPHY_DBG_MASK				(1 << 13)
211
/*
 * Pad multiplexing for making all pads as gpio's. This is done to override the
 * values passed from bootloader and start from scratch.
 */
static const unsigned pads_as_gpio_pins[] = { 251 };
/* Clear every pad-function enable bit; note REG0's mask skips bit 0 */
static struct spear_muxreg pads_as_gpio_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,
		.mask = PADS_AS_GPIO_REG0_MASK,
		.val = 0x0,
	}, {
		.reg = PAD_FUNCTION_EN_2,
		.mask = PADS_AS_GPIO_REGS_MASK,
		.val = 0x0,
	}, {
		.reg = PAD_FUNCTION_EN_3,
		.mask = PADS_AS_GPIO_REGS_MASK,
		.val = 0x0,
	}, {
		.reg = PAD_FUNCTION_EN_4,
		.mask = PADS_AS_GPIO_REGS_MASK,
		.val = 0x0,
	}, {
		.reg = PAD_FUNCTION_EN_5,
		.mask = PADS_AS_GPIO_REGS_MASK,
		.val = 0x0,
	}, {
		.reg = PAD_FUNCTION_EN_6,
		.mask = PADS_AS_GPIO_REGS_MASK,
		.val = 0x0,
	}, {
		.reg = PAD_FUNCTION_EN_7,
		.mask = PADS_AS_GPIO_REGS_MASK,
		.val = 0x0,
	}, {
		.reg = PAD_FUNCTION_EN_8,
		.mask = PADS_AS_GPIO_REG7_MASK,	/* only 26 relevant bits */
		.val = 0x0,
	},
};

static struct spear_modemux pads_as_gpio_modemux[] = {
	{
		.muxregs = pads_as_gpio_muxreg,
		.nmuxregs = ARRAY_SIZE(pads_as_gpio_muxreg),
	},
};

static struct spear_pingroup pads_as_gpio_pingroup = {
	.name = "pads_as_gpio_grp",
	.pins = pads_as_gpio_pins,
	.npins = ARRAY_SIZE(pads_as_gpio_pins),
	.modemuxs = pads_as_gpio_modemux,
	.nmodemuxs = ARRAY_SIZE(pads_as_gpio_modemux),
};

static const char *const pads_as_gpio_grps[] = { "pads_as_gpio_grp" };
static struct spear_function pads_as_gpio_function = {
	.name = "pads_as_gpio",
	.groups = pads_as_gpio_grps,
	.ngroups = ARRAY_SIZE(pads_as_gpio_grps),
};
274
/* Pad multiplexing for fsmc_8bit device */
/* NB: pin 237 is deliberately absent from this list */
static const unsigned fsmc_8bit_pins[] = { 233, 234, 235, 236, 238, 239, 240,
	241, 242, 243, 244, 245, 246, 247, 248, 249 };
/* Setting the mask bits (val = mask) assigns the pads to the peripheral */
static struct spear_muxreg fsmc_8bit_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_8,
		.mask = FSMC_8BIT_REG7_MASK,
		.val = FSMC_8BIT_REG7_MASK,
	}
};

static struct spear_modemux fsmc_8bit_modemux[] = {
	{
		.muxregs = fsmc_8bit_muxreg,
		.nmuxregs = ARRAY_SIZE(fsmc_8bit_muxreg),
	},
};

static struct spear_pingroup fsmc_8bit_pingroup = {
	.name = "fsmc_8bit_grp",
	.pins = fsmc_8bit_pins,
	.npins = ARRAY_SIZE(fsmc_8bit_pins),
	.modemuxs = fsmc_8bit_modemux,
	.nmodemuxs = ARRAY_SIZE(fsmc_8bit_modemux),
};

/* Pad multiplexing for fsmc_16bit device */
/* Shared with keyboard rows/cols: KBD_ROW_COL bit = 0 selects FSMC_16_BIT */
static const unsigned fsmc_16bit_pins[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 };
static struct spear_muxreg fsmc_16bit_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = KBD_ROW_COL_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
		.val = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
	},
};

static struct spear_modemux fsmc_16bit_modemux[] = {
	{
		.muxregs = fsmc_16bit_muxreg,
		.nmuxregs = ARRAY_SIZE(fsmc_16bit_muxreg),
	},
};

static struct spear_pingroup fsmc_16bit_pingroup = {
	.name = "fsmc_16bit_grp",
	.pins = fsmc_16bit_pins,
	.npins = ARRAY_SIZE(fsmc_16bit_pins),
	.modemuxs = fsmc_16bit_modemux,
	.nmodemuxs = ARRAY_SIZE(fsmc_16bit_modemux),
};

/* pad multiplexing for fsmc_pnor device */
/* Shared with MCIF: MCIF bit = 0 selects FSMC PNOR */
static const unsigned fsmc_pnor_pins[] = { 192, 193, 194, 195, 196, 197, 198,
	199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212,
	215, 216, 217 };
static struct spear_muxreg fsmc_pnor_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = MCIF_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_7,
		.mask = FSMC_PNOR_AND_MCIF_REG6_MASK,
		.val = FSMC_PNOR_AND_MCIF_REG6_MASK,
	},
};

static struct spear_modemux fsmc_pnor_modemux[] = {
	{
		.muxregs = fsmc_pnor_muxreg,
		.nmuxregs = ARRAY_SIZE(fsmc_pnor_muxreg),
	},
};

static struct spear_pingroup fsmc_pnor_pingroup = {
	.name = "fsmc_pnor_grp",
	.pins = fsmc_pnor_pins,
	.npins = ARRAY_SIZE(fsmc_pnor_pins),
	.modemuxs = fsmc_pnor_modemux,
	.nmodemuxs = ARRAY_SIZE(fsmc_pnor_modemux),
};

/* "fsmc" function: 8-bit, 16-bit and parallel-NOR pad variants */
static const char *const fsmc_grps[] = { "fsmc_8bit_grp", "fsmc_16bit_grp",
	"fsmc_pnor_grp" };
static struct spear_function fsmc_function = {
	.name = "fsmc",
	.groups = fsmc_grps,
	.ngroups = ARRAY_SIZE(fsmc_grps),
};
368
/* pad multiplexing for keyboard rows-cols device */
/* Shared with FSMC 16-bit: KBD_ROW_COL bit = 1 selects the keyboard */
static const unsigned keyboard_row_col_pins[] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
	10 };
static struct spear_muxreg keyboard_row_col_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = KBD_ROW_COL_MASK,
		.val = KBD_ROW_COL_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
		.val = FSMC_16_BIT_AND_KBD_ROW_COL_REG0_MASK,
	},
};

static struct spear_modemux keyboard_row_col_modemux[] = {
	{
		.muxregs = keyboard_row_col_muxreg,
		.nmuxregs = ARRAY_SIZE(keyboard_row_col_muxreg),
	},
};

static struct spear_pingroup keyboard_row_col_pingroup = {
	.name = "keyboard_row_col_grp",
	.pins = keyboard_row_col_pins,
	.npins = ARRAY_SIZE(keyboard_row_col_pins),
	.modemuxs = keyboard_row_col_modemux,
	.nmodemuxs = ARRAY_SIZE(keyboard_row_col_modemux),
};

/* pad multiplexing for keyboard col5 device */
/* Shared with PWM1 (same pad 17): KBD_COL5 bit = 1 selects the keyboard */
static const unsigned keyboard_col5_pins[] = { 17 };
static struct spear_muxreg keyboard_col5_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = KBD_COL5_MASK,
		.val = KBD_COL5_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = PWM1_AND_KBD_COL5_REG0_MASK,
		.val = PWM1_AND_KBD_COL5_REG0_MASK,
	},
};

static struct spear_modemux keyboard_col5_modemux[] = {
	{
		.muxregs = keyboard_col5_muxreg,
		.nmuxregs = ARRAY_SIZE(keyboard_col5_muxreg),
	},
};

static struct spear_pingroup keyboard_col5_pingroup = {
	.name = "keyboard_col5_grp",
	.pins = keyboard_col5_pins,
	.npins = ARRAY_SIZE(keyboard_col5_pins),
	.modemuxs = keyboard_col5_modemux,
	.nmodemuxs = ARRAY_SIZE(keyboard_col5_modemux),
};

static const char *const keyboard_grps[] = { "keyboard_row_col_grp",
	"keyboard_col5_grp" };
static struct spear_function keyboard_function = {
	.name = "keyboard",
	.groups = keyboard_grps,
	.ngroups = ARRAY_SIZE(keyboard_grps),
};
435
/* pad multiplexing for spdif_in device */
static const unsigned spdif_in_pins[] = { 19 };
static struct spear_muxreg spdif_in_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,
		.mask = SPDIF_IN_REG0_MASK,
		.val = SPDIF_IN_REG0_MASK,
	},
};

static struct spear_modemux spdif_in_modemux[] = {
	{
		.muxregs = spdif_in_muxreg,
		.nmuxregs = ARRAY_SIZE(spdif_in_muxreg),
	},
};

static struct spear_pingroup spdif_in_pingroup = {
	.name = "spdif_in_grp",
	.pins = spdif_in_pins,
	.npins = ARRAY_SIZE(spdif_in_pins),
	.modemuxs = spdif_in_modemux,
	.nmodemuxs = ARRAY_SIZE(spdif_in_modemux),
};

static const char *const spdif_in_grps[] = { "spdif_in_grp" };
static struct spear_function spdif_in_function = {
	.name = "spdif_in",
	.groups = spdif_in_grps,
	.ngroups = ARRAY_SIZE(spdif_in_grps),
};

/* pad multiplexing for spdif_out device */
/* Besides the pad-function bit, the output enable in PERIP_CFG is also set */
static const unsigned spdif_out_pins[] = { 137 };
static struct spear_muxreg spdif_out_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_5,
		.mask = SPDIF_OUT_REG4_MASK,
		.val = SPDIF_OUT_REG4_MASK,
	}, {
		.reg = PERIP_CFG,
		.mask = SPDIF_OUT_ENB_MASK,
		.val = SPDIF_OUT_ENB_MASK,
	}
};

static struct spear_modemux spdif_out_modemux[] = {
	{
		.muxregs = spdif_out_muxreg,
		.nmuxregs = ARRAY_SIZE(spdif_out_muxreg),
	},
};

static struct spear_pingroup spdif_out_pingroup = {
	.name = "spdif_out_grp",
	.pins = spdif_out_pins,
	.npins = ARRAY_SIZE(spdif_out_pins),
	.modemuxs = spdif_out_modemux,
	.nmodemuxs = ARRAY_SIZE(spdif_out_modemux),
};

static const char *const spdif_out_grps[] = { "spdif_out_grp" };
static struct spear_function spdif_out_function = {
	.name = "spdif_out",
	.groups = spdif_out_grps,
	.ngroups = ARRAY_SIZE(spdif_out_grps),
};
503
/* pad multiplexing for gpt_0_1 device */
/* Claims the pads shared with UART0_ENH and PWM2/PWM3 for the timers */
static const unsigned gpt_0_1_pins[] = { 11, 12, 13, 14, 15, 16, 21, 22 };
static struct spear_muxreg gpt_0_1_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = GPT_MASK | GPT0_TMR0_CPT_MASK | GPT0_TMR1_CLK_MASK,
		.val = GPT_MASK | GPT0_TMR0_CPT_MASK | GPT0_TMR1_CLK_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = UART0_ENH_AND_GPT_REG0_MASK |
			PWM2_AND_GPT0_TMR0_CPT_REG0_MASK |
			PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
		.val = UART0_ENH_AND_GPT_REG0_MASK |
			PWM2_AND_GPT0_TMR0_CPT_REG0_MASK |
			PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
	},
};

static struct spear_modemux gpt_0_1_modemux[] = {
	{
		.muxregs = gpt_0_1_muxreg,
		.nmuxregs = ARRAY_SIZE(gpt_0_1_muxreg),
	},
};

static struct spear_pingroup gpt_0_1_pingroup = {
	.name = "gpt_0_1_grp",
	.pins = gpt_0_1_pins,
	.npins = ARRAY_SIZE(gpt_0_1_pins),
	.modemuxs = gpt_0_1_modemux,
	.nmodemuxs = ARRAY_SIZE(gpt_0_1_modemux),
};

static const char *const gpt_0_1_grps[] = { "gpt_0_1_grp" };
static struct spear_function gpt_0_1_function = {
	.name = "gpt_0_1",
	.groups = gpt_0_1_grps,
	.ngroups = ARRAY_SIZE(gpt_0_1_grps),
};
543
/* pad multiplexing for pwm0 device */
/* Shared with SSP0_CS1 (pad 24): shared-IP bit = 0 selects PWM0 */
static const unsigned pwm0_pins[] = { 24 };
static struct spear_muxreg pwm0_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = SSP0_CS1_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = PWM0_AND_SSP0_CS1_REG0_MASK,
		.val = PWM0_AND_SSP0_CS1_REG0_MASK,
	},
};

static struct spear_modemux pwm0_modemux[] = {
	{
		.muxregs = pwm0_muxreg,
		.nmuxregs = ARRAY_SIZE(pwm0_muxreg),
	},
};

static struct spear_pingroup pwm0_pingroup = {
	.name = "pwm0_grp",
	.pins = pwm0_pins,
	.npins = ARRAY_SIZE(pwm0_pins),
	.modemuxs = pwm0_modemux,
	.nmodemuxs = ARRAY_SIZE(pwm0_modemux),
};

/* pad multiplexing for pwm1 device */
/* Shared with keyboard col5 (pad 17): shared-IP bit = 0 selects PWM1 */
static const unsigned pwm1_pins[] = { 17 };
static struct spear_muxreg pwm1_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = KBD_COL5_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = PWM1_AND_KBD_COL5_REG0_MASK,
		.val = PWM1_AND_KBD_COL5_REG0_MASK,
	},
};

static struct spear_modemux pwm1_modemux[] = {
	{
		.muxregs = pwm1_muxreg,
		.nmuxregs = ARRAY_SIZE(pwm1_muxreg),
	},
};

static struct spear_pingroup pwm1_pingroup = {
	.name = "pwm1_grp",
	.pins = pwm1_pins,
	.npins = ARRAY_SIZE(pwm1_pins),
	.modemuxs = pwm1_modemux,
	.nmodemuxs = ARRAY_SIZE(pwm1_modemux),
};

/* pad multiplexing for pwm2 device */
/* Shared with GPT0 timer0 capture (pad 21): shared-IP bit = 0 selects PWM2 */
static const unsigned pwm2_pins[] = { 21 };
static struct spear_muxreg pwm2_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = GPT0_TMR0_CPT_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = PWM2_AND_GPT0_TMR0_CPT_REG0_MASK,
		.val = PWM2_AND_GPT0_TMR0_CPT_REG0_MASK,
	},
};

static struct spear_modemux pwm2_modemux[] = {
	{
		.muxregs = pwm2_muxreg,
		.nmuxregs = ARRAY_SIZE(pwm2_muxreg),
	},
};

static struct spear_pingroup pwm2_pingroup = {
	.name = "pwm2_grp",
	.pins = pwm2_pins,
	.npins = ARRAY_SIZE(pwm2_pins),
	.modemuxs = pwm2_modemux,
	.nmodemuxs = ARRAY_SIZE(pwm2_modemux),
};

/* pad multiplexing for pwm3 device */
/* Shared with GPT0 timer1 clock (pad 22): shared-IP bit = 0 selects PWM3 */
static const unsigned pwm3_pins[] = { 22 };
static struct spear_muxreg pwm3_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = GPT0_TMR1_CLK_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
		.val = PWM3_AND_GPT0_TMR1_CLK_REG0_MASK,
	},
};

static struct spear_modemux pwm3_modemux[] = {
	{
		.muxregs = pwm3_muxreg,
		.nmuxregs = ARRAY_SIZE(pwm3_muxreg),
	},
};

static struct spear_pingroup pwm3_pingroup = {
	.name = "pwm3_grp",
	.pins = pwm3_pins,
	.npins = ARRAY_SIZE(pwm3_pins),
	.modemuxs = pwm3_modemux,
	.nmodemuxs = ARRAY_SIZE(pwm3_modemux),
};

/* "pwm" function: one group per PWM channel */
static const char *const pwm_grps[] = { "pwm0_grp", "pwm1_grp", "pwm2_grp",
	"pwm3_grp" };
static struct spear_function pwm_function = {
	.name = "pwm",
	.groups = pwm_grps,
	.ngroups = ARRAY_SIZE(pwm_grps),
};
667
/* pad multiplexing for vip_mux device */
static const unsigned vip_mux_pins[] = { 35, 36, 37, 38, 40, 41, 42, 43 };
static struct spear_muxreg vip_mux_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_2,
		.mask = VIP_REG1_MASK,
		.val = VIP_REG1_MASK,
	},
};

static struct spear_modemux vip_mux_modemux[] = {
	{
		.muxregs = vip_mux_muxreg,
		.nmuxregs = ARRAY_SIZE(vip_mux_muxreg),
	},
};

static struct spear_pingroup vip_mux_pingroup = {
	.name = "vip_mux_grp",
	.pins = vip_mux_pins,
	.npins = ARRAY_SIZE(vip_mux_pins),
	.modemuxs = vip_mux_modemux,
	.nmodemuxs = ARRAY_SIZE(vip_mux_modemux),
};

/*
 * pad multiplexing for vip_mux_cam0 (disables cam0) device
 *
 * The vip_mux_cam0..3 groups below each take over the pads of one camera
 * interface: CAMn_MASK is cleared in PAD_SHARED_IP_EN_1 (the cam0..3 groups
 * further down set the same bit), and the pad-function enables shared
 * between vip and that camera are turned on.
 */
static const unsigned vip_mux_cam0_pins[] = { 65, 66, 67, 68, 69, 70, 71, 72,
	73, 74, 75 };
static struct spear_muxreg vip_mux_cam0_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = CAM0_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_3,
		.mask = VIP_AND_CAM0_REG2_MASK,
		.val = VIP_AND_CAM0_REG2_MASK,
	},
};

static struct spear_modemux vip_mux_cam0_modemux[] = {
	{
		.muxregs = vip_mux_cam0_muxreg,
		.nmuxregs = ARRAY_SIZE(vip_mux_cam0_muxreg),
	},
};

static struct spear_pingroup vip_mux_cam0_pingroup = {
	.name = "vip_mux_cam0_grp",
	.pins = vip_mux_cam0_pins,
	.npins = ARRAY_SIZE(vip_mux_cam0_pins),
	.modemuxs = vip_mux_cam0_modemux,
	.nmodemuxs = ARRAY_SIZE(vip_mux_cam0_modemux),
};

/* pad multiplexing for vip_mux_cam1 (disables cam1) device */
static const unsigned vip_mux_cam1_pins[] = { 54, 55, 56, 57, 58, 59, 60, 61,
	62, 63, 64 };
static struct spear_muxreg vip_mux_cam1_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = CAM1_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_2,
		.mask = VIP_AND_CAM1_REG1_MASK,
		.val = VIP_AND_CAM1_REG1_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_3,
		.mask = VIP_AND_CAM1_REG2_MASK,
		.val = VIP_AND_CAM1_REG2_MASK,
	},
};

static struct spear_modemux vip_mux_cam1_modemux[] = {
	{
		.muxregs = vip_mux_cam1_muxreg,
		.nmuxregs = ARRAY_SIZE(vip_mux_cam1_muxreg),
	},
};

static struct spear_pingroup vip_mux_cam1_pingroup = {
	.name = "vip_mux_cam1_grp",
	.pins = vip_mux_cam1_pins,
	.npins = ARRAY_SIZE(vip_mux_cam1_pins),
	.modemuxs = vip_mux_cam1_modemux,
	.nmodemuxs = ARRAY_SIZE(vip_mux_cam1_modemux),
};

/* pad multiplexing for vip_mux_cam2 (disables cam2) device */
static const unsigned vip_mux_cam2_pins[] = { 39, 44, 45, 46, 47, 48, 49, 50,
	51, 52, 53 };
static struct spear_muxreg vip_mux_cam2_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = CAM2_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_2,
		.mask = VIP_AND_CAM2_REG1_MASK,
		.val = VIP_AND_CAM2_REG1_MASK,
	},
};

static struct spear_modemux vip_mux_cam2_modemux[] = {
	{
		.muxregs = vip_mux_cam2_muxreg,
		.nmuxregs = ARRAY_SIZE(vip_mux_cam2_muxreg),
	},
};

static struct spear_pingroup vip_mux_cam2_pingroup = {
	.name = "vip_mux_cam2_grp",
	.pins = vip_mux_cam2_pins,
	.npins = ARRAY_SIZE(vip_mux_cam2_pins),
	.modemuxs = vip_mux_cam2_modemux,
	.nmodemuxs = ARRAY_SIZE(vip_mux_cam2_modemux),
};

/* pad multiplexing for vip_mux_cam3 (disables cam3) device */
static const unsigned vip_mux_cam3_pins[] = { 20, 25, 26, 27, 28, 29, 30, 31,
	32, 33, 34 };
static struct spear_muxreg vip_mux_cam3_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = CAM3_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = VIP_AND_CAM3_REG0_MASK,
		.val = VIP_AND_CAM3_REG0_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_2,
		.mask = VIP_AND_CAM3_REG1_MASK,
		.val = VIP_AND_CAM3_REG1_MASK,
	},
};

static struct spear_modemux vip_mux_cam3_modemux[] = {
	{
		.muxregs = vip_mux_cam3_muxreg,
		.nmuxregs = ARRAY_SIZE(vip_mux_cam3_muxreg),
	},
};

static struct spear_pingroup vip_mux_cam3_pingroup = {
	.name = "vip_mux_cam3_grp",
	.pins = vip_mux_cam3_pins,
	.npins = ARRAY_SIZE(vip_mux_cam3_pins),
	.modemuxs = vip_mux_cam3_modemux,
	.nmodemuxs = ARRAY_SIZE(vip_mux_cam3_modemux),
};
820
821static const char *const vip_grps[] = { "vip_mux_grp", "vip_mux_cam0_grp" ,
822 "vip_mux_cam1_grp" , "vip_mux_cam2_grp", "vip_mux_cam3_grp" };
823static struct spear_function vip_function = {
824 .name = "vip",
825 .groups = vip_grps,
826 .ngroups = ARRAY_SIZE(vip_grps),
827};
828
/*
 * pad multiplexing for cam0 device
 *
 * The cam0..3 groups reclaim the camera pads from vip: CAMn_MASK is SET in
 * PAD_SHARED_IP_EN_1 (the vip_mux_camN groups above clear the same bit),
 * then the pad-function enables shared with vip are turned on.
 */
static const unsigned cam0_pins[] = { 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75
};
static struct spear_muxreg cam0_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = CAM0_MASK,
		.val = CAM0_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_3,
		.mask = VIP_AND_CAM0_REG2_MASK,
		.val = VIP_AND_CAM0_REG2_MASK,
	},
};

static struct spear_modemux cam0_modemux[] = {
	{
		.muxregs = cam0_muxreg,
		.nmuxregs = ARRAY_SIZE(cam0_muxreg),
	},
};

static struct spear_pingroup cam0_pingroup = {
	.name = "cam0_grp",
	.pins = cam0_pins,
	.npins = ARRAY_SIZE(cam0_pins),
	.modemuxs = cam0_modemux,
	.nmodemuxs = ARRAY_SIZE(cam0_modemux),
};

static const char *const cam0_grps[] = { "cam0_grp" };
static struct spear_function cam0_function = {
	.name = "cam0",
	.groups = cam0_grps,
	.ngroups = ARRAY_SIZE(cam0_grps),
};

/* pad multiplexing for cam1 device */
static const unsigned cam1_pins[] = { 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64
};
static struct spear_muxreg cam1_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = CAM1_MASK,
		.val = CAM1_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_2,
		.mask = VIP_AND_CAM1_REG1_MASK,
		.val = VIP_AND_CAM1_REG1_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_3,
		.mask = VIP_AND_CAM1_REG2_MASK,
		.val = VIP_AND_CAM1_REG2_MASK,
	},
};

static struct spear_modemux cam1_modemux[] = {
	{
		.muxregs = cam1_muxreg,
		.nmuxregs = ARRAY_SIZE(cam1_muxreg),
	},
};

static struct spear_pingroup cam1_pingroup = {
	.name = "cam1_grp",
	.pins = cam1_pins,
	.npins = ARRAY_SIZE(cam1_pins),
	.modemuxs = cam1_modemux,
	.nmodemuxs = ARRAY_SIZE(cam1_modemux),
};

static const char *const cam1_grps[] = { "cam1_grp" };
static struct spear_function cam1_function = {
	.name = "cam1",
	.groups = cam1_grps,
	.ngroups = ARRAY_SIZE(cam1_grps),
};

/* pad multiplexing for cam2 device */
static const unsigned cam2_pins[] = { 39, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53
};
static struct spear_muxreg cam2_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = CAM2_MASK,
		.val = CAM2_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_2,
		.mask = VIP_AND_CAM2_REG1_MASK,
		.val = VIP_AND_CAM2_REG1_MASK,
	},
};

static struct spear_modemux cam2_modemux[] = {
	{
		.muxregs = cam2_muxreg,
		.nmuxregs = ARRAY_SIZE(cam2_muxreg),
	},
};

static struct spear_pingroup cam2_pingroup = {
	.name = "cam2_grp",
	.pins = cam2_pins,
	.npins = ARRAY_SIZE(cam2_pins),
	.modemuxs = cam2_modemux,
	.nmodemuxs = ARRAY_SIZE(cam2_modemux),
};

static const char *const cam2_grps[] = { "cam2_grp" };
static struct spear_function cam2_function = {
	.name = "cam2",
	.groups = cam2_grps,
	.ngroups = ARRAY_SIZE(cam2_grps),
};

/* pad multiplexing for cam3 device */
static const unsigned cam3_pins[] = { 20, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34
};
static struct spear_muxreg cam3_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = CAM3_MASK,
		.val = CAM3_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = VIP_AND_CAM3_REG0_MASK,
		.val = VIP_AND_CAM3_REG0_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_2,
		.mask = VIP_AND_CAM3_REG1_MASK,
		.val = VIP_AND_CAM3_REG1_MASK,
	},
};

static struct spear_modemux cam3_modemux[] = {
	{
		.muxregs = cam3_muxreg,
		.nmuxregs = ARRAY_SIZE(cam3_muxreg),
	},
};

static struct spear_pingroup cam3_pingroup = {
	.name = "cam3_grp",
	.pins = cam3_pins,
	.npins = ARRAY_SIZE(cam3_pins),
	.modemuxs = cam3_modemux,
	.nmodemuxs = ARRAY_SIZE(cam3_modemux),
};

static const char *const cam3_grps[] = { "cam3_grp" };
static struct spear_function cam3_function = {
	.name = "cam3",
	.groups = cam3_grps,
	.ngroups = ARRAY_SIZE(cam3_grps),
};
984
/* pad multiplexing for smi device (single pad-function enable, no sharing) */
static const unsigned smi_pins[] = { 76, 77, 78, 79, 84 };
static struct spear_muxreg smi_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_3,
		.mask = SMI_REG2_MASK,
		.val = SMI_REG2_MASK,
	},
};

static struct spear_modemux smi_modemux[] = {
	{
		.muxregs = smi_muxreg,
		.nmuxregs = ARRAY_SIZE(smi_muxreg),
	},
};

static struct spear_pingroup smi_pingroup = {
	.name = "smi_grp",
	.pins = smi_pins,
	.npins = ARRAY_SIZE(smi_pins),
	.modemuxs = smi_modemux,
	.nmodemuxs = ARRAY_SIZE(smi_modemux),
};

static const char *const smi_grps[] = { "smi_grp" };
static struct spear_function smi_function = {
	.name = "smi",
	.groups = smi_grps,
	.ngroups = ARRAY_SIZE(smi_grps),
};
1016
/* pad multiplexing for ssp0 device (core SPI signals) */
static const unsigned ssp0_pins[] = { 80, 81, 82, 83 };
static struct spear_muxreg ssp0_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_3,
		.mask = SSP0_REG2_MASK,
		.val = SSP0_REG2_MASK,
	},
};

static struct spear_modemux ssp0_modemux[] = {
	{
		.muxregs = ssp0_muxreg,
		.nmuxregs = ARRAY_SIZE(ssp0_muxreg),
	},
};

static struct spear_pingroup ssp0_pingroup = {
	.name = "ssp0_grp",
	.pins = ssp0_pins,
	.npins = ARRAY_SIZE(ssp0_pins),
	.modemuxs = ssp0_modemux,
	.nmodemuxs = ARRAY_SIZE(ssp0_modemux),
};

/*
 * pad multiplexing for ssp0_cs1 device
 *
 * Pin 24 is shared with pwm0 (pwm0_muxreg above clears the same shared-IP
 * bit); setting SSP0_CS1_MASK claims the pad for the chip-select.
 */
static const unsigned ssp0_cs1_pins[] = { 24 };
static struct spear_muxreg ssp0_cs1_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = SSP0_CS1_MASK,
		.val = SSP0_CS1_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = PWM0_AND_SSP0_CS1_REG0_MASK,
		.val = PWM0_AND_SSP0_CS1_REG0_MASK,
	},
};

static struct spear_modemux ssp0_cs1_modemux[] = {
	{
		.muxregs = ssp0_cs1_muxreg,
		.nmuxregs = ARRAY_SIZE(ssp0_cs1_muxreg),
	},
};

static struct spear_pingroup ssp0_cs1_pingroup = {
	.name = "ssp0_cs1_grp",
	.pins = ssp0_cs1_pins,
	.npins = ARRAY_SIZE(ssp0_cs1_pins),
	.modemuxs = ssp0_cs1_modemux,
	.nmodemuxs = ARRAY_SIZE(ssp0_cs1_modemux),
};

/* pad multiplexing for ssp0_cs2 device (pin 85 shared via SSP0_CS2_MASK) */
static const unsigned ssp0_cs2_pins[] = { 85 };
static struct spear_muxreg ssp0_cs2_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = SSP0_CS2_MASK,
		.val = SSP0_CS2_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_3,
		.mask = TS_AND_SSP0_CS2_REG2_MASK,
		.val = TS_AND_SSP0_CS2_REG2_MASK,
	},
};

static struct spear_modemux ssp0_cs2_modemux[] = {
	{
		.muxregs = ssp0_cs2_muxreg,
		.nmuxregs = ARRAY_SIZE(ssp0_cs2_muxreg),
	},
};

static struct spear_pingroup ssp0_cs2_pingroup = {
	.name = "ssp0_cs2_grp",
	.pins = ssp0_cs2_pins,
	.npins = ARRAY_SIZE(ssp0_cs2_pins),
	.modemuxs = ssp0_cs2_modemux,
	.nmodemuxs = ARRAY_SIZE(ssp0_cs2_modemux),
};

/* pad multiplexing for ssp0_cs3 device (plain pad-function enable) */
static const unsigned ssp0_cs3_pins[] = { 132 };
static struct spear_muxreg ssp0_cs3_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_5,
		.mask = SSP0_CS3_REG4_MASK,
		.val = SSP0_CS3_REG4_MASK,
	},
};

static struct spear_modemux ssp0_cs3_modemux[] = {
	{
		.muxregs = ssp0_cs3_muxreg,
		.nmuxregs = ARRAY_SIZE(ssp0_cs3_muxreg),
	},
};

static struct spear_pingroup ssp0_cs3_pingroup = {
	.name = "ssp0_cs3_grp",
	.pins = ssp0_cs3_pins,
	.npins = ARRAY_SIZE(ssp0_cs3_pins),
	.modemuxs = ssp0_cs3_modemux,
	.nmodemuxs = ARRAY_SIZE(ssp0_cs3_modemux),
};

/* pinmux function: ssp0 core signals plus the three optional chip-selects */
static const char *const ssp0_grps[] = { "ssp0_grp", "ssp0_cs1_grp",
	"ssp0_cs2_grp", "ssp0_cs3_grp" };
static struct spear_function ssp0_function = {
	.name = "ssp0",
	.groups = ssp0_grps,
	.ngroups = ARRAY_SIZE(ssp0_grps),
};
1132
/* pad multiplexing for uart0 device (basic rx/tx pair) */
static const unsigned uart0_pins[] = { 86, 87 };
static struct spear_muxreg uart0_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_3,
		.mask = UART0_REG2_MASK,
		.val = UART0_REG2_MASK,
	},
};

static struct spear_modemux uart0_modemux[] = {
	{
		.muxregs = uart0_muxreg,
		.nmuxregs = ARRAY_SIZE(uart0_muxreg),
	},
};

static struct spear_pingroup uart0_pingroup = {
	.name = "uart0_grp",
	.pins = uart0_pins,
	.npins = ARRAY_SIZE(uart0_pins),
	.modemuxs = uart0_modemux,
	.nmodemuxs = ARRAY_SIZE(uart0_modemux),
};

/*
 * pad multiplexing for uart0_enh device (enhanced/modem signals)
 *
 * These pads are shared with the gpt block: GPT_MASK is cleared in the
 * shared-IP register to hand pins 11-16 to the uart.
 */
static const unsigned uart0_enh_pins[] = { 11, 12, 13, 14, 15, 16 };
static struct spear_muxreg uart0_enh_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = GPT_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_1,
		.mask = UART0_ENH_AND_GPT_REG0_MASK,
		.val = UART0_ENH_AND_GPT_REG0_MASK,
	},
};

static struct spear_modemux uart0_enh_modemux[] = {
	{
		.muxregs = uart0_enh_muxreg,
		.nmuxregs = ARRAY_SIZE(uart0_enh_muxreg),
	},
};

static struct spear_pingroup uart0_enh_pingroup = {
	.name = "uart0_enh_grp",
	.pins = uart0_enh_pins,
	.npins = ARRAY_SIZE(uart0_enh_pins),
	.modemuxs = uart0_enh_modemux,
	.nmodemuxs = ARRAY_SIZE(uart0_enh_modemux),
};

static const char *const uart0_grps[] = { "uart0_grp", "uart0_enh_grp" };
static struct spear_function uart0_function = {
	.name = "uart0",
	.groups = uart0_grps,
	.ngroups = ARRAY_SIZE(uart0_grps),
};
1193
/* pad multiplexing for uart1 device (rx/tx pair, plain pad-function enable) */
static const unsigned uart1_pins[] = { 88, 89 };
static struct spear_muxreg uart1_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_3,
		.mask = UART1_REG2_MASK,
		.val = UART1_REG2_MASK,
	},
};

static struct spear_modemux uart1_modemux[] = {
	{
		.muxregs = uart1_muxreg,
		.nmuxregs = ARRAY_SIZE(uart1_muxreg),
	},
};

static struct spear_pingroup uart1_pingroup = {
	.name = "uart1_grp",
	.pins = uart1_pins,
	.npins = ARRAY_SIZE(uart1_pins),
	.modemuxs = uart1_modemux,
	.nmodemuxs = ARRAY_SIZE(uart1_modemux),
};

static const char *const uart1_grps[] = { "uart1_grp" };
static struct spear_function uart1_function = {
	.name = "uart1",
	.groups = uart1_grps,
	.ngroups = ARRAY_SIZE(uart1_grps),
};
1225
/* pad multiplexing for i2s_in device (enables span two pad-function regs) */
static const unsigned i2s_in_pins[] = { 90, 91, 92, 93, 94, 99 };
static struct spear_muxreg i2s_in_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_3,
		.mask = I2S_IN_REG2_MASK,
		.val = I2S_IN_REG2_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_4,
		.mask = I2S_IN_REG3_MASK,
		.val = I2S_IN_REG3_MASK,
	},
};

static struct spear_modemux i2s_in_modemux[] = {
	{
		.muxregs = i2s_in_muxreg,
		.nmuxregs = ARRAY_SIZE(i2s_in_muxreg),
	},
};

static struct spear_pingroup i2s_in_pingroup = {
	.name = "i2s_in_grp",
	.pins = i2s_in_pins,
	.npins = ARRAY_SIZE(i2s_in_pins),
	.modemuxs = i2s_in_modemux,
	.nmodemuxs = ARRAY_SIZE(i2s_in_modemux),
};

/* pad multiplexing for i2s_out device */
static const unsigned i2s_out_pins[] = { 95, 96, 97, 98, 100, 101, 102, 103 };
static struct spear_muxreg i2s_out_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_4,
		.mask = I2S_OUT_REG3_MASK,
		.val = I2S_OUT_REG3_MASK,
	},
};

static struct spear_modemux i2s_out_modemux[] = {
	{
		.muxregs = i2s_out_muxreg,
		.nmuxregs = ARRAY_SIZE(i2s_out_muxreg),
	},
};

static struct spear_pingroup i2s_out_pingroup = {
	.name = "i2s_out_grp",
	.pins = i2s_out_pins,
	.npins = ARRAY_SIZE(i2s_out_pins),
	.modemuxs = i2s_out_modemux,
	.nmodemuxs = ARRAY_SIZE(i2s_out_modemux),
};

/* pinmux function combining the i2s input and output groups */
static const char *const i2s_grps[] = { "i2s_in_grp", "i2s_out_grp" };
static struct spear_function i2s_function = {
	.name = "i2s",
	.groups = i2s_grps,
	.ngroups = ARRAY_SIZE(i2s_grps),
};
1286
/* pad multiplexing for gmac device */
static const unsigned gmac_pins[] = { 104, 105, 106, 107, 108, 109, 110, 111,
	112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125,
	126, 127, 128, 129, 130, 131 };
/*
 * Pad-function enables common to every gmac PHY interface mode; each mode
 * group below appends one extra write to GMAC_CLK_CFG selecting the
 * interface type (gmii/rgmii/rmii/sgmii).
 */
#define GMAC_MUXREG \
	{ \
		.reg = PAD_FUNCTION_EN_4, \
		.mask = GMAC_REG3_MASK, \
		.val = GMAC_REG3_MASK, \
	}, { \
		.reg = PAD_FUNCTION_EN_5, \
		.mask = GMAC_REG4_MASK, \
		.val = GMAC_REG4_MASK, \
	}

/* pad multiplexing for gmii device */
static struct spear_muxreg gmii_muxreg[] = {
	GMAC_MUXREG,
	{
		.reg = GMAC_CLK_CFG,
		.mask = GMAC_PHY_IF_SEL_MASK,
		.val = GMAC_PHY_IF_GMII_VAL,
	},
};

static struct spear_modemux gmii_modemux[] = {
	{
		.muxregs = gmii_muxreg,
		.nmuxregs = ARRAY_SIZE(gmii_muxreg),
	},
};

static struct spear_pingroup gmii_pingroup = {
	.name = "gmii_grp",
	.pins = gmac_pins,
	.npins = ARRAY_SIZE(gmac_pins),
	.modemuxs = gmii_modemux,
	.nmodemuxs = ARRAY_SIZE(gmii_modemux),
};

/* pad multiplexing for rgmii device */
static struct spear_muxreg rgmii_muxreg[] = {
	GMAC_MUXREG,
	{
		.reg = GMAC_CLK_CFG,
		.mask = GMAC_PHY_IF_SEL_MASK,
		.val = GMAC_PHY_IF_RGMII_VAL,
	},
};

static struct spear_modemux rgmii_modemux[] = {
	{
		.muxregs = rgmii_muxreg,
		.nmuxregs = ARRAY_SIZE(rgmii_muxreg),
	},
};

static struct spear_pingroup rgmii_pingroup = {
	.name = "rgmii_grp",
	.pins = gmac_pins,
	.npins = ARRAY_SIZE(gmac_pins),
	.modemuxs = rgmii_modemux,
	.nmodemuxs = ARRAY_SIZE(rgmii_modemux),
};

/* pad multiplexing for rmii device */
static struct spear_muxreg rmii_muxreg[] = {
	GMAC_MUXREG,
	{
		.reg = GMAC_CLK_CFG,
		.mask = GMAC_PHY_IF_SEL_MASK,
		.val = GMAC_PHY_IF_RMII_VAL,
	},
};

static struct spear_modemux rmii_modemux[] = {
	{
		.muxregs = rmii_muxreg,
		.nmuxregs = ARRAY_SIZE(rmii_muxreg),
	},
};

static struct spear_pingroup rmii_pingroup = {
	.name = "rmii_grp",
	.pins = gmac_pins,
	.npins = ARRAY_SIZE(gmac_pins),
	.modemuxs = rmii_modemux,
	.nmodemuxs = ARRAY_SIZE(rmii_modemux),
};

/* pad multiplexing for sgmii device */
static struct spear_muxreg sgmii_muxreg[] = {
	GMAC_MUXREG,
	{
		.reg = GMAC_CLK_CFG,
		.mask = GMAC_PHY_IF_SEL_MASK,
		.val = GMAC_PHY_IF_SGMII_VAL,
	},
};

static struct spear_modemux sgmii_modemux[] = {
	{
		.muxregs = sgmii_muxreg,
		.nmuxregs = ARRAY_SIZE(sgmii_muxreg),
	},
};

static struct spear_pingroup sgmii_pingroup = {
	.name = "sgmii_grp",
	.pins = gmac_pins,
	.npins = ARRAY_SIZE(gmac_pins),
	.modemuxs = sgmii_modemux,
	.nmodemuxs = ARRAY_SIZE(sgmii_modemux),
};

/* all four groups share gmac_pins; they differ only in PHY interface mode */
static const char *const gmac_grps[] = { "gmii_grp", "rgmii_grp", "rmii_grp",
	"sgmii_grp" };
static struct spear_function gmac_function = {
	.name = "gmac",
	.groups = gmac_grps,
	.ngroups = ARRAY_SIZE(gmac_grps),
};
1409
/* pad multiplexing for i2c0 device (plain pad-function enable) */
static const unsigned i2c0_pins[] = { 133, 134 };
static struct spear_muxreg i2c0_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_5,
		.mask = I2C0_REG4_MASK,
		.val = I2C0_REG4_MASK,
	},
};

static struct spear_modemux i2c0_modemux[] = {
	{
		.muxregs = i2c0_muxreg,
		.nmuxregs = ARRAY_SIZE(i2c0_muxreg),
	},
};

static struct spear_pingroup i2c0_pingroup = {
	.name = "i2c0_grp",
	.pins = i2c0_pins,
	.npins = ARRAY_SIZE(i2c0_pins),
	.modemuxs = i2c0_modemux,
	.nmodemuxs = ARRAY_SIZE(i2c0_modemux),
};

static const char *const i2c0_grps[] = { "i2c0_grp" };
static struct spear_function i2c0_function = {
	.name = "i2c0",
	.groups = i2c0_grps,
	.ngroups = ARRAY_SIZE(i2c0_grps),
};
1441
/* pad multiplexing for i2c1 device (plain pad-function enable) */
static const unsigned i2c1_pins[] = { 18, 23 };
static struct spear_muxreg i2c1_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_1,
		.mask = I2C1_REG0_MASK,
		.val = I2C1_REG0_MASK,
	},
};

static struct spear_modemux i2c1_modemux[] = {
	{
		.muxregs = i2c1_muxreg,
		.nmuxregs = ARRAY_SIZE(i2c1_muxreg),
	},
};

static struct spear_pingroup i2c1_pingroup = {
	.name = "i2c1_grp",
	.pins = i2c1_pins,
	.npins = ARRAY_SIZE(i2c1_pins),
	.modemuxs = i2c1_modemux,
	.nmodemuxs = ARRAY_SIZE(i2c1_modemux),
};

static const char *const i2c1_grps[] = { "i2c1_grp" };
static struct spear_function i2c1_function = {
	.name = "i2c1",
	.groups = i2c1_grps,
	.ngroups = ARRAY_SIZE(i2c1_grps),
};
1473
/* pad multiplexing for cec0 device (single pin, plain pad-function enable) */
static const unsigned cec0_pins[] = { 135 };
static struct spear_muxreg cec0_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_5,
		.mask = CEC0_REG4_MASK,
		.val = CEC0_REG4_MASK,
	},
};

static struct spear_modemux cec0_modemux[] = {
	{
		.muxregs = cec0_muxreg,
		.nmuxregs = ARRAY_SIZE(cec0_muxreg),
	},
};

static struct spear_pingroup cec0_pingroup = {
	.name = "cec0_grp",
	.pins = cec0_pins,
	.npins = ARRAY_SIZE(cec0_pins),
	.modemuxs = cec0_modemux,
	.nmodemuxs = ARRAY_SIZE(cec0_modemux),
};

static const char *const cec0_grps[] = { "cec0_grp" };
static struct spear_function cec0_function = {
	.name = "cec0",
	.groups = cec0_grps,
	.ngroups = ARRAY_SIZE(cec0_grps),
};

/* pad multiplexing for cec1 device (mirrors cec0 on the next pin) */
static const unsigned cec1_pins[] = { 136 };
static struct spear_muxreg cec1_muxreg[] = {
	{
		.reg = PAD_FUNCTION_EN_5,
		.mask = CEC1_REG4_MASK,
		.val = CEC1_REG4_MASK,
	},
};

static struct spear_modemux cec1_modemux[] = {
	{
		.muxregs = cec1_muxreg,
		.nmuxregs = ARRAY_SIZE(cec1_muxreg),
	},
};

static struct spear_pingroup cec1_pingroup = {
	.name = "cec1_grp",
	.pins = cec1_pins,
	.npins = ARRAY_SIZE(cec1_pins),
	.modemuxs = cec1_modemux,
	.nmodemuxs = ARRAY_SIZE(cec1_modemux),
};

static const char *const cec1_grps[] = { "cec1_grp" };
static struct spear_function cec1_function = {
	.name = "cec1",
	.groups = cec1_grps,
	.ngroups = ARRAY_SIZE(cec1_grps),
};
1537
/* pad multiplexing for mcif devices */
static const unsigned mcif_pins[] = { 193, 194, 195, 196, 197, 198, 199, 200,
	201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213, 214,
	215, 216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228,
	229, 230, 231, 232, 237 };
/*
 * Register writes common to all three mcif clients (sdhci/cf/xd); each
 * client group below adds one PERIP_CFG write choosing which controller
 * owns the interface.
 */
#define MCIF_MUXREG \
	{ \
		.reg = PAD_SHARED_IP_EN_1, \
		.mask = MCIF_MASK, \
		.val = MCIF_MASK, \
	}, { \
		.reg = PAD_FUNCTION_EN_7, \
		.mask = FSMC_PNOR_AND_MCIF_REG6_MASK | MCIF_REG6_MASK, \
		.val = FSMC_PNOR_AND_MCIF_REG6_MASK | MCIF_REG6_MASK, \
	}, { \
		.reg = PAD_FUNCTION_EN_8, \
		.mask = MCIF_REG7_MASK, \
		.val = MCIF_REG7_MASK, \
	}

/* Pad multiplexing for sdhci device */
static struct spear_muxreg sdhci_muxreg[] = {
	MCIF_MUXREG,
	{
		.reg = PERIP_CFG,
		.mask = MCIF_SEL_MASK,
		.val = MCIF_SEL_SD,
	},
};

static struct spear_modemux sdhci_modemux[] = {
	{
		.muxregs = sdhci_muxreg,
		.nmuxregs = ARRAY_SIZE(sdhci_muxreg),
	},
};

static struct spear_pingroup sdhci_pingroup = {
	.name = "sdhci_grp",
	.pins = mcif_pins,
	.npins = ARRAY_SIZE(mcif_pins),
	.modemuxs = sdhci_modemux,
	.nmodemuxs = ARRAY_SIZE(sdhci_modemux),
};

static const char *const sdhci_grps[] = { "sdhci_grp" };
static struct spear_function sdhci_function = {
	.name = "sdhci",
	.groups = sdhci_grps,
	.ngroups = ARRAY_SIZE(sdhci_grps),
};

/* Pad multiplexing for cf device */
static struct spear_muxreg cf_muxreg[] = {
	MCIF_MUXREG,
	{
		.reg = PERIP_CFG,
		.mask = MCIF_SEL_MASK,
		.val = MCIF_SEL_CF,
	},
};

static struct spear_modemux cf_modemux[] = {
	{
		.muxregs = cf_muxreg,
		.nmuxregs = ARRAY_SIZE(cf_muxreg),
	},
};

static struct spear_pingroup cf_pingroup = {
	.name = "cf_grp",
	.pins = mcif_pins,
	.npins = ARRAY_SIZE(mcif_pins),
	.modemuxs = cf_modemux,
	.nmodemuxs = ARRAY_SIZE(cf_modemux),
};

static const char *const cf_grps[] = { "cf_grp" };
static struct spear_function cf_function = {
	.name = "cf",
	.groups = cf_grps,
	.ngroups = ARRAY_SIZE(cf_grps),
};

/* Pad multiplexing for xd device */
static struct spear_muxreg xd_muxreg[] = {
	MCIF_MUXREG,
	{
		.reg = PERIP_CFG,
		.mask = MCIF_SEL_MASK,
		.val = MCIF_SEL_XD,
	},
};

static struct spear_modemux xd_modemux[] = {
	{
		.muxregs = xd_muxreg,
		.nmuxregs = ARRAY_SIZE(xd_muxreg),
	},
};

static struct spear_pingroup xd_pingroup = {
	.name = "xd_grp",
	.pins = mcif_pins,
	.npins = ARRAY_SIZE(mcif_pins),
	.modemuxs = xd_modemux,
	.nmodemuxs = ARRAY_SIZE(xd_modemux),
};

static const char *const xd_grps[] = { "xd_grp" };
static struct spear_function xd_function = {
	.name = "xd",
	.groups = xd_grps,
	.ngroups = ARRAY_SIZE(xd_grps),
};
1653
/*
 * pad multiplexing for clcd device
 *
 * The clcd pads overlap both arm_trace and miphy_dbg (see the groups below,
 * which set the bits cleared here), so both shared-IP selects are cleared
 * before enabling the clcd pad functions across three enable registers.
 */
static const unsigned clcd_pins[] = { 138, 139, 140, 141, 142, 143, 144, 145,
	146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
	160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173,
	174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187,
	188, 189, 190, 191 };
static struct spear_muxreg clcd_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = ARM_TRACE_MASK | MIPHY_DBG_MASK,
		.val = 0,
	}, {
		.reg = PAD_FUNCTION_EN_5,
		.mask = CLCD_REG4_MASK | CLCD_AND_ARM_TRACE_REG4_MASK,
		.val = CLCD_REG4_MASK | CLCD_AND_ARM_TRACE_REG4_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_6,
		.mask = CLCD_AND_ARM_TRACE_REG5_MASK,
		.val = CLCD_AND_ARM_TRACE_REG5_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_7,
		.mask = CLCD_AND_ARM_TRACE_REG6_MASK,
		.val = CLCD_AND_ARM_TRACE_REG6_MASK,
	},
};

static struct spear_modemux clcd_modemux[] = {
	{
		.muxregs = clcd_muxreg,
		.nmuxregs = ARRAY_SIZE(clcd_muxreg),
	},
};

static struct spear_pingroup clcd_pingroup = {
	.name = "clcd_grp",
	.pins = clcd_pins,
	.npins = ARRAY_SIZE(clcd_pins),
	.modemuxs = clcd_modemux,
	.nmodemuxs = ARRAY_SIZE(clcd_modemux),
};

static const char *const clcd_grps[] = { "clcd_grp" };
static struct spear_function clcd_function = {
	.name = "clcd",
	.groups = clcd_grps,
	.ngroups = ARRAY_SIZE(clcd_grps),
};
1701
/*
 * pad multiplexing for arm_trace device
 *
 * Sets ARM_TRACE_MASK to claim the pads that clcd (above) releases, then
 * enables the pad functions shared with clcd across three enable registers.
 */
static const unsigned arm_trace_pins[] = { 158, 159, 160, 161, 162, 163, 164,
	165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178,
	179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192,
	193, 194, 195, 196, 197, 198, 199, 200 };
static struct spear_muxreg arm_trace_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = ARM_TRACE_MASK,
		.val = ARM_TRACE_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_5,
		.mask = CLCD_AND_ARM_TRACE_REG4_MASK,
		.val = CLCD_AND_ARM_TRACE_REG4_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_6,
		.mask = CLCD_AND_ARM_TRACE_REG5_MASK,
		.val = CLCD_AND_ARM_TRACE_REG5_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_7,
		.mask = CLCD_AND_ARM_TRACE_REG6_MASK,
		.val = CLCD_AND_ARM_TRACE_REG6_MASK,
	},
};

static struct spear_modemux arm_trace_modemux[] = {
	{
		.muxregs = arm_trace_muxreg,
		.nmuxregs = ARRAY_SIZE(arm_trace_muxreg),
	},
};

static struct spear_pingroup arm_trace_pingroup = {
	.name = "arm_trace_grp",
	.pins = arm_trace_pins,
	.npins = ARRAY_SIZE(arm_trace_pins),
	.modemuxs = arm_trace_modemux,
	.nmodemuxs = ARRAY_SIZE(arm_trace_modemux),
};

static const char *const arm_trace_grps[] = { "arm_trace_grp" };
static struct spear_function arm_trace_function = {
	.name = "arm_trace",
	.groups = arm_trace_grps,
	.ngroups = ARRAY_SIZE(arm_trace_grps),
};
1748
/*
 * pad multiplexing for miphy_dbg device
 *
 * Sets MIPHY_DBG_MASK (cleared by the clcd group above) to take the shared
 * pads for PHY debug, then enables the shared pad-function bits.
 */
static const unsigned miphy_dbg_pins[] = { 96, 97, 98, 99, 100, 101, 102, 103,
	132, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147,
	148, 149, 150, 151, 152, 153, 154, 155, 156, 157 };
static struct spear_muxreg miphy_dbg_muxreg[] = {
	{
		.reg = PAD_SHARED_IP_EN_1,
		.mask = MIPHY_DBG_MASK,
		.val = MIPHY_DBG_MASK,
	}, {
		.reg = PAD_FUNCTION_EN_5,
		.mask = DEVS_GRP_AND_MIPHY_DBG_REG4_MASK,
		.val = DEVS_GRP_AND_MIPHY_DBG_REG4_MASK,
	},
};

static struct spear_modemux miphy_dbg_modemux[] = {
	{
		.muxregs = miphy_dbg_muxreg,
		.nmuxregs = ARRAY_SIZE(miphy_dbg_muxreg),
	},
};

static struct spear_pingroup miphy_dbg_pingroup = {
	.name = "miphy_dbg_grp",
	.pins = miphy_dbg_pins,
	.npins = ARRAY_SIZE(miphy_dbg_pins),
	.modemuxs = miphy_dbg_modemux,
	.nmodemuxs = ARRAY_SIZE(miphy_dbg_modemux),
};

static const char *const miphy_dbg_grps[] = { "miphy_dbg_grp" };
static struct spear_function miphy_dbg_function = {
	.name = "miphy_dbg",
	.groups = miphy_dbg_grps,
	.ngroups = ARRAY_SIZE(miphy_dbg_grps),
};
1786
1787/* pad multiplexing for pcie device */
1788static const unsigned pcie_pins[] = { 250 };
1789static struct spear_muxreg pcie_muxreg[] = {
1790 {
1791 .reg = PCIE_SATA_CFG,
1792 .mask = SATA_PCIE_CFG_MASK,
1793 .val = PCIE_CFG_VAL,
1794 },
1795};
1796
1797static struct spear_modemux pcie_modemux[] = {
1798 {
1799 .muxregs = pcie_muxreg,
1800 .nmuxregs = ARRAY_SIZE(pcie_muxreg),
1801 },
1802};
1803
1804static struct spear_pingroup pcie_pingroup = {
1805 .name = "pcie_grp",
1806 .pins = pcie_pins,
1807 .npins = ARRAY_SIZE(pcie_pins),
1808 .modemuxs = pcie_modemux,
1809 .nmodemuxs = ARRAY_SIZE(pcie_modemux),
1810};
1811
1812static const char *const pcie_grps[] = { "pcie_grp" };
1813static struct spear_function pcie_function = {
1814 .name = "pcie",
1815 .groups = pcie_grps,
1816 .ngroups = ARRAY_SIZE(pcie_grps),
1817};
1818
1819/* pad multiplexing for sata device */
1820static const unsigned sata_pins[] = { 250 };
1821static struct spear_muxreg sata_muxreg[] = {
1822 {
1823 .reg = PCIE_SATA_CFG,
1824 .mask = SATA_PCIE_CFG_MASK,
1825 .val = SATA_CFG_VAL,
1826 },
1827};
1828
1829static struct spear_modemux sata_modemux[] = {
1830 {
1831 .muxregs = sata_muxreg,
1832 .nmuxregs = ARRAY_SIZE(sata_muxreg),
1833 },
1834};
1835
1836static struct spear_pingroup sata_pingroup = {
1837 .name = "sata_grp",
1838 .pins = sata_pins,
1839 .npins = ARRAY_SIZE(sata_pins),
1840 .modemuxs = sata_modemux,
1841 .nmodemuxs = ARRAY_SIZE(sata_modemux),
1842};
1843
1844static const char *const sata_grps[] = { "sata_grp" };
1845static struct spear_function sata_function = {
1846 .name = "sata",
1847 .groups = sata_grps,
1848 .ngroups = ARRAY_SIZE(sata_grps),
1849};
1850
1851/* pingroups */
1852static struct spear_pingroup *spear1340_pingroups[] = {
1853 &pads_as_gpio_pingroup,
1854 &fsmc_8bit_pingroup,
1855 &fsmc_16bit_pingroup,
1856 &fsmc_pnor_pingroup,
1857 &keyboard_row_col_pingroup,
1858 &keyboard_col5_pingroup,
1859 &spdif_in_pingroup,
1860 &spdif_out_pingroup,
1861 &gpt_0_1_pingroup,
1862 &pwm0_pingroup,
1863 &pwm1_pingroup,
1864 &pwm2_pingroup,
1865 &pwm3_pingroup,
1866 &vip_mux_pingroup,
1867 &vip_mux_cam0_pingroup,
1868 &vip_mux_cam1_pingroup,
1869 &vip_mux_cam2_pingroup,
1870 &vip_mux_cam3_pingroup,
1871 &cam0_pingroup,
1872 &cam1_pingroup,
1873 &cam2_pingroup,
1874 &cam3_pingroup,
1875 &smi_pingroup,
1876 &ssp0_pingroup,
1877 &ssp0_cs1_pingroup,
1878 &ssp0_cs2_pingroup,
1879 &ssp0_cs3_pingroup,
1880 &uart0_pingroup,
1881 &uart0_enh_pingroup,
1882 &uart1_pingroup,
1883 &i2s_in_pingroup,
1884 &i2s_out_pingroup,
1885 &gmii_pingroup,
1886 &rgmii_pingroup,
1887 &rmii_pingroup,
1888 &sgmii_pingroup,
1889 &i2c0_pingroup,
1890 &i2c1_pingroup,
1891 &cec0_pingroup,
1892 &cec1_pingroup,
1893 &sdhci_pingroup,
1894 &cf_pingroup,
1895 &xd_pingroup,
1896 &clcd_pingroup,
1897 &arm_trace_pingroup,
1898 &miphy_dbg_pingroup,
1899 &pcie_pingroup,
1900 &sata_pingroup,
1901};
1902
1903/* functions */
1904static struct spear_function *spear1340_functions[] = {
1905 &pads_as_gpio_function,
1906 &fsmc_function,
1907 &keyboard_function,
1908 &spdif_in_function,
1909 &spdif_out_function,
1910 &gpt_0_1_function,
1911 &pwm_function,
1912 &vip_function,
1913 &cam0_function,
1914 &cam1_function,
1915 &cam2_function,
1916 &cam3_function,
1917 &smi_function,
1918 &ssp0_function,
1919 &uart0_function,
1920 &uart1_function,
1921 &i2s_function,
1922 &gmac_function,
1923 &i2c0_function,
1924 &i2c1_function,
1925 &cec0_function,
1926 &cec1_function,
1927 &sdhci_function,
1928 &cf_function,
1929 &xd_function,
1930 &clcd_function,
1931 &arm_trace_function,
1932 &miphy_dbg_function,
1933 &pcie_function,
1934 &sata_function,
1935};
1936
1937static struct spear_pinctrl_machdata spear1340_machdata = {
1938 .pins = spear1340_pins,
1939 .npins = ARRAY_SIZE(spear1340_pins),
1940 .groups = spear1340_pingroups,
1941 .ngroups = ARRAY_SIZE(spear1340_pingroups),
1942 .functions = spear1340_functions,
1943 .nfunctions = ARRAY_SIZE(spear1340_functions),
1944 .modes_supported = false,
1945};
1946
1947static struct of_device_id spear1340_pinctrl_of_match[] __devinitdata = {
1948 {
1949 .compatible = "st,spear1340-pinmux",
1950 },
1951 {},
1952};
1953
1954static int __devinit spear1340_pinctrl_probe(struct platform_device *pdev)
1955{
1956 return spear_pinctrl_probe(pdev, &spear1340_machdata);
1957}
1958
1959static int __devexit spear1340_pinctrl_remove(struct platform_device *pdev)
1960{
1961 return spear_pinctrl_remove(pdev);
1962}
1963
1964static struct platform_driver spear1340_pinctrl_driver = {
1965 .driver = {
1966 .name = DRIVER_NAME,
1967 .owner = THIS_MODULE,
1968 .of_match_table = spear1340_pinctrl_of_match,
1969 },
1970 .probe = spear1340_pinctrl_probe,
1971 .remove = __devexit_p(spear1340_pinctrl_remove),
1972};
1973
1974static int __init spear1340_pinctrl_init(void)
1975{
1976 return platform_driver_register(&spear1340_pinctrl_driver);
1977}
1978arch_initcall(spear1340_pinctrl_init);
1979
1980static void __exit spear1340_pinctrl_exit(void)
1981{
1982 platform_driver_unregister(&spear1340_pinctrl_driver);
1983}
1984module_exit(spear1340_pinctrl_exit);
1985
1986MODULE_AUTHOR("Viresh Kumar <viresh.kumar@st.com>");
1987MODULE_DESCRIPTION("ST Microelectronics SPEAr1340 pinctrl driver");
1988MODULE_LICENSE("GPL v2");
1989MODULE_DEVICE_TABLE(of, spear1340_pinctrl_of_match);
diff --git a/drivers/pinctrl/spear/pinctrl-spear3xx.c b/drivers/pinctrl/spear/pinctrl-spear3xx.c
index 832049a8b1c9..91c883bc46a6 100644
--- a/drivers/pinctrl/spear/pinctrl-spear3xx.c
+++ b/drivers/pinctrl/spear/pinctrl-spear3xx.c
@@ -15,108 +15,7 @@
15 15
16/* pins */ 16/* pins */
17static const struct pinctrl_pin_desc spear3xx_pins[] = { 17static const struct pinctrl_pin_desc spear3xx_pins[] = {
18 PINCTRL_PIN(0, "PLGPIO0"), 18 SPEAR_PIN_0_TO_101,
19 PINCTRL_PIN(1, "PLGPIO1"),
20 PINCTRL_PIN(2, "PLGPIO2"),
21 PINCTRL_PIN(3, "PLGPIO3"),
22 PINCTRL_PIN(4, "PLGPIO4"),
23 PINCTRL_PIN(5, "PLGPIO5"),
24 PINCTRL_PIN(6, "PLGPIO6"),
25 PINCTRL_PIN(7, "PLGPIO7"),
26 PINCTRL_PIN(8, "PLGPIO8"),
27 PINCTRL_PIN(9, "PLGPIO9"),
28 PINCTRL_PIN(10, "PLGPIO10"),
29 PINCTRL_PIN(11, "PLGPIO11"),
30 PINCTRL_PIN(12, "PLGPIO12"),
31 PINCTRL_PIN(13, "PLGPIO13"),
32 PINCTRL_PIN(14, "PLGPIO14"),
33 PINCTRL_PIN(15, "PLGPIO15"),
34 PINCTRL_PIN(16, "PLGPIO16"),
35 PINCTRL_PIN(17, "PLGPIO17"),
36 PINCTRL_PIN(18, "PLGPIO18"),
37 PINCTRL_PIN(19, "PLGPIO19"),
38 PINCTRL_PIN(20, "PLGPIO20"),
39 PINCTRL_PIN(21, "PLGPIO21"),
40 PINCTRL_PIN(22, "PLGPIO22"),
41 PINCTRL_PIN(23, "PLGPIO23"),
42 PINCTRL_PIN(24, "PLGPIO24"),
43 PINCTRL_PIN(25, "PLGPIO25"),
44 PINCTRL_PIN(26, "PLGPIO26"),
45 PINCTRL_PIN(27, "PLGPIO27"),
46 PINCTRL_PIN(28, "PLGPIO28"),
47 PINCTRL_PIN(29, "PLGPIO29"),
48 PINCTRL_PIN(30, "PLGPIO30"),
49 PINCTRL_PIN(31, "PLGPIO31"),
50 PINCTRL_PIN(32, "PLGPIO32"),
51 PINCTRL_PIN(33, "PLGPIO33"),
52 PINCTRL_PIN(34, "PLGPIO34"),
53 PINCTRL_PIN(35, "PLGPIO35"),
54 PINCTRL_PIN(36, "PLGPIO36"),
55 PINCTRL_PIN(37, "PLGPIO37"),
56 PINCTRL_PIN(38, "PLGPIO38"),
57 PINCTRL_PIN(39, "PLGPIO39"),
58 PINCTRL_PIN(40, "PLGPIO40"),
59 PINCTRL_PIN(41, "PLGPIO41"),
60 PINCTRL_PIN(42, "PLGPIO42"),
61 PINCTRL_PIN(43, "PLGPIO43"),
62 PINCTRL_PIN(44, "PLGPIO44"),
63 PINCTRL_PIN(45, "PLGPIO45"),
64 PINCTRL_PIN(46, "PLGPIO46"),
65 PINCTRL_PIN(47, "PLGPIO47"),
66 PINCTRL_PIN(48, "PLGPIO48"),
67 PINCTRL_PIN(49, "PLGPIO49"),
68 PINCTRL_PIN(50, "PLGPIO50"),
69 PINCTRL_PIN(51, "PLGPIO51"),
70 PINCTRL_PIN(52, "PLGPIO52"),
71 PINCTRL_PIN(53, "PLGPIO53"),
72 PINCTRL_PIN(54, "PLGPIO54"),
73 PINCTRL_PIN(55, "PLGPIO55"),
74 PINCTRL_PIN(56, "PLGPIO56"),
75 PINCTRL_PIN(57, "PLGPIO57"),
76 PINCTRL_PIN(58, "PLGPIO58"),
77 PINCTRL_PIN(59, "PLGPIO59"),
78 PINCTRL_PIN(60, "PLGPIO60"),
79 PINCTRL_PIN(61, "PLGPIO61"),
80 PINCTRL_PIN(62, "PLGPIO62"),
81 PINCTRL_PIN(63, "PLGPIO63"),
82 PINCTRL_PIN(64, "PLGPIO64"),
83 PINCTRL_PIN(65, "PLGPIO65"),
84 PINCTRL_PIN(66, "PLGPIO66"),
85 PINCTRL_PIN(67, "PLGPIO67"),
86 PINCTRL_PIN(68, "PLGPIO68"),
87 PINCTRL_PIN(69, "PLGPIO69"),
88 PINCTRL_PIN(70, "PLGPIO70"),
89 PINCTRL_PIN(71, "PLGPIO71"),
90 PINCTRL_PIN(72, "PLGPIO72"),
91 PINCTRL_PIN(73, "PLGPIO73"),
92 PINCTRL_PIN(74, "PLGPIO74"),
93 PINCTRL_PIN(75, "PLGPIO75"),
94 PINCTRL_PIN(76, "PLGPIO76"),
95 PINCTRL_PIN(77, "PLGPIO77"),
96 PINCTRL_PIN(78, "PLGPIO78"),
97 PINCTRL_PIN(79, "PLGPIO79"),
98 PINCTRL_PIN(80, "PLGPIO80"),
99 PINCTRL_PIN(81, "PLGPIO81"),
100 PINCTRL_PIN(82, "PLGPIO82"),
101 PINCTRL_PIN(83, "PLGPIO83"),
102 PINCTRL_PIN(84, "PLGPIO84"),
103 PINCTRL_PIN(85, "PLGPIO85"),
104 PINCTRL_PIN(86, "PLGPIO86"),
105 PINCTRL_PIN(87, "PLGPIO87"),
106 PINCTRL_PIN(88, "PLGPIO88"),
107 PINCTRL_PIN(89, "PLGPIO89"),
108 PINCTRL_PIN(90, "PLGPIO90"),
109 PINCTRL_PIN(91, "PLGPIO91"),
110 PINCTRL_PIN(92, "PLGPIO92"),
111 PINCTRL_PIN(93, "PLGPIO93"),
112 PINCTRL_PIN(94, "PLGPIO94"),
113 PINCTRL_PIN(95, "PLGPIO95"),
114 PINCTRL_PIN(96, "PLGPIO96"),
115 PINCTRL_PIN(97, "PLGPIO97"),
116 PINCTRL_PIN(98, "PLGPIO98"),
117 PINCTRL_PIN(99, "PLGPIO99"),
118 PINCTRL_PIN(100, "PLGPIO100"),
119 PINCTRL_PIN(101, "PLGPIO101"),
120}; 19};
121 20
122/* firda_pins */ 21/* firda_pins */
diff --git a/drivers/platform/x86/toshiba_acpi.c b/drivers/platform/x86/toshiba_acpi.c
index ee79ce64d9df..57787d87d9a4 100644
--- a/drivers/platform/x86/toshiba_acpi.c
+++ b/drivers/platform/x86/toshiba_acpi.c
@@ -1104,6 +1104,7 @@ static int __devinit toshiba_acpi_add(struct acpi_device *acpi_dev)
1104 1104
1105 mutex_init(&dev->mutex); 1105 mutex_init(&dev->mutex);
1106 1106
1107 memset(&props, 0, sizeof(props));
1107 props.type = BACKLIGHT_PLATFORM; 1108 props.type = BACKLIGHT_PLATFORM;
1108 props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1; 1109 props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1;
1109 dev->backlight_dev = backlight_device_register("toshiba", 1110 dev->backlight_dev = backlight_device_register("toshiba",
diff --git a/drivers/power/wm831x_power.c b/drivers/power/wm831x_power.c
index 987332b71d8d..fc1ad9551182 100644
--- a/drivers/power/wm831x_power.c
+++ b/drivers/power/wm831x_power.c
@@ -565,7 +565,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
565 goto err_usb; 565 goto err_usb;
566 } 566 }
567 567
568 irq = platform_get_irq_byname(pdev, "SYSLO"); 568 irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
569 ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq, 569 ret = request_threaded_irq(irq, NULL, wm831x_syslo_irq,
570 IRQF_TRIGGER_RISING, "System power low", 570 IRQF_TRIGGER_RISING, "System power low",
571 power); 571 power);
@@ -575,7 +575,7 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
575 goto err_battery; 575 goto err_battery;
576 } 576 }
577 577
578 irq = platform_get_irq_byname(pdev, "PWR SRC"); 578 irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
579 ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq, 579 ret = request_threaded_irq(irq, NULL, wm831x_pwr_src_irq,
580 IRQF_TRIGGER_RISING, "Power source", 580 IRQF_TRIGGER_RISING, "Power source",
581 power); 581 power);
@@ -586,7 +586,9 @@ static __devinit int wm831x_power_probe(struct platform_device *pdev)
586 } 586 }
587 587
588 for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) { 588 for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) {
589 irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]); 589 irq = wm831x_irq(wm831x,
590 platform_get_irq_byname(pdev,
591 wm831x_bat_irqs[i]));
590 ret = request_threaded_irq(irq, NULL, wm831x_bat_irq, 592 ret = request_threaded_irq(irq, NULL, wm831x_bat_irq,
591 IRQF_TRIGGER_RISING, 593 IRQF_TRIGGER_RISING,
592 wm831x_bat_irqs[i], 594 wm831x_bat_irqs[i],
@@ -606,10 +608,10 @@ err_bat_irq:
606 irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]); 608 irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]);
607 free_irq(irq, power); 609 free_irq(irq, power);
608 } 610 }
609 irq = platform_get_irq_byname(pdev, "PWR SRC"); 611 irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
610 free_irq(irq, power); 612 free_irq(irq, power);
611err_syslo: 613err_syslo:
612 irq = platform_get_irq_byname(pdev, "SYSLO"); 614 irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
613 free_irq(irq, power); 615 free_irq(irq, power);
614err_battery: 616err_battery:
615 if (power->have_battery) 617 if (power->have_battery)
@@ -626,17 +628,20 @@ err_kmalloc:
626static __devexit int wm831x_power_remove(struct platform_device *pdev) 628static __devexit int wm831x_power_remove(struct platform_device *pdev)
627{ 629{
628 struct wm831x_power *wm831x_power = platform_get_drvdata(pdev); 630 struct wm831x_power *wm831x_power = platform_get_drvdata(pdev);
631 struct wm831x *wm831x = wm831x_power->wm831x;
629 int irq, i; 632 int irq, i;
630 633
631 for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) { 634 for (i = 0; i < ARRAY_SIZE(wm831x_bat_irqs); i++) {
632 irq = platform_get_irq_byname(pdev, wm831x_bat_irqs[i]); 635 irq = wm831x_irq(wm831x,
636 platform_get_irq_byname(pdev,
637 wm831x_bat_irqs[i]));
633 free_irq(irq, wm831x_power); 638 free_irq(irq, wm831x_power);
634 } 639 }
635 640
636 irq = platform_get_irq_byname(pdev, "PWR SRC"); 641 irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "PWR SRC"));
637 free_irq(irq, wm831x_power); 642 free_irq(irq, wm831x_power);
638 643
639 irq = platform_get_irq_byname(pdev, "SYSLO"); 644 irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "SYSLO"));
640 free_irq(irq, wm831x_power); 645 free_irq(irq, wm831x_power);
641 646
642 if (wm831x_power->have_battery) 647 if (wm831x_power->have_battery)
diff --git a/drivers/regulator/anatop-regulator.c b/drivers/regulator/anatop-regulator.c
index 49b2112b0486..3660bace123c 100644
--- a/drivers/regulator/anatop-regulator.c
+++ b/drivers/regulator/anatop-regulator.c
@@ -47,7 +47,7 @@ static int anatop_set_voltage(struct regulator_dev *reg, int min_uV,
47 int max_uV, unsigned *selector) 47 int max_uV, unsigned *selector)
48{ 48{
49 struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg); 49 struct anatop_regulator *anatop_reg = rdev_get_drvdata(reg);
50 u32 val, sel; 50 u32 val, sel, mask;
51 int uv; 51 int uv;
52 52
53 uv = min_uV; 53 uv = min_uV;
@@ -71,11 +71,10 @@ static int anatop_set_voltage(struct regulator_dev *reg, int min_uV,
71 val = anatop_reg->min_bit_val + sel; 71 val = anatop_reg->min_bit_val + sel;
72 *selector = sel; 72 *selector = sel;
73 dev_dbg(&reg->dev, "%s: calculated val %d\n", __func__, val); 73 dev_dbg(&reg->dev, "%s: calculated val %d\n", __func__, val);
74 anatop_set_bits(anatop_reg->mfd, 74 mask = ((1 << anatop_reg->vol_bit_width) - 1) <<
75 anatop_reg->control_reg, 75 anatop_reg->vol_bit_shift;
76 anatop_reg->vol_bit_shift, 76 val <<= anatop_reg->vol_bit_shift;
77 anatop_reg->vol_bit_width, 77 anatop_write_reg(anatop_reg->mfd, anatop_reg->control_reg, val, mask);
78 val);
79 78
80 return 0; 79 return 0;
81} 80}
@@ -88,10 +87,9 @@ static int anatop_get_voltage_sel(struct regulator_dev *reg)
88 if (!anatop_reg->control_reg) 87 if (!anatop_reg->control_reg)
89 return -ENOTSUPP; 88 return -ENOTSUPP;
90 89
91 val = anatop_get_bits(anatop_reg->mfd, 90 val = anatop_read_reg(anatop_reg->mfd, anatop_reg->control_reg);
92 anatop_reg->control_reg, 91 val = (val & ((1 << anatop_reg->vol_bit_width) - 1)) >>
93 anatop_reg->vol_bit_shift, 92 anatop_reg->vol_bit_shift;
94 anatop_reg->vol_bit_width);
95 93
96 return val - anatop_reg->min_bit_val; 94 return val - anatop_reg->min_bit_val;
97} 95}
diff --git a/drivers/regulator/tps65910-regulator.c b/drivers/regulator/tps65910-regulator.c
index 4e01a423471b..6bf864b4bdf6 100644
--- a/drivers/regulator/tps65910-regulator.c
+++ b/drivers/regulator/tps65910-regulator.c
@@ -331,21 +331,16 @@ struct tps65910_reg {
331 331
332static inline int tps65910_read(struct tps65910_reg *pmic, u8 reg) 332static inline int tps65910_read(struct tps65910_reg *pmic, u8 reg)
333{ 333{
334 u8 val; 334 unsigned int val;
335 int err; 335 int err;
336 336
337 err = pmic->mfd->read(pmic->mfd, reg, 1, &val); 337 err = tps65910_reg_read(pmic->mfd, reg, &val);
338 if (err) 338 if (err)
339 return err; 339 return err;
340 340
341 return val; 341 return val;
342} 342}
343 343
344static inline int tps65910_write(struct tps65910_reg *pmic, u8 reg, u8 val)
345{
346 return pmic->mfd->write(pmic->mfd, reg, 1, &val);
347}
348
349static int tps65910_modify_bits(struct tps65910_reg *pmic, u8 reg, 344static int tps65910_modify_bits(struct tps65910_reg *pmic, u8 reg,
350 u8 set_mask, u8 clear_mask) 345 u8 set_mask, u8 clear_mask)
351{ 346{
@@ -362,7 +357,7 @@ static int tps65910_modify_bits(struct tps65910_reg *pmic, u8 reg,
362 357
363 data &= ~clear_mask; 358 data &= ~clear_mask;
364 data |= set_mask; 359 data |= set_mask;
365 err = tps65910_write(pmic, reg, data); 360 err = tps65910_reg_write(pmic->mfd, reg, data);
366 if (err) 361 if (err)
367 dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg); 362 dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
368 363
@@ -371,7 +366,7 @@ out:
371 return err; 366 return err;
372} 367}
373 368
374static int tps65910_reg_read(struct tps65910_reg *pmic, u8 reg) 369static int tps65910_reg_read_locked(struct tps65910_reg *pmic, u8 reg)
375{ 370{
376 int data; 371 int data;
377 372
@@ -385,13 +380,13 @@ static int tps65910_reg_read(struct tps65910_reg *pmic, u8 reg)
385 return data; 380 return data;
386} 381}
387 382
388static int tps65910_reg_write(struct tps65910_reg *pmic, u8 reg, u8 val) 383static int tps65910_reg_write_locked(struct tps65910_reg *pmic, u8 reg, u8 val)
389{ 384{
390 int err; 385 int err;
391 386
392 mutex_lock(&pmic->mutex); 387 mutex_lock(&pmic->mutex);
393 388
394 err = tps65910_write(pmic, reg, val); 389 err = tps65910_reg_write(pmic->mfd, reg, val);
395 if (err < 0) 390 if (err < 0)
396 dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg); 391 dev_err(pmic->mfd->dev, "Write for reg 0x%x failed\n", reg);
397 392
@@ -490,9 +485,9 @@ static int tps65910_set_mode(struct regulator_dev *dev, unsigned int mode)
490 LDO_ST_MODE_BIT); 485 LDO_ST_MODE_BIT);
491 case REGULATOR_MODE_IDLE: 486 case REGULATOR_MODE_IDLE:
492 value = LDO_ST_ON_BIT | LDO_ST_MODE_BIT; 487 value = LDO_ST_ON_BIT | LDO_ST_MODE_BIT;
493 return tps65910_set_bits(mfd, reg, value); 488 return tps65910_reg_set_bits(mfd, reg, value);
494 case REGULATOR_MODE_STANDBY: 489 case REGULATOR_MODE_STANDBY:
495 return tps65910_clear_bits(mfd, reg, LDO_ST_ON_BIT); 490 return tps65910_reg_clear_bits(mfd, reg, LDO_ST_ON_BIT);
496 } 491 }
497 492
498 return -EINVAL; 493 return -EINVAL;
@@ -507,7 +502,7 @@ static unsigned int tps65910_get_mode(struct regulator_dev *dev)
507 if (reg < 0) 502 if (reg < 0)
508 return reg; 503 return reg;
509 504
510 value = tps65910_reg_read(pmic, reg); 505 value = tps65910_reg_read_locked(pmic, reg);
511 if (value < 0) 506 if (value < 0)
512 return value; 507 return value;
513 508
@@ -527,28 +522,28 @@ static int tps65910_get_voltage_dcdc_sel(struct regulator_dev *dev)
527 522
528 switch (id) { 523 switch (id) {
529 case TPS65910_REG_VDD1: 524 case TPS65910_REG_VDD1:
530 opvsel = tps65910_reg_read(pmic, TPS65910_VDD1_OP); 525 opvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD1_OP);
531 mult = tps65910_reg_read(pmic, TPS65910_VDD1); 526 mult = tps65910_reg_read_locked(pmic, TPS65910_VDD1);
532 mult = (mult & VDD1_VGAIN_SEL_MASK) >> VDD1_VGAIN_SEL_SHIFT; 527 mult = (mult & VDD1_VGAIN_SEL_MASK) >> VDD1_VGAIN_SEL_SHIFT;
533 srvsel = tps65910_reg_read(pmic, TPS65910_VDD1_SR); 528 srvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD1_SR);
534 sr = opvsel & VDD1_OP_CMD_MASK; 529 sr = opvsel & VDD1_OP_CMD_MASK;
535 opvsel &= VDD1_OP_SEL_MASK; 530 opvsel &= VDD1_OP_SEL_MASK;
536 srvsel &= VDD1_SR_SEL_MASK; 531 srvsel &= VDD1_SR_SEL_MASK;
537 vselmax = 75; 532 vselmax = 75;
538 break; 533 break;
539 case TPS65910_REG_VDD2: 534 case TPS65910_REG_VDD2:
540 opvsel = tps65910_reg_read(pmic, TPS65910_VDD2_OP); 535 opvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD2_OP);
541 mult = tps65910_reg_read(pmic, TPS65910_VDD2); 536 mult = tps65910_reg_read_locked(pmic, TPS65910_VDD2);
542 mult = (mult & VDD2_VGAIN_SEL_MASK) >> VDD2_VGAIN_SEL_SHIFT; 537 mult = (mult & VDD2_VGAIN_SEL_MASK) >> VDD2_VGAIN_SEL_SHIFT;
543 srvsel = tps65910_reg_read(pmic, TPS65910_VDD2_SR); 538 srvsel = tps65910_reg_read_locked(pmic, TPS65910_VDD2_SR);
544 sr = opvsel & VDD2_OP_CMD_MASK; 539 sr = opvsel & VDD2_OP_CMD_MASK;
545 opvsel &= VDD2_OP_SEL_MASK; 540 opvsel &= VDD2_OP_SEL_MASK;
546 srvsel &= VDD2_SR_SEL_MASK; 541 srvsel &= VDD2_SR_SEL_MASK;
547 vselmax = 75; 542 vselmax = 75;
548 break; 543 break;
549 case TPS65911_REG_VDDCTRL: 544 case TPS65911_REG_VDDCTRL:
550 opvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_OP); 545 opvsel = tps65910_reg_read_locked(pmic, TPS65911_VDDCTRL_OP);
551 srvsel = tps65910_reg_read(pmic, TPS65911_VDDCTRL_SR); 546 srvsel = tps65910_reg_read_locked(pmic, TPS65911_VDDCTRL_SR);
552 sr = opvsel & VDDCTRL_OP_CMD_MASK; 547 sr = opvsel & VDDCTRL_OP_CMD_MASK;
553 opvsel &= VDDCTRL_OP_SEL_MASK; 548 opvsel &= VDDCTRL_OP_SEL_MASK;
554 srvsel &= VDDCTRL_SR_SEL_MASK; 549 srvsel &= VDDCTRL_SR_SEL_MASK;
@@ -588,7 +583,7 @@ static int tps65910_get_voltage_sel(struct regulator_dev *dev)
588 if (reg < 0) 583 if (reg < 0)
589 return reg; 584 return reg;
590 585
591 value = tps65910_reg_read(pmic, reg); 586 value = tps65910_reg_read_locked(pmic, reg);
592 if (value < 0) 587 if (value < 0)
593 return value; 588 return value;
594 589
@@ -625,7 +620,7 @@ static int tps65911_get_voltage_sel(struct regulator_dev *dev)
625 620
626 reg = pmic->get_ctrl_reg(id); 621 reg = pmic->get_ctrl_reg(id);
627 622
628 value = tps65910_reg_read(pmic, reg); 623 value = tps65910_reg_read_locked(pmic, reg);
629 624
630 switch (id) { 625 switch (id) {
631 case TPS65911_REG_LDO1: 626 case TPS65911_REG_LDO1:
@@ -670,7 +665,7 @@ static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev,
670 tps65910_modify_bits(pmic, TPS65910_VDD1, 665 tps65910_modify_bits(pmic, TPS65910_VDD1,
671 (dcdc_mult << VDD1_VGAIN_SEL_SHIFT), 666 (dcdc_mult << VDD1_VGAIN_SEL_SHIFT),
672 VDD1_VGAIN_SEL_MASK); 667 VDD1_VGAIN_SEL_MASK);
673 tps65910_reg_write(pmic, TPS65910_VDD1_OP, vsel); 668 tps65910_reg_write_locked(pmic, TPS65910_VDD1_OP, vsel);
674 break; 669 break;
675 case TPS65910_REG_VDD2: 670 case TPS65910_REG_VDD2:
676 dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1; 671 dcdc_mult = (selector / VDD1_2_NUM_VOLT_FINE) + 1;
@@ -681,11 +676,11 @@ static int tps65910_set_voltage_dcdc_sel(struct regulator_dev *dev,
681 tps65910_modify_bits(pmic, TPS65910_VDD2, 676 tps65910_modify_bits(pmic, TPS65910_VDD2,
682 (dcdc_mult << VDD2_VGAIN_SEL_SHIFT), 677 (dcdc_mult << VDD2_VGAIN_SEL_SHIFT),
683 VDD1_VGAIN_SEL_MASK); 678 VDD1_VGAIN_SEL_MASK);
684 tps65910_reg_write(pmic, TPS65910_VDD2_OP, vsel); 679 tps65910_reg_write_locked(pmic, TPS65910_VDD2_OP, vsel);
685 break; 680 break;
686 case TPS65911_REG_VDDCTRL: 681 case TPS65911_REG_VDDCTRL:
687 vsel = selector + 3; 682 vsel = selector + 3;
688 tps65910_reg_write(pmic, TPS65911_VDDCTRL_OP, vsel); 683 tps65910_reg_write_locked(pmic, TPS65911_VDDCTRL_OP, vsel);
689 } 684 }
690 685
691 return 0; 686 return 0;
@@ -936,10 +931,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
936 931
937 /* External EN1 control */ 932 /* External EN1 control */
938 if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN1) 933 if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN1)
939 ret = tps65910_set_bits(mfd, 934 ret = tps65910_reg_set_bits(mfd,
940 TPS65910_EN1_LDO_ASS + regoffs, bit_pos); 935 TPS65910_EN1_LDO_ASS + regoffs, bit_pos);
941 else 936 else
942 ret = tps65910_clear_bits(mfd, 937 ret = tps65910_reg_clear_bits(mfd,
943 TPS65910_EN1_LDO_ASS + regoffs, bit_pos); 938 TPS65910_EN1_LDO_ASS + regoffs, bit_pos);
944 if (ret < 0) { 939 if (ret < 0) {
945 dev_err(mfd->dev, 940 dev_err(mfd->dev,
@@ -949,10 +944,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
949 944
950 /* External EN2 control */ 945 /* External EN2 control */
951 if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN2) 946 if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN2)
952 ret = tps65910_set_bits(mfd, 947 ret = tps65910_reg_set_bits(mfd,
953 TPS65910_EN2_LDO_ASS + regoffs, bit_pos); 948 TPS65910_EN2_LDO_ASS + regoffs, bit_pos);
954 else 949 else
955 ret = tps65910_clear_bits(mfd, 950 ret = tps65910_reg_clear_bits(mfd,
956 TPS65910_EN2_LDO_ASS + regoffs, bit_pos); 951 TPS65910_EN2_LDO_ASS + regoffs, bit_pos);
957 if (ret < 0) { 952 if (ret < 0) {
958 dev_err(mfd->dev, 953 dev_err(mfd->dev,
@@ -964,10 +959,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
964 if ((tps65910_chip_id(mfd) == TPS65910) && 959 if ((tps65910_chip_id(mfd) == TPS65910) &&
965 (id >= TPS65910_REG_VDIG1)) { 960 (id >= TPS65910_REG_VDIG1)) {
966 if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3) 961 if (ext_sleep_config & TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3)
967 ret = tps65910_set_bits(mfd, 962 ret = tps65910_reg_set_bits(mfd,
968 TPS65910_EN3_LDO_ASS + regoffs, bit_pos); 963 TPS65910_EN3_LDO_ASS + regoffs, bit_pos);
969 else 964 else
970 ret = tps65910_clear_bits(mfd, 965 ret = tps65910_reg_clear_bits(mfd,
971 TPS65910_EN3_LDO_ASS + regoffs, bit_pos); 966 TPS65910_EN3_LDO_ASS + regoffs, bit_pos);
972 if (ret < 0) { 967 if (ret < 0) {
973 dev_err(mfd->dev, 968 dev_err(mfd->dev,
@@ -979,10 +974,10 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
979 /* Return if no external control is selected */ 974 /* Return if no external control is selected */
980 if (!(ext_sleep_config & EXT_SLEEP_CONTROL)) { 975 if (!(ext_sleep_config & EXT_SLEEP_CONTROL)) {
981 /* Clear all sleep controls */ 976 /* Clear all sleep controls */
982 ret = tps65910_clear_bits(mfd, 977 ret = tps65910_reg_clear_bits(mfd,
983 TPS65910_SLEEP_KEEP_LDO_ON + regoffs, bit_pos); 978 TPS65910_SLEEP_KEEP_LDO_ON + regoffs, bit_pos);
984 if (!ret) 979 if (!ret)
985 ret = tps65910_clear_bits(mfd, 980 ret = tps65910_reg_clear_bits(mfd,
986 TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos); 981 TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
987 if (ret < 0) 982 if (ret < 0)
988 dev_err(mfd->dev, 983 dev_err(mfd->dev,
@@ -1001,32 +996,33 @@ static int tps65910_set_ext_sleep_config(struct tps65910_reg *pmic,
1001 (tps65910_chip_id(mfd) == TPS65911))) { 996 (tps65910_chip_id(mfd) == TPS65911))) {
1002 int op_reg_add = pmic->get_ctrl_reg(id) + 1; 997 int op_reg_add = pmic->get_ctrl_reg(id) + 1;
1003 int sr_reg_add = pmic->get_ctrl_reg(id) + 2; 998 int sr_reg_add = pmic->get_ctrl_reg(id) + 2;
1004 int opvsel = tps65910_reg_read(pmic, op_reg_add); 999 int opvsel = tps65910_reg_read_locked(pmic, op_reg_add);
1005 int srvsel = tps65910_reg_read(pmic, sr_reg_add); 1000 int srvsel = tps65910_reg_read_locked(pmic, sr_reg_add);
1006 if (opvsel & VDD1_OP_CMD_MASK) { 1001 if (opvsel & VDD1_OP_CMD_MASK) {
1007 u8 reg_val = srvsel & VDD1_OP_SEL_MASK; 1002 u8 reg_val = srvsel & VDD1_OP_SEL_MASK;
1008 ret = tps65910_reg_write(pmic, op_reg_add, reg_val); 1003 ret = tps65910_reg_write_locked(pmic, op_reg_add,
1004 reg_val);
1009 if (ret < 0) { 1005 if (ret < 0) {
1010 dev_err(mfd->dev, 1006 dev_err(mfd->dev,
1011 "Error in configuring op register\n"); 1007 "Error in configuring op register\n");
1012 return ret; 1008 return ret;
1013 } 1009 }
1014 } 1010 }
1015 ret = tps65910_reg_write(pmic, sr_reg_add, 0); 1011 ret = tps65910_reg_write_locked(pmic, sr_reg_add, 0);
1016 if (ret < 0) { 1012 if (ret < 0) {
1017 dev_err(mfd->dev, "Error in settting sr register\n"); 1013 dev_err(mfd->dev, "Error in settting sr register\n");
1018 return ret; 1014 return ret;
1019 } 1015 }
1020 } 1016 }
1021 1017
1022 ret = tps65910_clear_bits(mfd, 1018 ret = tps65910_reg_clear_bits(mfd,
1023 TPS65910_SLEEP_KEEP_LDO_ON + regoffs, bit_pos); 1019 TPS65910_SLEEP_KEEP_LDO_ON + regoffs, bit_pos);
1024 if (!ret) { 1020 if (!ret) {
1025 if (ext_sleep_config & TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP) 1021 if (ext_sleep_config & TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP)
1026 ret = tps65910_set_bits(mfd, 1022 ret = tps65910_reg_set_bits(mfd,
1027 TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos); 1023 TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
1028 else 1024 else
1029 ret = tps65910_clear_bits(mfd, 1025 ret = tps65910_reg_clear_bits(mfd,
1030 TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos); 1026 TPS65910_SLEEP_SET_LDO_OFF + regoffs, bit_pos);
1031 } 1027 }
1032 if (ret < 0) 1028 if (ret < 0)
@@ -1177,7 +1173,7 @@ static __devinit int tps65910_probe(struct platform_device *pdev)
1177 platform_set_drvdata(pdev, pmic); 1173 platform_set_drvdata(pdev, pmic);
1178 1174
1179 /* Give control of all register to control port */ 1175 /* Give control of all register to control port */
1180 tps65910_set_bits(pmic->mfd, TPS65910_DEVCTRL, 1176 tps65910_reg_set_bits(pmic->mfd, TPS65910_DEVCTRL,
1181 DEVCTRL_SR_CTL_I2C_SEL_MASK); 1177 DEVCTRL_SR_CTL_I2C_SEL_MASK);
1182 1178
1183 switch(tps65910_chip_id(tps65910)) { 1179 switch(tps65910_chip_id(tps65910)) {
diff --git a/drivers/regulator/wm831x-dcdc.c b/drivers/regulator/wm831x-dcdc.c
index a885911bb5fc..099da11e989f 100644
--- a/drivers/regulator/wm831x-dcdc.c
+++ b/drivers/regulator/wm831x-dcdc.c
@@ -535,7 +535,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
535 goto err; 535 goto err;
536 } 536 }
537 537
538 irq = platform_get_irq_byname(pdev, "UV"); 538 irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
539 ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq, 539 ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
540 IRQF_TRIGGER_RISING, dcdc->name, dcdc); 540 IRQF_TRIGGER_RISING, dcdc->name, dcdc);
541 if (ret != 0) { 541 if (ret != 0) {
@@ -544,7 +544,7 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
544 goto err_regulator; 544 goto err_regulator;
545 } 545 }
546 546
547 irq = platform_get_irq_byname(pdev, "HC"); 547 irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "HC"));
548 ret = request_threaded_irq(irq, NULL, wm831x_dcdc_oc_irq, 548 ret = request_threaded_irq(irq, NULL, wm831x_dcdc_oc_irq,
549 IRQF_TRIGGER_RISING, dcdc->name, dcdc); 549 IRQF_TRIGGER_RISING, dcdc->name, dcdc);
550 if (ret != 0) { 550 if (ret != 0) {
@@ -558,7 +558,8 @@ static __devinit int wm831x_buckv_probe(struct platform_device *pdev)
558 return 0; 558 return 0;
559 559
560err_uv: 560err_uv:
561 free_irq(platform_get_irq_byname(pdev, "UV"), dcdc); 561 free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV")),
562 dcdc);
562err_regulator: 563err_regulator:
563 regulator_unregister(dcdc->regulator); 564 regulator_unregister(dcdc->regulator);
564err: 565err:
@@ -570,11 +571,14 @@ err:
570static __devexit int wm831x_buckv_remove(struct platform_device *pdev) 571static __devexit int wm831x_buckv_remove(struct platform_device *pdev)
571{ 572{
572 struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev); 573 struct wm831x_dcdc *dcdc = platform_get_drvdata(pdev);
574 struct wm831x *wm831x = dcdc->wm831x;
573 575
574 platform_set_drvdata(pdev, NULL); 576 platform_set_drvdata(pdev, NULL);
575 577
576 free_irq(platform_get_irq_byname(pdev, "HC"), dcdc); 578 free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "HC")),
577 free_irq(platform_get_irq_byname(pdev, "UV"), dcdc); 579 dcdc);
580 free_irq(wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV")),
581 dcdc);
578 regulator_unregister(dcdc->regulator); 582 regulator_unregister(dcdc->regulator);
579 if (dcdc->dvs_gpio) 583 if (dcdc->dvs_gpio)
580 gpio_free(dcdc->dvs_gpio); 584 gpio_free(dcdc->dvs_gpio);
@@ -726,7 +730,7 @@ static __devinit int wm831x_buckp_probe(struct platform_device *pdev)
726 goto err; 730 goto err;
727 } 731 }
728 732
729 irq = platform_get_irq_byname(pdev, "UV"); 733 irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
730 ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq, 734 ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
731 IRQF_TRIGGER_RISING, dcdc->name, dcdc); 735 IRQF_TRIGGER_RISING, dcdc->name, dcdc);
732 if (ret != 0) { 736 if (ret != 0) {
@@ -751,7 +755,8 @@ static __devexit int wm831x_buckp_remove(struct platform_device *pdev)
751 755
752 platform_set_drvdata(pdev, NULL); 756 platform_set_drvdata(pdev, NULL);
753 757
754 free_irq(platform_get_irq_byname(pdev, "UV"), dcdc); 758 free_irq(wm831x_irq(dcdc->wm831x, platform_get_irq_byname(pdev, "UV")),
759 dcdc);
755 regulator_unregister(dcdc->regulator); 760 regulator_unregister(dcdc->regulator);
756 761
757 return 0; 762 return 0;
@@ -859,7 +864,7 @@ static __devinit int wm831x_boostp_probe(struct platform_device *pdev)
859 goto err; 864 goto err;
860 } 865 }
861 866
862 irq = platform_get_irq_byname(pdev, "UV"); 867 irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
863 ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq, 868 ret = request_threaded_irq(irq, NULL, wm831x_dcdc_uv_irq,
864 IRQF_TRIGGER_RISING, dcdc->name, 869 IRQF_TRIGGER_RISING, dcdc->name,
865 dcdc); 870 dcdc);
@@ -885,7 +890,8 @@ static __devexit int wm831x_boostp_remove(struct platform_device *pdev)
885 890
886 platform_set_drvdata(pdev, NULL); 891 platform_set_drvdata(pdev, NULL);
887 892
888 free_irq(platform_get_irq_byname(pdev, "UV"), dcdc); 893 free_irq(wm831x_irq(dcdc->wm831x, platform_get_irq_byname(pdev, "UV")),
894 dcdc);
889 regulator_unregister(dcdc->regulator); 895 regulator_unregister(dcdc->regulator);
890 896
891 return 0; 897 return 0;
diff --git a/drivers/regulator/wm831x-isink.c b/drivers/regulator/wm831x-isink.c
index b50ab778b098..0d207c297714 100644
--- a/drivers/regulator/wm831x-isink.c
+++ b/drivers/regulator/wm831x-isink.c
@@ -202,7 +202,7 @@ static __devinit int wm831x_isink_probe(struct platform_device *pdev)
202 goto err; 202 goto err;
203 } 203 }
204 204
205 irq = platform_get_irq(pdev, 0); 205 irq = wm831x_irq(wm831x, platform_get_irq(pdev, 0));
206 ret = request_threaded_irq(irq, NULL, wm831x_isink_irq, 206 ret = request_threaded_irq(irq, NULL, wm831x_isink_irq,
207 IRQF_TRIGGER_RISING, isink->name, isink); 207 IRQF_TRIGGER_RISING, isink->name, isink);
208 if (ret != 0) { 208 if (ret != 0) {
@@ -227,7 +227,7 @@ static __devexit int wm831x_isink_remove(struct platform_device *pdev)
227 227
228 platform_set_drvdata(pdev, NULL); 228 platform_set_drvdata(pdev, NULL);
229 229
230 free_irq(platform_get_irq(pdev, 0), isink); 230 free_irq(wm831x_irq(isink->wm831x, platform_get_irq(pdev, 0)), isink);
231 231
232 regulator_unregister(isink->regulator); 232 regulator_unregister(isink->regulator);
233 233
diff --git a/drivers/regulator/wm831x-ldo.c b/drivers/regulator/wm831x-ldo.c
index aa1f8b3fbe16..a9a28d8ac185 100644
--- a/drivers/regulator/wm831x-ldo.c
+++ b/drivers/regulator/wm831x-ldo.c
@@ -321,7 +321,7 @@ static __devinit int wm831x_gp_ldo_probe(struct platform_device *pdev)
321 goto err; 321 goto err;
322 } 322 }
323 323
324 irq = platform_get_irq_byname(pdev, "UV"); 324 irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
325 ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq, 325 ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq,
326 IRQF_TRIGGER_RISING, ldo->name, 326 IRQF_TRIGGER_RISING, ldo->name,
327 ldo); 327 ldo);
@@ -347,7 +347,8 @@ static __devexit int wm831x_gp_ldo_remove(struct platform_device *pdev)
347 347
348 platform_set_drvdata(pdev, NULL); 348 platform_set_drvdata(pdev, NULL);
349 349
350 free_irq(platform_get_irq_byname(pdev, "UV"), ldo); 350 free_irq(wm831x_irq(ldo->wm831x,
351 platform_get_irq_byname(pdev, "UV")), ldo);
351 regulator_unregister(ldo->regulator); 352 regulator_unregister(ldo->regulator);
352 353
353 return 0; 354 return 0;
@@ -582,7 +583,7 @@ static __devinit int wm831x_aldo_probe(struct platform_device *pdev)
582 goto err; 583 goto err;
583 } 584 }
584 585
585 irq = platform_get_irq_byname(pdev, "UV"); 586 irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "UV"));
586 ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq, 587 ret = request_threaded_irq(irq, NULL, wm831x_ldo_uv_irq,
587 IRQF_TRIGGER_RISING, ldo->name, ldo); 588 IRQF_TRIGGER_RISING, ldo->name, ldo);
588 if (ret != 0) { 589 if (ret != 0) {
@@ -605,7 +606,8 @@ static __devexit int wm831x_aldo_remove(struct platform_device *pdev)
605{ 606{
606 struct wm831x_ldo *ldo = platform_get_drvdata(pdev); 607 struct wm831x_ldo *ldo = platform_get_drvdata(pdev);
607 608
608 free_irq(platform_get_irq_byname(pdev, "UV"), ldo); 609 free_irq(wm831x_irq(ldo->wm831x, platform_get_irq_byname(pdev, "UV")),
610 ldo);
609 regulator_unregister(ldo->regulator); 611 regulator_unregister(ldo->regulator);
610 612
611 return 0; 613 return 0;
diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig
index 4161bfe462cd..08cbdb900a18 100644
--- a/drivers/rtc/Kconfig
+++ b/drivers/rtc/Kconfig
@@ -620,27 +620,6 @@ config RTC_DRV_MSM6242
620 This driver can also be built as a module. If so, the module 620 This driver can also be built as a module. If so, the module
621 will be called rtc-msm6242. 621 will be called rtc-msm6242.
622 622
623config RTC_DRV_IMXDI
624 tristate "Freescale IMX DryIce Real Time Clock"
625 depends on ARCH_MX25
626 depends on RTC_CLASS
627 help
628 Support for Freescale IMX DryIce RTC
629
630 This driver can also be built as a module, if so, the module
631 will be called "rtc-imxdi".
632
633config RTC_MXC
634 tristate "Freescale MXC Real Time Clock"
635 depends on ARCH_MXC
636 depends on RTC_CLASS
637 help
638 If you say yes here you get support for the Freescale MXC
639 RTC module.
640
641 This driver can also be built as a module, if so, the module
642 will be called "rtc-mxc".
643
644config RTC_DRV_BQ4802 623config RTC_DRV_BQ4802
645 tristate "TI BQ4802" 624 tristate "TI BQ4802"
646 help 625 help
@@ -738,6 +717,16 @@ config RTC_DRV_DAVINCI
738 This driver can also be built as a module. If so, the module 717 This driver can also be built as a module. If so, the module
739 will be called rtc-davinci. 718 will be called rtc-davinci.
740 719
720config RTC_DRV_IMXDI
721 tristate "Freescale IMX DryIce Real Time Clock"
722 depends on SOC_IMX25
723 depends on RTC_CLASS
724 help
725 Support for Freescale IMX DryIce RTC
726
727 This driver can also be built as a module, if so, the module
728 will be called "rtc-imxdi".
729
741config RTC_DRV_OMAP 730config RTC_DRV_OMAP
742 tristate "TI OMAP1" 731 tristate "TI OMAP1"
743 depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX 732 depends on ARCH_OMAP15XX || ARCH_OMAP16XX || ARCH_OMAP730 || ARCH_DAVINCI_DA8XX
@@ -1087,4 +1076,15 @@ config RTC_DRV_LOONGSON1
1087 This driver can also be built as a module. If so, the module 1076 This driver can also be built as a module. If so, the module
1088 will be called rtc-ls1x. 1077 will be called rtc-ls1x.
1089 1078
1079config RTC_DRV_MXC
1080 tristate "Freescale MXC Real Time Clock"
1081 depends on ARCH_MXC
1082 depends on RTC_CLASS
1083 help
1084 If you say yes here you get support for the Freescale MXC
1085 RTC module.
1086
1087 This driver can also be built as a module, if so, the module
1088 will be called "rtc-mxc".
1089
1090endif # RTC_CLASS 1090endif # RTC_CLASS
diff --git a/drivers/rtc/Makefile b/drivers/rtc/Makefile
index 727ae7786e6c..2973921c30d8 100644
--- a/drivers/rtc/Makefile
+++ b/drivers/rtc/Makefile
@@ -61,7 +61,7 @@ obj-$(CONFIG_RTC_DRV_M41T94) += rtc-m41t94.o
61obj-$(CONFIG_RTC_DRV_M48T35) += rtc-m48t35.o 61obj-$(CONFIG_RTC_DRV_M48T35) += rtc-m48t35.o
62obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o 62obj-$(CONFIG_RTC_DRV_M48T59) += rtc-m48t59.o
63obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o 63obj-$(CONFIG_RTC_DRV_M48T86) += rtc-m48t86.o
64obj-$(CONFIG_RTC_MXC) += rtc-mxc.o 64obj-$(CONFIG_RTC_DRV_MXC) += rtc-mxc.o
65obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o 65obj-$(CONFIG_RTC_DRV_MAX6900) += rtc-max6900.o
66obj-$(CONFIG_RTC_DRV_MAX8925) += rtc-max8925.o 66obj-$(CONFIG_RTC_DRV_MAX8925) += rtc-max8925.o
67obj-$(CONFIG_RTC_DRV_MAX8998) += rtc-max8998.o 67obj-$(CONFIG_RTC_DRV_MAX8998) += rtc-max8998.o
diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
index c293d0cdb104..836710ce750e 100644
--- a/drivers/rtc/rtc-ds1307.c
+++ b/drivers/rtc/rtc-ds1307.c
@@ -17,8 +17,7 @@
17#include <linux/string.h> 17#include <linux/string.h>
18#include <linux/rtc.h> 18#include <linux/rtc.h>
19#include <linux/bcd.h> 19#include <linux/bcd.h>
20 20#include <linux/rtc/ds1307.h>
21
22 21
23/* 22/*
24 * We can't determine type by probing, but if we expect pre-Linux code 23 * We can't determine type by probing, but if we expect pre-Linux code
@@ -92,7 +91,8 @@ enum ds_type {
92# define DS1337_BIT_A2I 0x02 91# define DS1337_BIT_A2I 0x02
93# define DS1337_BIT_A1I 0x01 92# define DS1337_BIT_A1I 0x01
94#define DS1339_REG_ALARM1_SECS 0x07 93#define DS1339_REG_ALARM1_SECS 0x07
95#define DS1339_REG_TRICKLE 0x10 94
95#define DS13XX_TRICKLE_CHARGER_MAGIC 0xa0
96 96
97#define RX8025_REG_CTRL1 0x0e 97#define RX8025_REG_CTRL1 0x0e
98# define RX8025_BIT_2412 0x20 98# define RX8025_BIT_2412 0x20
@@ -124,6 +124,7 @@ struct chip_desc {
124 unsigned alarm:1; 124 unsigned alarm:1;
125 u16 nvram_offset; 125 u16 nvram_offset;
126 u16 nvram_size; 126 u16 nvram_size;
127 u16 trickle_charger_reg;
127}; 128};
128 129
129static const struct chip_desc chips[last_ds_type] = { 130static const struct chip_desc chips[last_ds_type] = {
@@ -140,6 +141,13 @@ static const struct chip_desc chips[last_ds_type] = {
140 }, 141 },
141 [ds_1339] = { 142 [ds_1339] = {
142 .alarm = 1, 143 .alarm = 1,
144 .trickle_charger_reg = 0x10,
145 },
146 [ds_1340] = {
147 .trickle_charger_reg = 0x08,
148 },
149 [ds_1388] = {
150 .trickle_charger_reg = 0x0a,
143 }, 151 },
144 [ds_3231] = { 152 [ds_3231] = {
145 .alarm = 1, 153 .alarm = 1,
@@ -619,6 +627,7 @@ static int __devinit ds1307_probe(struct i2c_client *client,
619 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent); 627 struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
620 int want_irq = false; 628 int want_irq = false;
621 unsigned char *buf; 629 unsigned char *buf;
630 struct ds1307_platform_data *pdata = client->dev.platform_data;
622 static const int bbsqi_bitpos[] = { 631 static const int bbsqi_bitpos[] = {
623 [ds_1337] = 0, 632 [ds_1337] = 0,
624 [ds_1339] = DS1339_BIT_BBSQI, 633 [ds_1339] = DS1339_BIT_BBSQI,
@@ -637,7 +646,10 @@ static int __devinit ds1307_probe(struct i2c_client *client,
637 646
638 ds1307->client = client; 647 ds1307->client = client;
639 ds1307->type = id->driver_data; 648 ds1307->type = id->driver_data;
640 ds1307->offset = 0; 649
650 if (pdata && pdata->trickle_charger_setup && chip->trickle_charger_reg)
651 i2c_smbus_write_byte_data(client, chip->trickle_charger_reg,
652 DS13XX_TRICKLE_CHARGER_MAGIC | pdata->trickle_charger_setup);
641 653
642 buf = ds1307->regs; 654 buf = ds1307->regs;
643 if (i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { 655 if (i2c_check_functionality(adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) {
diff --git a/drivers/rtc/rtc-ep93xx.c b/drivers/rtc/rtc-ep93xx.c
index 14a42a1edc66..9602278ff988 100644
--- a/drivers/rtc/rtc-ep93xx.c
+++ b/drivers/rtc/rtc-ep93xx.c
@@ -127,7 +127,7 @@ static const struct attribute_group ep93xx_rtc_sysfs_files = {
127 .attrs = ep93xx_rtc_attrs, 127 .attrs = ep93xx_rtc_attrs,
128}; 128};
129 129
130static int __init ep93xx_rtc_probe(struct platform_device *pdev) 130static int __devinit ep93xx_rtc_probe(struct platform_device *pdev)
131{ 131{
132 struct ep93xx_rtc *ep93xx_rtc; 132 struct ep93xx_rtc *ep93xx_rtc;
133 struct resource *res; 133 struct resource *res;
@@ -174,7 +174,7 @@ exit:
174 return err; 174 return err;
175} 175}
176 176
177static int __exit ep93xx_rtc_remove(struct platform_device *pdev) 177static int __devexit ep93xx_rtc_remove(struct platform_device *pdev)
178{ 178{
179 struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev); 179 struct ep93xx_rtc *ep93xx_rtc = platform_get_drvdata(pdev);
180 180
@@ -186,31 +186,19 @@ static int __exit ep93xx_rtc_remove(struct platform_device *pdev)
186 return 0; 186 return 0;
187} 187}
188 188
189/* work with hotplug and coldplug */
190MODULE_ALIAS("platform:ep93xx-rtc");
191
192static struct platform_driver ep93xx_rtc_driver = { 189static struct platform_driver ep93xx_rtc_driver = {
193 .driver = { 190 .driver = {
194 .name = "ep93xx-rtc", 191 .name = "ep93xx-rtc",
195 .owner = THIS_MODULE, 192 .owner = THIS_MODULE,
196 }, 193 },
197 .remove = __exit_p(ep93xx_rtc_remove), 194 .probe = ep93xx_rtc_probe,
195 .remove = __devexit_p(ep93xx_rtc_remove),
198}; 196};
199 197
200static int __init ep93xx_rtc_init(void) 198module_platform_driver(ep93xx_rtc_driver);
201{
202 return platform_driver_probe(&ep93xx_rtc_driver, ep93xx_rtc_probe);
203}
204
205static void __exit ep93xx_rtc_exit(void)
206{
207 platform_driver_unregister(&ep93xx_rtc_driver);
208}
209 199
210MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>"); 200MODULE_AUTHOR("Alessandro Zummo <a.zummo@towertech.it>");
211MODULE_DESCRIPTION("EP93XX RTC driver"); 201MODULE_DESCRIPTION("EP93XX RTC driver");
212MODULE_LICENSE("GPL"); 202MODULE_LICENSE("GPL");
213MODULE_VERSION(DRV_VERSION); 203MODULE_VERSION(DRV_VERSION);
214 204MODULE_ALIAS("platform:ep93xx-rtc");
215module_init(ep93xx_rtc_init);
216module_exit(ep93xx_rtc_exit);
diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c
index d93a9608b1f0..891cd6c61d0a 100644
--- a/drivers/rtc/rtc-imxdi.c
+++ b/drivers/rtc/rtc-imxdi.c
@@ -405,7 +405,7 @@ static int dryice_rtc_probe(struct platform_device *pdev)
405 imxdi->clk = clk_get(&pdev->dev, NULL); 405 imxdi->clk = clk_get(&pdev->dev, NULL);
406 if (IS_ERR(imxdi->clk)) 406 if (IS_ERR(imxdi->clk))
407 return PTR_ERR(imxdi->clk); 407 return PTR_ERR(imxdi->clk);
408 clk_enable(imxdi->clk); 408 clk_prepare_enable(imxdi->clk);
409 409
410 /* 410 /*
411 * Initialize dryice hardware 411 * Initialize dryice hardware
@@ -470,7 +470,7 @@ static int dryice_rtc_probe(struct platform_device *pdev)
470 return 0; 470 return 0;
471 471
472err: 472err:
473 clk_disable(imxdi->clk); 473 clk_disable_unprepare(imxdi->clk);
474 clk_put(imxdi->clk); 474 clk_put(imxdi->clk);
475 475
476 return rc; 476 return rc;
@@ -487,7 +487,7 @@ static int __devexit dryice_rtc_remove(struct platform_device *pdev)
487 487
488 rtc_device_unregister(imxdi->rtc); 488 rtc_device_unregister(imxdi->rtc);
489 489
490 clk_disable(imxdi->clk); 490 clk_disable_unprepare(imxdi->clk);
491 clk_put(imxdi->clk); 491 clk_put(imxdi->clk);
492 492
493 return 0; 493 return 0;
diff --git a/drivers/rtc/rtc-lpc32xx.c b/drivers/rtc/rtc-lpc32xx.c
index 63c72189c64b..d5218553741f 100644
--- a/drivers/rtc/rtc-lpc32xx.c
+++ b/drivers/rtc/rtc-lpc32xx.c
@@ -19,6 +19,7 @@
19#include <linux/rtc.h> 19#include <linux/rtc.h>
20#include <linux/slab.h> 20#include <linux/slab.h>
21#include <linux/io.h> 21#include <linux/io.h>
22#include <linux/of.h>
22 23
23/* 24/*
24 * Clock and Power control register offsets 25 * Clock and Power control register offsets
@@ -386,13 +387,22 @@ static const struct dev_pm_ops lpc32xx_rtc_pm_ops = {
386#define LPC32XX_RTC_PM_OPS NULL 387#define LPC32XX_RTC_PM_OPS NULL
387#endif 388#endif
388 389
390#ifdef CONFIG_OF
391static const struct of_device_id lpc32xx_rtc_match[] = {
392 { .compatible = "nxp,lpc3220-rtc" },
393 { }
394};
395MODULE_DEVICE_TABLE(of, lpc32xx_rtc_match);
396#endif
397
389static struct platform_driver lpc32xx_rtc_driver = { 398static struct platform_driver lpc32xx_rtc_driver = {
390 .probe = lpc32xx_rtc_probe, 399 .probe = lpc32xx_rtc_probe,
391 .remove = __devexit_p(lpc32xx_rtc_remove), 400 .remove = __devexit_p(lpc32xx_rtc_remove),
392 .driver = { 401 .driver = {
393 .name = RTC_NAME, 402 .name = RTC_NAME,
394 .owner = THIS_MODULE, 403 .owner = THIS_MODULE,
395 .pm = LPC32XX_RTC_PM_OPS 404 .pm = LPC32XX_RTC_PM_OPS,
405 .of_match_table = of_match_ptr(lpc32xx_rtc_match),
396 }, 406 },
397}; 407};
398 408
diff --git a/drivers/rtc/rtc-m41t93.c b/drivers/rtc/rtc-m41t93.c
index 10f1c29436ec..efab3d48cb15 100644
--- a/drivers/rtc/rtc-m41t93.c
+++ b/drivers/rtc/rtc-m41t93.c
@@ -48,6 +48,7 @@ static inline int m41t93_set_reg(struct spi_device *spi, u8 addr, u8 data)
48static int m41t93_set_time(struct device *dev, struct rtc_time *tm) 48static int m41t93_set_time(struct device *dev, struct rtc_time *tm)
49{ 49{
50 struct spi_device *spi = to_spi_device(dev); 50 struct spi_device *spi = to_spi_device(dev);
51 int tmp;
51 u8 buf[9] = {0x80}; /* write cmd + 8 data bytes */ 52 u8 buf[9] = {0x80}; /* write cmd + 8 data bytes */
52 u8 * const data = &buf[1]; /* ptr to first data byte */ 53 u8 * const data = &buf[1]; /* ptr to first data byte */
53 54
@@ -62,6 +63,30 @@ static int m41t93_set_time(struct device *dev, struct rtc_time *tm)
62 return -EINVAL; 63 return -EINVAL;
63 } 64 }
64 65
66 tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
67 if (tmp < 0)
68 return tmp;
69
70 if (tmp & M41T93_FLAG_OF) {
71 dev_warn(&spi->dev, "OF bit is set, resetting.\n");
72 m41t93_set_reg(spi, M41T93_REG_FLAGS, tmp & ~M41T93_FLAG_OF);
73
74 tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
75 if (tmp < 0) {
76 return tmp;
77 } else if (tmp & M41T93_FLAG_OF) {
78 /* OF cannot be immediately reset: oscillator has to be
79 * restarted. */
80 u8 reset_osc = buf[M41T93_REG_ST_SEC] | M41T93_FLAG_ST;
81
82 dev_warn(&spi->dev,
83 "OF bit is still set, kickstarting clock.\n");
84 m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
85 reset_osc &= ~M41T93_FLAG_ST;
86 m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
87 }
88 }
89
65 data[M41T93_REG_SSEC] = 0; 90 data[M41T93_REG_SSEC] = 0;
66 data[M41T93_REG_ST_SEC] = bin2bcd(tm->tm_sec); 91 data[M41T93_REG_ST_SEC] = bin2bcd(tm->tm_sec);
67 data[M41T93_REG_MIN] = bin2bcd(tm->tm_min); 92 data[M41T93_REG_MIN] = bin2bcd(tm->tm_min);
@@ -89,10 +114,7 @@ static int m41t93_get_time(struct device *dev, struct rtc_time *tm)
89 1. halt bit (HT) is set: the clock is running but update of readout 114 1. halt bit (HT) is set: the clock is running but update of readout
90 registers has been disabled due to power failure. This is normal 115 registers has been disabled due to power failure. This is normal
91 case after poweron. Time is valid after resetting HT bit. 116 case after poweron. Time is valid after resetting HT bit.
92 2. oscillator fail bit (OF) is set. Oscillator has be stopped and 117 2. oscillator fail bit (OF) is set: time is invalid.
93 time is invalid:
94 a) OF can be immeditely reset.
95 b) OF cannot be immediately reset: oscillator has to be restarted.
96 */ 118 */
97 tmp = spi_w8r8(spi, M41T93_REG_ALM_HOUR_HT); 119 tmp = spi_w8r8(spi, M41T93_REG_ALM_HOUR_HT);
98 if (tmp < 0) 120 if (tmp < 0)
@@ -110,21 +132,7 @@ static int m41t93_get_time(struct device *dev, struct rtc_time *tm)
110 132
111 if (tmp & M41T93_FLAG_OF) { 133 if (tmp & M41T93_FLAG_OF) {
112 ret = -EINVAL; 134 ret = -EINVAL;
113 dev_warn(&spi->dev, "OF bit is set, resetting.\n"); 135 dev_warn(&spi->dev, "OF bit is set, write time to restart.\n");
114 m41t93_set_reg(spi, M41T93_REG_FLAGS, tmp & ~M41T93_FLAG_OF);
115
116 tmp = spi_w8r8(spi, M41T93_REG_FLAGS);
117 if (tmp < 0)
118 return tmp;
119 else if (tmp & M41T93_FLAG_OF) {
120 u8 reset_osc = buf[M41T93_REG_ST_SEC] | M41T93_FLAG_ST;
121
122 dev_warn(&spi->dev,
123 "OF bit is still set, kickstarting clock.\n");
124 m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
125 reset_osc &= ~M41T93_FLAG_ST;
126 m41t93_set_reg(spi, M41T93_REG_ST_SEC, reset_osc);
127 }
128 } 136 }
129 137
130 if (tmp & M41T93_FLAG_BL) 138 if (tmp & M41T93_FLAG_BL)
diff --git a/drivers/rtc/rtc-pcf8563.c b/drivers/rtc/rtc-pcf8563.c
index bc0677de1996..97a3284bb7c6 100644
--- a/drivers/rtc/rtc-pcf8563.c
+++ b/drivers/rtc/rtc-pcf8563.c
@@ -64,6 +64,7 @@ struct pcf8563 {
64 * 1970...2069. 64 * 1970...2069.
65 */ 65 */
66 int c_polarity; /* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */ 66 int c_polarity; /* 0: MO_C=1 means 19xx, otherwise MO_C=1 means 20xx */
67 int voltage_low; /* incicates if a low_voltage was detected */
67}; 68};
68 69
69/* 70/*
@@ -86,9 +87,11 @@ static int pcf8563_get_datetime(struct i2c_client *client, struct rtc_time *tm)
86 return -EIO; 87 return -EIO;
87 } 88 }
88 89
89 if (buf[PCF8563_REG_SC] & PCF8563_SC_LV) 90 if (buf[PCF8563_REG_SC] & PCF8563_SC_LV) {
91 pcf8563->voltage_low = 1;
90 dev_info(&client->dev, 92 dev_info(&client->dev,
91 "low voltage detected, date/time is not reliable.\n"); 93 "low voltage detected, date/time is not reliable.\n");
94 }
92 95
93 dev_dbg(&client->dev, 96 dev_dbg(&client->dev,
94 "%s: raw data is st1=%02x, st2=%02x, sec=%02x, min=%02x, hr=%02x, " 97 "%s: raw data is st1=%02x, st2=%02x, sec=%02x, min=%02x, hr=%02x, "
@@ -173,6 +176,44 @@ static int pcf8563_set_datetime(struct i2c_client *client, struct rtc_time *tm)
173 return 0; 176 return 0;
174} 177}
175 178
179#ifdef CONFIG_RTC_INTF_DEV
180static int pcf8563_rtc_ioctl(struct device *dev, unsigned int cmd, unsigned long arg)
181{
182 struct pcf8563 *pcf8563 = i2c_get_clientdata(to_i2c_client(dev));
183 struct rtc_time tm;
184
185 switch (cmd) {
186 case RTC_VL_READ:
187 if (pcf8563->voltage_low)
188 dev_info(dev, "low voltage detected, date/time is not reliable.\n");
189
190 if (copy_to_user((void __user *)arg, &pcf8563->voltage_low,
191 sizeof(int)))
192 return -EFAULT;
193 return 0;
194 case RTC_VL_CLR:
195 /*
196 * Clear the VL bit in the seconds register in case
197 * the time has not been set already (which would
198 * have cleared it). This does not really matter
199 * because of the cached voltage_low value but do it
200 * anyway for consistency.
201 */
202 if (pcf8563_get_datetime(to_i2c_client(dev), &tm))
203 pcf8563_set_datetime(to_i2c_client(dev), &tm);
204
205 /* Clear the cached value. */
206 pcf8563->voltage_low = 0;
207
208 return 0;
209 default:
210 return -ENOIOCTLCMD;
211 }
212}
213#else
214#define pcf8563_rtc_ioctl NULL
215#endif
216
176static int pcf8563_rtc_read_time(struct device *dev, struct rtc_time *tm) 217static int pcf8563_rtc_read_time(struct device *dev, struct rtc_time *tm)
177{ 218{
178 return pcf8563_get_datetime(to_i2c_client(dev), tm); 219 return pcf8563_get_datetime(to_i2c_client(dev), tm);
@@ -184,6 +225,7 @@ static int pcf8563_rtc_set_time(struct device *dev, struct rtc_time *tm)
184} 225}
185 226
186static const struct rtc_class_ops pcf8563_rtc_ops = { 227static const struct rtc_class_ops pcf8563_rtc_ops = {
228 .ioctl = pcf8563_rtc_ioctl,
187 .read_time = pcf8563_rtc_read_time, 229 .read_time = pcf8563_rtc_read_time,
188 .set_time = pcf8563_rtc_set_time, 230 .set_time = pcf8563_rtc_set_time,
189}; 231};
diff --git a/drivers/rtc/rtc-pl031.c b/drivers/rtc/rtc-pl031.c
index f027c063fb20..cc0533994f6e 100644
--- a/drivers/rtc/rtc-pl031.c
+++ b/drivers/rtc/rtc-pl031.c
@@ -220,17 +220,9 @@ static irqreturn_t pl031_interrupt(int irq, void *dev_id)
220 unsigned long events = 0; 220 unsigned long events = 0;
221 221
222 rtcmis = readl(ldata->base + RTC_MIS); 222 rtcmis = readl(ldata->base + RTC_MIS);
223 if (rtcmis) { 223 if (rtcmis & RTC_BIT_AI) {
224 writel(rtcmis, ldata->base + RTC_ICR); 224 writel(RTC_BIT_AI, ldata->base + RTC_ICR);
225 225 events |= (RTC_AF | RTC_IRQF);
226 if (rtcmis & RTC_BIT_AI)
227 events |= (RTC_AF | RTC_IRQF);
228
229 /* Timer interrupt is only available in ST variants */
230 if ((rtcmis & RTC_BIT_PI) &&
231 (ldata->hw_designer == AMBA_VENDOR_ST))
232 events |= (RTC_PF | RTC_IRQF);
233
234 rtc_update_irq(ldata->rtc, 1, events); 226 rtc_update_irq(ldata->rtc, 1, events);
235 227
236 return IRQ_HANDLED; 228 return IRQ_HANDLED;
diff --git a/drivers/rtc/rtc-s3c.c b/drivers/rtc/rtc-s3c.c
index 3f3a29752369..7e6af0b22f17 100644
--- a/drivers/rtc/rtc-s3c.c
+++ b/drivers/rtc/rtc-s3c.c
@@ -670,6 +670,7 @@ static int s3c_rtc_resume(struct platform_device *pdev)
670#define s3c_rtc_resume NULL 670#define s3c_rtc_resume NULL
671#endif 671#endif
672 672
673#ifdef CONFIG_OF
673static struct s3c_rtc_drv_data s3c_rtc_drv_data_array[] = { 674static struct s3c_rtc_drv_data s3c_rtc_drv_data_array[] = {
674 [TYPE_S3C2410] = { TYPE_S3C2410 }, 675 [TYPE_S3C2410] = { TYPE_S3C2410 },
675 [TYPE_S3C2416] = { TYPE_S3C2416 }, 676 [TYPE_S3C2416] = { TYPE_S3C2416 },
@@ -677,7 +678,6 @@ static struct s3c_rtc_drv_data s3c_rtc_drv_data_array[] = {
677 [TYPE_S3C64XX] = { TYPE_S3C64XX }, 678 [TYPE_S3C64XX] = { TYPE_S3C64XX },
678}; 679};
679 680
680#ifdef CONFIG_OF
681static const struct of_device_id s3c_rtc_dt_match[] = { 681static const struct of_device_id s3c_rtc_dt_match[] = {
682 { 682 {
683 .compatible = "samsung,s3c2410-rtc", 683 .compatible = "samsung,s3c2410-rtc",
diff --git a/drivers/rtc/rtc-spear.c b/drivers/rtc/rtc-spear.c
index e38da0dc4187..1f76320e545b 100644
--- a/drivers/rtc/rtc-spear.c
+++ b/drivers/rtc/rtc-spear.c
@@ -16,6 +16,7 @@
16#include <linux/io.h> 16#include <linux/io.h>
17#include <linux/irq.h> 17#include <linux/irq.h>
18#include <linux/module.h> 18#include <linux/module.h>
19#include <linux/of.h>
19#include <linux/platform_device.h> 20#include <linux/platform_device.h>
20#include <linux/rtc.h> 21#include <linux/rtc.h>
21#include <linux/slab.h> 22#include <linux/slab.h>
@@ -519,6 +520,14 @@ static void spear_rtc_shutdown(struct platform_device *pdev)
519 clk_disable(config->clk); 520 clk_disable(config->clk);
520} 521}
521 522
523#ifdef CONFIG_OF
524static const struct of_device_id spear_rtc_id_table[] = {
525 { .compatible = "st,spear600-rtc" },
526 {}
527};
528MODULE_DEVICE_TABLE(of, spear_rtc_id_table);
529#endif
530
522static struct platform_driver spear_rtc_driver = { 531static struct platform_driver spear_rtc_driver = {
523 .probe = spear_rtc_probe, 532 .probe = spear_rtc_probe,
524 .remove = __devexit_p(spear_rtc_remove), 533 .remove = __devexit_p(spear_rtc_remove),
@@ -527,6 +536,7 @@ static struct platform_driver spear_rtc_driver = {
527 .shutdown = spear_rtc_shutdown, 536 .shutdown = spear_rtc_shutdown,
528 .driver = { 537 .driver = {
529 .name = "rtc-spear", 538 .name = "rtc-spear",
539 .of_match_table = of_match_ptr(spear_rtc_id_table),
530 }, 540 },
531}; 541};
532 542
diff --git a/drivers/rtc/rtc-tegra.c b/drivers/rtc/rtc-tegra.c
index 75259fe38602..c006025cecc8 100644
--- a/drivers/rtc/rtc-tegra.c
+++ b/drivers/rtc/rtc-tegra.c
@@ -309,7 +309,8 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
309 struct resource *res; 309 struct resource *res;
310 int ret; 310 int ret;
311 311
312 info = kzalloc(sizeof(struct tegra_rtc_info), GFP_KERNEL); 312 info = devm_kzalloc(&pdev->dev, sizeof(struct tegra_rtc_info),
313 GFP_KERNEL);
313 if (!info) 314 if (!info)
314 return -ENOMEM; 315 return -ENOMEM;
315 316
@@ -317,29 +318,18 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
317 if (!res) { 318 if (!res) {
318 dev_err(&pdev->dev, 319 dev_err(&pdev->dev,
319 "Unable to allocate resources for device.\n"); 320 "Unable to allocate resources for device.\n");
320 ret = -EBUSY; 321 return -EBUSY;
321 goto err_free_info;
322 } 322 }
323 323
324 if (!request_mem_region(res->start, resource_size(res), pdev->name)) { 324 info->rtc_base = devm_request_and_ioremap(&pdev->dev, res);
325 dev_err(&pdev->dev, 325 if (!info->rtc_base) {
326 "Unable to request mem region for device.\n"); 326 dev_err(&pdev->dev, "Unable to request mem region and grab IOs for device.\n");
327 ret = -EBUSY; 327 return -EBUSY;
328 goto err_free_info;
329 } 328 }
330 329
331 info->tegra_rtc_irq = platform_get_irq(pdev, 0); 330 info->tegra_rtc_irq = platform_get_irq(pdev, 0);
332 if (info->tegra_rtc_irq <= 0) { 331 if (info->tegra_rtc_irq <= 0)
333 ret = -EBUSY; 332 return -EBUSY;
334 goto err_release_mem_region;
335 }
336
337 info->rtc_base = ioremap_nocache(res->start, resource_size(res));
338 if (!info->rtc_base) {
339 dev_err(&pdev->dev, "Unable to grab IOs for device.\n");
340 ret = -EBUSY;
341 goto err_release_mem_region;
342 }
343 333
344 /* set context info. */ 334 /* set context info. */
345 info->pdev = pdev; 335 info->pdev = pdev;
@@ -362,11 +352,12 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
362 dev_err(&pdev->dev, 352 dev_err(&pdev->dev,
363 "Unable to register device (err=%d).\n", 353 "Unable to register device (err=%d).\n",
364 ret); 354 ret);
365 goto err_iounmap; 355 return ret;
366 } 356 }
367 357
368 ret = request_irq(info->tegra_rtc_irq, tegra_rtc_irq_handler, 358 ret = devm_request_irq(&pdev->dev, info->tegra_rtc_irq,
369 IRQF_TRIGGER_HIGH, "rtc alarm", &pdev->dev); 359 tegra_rtc_irq_handler, IRQF_TRIGGER_HIGH,
360 "rtc alarm", &pdev->dev);
370 if (ret) { 361 if (ret) {
371 dev_err(&pdev->dev, 362 dev_err(&pdev->dev,
372 "Unable to request interrupt for device (err=%d).\n", 363 "Unable to request interrupt for device (err=%d).\n",
@@ -380,12 +371,6 @@ static int __devinit tegra_rtc_probe(struct platform_device *pdev)
380 371
381err_dev_unreg: 372err_dev_unreg:
382 rtc_device_unregister(info->rtc_dev); 373 rtc_device_unregister(info->rtc_dev);
383err_iounmap:
384 iounmap(info->rtc_base);
385err_release_mem_region:
386 release_mem_region(res->start, resource_size(res));
387err_free_info:
388 kfree(info);
389 374
390 return ret; 375 return ret;
391} 376}
@@ -393,17 +378,8 @@ err_free_info:
393static int __devexit tegra_rtc_remove(struct platform_device *pdev) 378static int __devexit tegra_rtc_remove(struct platform_device *pdev)
394{ 379{
395 struct tegra_rtc_info *info = platform_get_drvdata(pdev); 380 struct tegra_rtc_info *info = platform_get_drvdata(pdev);
396 struct resource *res;
397
398 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
399 if (!res)
400 return -EBUSY;
401 381
402 free_irq(info->tegra_rtc_irq, &pdev->dev);
403 rtc_device_unregister(info->rtc_dev); 382 rtc_device_unregister(info->rtc_dev);
404 iounmap(info->rtc_base);
405 release_mem_region(res->start, resource_size(res));
406 kfree(info);
407 383
408 platform_set_drvdata(pdev, NULL); 384 platform_set_drvdata(pdev, NULL);
409 385
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index 3b6e6a67e765..59c6245e0421 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -396,7 +396,7 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
396{ 396{
397 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent); 397 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
398 struct wm831x_rtc *wm831x_rtc; 398 struct wm831x_rtc *wm831x_rtc;
399 int alm_irq = platform_get_irq_byname(pdev, "ALM"); 399 int alm_irq = wm831x_irq(wm831x, platform_get_irq_byname(pdev, "ALM"));
400 int ret = 0; 400 int ret = 0;
401 401
402 wm831x_rtc = devm_kzalloc(&pdev->dev, sizeof(*wm831x_rtc), GFP_KERNEL); 402 wm831x_rtc = devm_kzalloc(&pdev->dev, sizeof(*wm831x_rtc), GFP_KERNEL);
diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c
index 36506366158d..766cb7b19b40 100644
--- a/drivers/s390/char/sclp_cmd.c
+++ b/drivers/s390/char/sclp_cmd.c
@@ -17,6 +17,7 @@
17#include <linux/mm.h> 17#include <linux/mm.h>
18#include <linux/mmzone.h> 18#include <linux/mmzone.h>
19#include <linux/memory.h> 19#include <linux/memory.h>
20#include <linux/module.h>
20#include <linux/platform_device.h> 21#include <linux/platform_device.h>
21#include <asm/chpid.h> 22#include <asm/chpid.h>
22#include <asm/sclp.h> 23#include <asm/sclp.h>
@@ -38,7 +39,8 @@ struct read_info_sccb {
38 u64 facilities; /* 48-55 */ 39 u64 facilities; /* 48-55 */
39 u8 _reserved2[84 - 56]; /* 56-83 */ 40 u8 _reserved2[84 - 56]; /* 56-83 */
40 u8 fac84; /* 84 */ 41 u8 fac84; /* 84 */
41 u8 _reserved3[91 - 85]; /* 85-90 */ 42 u8 fac85; /* 85 */
43 u8 _reserved3[91 - 86]; /* 86-90 */
42 u8 flags; /* 91 */ 44 u8 flags; /* 91 */
43 u8 _reserved4[100 - 92]; /* 92-99 */ 45 u8 _reserved4[100 - 92]; /* 92-99 */
44 u32 rnsize2; /* 100-103 */ 46 u32 rnsize2; /* 100-103 */
@@ -51,6 +53,7 @@ static int __initdata early_read_info_sccb_valid;
51 53
52u64 sclp_facilities; 54u64 sclp_facilities;
53static u8 sclp_fac84; 55static u8 sclp_fac84;
56static u8 sclp_fac85;
54static unsigned long long rzm; 57static unsigned long long rzm;
55static unsigned long long rnmax; 58static unsigned long long rnmax;
56 59
@@ -112,6 +115,7 @@ void __init sclp_facilities_detect(void)
112 sccb = &early_read_info_sccb; 115 sccb = &early_read_info_sccb;
113 sclp_facilities = sccb->facilities; 116 sclp_facilities = sccb->facilities;
114 sclp_fac84 = sccb->fac84; 117 sclp_fac84 = sccb->fac84;
118 sclp_fac85 = sccb->fac85;
115 rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2; 119 rnmax = sccb->rnmax ? sccb->rnmax : sccb->rnmax2;
116 rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2; 120 rzm = sccb->rnsize ? sccb->rnsize : sccb->rnsize2;
117 rzm <<= 20; 121 rzm <<= 20;
@@ -127,6 +131,12 @@ unsigned long long sclp_get_rzm(void)
127 return rzm; 131 return rzm;
128} 132}
129 133
134u8 sclp_get_fac85(void)
135{
136 return sclp_fac85;
137}
138EXPORT_SYMBOL_GPL(sclp_get_fac85);
139
130/* 140/*
131 * This function will be called after sclp_facilities_detect(), which gets 141 * This function will be called after sclp_facilities_detect(), which gets
132 * called from early.c code. Therefore the sccb should have valid contents. 142 * called from early.c code. Therefore the sccb should have valid contents.
diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
index 00c024039c97..cd2fe350e724 100644
--- a/drivers/spi/Kconfig
+++ b/drivers/spi/Kconfig
@@ -311,7 +311,7 @@ config SPI_S3C24XX_FIQ
311 311
312config SPI_S3C64XX 312config SPI_S3C64XX
313 tristate "Samsung S3C64XX series type SPI" 313 tristate "Samsung S3C64XX series type SPI"
314 depends on (ARCH_S3C64XX || ARCH_S5P64X0 || ARCH_EXYNOS) 314 depends on (ARCH_S3C24XX || ARCH_S3C64XX || ARCH_S5P64X0 || ARCH_EXYNOS)
315 select S3C64XX_DMA if ARCH_S3C64XX 315 select S3C64XX_DMA if ARCH_S3C64XX
316 help 316 help
317 SPI driver for Samsung S3C64XX and newer SoCs. 317 SPI driver for Samsung S3C64XX and newer SoCs.
diff --git a/drivers/spi/spi-imx.c b/drivers/spi/spi-imx.c
index 69c9a6601f45..47877d687614 100644
--- a/drivers/spi/spi-imx.c
+++ b/drivers/spi/spi-imx.c
@@ -86,7 +86,8 @@ struct spi_imx_data {
86 struct completion xfer_done; 86 struct completion xfer_done;
87 void __iomem *base; 87 void __iomem *base;
88 int irq; 88 int irq;
89 struct clk *clk; 89 struct clk *clk_per;
90 struct clk *clk_ipg;
90 unsigned long spi_clk; 91 unsigned long spi_clk;
91 92
92 unsigned int count; 93 unsigned int count;
@@ -853,15 +854,22 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
853 goto out_free_irq; 854 goto out_free_irq;
854 } 855 }
855 856
856 spi_imx->clk = clk_get(&pdev->dev, NULL); 857 spi_imx->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
857 if (IS_ERR(spi_imx->clk)) { 858 if (IS_ERR(spi_imx->clk_ipg)) {
858 dev_err(&pdev->dev, "unable to get clock\n"); 859 ret = PTR_ERR(spi_imx->clk_ipg);
859 ret = PTR_ERR(spi_imx->clk);
860 goto out_free_irq; 860 goto out_free_irq;
861 } 861 }
862 862
863 clk_enable(spi_imx->clk); 863 spi_imx->clk_per = devm_clk_get(&pdev->dev, "per");
864 spi_imx->spi_clk = clk_get_rate(spi_imx->clk); 864 if (IS_ERR(spi_imx->clk_per)) {
865 ret = PTR_ERR(spi_imx->clk_per);
866 goto out_free_irq;
867 }
868
869 clk_prepare_enable(spi_imx->clk_per);
870 clk_prepare_enable(spi_imx->clk_ipg);
871
872 spi_imx->spi_clk = clk_get_rate(spi_imx->clk_per);
865 873
866 spi_imx->devtype_data->reset(spi_imx); 874 spi_imx->devtype_data->reset(spi_imx);
867 875
@@ -879,8 +887,8 @@ static int __devinit spi_imx_probe(struct platform_device *pdev)
879 return ret; 887 return ret;
880 888
881out_clk_put: 889out_clk_put:
882 clk_disable(spi_imx->clk); 890 clk_disable_unprepare(spi_imx->clk_per);
883 clk_put(spi_imx->clk); 891 clk_disable_unprepare(spi_imx->clk_ipg);
884out_free_irq: 892out_free_irq:
885 free_irq(spi_imx->irq, spi_imx); 893 free_irq(spi_imx->irq, spi_imx);
886out_iounmap: 894out_iounmap:
@@ -908,8 +916,8 @@ static int __devexit spi_imx_remove(struct platform_device *pdev)
908 spi_bitbang_stop(&spi_imx->bitbang); 916 spi_bitbang_stop(&spi_imx->bitbang);
909 917
910 writel(0, spi_imx->base + MXC_CSPICTRL); 918 writel(0, spi_imx->base + MXC_CSPICTRL);
911 clk_disable(spi_imx->clk); 919 clk_disable_unprepare(spi_imx->clk_per);
912 clk_put(spi_imx->clk); 920 clk_disable_unprepare(spi_imx->clk_ipg);
913 free_irq(spi_imx->irq, spi_imx); 921 free_irq(spi_imx->irq, spi_imx);
914 iounmap(spi_imx->base); 922 iounmap(spi_imx->base);
915 923
diff --git a/drivers/spi/spi-orion.c b/drivers/spi/spi-orion.c
index e496f799b7a9..dfd04e91fa6d 100644
--- a/drivers/spi/spi-orion.c
+++ b/drivers/spi/spi-orion.c
@@ -16,8 +16,8 @@
16#include <linux/err.h> 16#include <linux/err.h>
17#include <linux/io.h> 17#include <linux/io.h>
18#include <linux/spi/spi.h> 18#include <linux/spi/spi.h>
19#include <linux/spi/orion_spi.h>
20#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/clk.h>
21#include <asm/unaligned.h> 21#include <asm/unaligned.h>
22 22
23#define DRIVER_NAME "orion_spi" 23#define DRIVER_NAME "orion_spi"
@@ -46,6 +46,7 @@ struct orion_spi {
46 unsigned int max_speed; 46 unsigned int max_speed;
47 unsigned int min_speed; 47 unsigned int min_speed;
48 struct orion_spi_info *spi_info; 48 struct orion_spi_info *spi_info;
49 struct clk *clk;
49}; 50};
50 51
51static struct workqueue_struct *orion_spi_wq; 52static struct workqueue_struct *orion_spi_wq;
@@ -104,7 +105,7 @@ static int orion_spi_baudrate_set(struct spi_device *spi, unsigned int speed)
104 105
105 orion_spi = spi_master_get_devdata(spi->master); 106 orion_spi = spi_master_get_devdata(spi->master);
106 107
107 tclk_hz = orion_spi->spi_info->tclk; 108 tclk_hz = clk_get_rate(orion_spi->clk);
108 109
109 /* 110 /*
110 * the supported rates are: 4,6,8...30 111 * the supported rates are: 4,6,8...30
@@ -450,6 +451,7 @@ static int __init orion_spi_probe(struct platform_device *pdev)
450 struct orion_spi *spi; 451 struct orion_spi *spi;
451 struct resource *r; 452 struct resource *r;
452 struct orion_spi_info *spi_info; 453 struct orion_spi_info *spi_info;
454 unsigned long tclk_hz;
453 int status = 0; 455 int status = 0;
454 456
455 spi_info = pdev->dev.platform_data; 457 spi_info = pdev->dev.platform_data;
@@ -476,19 +478,28 @@ static int __init orion_spi_probe(struct platform_device *pdev)
476 spi->master = master; 478 spi->master = master;
477 spi->spi_info = spi_info; 479 spi->spi_info = spi_info;
478 480
479 spi->max_speed = DIV_ROUND_UP(spi_info->tclk, 4); 481 spi->clk = clk_get(&pdev->dev, NULL);
480 spi->min_speed = DIV_ROUND_UP(spi_info->tclk, 30); 482 if (IS_ERR(spi->clk)) {
483 status = PTR_ERR(spi->clk);
484 goto out;
485 }
486
487 clk_prepare(spi->clk);
488 clk_enable(spi->clk);
489 tclk_hz = clk_get_rate(spi->clk);
490 spi->max_speed = DIV_ROUND_UP(tclk_hz, 4);
491 spi->min_speed = DIV_ROUND_UP(tclk_hz, 30);
481 492
482 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 493 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
483 if (r == NULL) { 494 if (r == NULL) {
484 status = -ENODEV; 495 status = -ENODEV;
485 goto out; 496 goto out_rel_clk;
486 } 497 }
487 498
488 if (!request_mem_region(r->start, resource_size(r), 499 if (!request_mem_region(r->start, resource_size(r),
489 dev_name(&pdev->dev))) { 500 dev_name(&pdev->dev))) {
490 status = -EBUSY; 501 status = -EBUSY;
491 goto out; 502 goto out_rel_clk;
492 } 503 }
493 spi->base = ioremap(r->start, SZ_1K); 504 spi->base = ioremap(r->start, SZ_1K);
494 505
@@ -508,7 +519,9 @@ static int __init orion_spi_probe(struct platform_device *pdev)
508 519
509out_rel_mem: 520out_rel_mem:
510 release_mem_region(r->start, resource_size(r)); 521 release_mem_region(r->start, resource_size(r));
511 522out_rel_clk:
523 clk_disable_unprepare(spi->clk);
524 clk_put(spi->clk);
512out: 525out:
513 spi_master_put(master); 526 spi_master_put(master);
514 return status; 527 return status;
@@ -526,6 +539,9 @@ static int __exit orion_spi_remove(struct platform_device *pdev)
526 539
527 cancel_work_sync(&spi->work); 540 cancel_work_sync(&spi->work);
528 541
542 clk_disable_unprepare(spi->clk);
543 clk_put(spi->clk);
544
529 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 545 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
530 release_mem_region(r->start, resource_size(r)); 546 release_mem_region(r->start, resource_size(r));
531 547
diff --git a/drivers/staging/android/ashmem.c b/drivers/staging/android/ashmem.c
index 4511420849bc..e84dbecd0991 100644
--- a/drivers/staging/android/ashmem.c
+++ b/drivers/staging/android/ashmem.c
@@ -19,6 +19,7 @@
19#include <linux/module.h> 19#include <linux/module.h>
20#include <linux/file.h> 20#include <linux/file.h>
21#include <linux/fs.h> 21#include <linux/fs.h>
22#include <linux/falloc.h>
22#include <linux/miscdevice.h> 23#include <linux/miscdevice.h>
23#include <linux/security.h> 24#include <linux/security.h>
24#include <linux/mm.h> 25#include <linux/mm.h>
@@ -363,11 +364,12 @@ static int ashmem_shrink(struct shrinker *s, struct shrink_control *sc)
363 364
364 mutex_lock(&ashmem_mutex); 365 mutex_lock(&ashmem_mutex);
365 list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) { 366 list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) {
366 struct inode *inode = range->asma->file->f_dentry->d_inode;
367 loff_t start = range->pgstart * PAGE_SIZE; 367 loff_t start = range->pgstart * PAGE_SIZE;
368 loff_t end = (range->pgend + 1) * PAGE_SIZE - 1; 368 loff_t end = (range->pgend + 1) * PAGE_SIZE;
369 369
370 vmtruncate_range(inode, start, end); 370 do_fallocate(range->asma->file,
371 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
372 start, end - start);
371 range->purged = ASHMEM_WAS_PURGED; 373 range->purged = ASHMEM_WAS_PURGED;
372 lru_del(range); 374 lru_del(range);
373 375
diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
index 59af3945ea85..65c7c62c7aae 100644
--- a/drivers/tty/pty.c
+++ b/drivers/tty/pty.c
@@ -633,7 +633,6 @@ static int ptmx_open(struct inode *inode, struct file *filp)
633 mutex_unlock(&devpts_mutex); 633 mutex_unlock(&devpts_mutex);
634 634
635 mutex_lock(&tty_mutex); 635 mutex_lock(&tty_mutex);
636 mutex_lock(&devpts_mutex);
637 tty = tty_init_dev(ptm_driver, index); 636 tty = tty_init_dev(ptm_driver, index);
638 637
639 if (IS_ERR(tty)) { 638 if (IS_ERR(tty)) {
@@ -643,7 +642,6 @@ static int ptmx_open(struct inode *inode, struct file *filp)
643 642
644 /* The tty returned here is locked so we can safely 643 /* The tty returned here is locked so we can safely
645 drop the mutex */ 644 drop the mutex */
646 mutex_unlock(&devpts_mutex);
647 mutex_unlock(&tty_mutex); 645 mutex_unlock(&tty_mutex);
648 646
649 set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */ 647 set_bit(TTY_PTY_LOCK, &tty->flags); /* LOCK THE SLAVE */
diff --git a/drivers/tty/serial/imx.c b/drivers/tty/serial/imx.c
index ec206732f68c..4ef747307ecb 100644
--- a/drivers/tty/serial/imx.c
+++ b/drivers/tty/serial/imx.c
@@ -205,7 +205,8 @@ struct imx_port {
205 unsigned int irda_inv_rx:1; 205 unsigned int irda_inv_rx:1;
206 unsigned int irda_inv_tx:1; 206 unsigned int irda_inv_tx:1;
207 unsigned short trcv_delay; /* transceiver delay */ 207 unsigned short trcv_delay; /* transceiver delay */
208 struct clk *clk; 208 struct clk *clk_ipg;
209 struct clk *clk_per;
209 struct imx_uart_data *devdata; 210 struct imx_uart_data *devdata;
210}; 211};
211 212
@@ -673,7 +674,7 @@ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode)
673 * RFDIV is set such way to satisfy requested uartclk value 674 * RFDIV is set such way to satisfy requested uartclk value
674 */ 675 */
675 val = TXTL << 10 | RXTL; 676 val = TXTL << 10 | RXTL;
676 ufcr_rfdiv = (clk_get_rate(sport->clk) + sport->port.uartclk / 2) 677 ufcr_rfdiv = (clk_get_rate(sport->clk_per) + sport->port.uartclk / 2)
677 / sport->port.uartclk; 678 / sport->port.uartclk;
678 679
679 if(!ufcr_rfdiv) 680 if(!ufcr_rfdiv)
@@ -1286,7 +1287,7 @@ imx_console_get_options(struct imx_port *sport, int *baud,
1286 else 1287 else
1287 ucfr_rfdiv = 6 - ucfr_rfdiv; 1288 ucfr_rfdiv = 6 - ucfr_rfdiv;
1288 1289
1289 uartclk = clk_get_rate(sport->clk); 1290 uartclk = clk_get_rate(sport->clk_per);
1290 uartclk /= ucfr_rfdiv; 1291 uartclk /= ucfr_rfdiv;
1291 1292
1292 { /* 1293 { /*
@@ -1511,14 +1512,22 @@ static int serial_imx_probe(struct platform_device *pdev)
1511 goto unmap; 1512 goto unmap;
1512 } 1513 }
1513 1514
1514 sport->clk = clk_get(&pdev->dev, "uart"); 1515 sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
1515 if (IS_ERR(sport->clk)) { 1516 if (IS_ERR(sport->clk_ipg)) {
1516 ret = PTR_ERR(sport->clk); 1517 ret = PTR_ERR(sport->clk_ipg);
1517 goto unmap; 1518 goto unmap;
1518 } 1519 }
1519 clk_prepare_enable(sport->clk);
1520 1520
1521 sport->port.uartclk = clk_get_rate(sport->clk); 1521 sport->clk_per = devm_clk_get(&pdev->dev, "per");
1522 if (IS_ERR(sport->clk_per)) {
1523 ret = PTR_ERR(sport->clk_per);
1524 goto unmap;
1525 }
1526
1527 clk_prepare_enable(sport->clk_per);
1528 clk_prepare_enable(sport->clk_ipg);
1529
1530 sport->port.uartclk = clk_get_rate(sport->clk_per);
1522 1531
1523 imx_ports[sport->port.line] = sport; 1532 imx_ports[sport->port.line] = sport;
1524 1533
@@ -1539,8 +1548,8 @@ deinit:
1539 if (pdata && pdata->exit) 1548 if (pdata && pdata->exit)
1540 pdata->exit(pdev); 1549 pdata->exit(pdev);
1541clkput: 1550clkput:
1542 clk_disable_unprepare(sport->clk); 1551 clk_disable_unprepare(sport->clk_per);
1543 clk_put(sport->clk); 1552 clk_disable_unprepare(sport->clk_ipg);
1544unmap: 1553unmap:
1545 iounmap(sport->port.membase); 1554 iounmap(sport->port.membase);
1546free: 1555free:
@@ -1558,11 +1567,10 @@ static int serial_imx_remove(struct platform_device *pdev)
1558 1567
1559 platform_set_drvdata(pdev, NULL); 1568 platform_set_drvdata(pdev, NULL);
1560 1569
1561 if (sport) { 1570 uart_remove_one_port(&imx_reg, &sport->port);
1562 uart_remove_one_port(&imx_reg, &sport->port); 1571
1563 clk_disable_unprepare(sport->clk); 1572 clk_disable_unprepare(sport->clk_per);
1564 clk_put(sport->clk); 1573 clk_disable_unprepare(sport->clk_ipg);
1565 }
1566 1574
1567 if (pdata && pdata->exit) 1575 if (pdata && pdata->exit)
1568 pdata->exit(pdev); 1576 pdata->exit(pdev);
diff --git a/drivers/tty/serial/lantiq.c b/drivers/tty/serial/lantiq.c
index 96c1cacc7360..02da071fe1e7 100644
--- a/drivers/tty/serial/lantiq.c
+++ b/drivers/tty/serial/lantiq.c
@@ -31,16 +31,19 @@
31#include <linux/tty_flip.h> 31#include <linux/tty_flip.h>
32#include <linux/serial_core.h> 32#include <linux/serial_core.h>
33#include <linux/serial.h> 33#include <linux/serial.h>
34#include <linux/platform_device.h> 34#include <linux/of_platform.h>
35#include <linux/of_address.h>
36#include <linux/of_irq.h>
35#include <linux/io.h> 37#include <linux/io.h>
36#include <linux/clk.h> 38#include <linux/clk.h>
39#include <linux/gpio.h>
37 40
38#include <lantiq_soc.h> 41#include <lantiq_soc.h>
39 42
40#define PORT_LTQ_ASC 111 43#define PORT_LTQ_ASC 111
41#define MAXPORTS 2 44#define MAXPORTS 2
42#define UART_DUMMY_UER_RX 1 45#define UART_DUMMY_UER_RX 1
43#define DRVNAME "ltq_asc" 46#define DRVNAME "lantiq,asc"
44#ifdef __BIG_ENDIAN 47#ifdef __BIG_ENDIAN
45#define LTQ_ASC_TBUF (0x0020 + 3) 48#define LTQ_ASC_TBUF (0x0020 + 3)
46#define LTQ_ASC_RBUF (0x0024 + 3) 49#define LTQ_ASC_RBUF (0x0024 + 3)
@@ -114,6 +117,9 @@ static DEFINE_SPINLOCK(ltq_asc_lock);
114 117
115struct ltq_uart_port { 118struct ltq_uart_port {
116 struct uart_port port; 119 struct uart_port port;
120 /* clock used to derive divider */
121 struct clk *fpiclk;
122 /* clock gating of the ASC core */
117 struct clk *clk; 123 struct clk *clk;
118 unsigned int tx_irq; 124 unsigned int tx_irq;
119 unsigned int rx_irq; 125 unsigned int rx_irq;
@@ -316,7 +322,9 @@ lqasc_startup(struct uart_port *port)
316 struct ltq_uart_port *ltq_port = to_ltq_uart_port(port); 322 struct ltq_uart_port *ltq_port = to_ltq_uart_port(port);
317 int retval; 323 int retval;
318 324
319 port->uartclk = clk_get_rate(ltq_port->clk); 325 if (ltq_port->clk)
326 clk_enable(ltq_port->clk);
327 port->uartclk = clk_get_rate(ltq_port->fpiclk);
320 328
321 ltq_w32_mask(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET), 329 ltq_w32_mask(ASCCLC_DISS | ASCCLC_RMCMASK, (1 << ASCCLC_RMCOFFSET),
322 port->membase + LTQ_ASC_CLC); 330 port->membase + LTQ_ASC_CLC);
@@ -382,6 +390,8 @@ lqasc_shutdown(struct uart_port *port)
382 port->membase + LTQ_ASC_RXFCON); 390 port->membase + LTQ_ASC_RXFCON);
383 ltq_w32_mask(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU, 391 ltq_w32_mask(ASCTXFCON_TXFEN, ASCTXFCON_TXFFLU,
384 port->membase + LTQ_ASC_TXFCON); 392 port->membase + LTQ_ASC_TXFCON);
393 if (ltq_port->clk)
394 clk_disable(ltq_port->clk);
385} 395}
386 396
387static void 397static void
@@ -630,7 +640,7 @@ lqasc_console_setup(struct console *co, char *options)
630 640
631 port = &ltq_port->port; 641 port = &ltq_port->port;
632 642
633 port->uartclk = clk_get_rate(ltq_port->clk); 643 port->uartclk = clk_get_rate(ltq_port->fpiclk);
634 644
635 if (options) 645 if (options)
636 uart_parse_options(options, &baud, &parity, &bits, &flow); 646 uart_parse_options(options, &baud, &parity, &bits, &flow);
@@ -668,37 +678,32 @@ static struct uart_driver lqasc_reg = {
668static int __init 678static int __init
669lqasc_probe(struct platform_device *pdev) 679lqasc_probe(struct platform_device *pdev)
670{ 680{
681 struct device_node *node = pdev->dev.of_node;
671 struct ltq_uart_port *ltq_port; 682 struct ltq_uart_port *ltq_port;
672 struct uart_port *port; 683 struct uart_port *port;
673 struct resource *mmres, *irqres; 684 struct resource *mmres, irqres[3];
674 int tx_irq, rx_irq, err_irq; 685 int line = 0;
675 struct clk *clk;
676 int ret; 686 int ret;
677 687
678 mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0); 688 mmres = platform_get_resource(pdev, IORESOURCE_MEM, 0);
679 irqres = platform_get_resource(pdev, IORESOURCE_IRQ, 0); 689 ret = of_irq_to_resource_table(node, irqres, 3);
680 if (!mmres || !irqres) 690 if (!mmres || (ret != 3)) {
691 dev_err(&pdev->dev,
692 "failed to get memory/irq for serial port\n");
681 return -ENODEV; 693 return -ENODEV;
694 }
682 695
683 if (pdev->id >= MAXPORTS) 696 /* check if this is the console port */
684 return -EBUSY; 697 if (mmres->start != CPHYSADDR(LTQ_EARLY_ASC))
698 line = 1;
685 699
686 if (lqasc_port[pdev->id] != NULL) 700 if (lqasc_port[line]) {
701 dev_err(&pdev->dev, "port %d already allocated\n", line);
687 return -EBUSY; 702 return -EBUSY;
688
689 clk = clk_get(&pdev->dev, "fpi");
690 if (IS_ERR(clk)) {
691 pr_err("failed to get fpi clk\n");
692 return -ENOENT;
693 } 703 }
694 704
695 tx_irq = platform_get_irq_byname(pdev, "tx"); 705 ltq_port = devm_kzalloc(&pdev->dev, sizeof(struct ltq_uart_port),
696 rx_irq = platform_get_irq_byname(pdev, "rx"); 706 GFP_KERNEL);
697 err_irq = platform_get_irq_byname(pdev, "err");
698 if ((tx_irq < 0) | (rx_irq < 0) | (err_irq < 0))
699 return -ENODEV;
700
701 ltq_port = kzalloc(sizeof(struct ltq_uart_port), GFP_KERNEL);
702 if (!ltq_port) 707 if (!ltq_port)
703 return -ENOMEM; 708 return -ENOMEM;
704 709
@@ -709,19 +714,26 @@ lqasc_probe(struct platform_device *pdev)
709 port->ops = &lqasc_pops; 714 port->ops = &lqasc_pops;
710 port->fifosize = 16; 715 port->fifosize = 16;
711 port->type = PORT_LTQ_ASC, 716 port->type = PORT_LTQ_ASC,
712 port->line = pdev->id; 717 port->line = line;
713 port->dev = &pdev->dev; 718 port->dev = &pdev->dev;
714 719 /* unused, just to be backward-compatible */
715 port->irq = tx_irq; /* unused, just to be backward-compatibe */ 720 port->irq = irqres[0].start;
716 port->mapbase = mmres->start; 721 port->mapbase = mmres->start;
717 722
718 ltq_port->clk = clk; 723 ltq_port->fpiclk = clk_get_fpi();
724 if (IS_ERR(ltq_port->fpiclk)) {
725 pr_err("failed to get fpi clk\n");
726 return -ENOENT;
727 }
719 728
720 ltq_port->tx_irq = tx_irq; 729 /* not all asc ports have clock gates, lets ignore the return code */
721 ltq_port->rx_irq = rx_irq; 730 ltq_port->clk = clk_get(&pdev->dev, NULL);
722 ltq_port->err_irq = err_irq;
723 731
724 lqasc_port[pdev->id] = ltq_port; 732 ltq_port->tx_irq = irqres[0].start;
733 ltq_port->rx_irq = irqres[1].start;
734 ltq_port->err_irq = irqres[2].start;
735
736 lqasc_port[line] = ltq_port;
725 platform_set_drvdata(pdev, ltq_port); 737 platform_set_drvdata(pdev, ltq_port);
726 738
727 ret = uart_add_one_port(&lqasc_reg, port); 739 ret = uart_add_one_port(&lqasc_reg, port);
@@ -729,10 +741,17 @@ lqasc_probe(struct platform_device *pdev)
729 return ret; 741 return ret;
730} 742}
731 743
744static const struct of_device_id ltq_asc_match[] = {
745 { .compatible = DRVNAME },
746 {},
747};
748MODULE_DEVICE_TABLE(of, ltq_asc_match);
749
732static struct platform_driver lqasc_driver = { 750static struct platform_driver lqasc_driver = {
733 .driver = { 751 .driver = {
734 .name = DRVNAME, 752 .name = DRVNAME,
735 .owner = THIS_MODULE, 753 .owner = THIS_MODULE,
754 .of_match_table = ltq_asc_match,
736 }, 755 },
737}; 756};
738 757
diff --git a/drivers/tty/serial/sb1250-duart.c b/drivers/tty/serial/sb1250-duart.c
index 0be8a2f00d0b..f76b1688c5c8 100644
--- a/drivers/tty/serial/sb1250-duart.c
+++ b/drivers/tty/serial/sb1250-duart.c
@@ -31,6 +31,7 @@
31#include <linux/interrupt.h> 31#include <linux/interrupt.h>
32#include <linux/ioport.h> 32#include <linux/ioport.h>
33#include <linux/kernel.h> 33#include <linux/kernel.h>
34#include <linux/module.h>
34#include <linux/major.h> 35#include <linux/major.h>
35#include <linux/serial.h> 36#include <linux/serial.h>
36#include <linux/serial_core.h> 37#include <linux/serial_core.h>
diff --git a/drivers/tty/serial/zs.c b/drivers/tty/serial/zs.c
index 4001eee6c08d..92c00b24d0df 100644
--- a/drivers/tty/serial/zs.c
+++ b/drivers/tty/serial/zs.c
@@ -57,6 +57,7 @@
57#include <linux/ioport.h> 57#include <linux/ioport.h>
58#include <linux/irqflags.h> 58#include <linux/irqflags.h>
59#include <linux/kernel.h> 59#include <linux/kernel.h>
60#include <linux/module.h>
60#include <linux/major.h> 61#include <linux/major.h>
61#include <linux/serial.h> 62#include <linux/serial.h>
62#include <linux/serial_core.h> 63#include <linux/serial_core.h>
diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
index 173a9000a6cb..ba8be396a621 100644
--- a/drivers/tty/tty_ldisc.c
+++ b/drivers/tty/tty_ldisc.c
@@ -894,6 +894,23 @@ int tty_ldisc_setup(struct tty_struct *tty, struct tty_struct *o_tty)
894 tty_ldisc_enable(tty); 894 tty_ldisc_enable(tty);
895 return 0; 895 return 0;
896} 896}
897
898static void tty_ldisc_kill(struct tty_struct *tty)
899{
900 mutex_lock(&tty->ldisc_mutex);
901 /*
902 * Now kill off the ldisc
903 */
904 tty_ldisc_close(tty, tty->ldisc);
905 tty_ldisc_put(tty->ldisc);
906 /* Force an oops if we mess this up */
907 tty->ldisc = NULL;
908
909 /* Ensure the next open requests the N_TTY ldisc */
910 tty_set_termios_ldisc(tty, N_TTY);
911 mutex_unlock(&tty->ldisc_mutex);
912}
913
897/** 914/**
898 * tty_ldisc_release - release line discipline 915 * tty_ldisc_release - release line discipline
899 * @tty: tty being shut down 916 * @tty: tty being shut down
@@ -912,27 +929,19 @@ void tty_ldisc_release(struct tty_struct *tty, struct tty_struct *o_tty)
912 * race with the set_ldisc code path. 929 * race with the set_ldisc code path.
913 */ 930 */
914 931
915 tty_unlock(tty); 932 tty_unlock_pair(tty, o_tty);
916 tty_ldisc_halt(tty); 933 tty_ldisc_halt(tty);
917 tty_ldisc_flush_works(tty); 934 tty_ldisc_flush_works(tty);
918 tty_lock(tty); 935 if (o_tty) {
919 936 tty_ldisc_halt(o_tty);
920 mutex_lock(&tty->ldisc_mutex); 937 tty_ldisc_flush_works(o_tty);
921 /* 938 }
922 * Now kill off the ldisc 939 tty_lock_pair(tty, o_tty);
923 */
924 tty_ldisc_close(tty, tty->ldisc);
925 tty_ldisc_put(tty->ldisc);
926 /* Force an oops if we mess this up */
927 tty->ldisc = NULL;
928 940
929 /* Ensure the next open requests the N_TTY ldisc */
930 tty_set_termios_ldisc(tty, N_TTY);
931 mutex_unlock(&tty->ldisc_mutex);
932 941
933 /* This will need doing differently if we need to lock */ 942 tty_ldisc_kill(tty);
934 if (o_tty) 943 if (o_tty)
935 tty_ldisc_release(o_tty, NULL); 944 tty_ldisc_kill(o_tty);
936 945
937 /* And the memory resources remaining (buffers, termios) will be 946 /* And the memory resources remaining (buffers, termios) will be
938 disposed of when the kref hits zero */ 947 disposed of when the kref hits zero */
diff --git a/drivers/usb/host/ehci-mxc.c b/drivers/usb/host/ehci-mxc.c
index a797d51ecbe8..c778ffe4e4e5 100644
--- a/drivers/usb/host/ehci-mxc.c
+++ b/drivers/usb/host/ehci-mxc.c
@@ -32,7 +32,7 @@
32#define ULPI_VIEWPORT_OFFSET 0x170 32#define ULPI_VIEWPORT_OFFSET 0x170
33 33
34struct ehci_mxc_priv { 34struct ehci_mxc_priv {
35 struct clk *usbclk, *ahbclk, *phy1clk; 35 struct clk *usbclk, *ahbclk, *phyclk;
36 struct usb_hcd *hcd; 36 struct usb_hcd *hcd;
37}; 37};
38 38
@@ -166,31 +166,26 @@ static int ehci_mxc_drv_probe(struct platform_device *pdev)
166 } 166 }
167 167
168 /* enable clocks */ 168 /* enable clocks */
169 priv->usbclk = clk_get(dev, "usb"); 169 priv->usbclk = clk_get(dev, "ipg");
170 if (IS_ERR(priv->usbclk)) { 170 if (IS_ERR(priv->usbclk)) {
171 ret = PTR_ERR(priv->usbclk); 171 ret = PTR_ERR(priv->usbclk);
172 goto err_clk; 172 goto err_clk;
173 } 173 }
174 clk_enable(priv->usbclk); 174 clk_prepare_enable(priv->usbclk);
175 175
176 if (!cpu_is_mx35() && !cpu_is_mx25()) { 176 priv->ahbclk = clk_get(dev, "ahb");
177 priv->ahbclk = clk_get(dev, "usb_ahb"); 177 if (IS_ERR(priv->ahbclk)) {
178 if (IS_ERR(priv->ahbclk)) { 178 ret = PTR_ERR(priv->ahbclk);
179 ret = PTR_ERR(priv->ahbclk); 179 goto err_clk_ahb;
180 goto err_clk_ahb;
181 }
182 clk_enable(priv->ahbclk);
183 } 180 }
181 clk_prepare_enable(priv->ahbclk);
184 182
185 /* "dr" device has its own clock on i.MX51 */ 183 /* "dr" device has its own clock on i.MX51 */
186 if (cpu_is_mx51() && (pdev->id == 0)) { 184 priv->phyclk = clk_get(dev, "phy");
187 priv->phy1clk = clk_get(dev, "usb_phy1"); 185 if (IS_ERR(priv->phyclk))
188 if (IS_ERR(priv->phy1clk)) { 186 priv->phyclk = NULL;
189 ret = PTR_ERR(priv->phy1clk); 187 if (priv->phyclk)
190 goto err_clk_phy; 188 clk_prepare_enable(priv->phyclk);
191 }
192 clk_enable(priv->phy1clk);
193 }
194 189
195 190
196 /* call platform specific init function */ 191 /* call platform specific init function */
@@ -265,17 +260,15 @@ err_add:
265 if (pdata && pdata->exit) 260 if (pdata && pdata->exit)
266 pdata->exit(pdev); 261 pdata->exit(pdev);
267err_init: 262err_init:
268 if (priv->phy1clk) { 263 if (priv->phyclk) {
269 clk_disable(priv->phy1clk); 264 clk_disable_unprepare(priv->phyclk);
270 clk_put(priv->phy1clk); 265 clk_put(priv->phyclk);
271 }
272err_clk_phy:
273 if (priv->ahbclk) {
274 clk_disable(priv->ahbclk);
275 clk_put(priv->ahbclk);
276 } 266 }
267
268 clk_disable_unprepare(priv->ahbclk);
269 clk_put(priv->ahbclk);
277err_clk_ahb: 270err_clk_ahb:
278 clk_disable(priv->usbclk); 271 clk_disable_unprepare(priv->usbclk);
279 clk_put(priv->usbclk); 272 clk_put(priv->usbclk);
280err_clk: 273err_clk:
281 iounmap(hcd->regs); 274 iounmap(hcd->regs);
@@ -307,15 +300,14 @@ static int __exit ehci_mxc_drv_remove(struct platform_device *pdev)
307 usb_put_hcd(hcd); 300 usb_put_hcd(hcd);
308 platform_set_drvdata(pdev, NULL); 301 platform_set_drvdata(pdev, NULL);
309 302
310 clk_disable(priv->usbclk); 303 clk_disable_unprepare(priv->usbclk);
311 clk_put(priv->usbclk); 304 clk_put(priv->usbclk);
312 if (priv->ahbclk) { 305 clk_disable_unprepare(priv->ahbclk);
313 clk_disable(priv->ahbclk); 306 clk_put(priv->ahbclk);
314 clk_put(priv->ahbclk); 307
315 } 308 if (priv->phyclk) {
316 if (priv->phy1clk) { 309 clk_disable_unprepare(priv->phyclk);
317 clk_disable(priv->phy1clk); 310 clk_put(priv->phyclk);
318 clk_put(priv->phy1clk);
319 } 311 }
320 312
321 kfree(priv); 313 kfree(priv);
diff --git a/drivers/usb/host/ehci-orion.c b/drivers/usb/host/ehci-orion.c
index 6c6a5a3b4ea7..82de1073aa52 100644
--- a/drivers/usb/host/ehci-orion.c
+++ b/drivers/usb/host/ehci-orion.c
@@ -12,6 +12,7 @@
12#include <linux/module.h> 12#include <linux/module.h>
13#include <linux/platform_device.h> 13#include <linux/platform_device.h>
14#include <linux/mbus.h> 14#include <linux/mbus.h>
15#include <linux/clk.h>
15#include <plat/ehci-orion.h> 16#include <plat/ehci-orion.h>
16 17
17#define rdl(off) __raw_readl(hcd->regs + (off)) 18#define rdl(off) __raw_readl(hcd->regs + (off))
@@ -198,6 +199,7 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
198 struct resource *res; 199 struct resource *res;
199 struct usb_hcd *hcd; 200 struct usb_hcd *hcd;
200 struct ehci_hcd *ehci; 201 struct ehci_hcd *ehci;
202 struct clk *clk;
201 void __iomem *regs; 203 void __iomem *regs;
202 int irq, err; 204 int irq, err;
203 205
@@ -238,6 +240,14 @@ static int __devinit ehci_orion_drv_probe(struct platform_device *pdev)
238 goto err2; 240 goto err2;
239 } 241 }
240 242
243 /* Not all platforms can gate the clock, so it is not
244 an error if the clock does not exists. */
245 clk = clk_get(&pdev->dev, NULL);
246 if (!IS_ERR(clk)) {
247 clk_prepare_enable(clk);
248 clk_put(clk);
249 }
250
241 hcd = usb_create_hcd(&ehci_orion_hc_driver, 251 hcd = usb_create_hcd(&ehci_orion_hc_driver,
242 &pdev->dev, dev_name(&pdev->dev)); 252 &pdev->dev, dev_name(&pdev->dev));
243 if (!hcd) { 253 if (!hcd) {
@@ -301,12 +311,18 @@ err1:
301static int __exit ehci_orion_drv_remove(struct platform_device *pdev) 311static int __exit ehci_orion_drv_remove(struct platform_device *pdev)
302{ 312{
303 struct usb_hcd *hcd = platform_get_drvdata(pdev); 313 struct usb_hcd *hcd = platform_get_drvdata(pdev);
314 struct clk *clk;
304 315
305 usb_remove_hcd(hcd); 316 usb_remove_hcd(hcd);
306 iounmap(hcd->regs); 317 iounmap(hcd->regs);
307 release_mem_region(hcd->rsrc_start, hcd->rsrc_len); 318 release_mem_region(hcd->rsrc_start, hcd->rsrc_len);
308 usb_put_hcd(hcd); 319 usb_put_hcd(hcd);
309 320
321 clk = clk_get(&pdev->dev, NULL);
322 if (!IS_ERR(clk)) {
323 clk_disable_unprepare(clk);
324 clk_put(clk);
325 }
310 return 0; 326 return 0;
311} 327}
312 328
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
index 4a44bf833611..68548236ec42 100644
--- a/drivers/usb/host/ehci-tegra.c
+++ b/drivers/usb/host/ehci-tegra.c
@@ -722,8 +722,9 @@ static int tegra_ehci_probe(struct platform_device *pdev)
722 } 722 }
723 } 723 }
724 724
725 tegra->phy = tegra_usb_phy_open(instance, hcd->regs, pdata->phy_config, 725 tegra->phy = tegra_usb_phy_open(&pdev->dev, instance, hcd->regs,
726 TEGRA_USB_PHY_MODE_HOST); 726 pdata->phy_config,
727 TEGRA_USB_PHY_MODE_HOST);
727 if (IS_ERR(tegra->phy)) { 728 if (IS_ERR(tegra->phy)) {
728 dev_err(&pdev->dev, "Failed to open USB phy\n"); 729 dev_err(&pdev->dev, "Failed to open USB phy\n");
729 err = -ENXIO; 730 err = -ENXIO;
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index af16884491ed..fa2b03750316 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -184,6 +184,18 @@ config BACKLIGHT_GENERIC
184 known as the Corgi backlight driver. If you have a Sharp Zaurus 184 known as the Corgi backlight driver. If you have a Sharp Zaurus
185 SL-C7xx, SL-Cxx00 or SL-6000x say y. 185 SL-C7xx, SL-Cxx00 or SL-6000x say y.
186 186
187config BACKLIGHT_LM3533
188 tristate "Backlight Driver for LM3533"
189 depends on BACKLIGHT_CLASS_DEVICE
190 depends on MFD_LM3533
191 help
192 Say Y to enable the backlight driver for National Semiconductor / TI
193 LM3533 Lighting Power chips.
194
195 The backlights can be controlled directly, through PWM input, or by
196 the ambient-light-sensor interface. The chip supports 256 brightness
197 levels.
198
187config BACKLIGHT_LOCOMO 199config BACKLIGHT_LOCOMO
188 tristate "Sharp LOCOMO LCD/Backlight Driver" 200 tristate "Sharp LOCOMO LCD/Backlight Driver"
189 depends on SHARP_LOCOMO 201 depends on SHARP_LOCOMO
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 36855ae887d6..a2ac9cfbaf6b 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -21,6 +21,7 @@ obj-$(CONFIG_BACKLIGHT_EP93XX) += ep93xx_bl.o
21obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o 21obj-$(CONFIG_BACKLIGHT_GENERIC) += generic_bl.o
22obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o 22obj-$(CONFIG_BACKLIGHT_HP700) += jornada720_bl.o
23obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o 23obj-$(CONFIG_BACKLIGHT_HP680) += hp680_bl.o
24obj-$(CONFIG_BACKLIGHT_LM3533) += lm3533_bl.o
24obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o 25obj-$(CONFIG_BACKLIGHT_LOCOMO) += locomolcd.o
25obj-$(CONFIG_BACKLIGHT_LP855X) += lp855x_bl.o 26obj-$(CONFIG_BACKLIGHT_LP855X) += lp855x_bl.o
26obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o 27obj-$(CONFIG_BACKLIGHT_OMAP1) += omap1_bl.o
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
index 4911ea7989c8..df5db99af23d 100644
--- a/drivers/video/backlight/adp5520_bl.c
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -160,7 +160,7 @@ static ssize_t adp5520_store(struct device *dev, const char *buf,
160 unsigned long val; 160 unsigned long val;
161 int ret; 161 int ret;
162 162
163 ret = strict_strtoul(buf, 10, &val); 163 ret = kstrtoul(buf, 10, &val);
164 if (ret) 164 if (ret)
165 return ret; 165 return ret;
166 166
@@ -214,7 +214,7 @@ static ssize_t adp5520_bl_daylight_max_store(struct device *dev,
214 struct adp5520_bl *data = dev_get_drvdata(dev); 214 struct adp5520_bl *data = dev_get_drvdata(dev);
215 int ret; 215 int ret;
216 216
217 ret = strict_strtoul(buf, 10, &data->cached_daylight_max); 217 ret = kstrtoul(buf, 10, &data->cached_daylight_max);
218 if (ret < 0) 218 if (ret < 0)
219 return ret; 219 return ret;
220 220
diff --git a/drivers/video/backlight/adp8860_bl.c b/drivers/video/backlight/adp8860_bl.c
index 550dbf0bb896..77d1fdba597f 100644
--- a/drivers/video/backlight/adp8860_bl.c
+++ b/drivers/video/backlight/adp8860_bl.c
@@ -222,7 +222,8 @@ static int __devinit adp8860_led_probe(struct i2c_client *client)
222 struct led_info *cur_led; 222 struct led_info *cur_led;
223 int ret, i; 223 int ret, i;
224 224
225 led = kzalloc(sizeof(*led) * pdata->num_leds, GFP_KERNEL); 225 led = devm_kzalloc(&client->dev, sizeof(*led) * pdata->num_leds,
226 GFP_KERNEL);
226 if (led == NULL) { 227 if (led == NULL) {
227 dev_err(&client->dev, "failed to alloc memory\n"); 228 dev_err(&client->dev, "failed to alloc memory\n");
228 return -ENOMEM; 229 return -ENOMEM;
@@ -236,7 +237,7 @@ static int __devinit adp8860_led_probe(struct i2c_client *client)
236 237
237 if (ret) { 238 if (ret) {
238 dev_err(&client->dev, "failed to write\n"); 239 dev_err(&client->dev, "failed to write\n");
239 goto err_free; 240 return ret;
240 } 241 }
241 242
242 for (i = 0; i < pdata->num_leds; ++i) { 243 for (i = 0; i < pdata->num_leds; ++i) {
@@ -291,9 +292,6 @@ static int __devinit adp8860_led_probe(struct i2c_client *client)
291 cancel_work_sync(&led[i].work); 292 cancel_work_sync(&led[i].work);
292 } 293 }
293 294
294 err_free:
295 kfree(led);
296
297 return ret; 295 return ret;
298} 296}
299 297
@@ -309,7 +307,6 @@ static int __devexit adp8860_led_remove(struct i2c_client *client)
309 cancel_work_sync(&data->led[i].work); 307 cancel_work_sync(&data->led[i].work);
310 } 308 }
311 309
312 kfree(data->led);
313 return 0; 310 return 0;
314} 311}
315#else 312#else
@@ -451,7 +448,7 @@ static ssize_t adp8860_store(struct device *dev, const char *buf,
451 unsigned long val; 448 unsigned long val;
452 int ret; 449 int ret;
453 450
454 ret = strict_strtoul(buf, 10, &val); 451 ret = kstrtoul(buf, 10, &val);
455 if (ret) 452 if (ret)
456 return ret; 453 return ret;
457 454
@@ -501,7 +498,7 @@ static ssize_t adp8860_bl_l1_daylight_max_store(struct device *dev,
501 struct device_attribute *attr, const char *buf, size_t count) 498 struct device_attribute *attr, const char *buf, size_t count)
502{ 499{
503 struct adp8860_bl *data = dev_get_drvdata(dev); 500 struct adp8860_bl *data = dev_get_drvdata(dev);
504 int ret = strict_strtoul(buf, 10, &data->cached_daylight_max); 501 int ret = kstrtoul(buf, 10, &data->cached_daylight_max);
505 if (ret) 502 if (ret)
506 return ret; 503 return ret;
507 504
@@ -608,7 +605,7 @@ static ssize_t adp8860_bl_ambient_light_zone_store(struct device *dev,
608 uint8_t reg_val; 605 uint8_t reg_val;
609 int ret; 606 int ret;
610 607
611 ret = strict_strtoul(buf, 10, &val); 608 ret = kstrtoul(buf, 10, &val);
612 if (ret) 609 if (ret)
613 return ret; 610 return ret;
614 611
@@ -675,13 +672,13 @@ static int __devinit adp8860_probe(struct i2c_client *client,
675 return -EINVAL; 672 return -EINVAL;
676 } 673 }
677 674
678 data = kzalloc(sizeof(*data), GFP_KERNEL); 675 data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
679 if (data == NULL) 676 if (data == NULL)
680 return -ENOMEM; 677 return -ENOMEM;
681 678
682 ret = adp8860_read(client, ADP8860_MFDVID, &reg_val); 679 ret = adp8860_read(client, ADP8860_MFDVID, &reg_val);
683 if (ret < 0) 680 if (ret < 0)
684 goto out2; 681 return ret;
685 682
686 switch (ADP8860_MANID(reg_val)) { 683 switch (ADP8860_MANID(reg_val)) {
687 case ADP8863_MANUFID: 684 case ADP8863_MANUFID:
@@ -694,8 +691,7 @@ static int __devinit adp8860_probe(struct i2c_client *client,
694 break; 691 break;
695 default: 692 default:
696 dev_err(&client->dev, "failed to probe\n"); 693 dev_err(&client->dev, "failed to probe\n");
697 ret = -ENODEV; 694 return -ENODEV;
698 goto out2;
699 } 695 }
700 696
701 /* It's confirmed that the DEVID field is actually a REVID */ 697 /* It's confirmed that the DEVID field is actually a REVID */
@@ -717,8 +713,7 @@ static int __devinit adp8860_probe(struct i2c_client *client,
717 &client->dev, data, &adp8860_bl_ops, &props); 713 &client->dev, data, &adp8860_bl_ops, &props);
718 if (IS_ERR(bl)) { 714 if (IS_ERR(bl)) {
719 dev_err(&client->dev, "failed to register backlight\n"); 715 dev_err(&client->dev, "failed to register backlight\n");
720 ret = PTR_ERR(bl); 716 return PTR_ERR(bl);
721 goto out2;
722 } 717 }
723 718
724 bl->props.brightness = ADP8860_MAX_BRIGHTNESS; 719 bl->props.brightness = ADP8860_MAX_BRIGHTNESS;
@@ -756,8 +751,6 @@ out:
756 &adp8860_bl_attr_group); 751 &adp8860_bl_attr_group);
757out1: 752out1:
758 backlight_device_unregister(bl); 753 backlight_device_unregister(bl);
759out2:
760 kfree(data);
761 754
762 return ret; 755 return ret;
763} 756}
@@ -776,7 +769,6 @@ static int __devexit adp8860_remove(struct i2c_client *client)
776 &adp8860_bl_attr_group); 769 &adp8860_bl_attr_group);
777 770
778 backlight_device_unregister(data->bl); 771 backlight_device_unregister(data->bl);
779 kfree(data);
780 772
781 return 0; 773 return 0;
782} 774}
diff --git a/drivers/video/backlight/adp8870_bl.c b/drivers/video/backlight/adp8870_bl.c
index 9be58c6f18f1..edf7f91c8e61 100644
--- a/drivers/video/backlight/adp8870_bl.c
+++ b/drivers/video/backlight/adp8870_bl.c
@@ -244,8 +244,8 @@ static int __devinit adp8870_led_probe(struct i2c_client *client)
244 struct led_info *cur_led; 244 struct led_info *cur_led;
245 int ret, i; 245 int ret, i;
246 246
247 247 led = devm_kzalloc(&client->dev, pdata->num_leds * sizeof(*led),
248 led = kcalloc(pdata->num_leds, sizeof(*led), GFP_KERNEL); 248 GFP_KERNEL);
249 if (led == NULL) { 249 if (led == NULL) {
250 dev_err(&client->dev, "failed to alloc memory\n"); 250 dev_err(&client->dev, "failed to alloc memory\n");
251 return -ENOMEM; 251 return -ENOMEM;
@@ -253,17 +253,17 @@ static int __devinit adp8870_led_probe(struct i2c_client *client)
253 253
254 ret = adp8870_write(client, ADP8870_ISCLAW, pdata->led_fade_law); 254 ret = adp8870_write(client, ADP8870_ISCLAW, pdata->led_fade_law);
255 if (ret) 255 if (ret)
256 goto err_free; 256 return ret;
257 257
258 ret = adp8870_write(client, ADP8870_ISCT1, 258 ret = adp8870_write(client, ADP8870_ISCT1,
259 (pdata->led_on_time & 0x3) << 6); 259 (pdata->led_on_time & 0x3) << 6);
260 if (ret) 260 if (ret)
261 goto err_free; 261 return ret;
262 262
263 ret = adp8870_write(client, ADP8870_ISCF, 263 ret = adp8870_write(client, ADP8870_ISCF,
264 FADE_VAL(pdata->led_fade_in, pdata->led_fade_out)); 264 FADE_VAL(pdata->led_fade_in, pdata->led_fade_out));
265 if (ret) 265 if (ret)
266 goto err_free; 266 return ret;
267 267
268 for (i = 0; i < pdata->num_leds; ++i) { 268 for (i = 0; i < pdata->num_leds; ++i) {
269 cur_led = &pdata->leds[i]; 269 cur_led = &pdata->leds[i];
@@ -317,9 +317,6 @@ static int __devinit adp8870_led_probe(struct i2c_client *client)
317 cancel_work_sync(&led[i].work); 317 cancel_work_sync(&led[i].work);
318 } 318 }
319 319
320 err_free:
321 kfree(led);
322
323 return ret; 320 return ret;
324} 321}
325 322
@@ -335,7 +332,6 @@ static int __devexit adp8870_led_remove(struct i2c_client *client)
335 cancel_work_sync(&data->led[i].work); 332 cancel_work_sync(&data->led[i].work);
336 } 333 }
337 334
338 kfree(data->led);
339 return 0; 335 return 0;
340} 336}
341#else 337#else
@@ -572,7 +568,7 @@ static ssize_t adp8870_store(struct device *dev, const char *buf,
572 unsigned long val; 568 unsigned long val;
573 int ret; 569 int ret;
574 570
575 ret = strict_strtoul(buf, 10, &val); 571 ret = kstrtoul(buf, 10, &val);
576 if (ret) 572 if (ret)
577 return ret; 573 return ret;
578 574
@@ -652,7 +648,7 @@ static ssize_t adp8870_bl_l1_daylight_max_store(struct device *dev,
652 struct device_attribute *attr, const char *buf, size_t count) 648 struct device_attribute *attr, const char *buf, size_t count)
653{ 649{
654 struct adp8870_bl *data = dev_get_drvdata(dev); 650 struct adp8870_bl *data = dev_get_drvdata(dev);
655 int ret = strict_strtoul(buf, 10, &data->cached_daylight_max); 651 int ret = kstrtoul(buf, 10, &data->cached_daylight_max);
656 if (ret) 652 if (ret)
657 return ret; 653 return ret;
658 654
@@ -794,7 +790,7 @@ static ssize_t adp8870_bl_ambient_light_zone_store(struct device *dev,
794 uint8_t reg_val; 790 uint8_t reg_val;
795 int ret; 791 int ret;
796 792
797 ret = strict_strtoul(buf, 10, &val); 793 ret = kstrtoul(buf, 10, &val);
798 if (ret) 794 if (ret)
799 return ret; 795 return ret;
800 796
@@ -874,7 +870,7 @@ static int __devinit adp8870_probe(struct i2c_client *client,
874 return -ENODEV; 870 return -ENODEV;
875 } 871 }
876 872
877 data = kzalloc(sizeof(*data), GFP_KERNEL); 873 data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL);
878 if (data == NULL) 874 if (data == NULL)
879 return -ENOMEM; 875 return -ENOMEM;
880 876
@@ -894,8 +890,7 @@ static int __devinit adp8870_probe(struct i2c_client *client,
894 &client->dev, data, &adp8870_bl_ops, &props); 890 &client->dev, data, &adp8870_bl_ops, &props);
895 if (IS_ERR(bl)) { 891 if (IS_ERR(bl)) {
896 dev_err(&client->dev, "failed to register backlight\n"); 892 dev_err(&client->dev, "failed to register backlight\n");
897 ret = PTR_ERR(bl); 893 return PTR_ERR(bl);
898 goto out2;
899 } 894 }
900 895
901 data->bl = bl; 896 data->bl = bl;
@@ -930,8 +925,6 @@ out:
930 &adp8870_bl_attr_group); 925 &adp8870_bl_attr_group);
931out1: 926out1:
932 backlight_device_unregister(bl); 927 backlight_device_unregister(bl);
933out2:
934 kfree(data);
935 928
936 return ret; 929 return ret;
937} 930}
@@ -950,7 +943,6 @@ static int __devexit adp8870_remove(struct i2c_client *client)
950 &adp8870_bl_attr_group); 943 &adp8870_bl_attr_group);
951 944
952 backlight_device_unregister(data->bl); 945 backlight_device_unregister(data->bl);
953 kfree(data);
954 946
955 return 0; 947 return 0;
956} 948}
diff --git a/drivers/video/backlight/ams369fg06.c b/drivers/video/backlight/ams369fg06.c
index 7bdadc790117..3729238e7096 100644
--- a/drivers/video/backlight/ams369fg06.c
+++ b/drivers/video/backlight/ams369fg06.c
@@ -482,7 +482,7 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
482 struct backlight_device *bd = NULL; 482 struct backlight_device *bd = NULL;
483 struct backlight_properties props; 483 struct backlight_properties props;
484 484
485 lcd = kzalloc(sizeof(struct ams369fg06), GFP_KERNEL); 485 lcd = devm_kzalloc(&spi->dev, sizeof(struct ams369fg06), GFP_KERNEL);
486 if (!lcd) 486 if (!lcd)
487 return -ENOMEM; 487 return -ENOMEM;
488 488
@@ -492,7 +492,7 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
492 ret = spi_setup(spi); 492 ret = spi_setup(spi);
493 if (ret < 0) { 493 if (ret < 0) {
494 dev_err(&spi->dev, "spi setup failed.\n"); 494 dev_err(&spi->dev, "spi setup failed.\n");
495 goto out_free_lcd; 495 return ret;
496 } 496 }
497 497
498 lcd->spi = spi; 498 lcd->spi = spi;
@@ -501,15 +501,13 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
501 lcd->lcd_pd = spi->dev.platform_data; 501 lcd->lcd_pd = spi->dev.platform_data;
502 if (!lcd->lcd_pd) { 502 if (!lcd->lcd_pd) {
503 dev_err(&spi->dev, "platform data is NULL\n"); 503 dev_err(&spi->dev, "platform data is NULL\n");
504 goto out_free_lcd; 504 return -EFAULT;
505 } 505 }
506 506
507 ld = lcd_device_register("ams369fg06", &spi->dev, lcd, 507 ld = lcd_device_register("ams369fg06", &spi->dev, lcd,
508 &ams369fg06_lcd_ops); 508 &ams369fg06_lcd_ops);
509 if (IS_ERR(ld)) { 509 if (IS_ERR(ld))
510 ret = PTR_ERR(ld); 510 return PTR_ERR(ld);
511 goto out_free_lcd;
512 }
513 511
514 lcd->ld = ld; 512 lcd->ld = ld;
515 513
@@ -547,8 +545,6 @@ static int __devinit ams369fg06_probe(struct spi_device *spi)
547 545
548out_lcd_unregister: 546out_lcd_unregister:
549 lcd_device_unregister(ld); 547 lcd_device_unregister(ld);
550out_free_lcd:
551 kfree(lcd);
552 return ret; 548 return ret;
553} 549}
554 550
@@ -559,7 +555,6 @@ static int __devexit ams369fg06_remove(struct spi_device *spi)
559 ams369fg06_power(lcd, FB_BLANK_POWERDOWN); 555 ams369fg06_power(lcd, FB_BLANK_POWERDOWN);
560 backlight_device_unregister(lcd->bd); 556 backlight_device_unregister(lcd->bd);
561 lcd_device_unregister(lcd->ld); 557 lcd_device_unregister(lcd->ld);
562 kfree(lcd);
563 558
564 return 0; 559 return 0;
565} 560}
@@ -619,7 +614,6 @@ static void ams369fg06_shutdown(struct spi_device *spi)
619static struct spi_driver ams369fg06_driver = { 614static struct spi_driver ams369fg06_driver = {
620 .driver = { 615 .driver = {
621 .name = "ams369fg06", 616 .name = "ams369fg06",
622 .bus = &spi_bus_type,
623 .owner = THIS_MODULE, 617 .owner = THIS_MODULE,
624 }, 618 },
625 .probe = ams369fg06_probe, 619 .probe = ams369fg06_probe,
diff --git a/drivers/video/backlight/apple_bl.c b/drivers/video/backlight/apple_bl.c
index a523b255e124..9dc73ac3709a 100644
--- a/drivers/video/backlight/apple_bl.c
+++ b/drivers/video/backlight/apple_bl.c
@@ -16,6 +16,8 @@
16 * get at the firmware code in order to figure out what it's actually doing. 16 * get at the firmware code in order to figure out what it's actually doing.
17 */ 17 */
18 18
19#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
20
19#include <linux/module.h> 21#include <linux/module.h>
20#include <linux/kernel.h> 22#include <linux/kernel.h>
21#include <linux/init.h> 23#include <linux/init.h>
@@ -25,6 +27,7 @@
25#include <linux/pci.h> 27#include <linux/pci.h>
26#include <linux/acpi.h> 28#include <linux/acpi.h>
27#include <linux/atomic.h> 29#include <linux/atomic.h>
30#include <linux/apple_bl.h>
28 31
29static struct backlight_device *apple_backlight_device; 32static struct backlight_device *apple_backlight_device;
30 33
@@ -39,8 +42,6 @@ struct hw_data {
39 42
40static const struct hw_data *hw_data; 43static const struct hw_data *hw_data;
41 44
42#define DRIVER "apple_backlight: "
43
44/* Module parameters. */ 45/* Module parameters. */
45static int debug; 46static int debug;
46module_param_named(debug, debug, int, 0644); 47module_param_named(debug, debug, int, 0644);
@@ -60,8 +61,7 @@ static int intel_chipset_send_intensity(struct backlight_device *bd)
60 int intensity = bd->props.brightness; 61 int intensity = bd->props.brightness;
61 62
62 if (debug) 63 if (debug)
63 printk(KERN_DEBUG DRIVER "setting brightness to %d\n", 64 pr_debug("setting brightness to %d\n", intensity);
64 intensity);
65 65
66 intel_chipset_set_brightness(intensity); 66 intel_chipset_set_brightness(intensity);
67 return 0; 67 return 0;
@@ -76,8 +76,7 @@ static int intel_chipset_get_intensity(struct backlight_device *bd)
76 intensity = inb(0xb3) >> 4; 76 intensity = inb(0xb3) >> 4;
77 77
78 if (debug) 78 if (debug)
79 printk(KERN_DEBUG DRIVER "read brightness of %d\n", 79 pr_debug("read brightness of %d\n", intensity);
80 intensity);
81 80
82 return intensity; 81 return intensity;
83} 82}
@@ -107,8 +106,7 @@ static int nvidia_chipset_send_intensity(struct backlight_device *bd)
107 int intensity = bd->props.brightness; 106 int intensity = bd->props.brightness;
108 107
109 if (debug) 108 if (debug)
110 printk(KERN_DEBUG DRIVER "setting brightness to %d\n", 109 pr_debug("setting brightness to %d\n", intensity);
111 intensity);
112 110
113 nvidia_chipset_set_brightness(intensity); 111 nvidia_chipset_set_brightness(intensity);
114 return 0; 112 return 0;
@@ -123,8 +121,7 @@ static int nvidia_chipset_get_intensity(struct backlight_device *bd)
123 intensity = inb(0x52f) >> 4; 121 intensity = inb(0x52f) >> 4;
124 122
125 if (debug) 123 if (debug)
126 printk(KERN_DEBUG DRIVER "read brightness of %d\n", 124 pr_debug("read brightness of %d\n", intensity);
127 intensity);
128 125
129 return intensity; 126 return intensity;
130} 127}
@@ -149,7 +146,7 @@ static int __devinit apple_bl_add(struct acpi_device *dev)
149 host = pci_get_bus_and_slot(0, 0); 146 host = pci_get_bus_and_slot(0, 0);
150 147
151 if (!host) { 148 if (!host) {
152 printk(KERN_ERR DRIVER "unable to find PCI host\n"); 149 pr_err("unable to find PCI host\n");
153 return -ENODEV; 150 return -ENODEV;
154 } 151 }
155 152
@@ -161,7 +158,7 @@ static int __devinit apple_bl_add(struct acpi_device *dev)
161 pci_dev_put(host); 158 pci_dev_put(host);
162 159
163 if (!hw_data) { 160 if (!hw_data) {
164 printk(KERN_ERR DRIVER "unknown hardware\n"); 161 pr_err("unknown hardware\n");
165 return -ENODEV; 162 return -ENODEV;
166 } 163 }
167 164
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index bf5b1ece7160..297db2fa91f5 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -5,6 +5,8 @@
5 * 5 *
6 */ 6 */
7 7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
8#include <linux/module.h> 10#include <linux/module.h>
9#include <linux/init.h> 11#include <linux/init.h>
10#include <linux/device.h> 12#include <linux/device.h>
@@ -123,7 +125,7 @@ static ssize_t backlight_store_power(struct device *dev,
123 rc = -ENXIO; 125 rc = -ENXIO;
124 mutex_lock(&bd->ops_lock); 126 mutex_lock(&bd->ops_lock);
125 if (bd->ops) { 127 if (bd->ops) {
126 pr_debug("backlight: set power to %lu\n", power); 128 pr_debug("set power to %lu\n", power);
127 if (bd->props.power != power) { 129 if (bd->props.power != power) {
128 bd->props.power = power; 130 bd->props.power = power;
129 backlight_update_status(bd); 131 backlight_update_status(bd);
@@ -161,8 +163,7 @@ static ssize_t backlight_store_brightness(struct device *dev,
161 if (brightness > bd->props.max_brightness) 163 if (brightness > bd->props.max_brightness)
162 rc = -EINVAL; 164 rc = -EINVAL;
163 else { 165 else {
164 pr_debug("backlight: set brightness to %lu\n", 166 pr_debug("set brightness to %lu\n", brightness);
165 brightness);
166 bd->props.brightness = brightness; 167 bd->props.brightness = brightness;
167 backlight_update_status(bd); 168 backlight_update_status(bd);
168 rc = count; 169 rc = count;
@@ -378,8 +379,8 @@ static int __init backlight_class_init(void)
378{ 379{
379 backlight_class = class_create(THIS_MODULE, "backlight"); 380 backlight_class = class_create(THIS_MODULE, "backlight");
380 if (IS_ERR(backlight_class)) { 381 if (IS_ERR(backlight_class)) {
381 printk(KERN_WARNING "Unable to create backlight class; errno = %ld\n", 382 pr_warn("Unable to create backlight class; errno = %ld\n",
382 PTR_ERR(backlight_class)); 383 PTR_ERR(backlight_class));
383 return PTR_ERR(backlight_class); 384 return PTR_ERR(backlight_class);
384 } 385 }
385 386
diff --git a/drivers/video/backlight/corgi_lcd.c b/drivers/video/backlight/corgi_lcd.c
index 6dab13fe562e..23d732677ba1 100644
--- a/drivers/video/backlight/corgi_lcd.c
+++ b/drivers/video/backlight/corgi_lcd.c
@@ -544,7 +544,7 @@ static int __devinit corgi_lcd_probe(struct spi_device *spi)
544 return -EINVAL; 544 return -EINVAL;
545 } 545 }
546 546
547 lcd = kzalloc(sizeof(struct corgi_lcd), GFP_KERNEL); 547 lcd = devm_kzalloc(&spi->dev, sizeof(struct corgi_lcd), GFP_KERNEL);
548 if (!lcd) { 548 if (!lcd) {
549 dev_err(&spi->dev, "failed to allocate memory\n"); 549 dev_err(&spi->dev, "failed to allocate memory\n");
550 return -ENOMEM; 550 return -ENOMEM;
@@ -554,10 +554,9 @@ static int __devinit corgi_lcd_probe(struct spi_device *spi)
554 554
555 lcd->lcd_dev = lcd_device_register("corgi_lcd", &spi->dev, 555 lcd->lcd_dev = lcd_device_register("corgi_lcd", &spi->dev,
556 lcd, &corgi_lcd_ops); 556 lcd, &corgi_lcd_ops);
557 if (IS_ERR(lcd->lcd_dev)) { 557 if (IS_ERR(lcd->lcd_dev))
558 ret = PTR_ERR(lcd->lcd_dev); 558 return PTR_ERR(lcd->lcd_dev);
559 goto err_free_lcd; 559
560 }
561 lcd->power = FB_BLANK_POWERDOWN; 560 lcd->power = FB_BLANK_POWERDOWN;
562 lcd->mode = (pdata) ? pdata->init_mode : CORGI_LCD_MODE_VGA; 561 lcd->mode = (pdata) ? pdata->init_mode : CORGI_LCD_MODE_VGA;
563 562
@@ -591,8 +590,6 @@ err_unregister_bl:
591 backlight_device_unregister(lcd->bl_dev); 590 backlight_device_unregister(lcd->bl_dev);
592err_unregister_lcd: 591err_unregister_lcd:
593 lcd_device_unregister(lcd->lcd_dev); 592 lcd_device_unregister(lcd->lcd_dev);
594err_free_lcd:
595 kfree(lcd);
596 return ret; 593 return ret;
597} 594}
598 595
@@ -613,7 +610,6 @@ static int __devexit corgi_lcd_remove(struct spi_device *spi)
613 610
614 corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN); 611 corgi_lcd_set_power(lcd->lcd_dev, FB_BLANK_POWERDOWN);
615 lcd_device_unregister(lcd->lcd_dev); 612 lcd_device_unregister(lcd->lcd_dev);
616 kfree(lcd);
617 613
618 return 0; 614 return 0;
619} 615}
diff --git a/drivers/video/backlight/cr_bllcd.c b/drivers/video/backlight/cr_bllcd.c
index 22489eb5f3e0..37bae801e23b 100644
--- a/drivers/video/backlight/cr_bllcd.c
+++ b/drivers/video/backlight/cr_bllcd.c
@@ -27,6 +27,8 @@
27 * Alan Hourihane <alanh-at-tungstengraphics-dot-com> 27 * Alan Hourihane <alanh-at-tungstengraphics-dot-com>
28 */ 28 */
29 29
30#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
31
30#include <linux/module.h> 32#include <linux/module.h>
31#include <linux/kernel.h> 33#include <linux/kernel.h>
32#include <linux/init.h> 34#include <linux/init.h>
@@ -180,14 +182,13 @@ static int cr_backlight_probe(struct platform_device *pdev)
180 lpc_dev = pci_get_device(PCI_VENDOR_ID_INTEL, 182 lpc_dev = pci_get_device(PCI_VENDOR_ID_INTEL,
181 CRVML_DEVICE_LPC, NULL); 183 CRVML_DEVICE_LPC, NULL);
182 if (!lpc_dev) { 184 if (!lpc_dev) {
183 printk("INTEL CARILLO RANCH LPC not found.\n"); 185 pr_err("INTEL CARILLO RANCH LPC not found.\n");
184 return -ENODEV; 186 return -ENODEV;
185 } 187 }
186 188
187 pci_read_config_byte(lpc_dev, CRVML_REG_GPIOEN, &dev_en); 189 pci_read_config_byte(lpc_dev, CRVML_REG_GPIOEN, &dev_en);
188 if (!(dev_en & CRVML_GPIOEN_BIT)) { 190 if (!(dev_en & CRVML_GPIOEN_BIT)) {
189 printk(KERN_ERR 191 pr_err("Carillo Ranch GPIO device was not enabled.\n");
190 "Carillo Ranch GPIO device was not enabled.\n");
191 pci_dev_put(lpc_dev); 192 pci_dev_put(lpc_dev);
192 return -ENODEV; 193 return -ENODEV;
193 } 194 }
@@ -270,7 +271,7 @@ static int __init cr_backlight_init(void)
270 return PTR_ERR(crp); 271 return PTR_ERR(crp);
271 } 272 }
272 273
273 printk("Carillo Ranch Backlight Driver Initialized.\n"); 274 pr_info("Carillo Ranch Backlight Driver Initialized.\n");
274 275
275 return 0; 276 return 0;
276} 277}
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 30e19681a30b..573c7ece0fde 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -136,6 +136,7 @@ static int da903x_backlight_probe(struct platform_device *pdev)
136 da903x_write(data->da903x_dev, DA9034_WLED_CONTROL2, 136 da903x_write(data->da903x_dev, DA9034_WLED_CONTROL2,
137 DA9034_WLED_ISET(pdata->output_current)); 137 DA9034_WLED_ISET(pdata->output_current));
138 138
139 memset(&props, 0, sizeof(props));
139 props.type = BACKLIGHT_RAW; 140 props.type = BACKLIGHT_RAW;
140 props.max_brightness = max_brightness; 141 props.max_brightness = max_brightness;
141 bl = backlight_device_register(pdev->name, data->da903x_dev, data, 142 bl = backlight_device_register(pdev->name, data->da903x_dev, data,
diff --git a/drivers/video/backlight/generic_bl.c b/drivers/video/backlight/generic_bl.c
index 9ce6170c1860..8c660fcd250d 100644
--- a/drivers/video/backlight/generic_bl.c
+++ b/drivers/video/backlight/generic_bl.c
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/module.h> 14#include <linux/module.h>
13#include <linux/kernel.h> 15#include <linux/kernel.h>
14#include <linux/init.h> 16#include <linux/init.h>
@@ -106,7 +108,7 @@ static int genericbl_probe(struct platform_device *pdev)
106 108
107 generic_backlight_device = bd; 109 generic_backlight_device = bd;
108 110
109 printk("Generic Backlight Driver Initialized.\n"); 111 pr_info("Generic Backlight Driver Initialized.\n");
110 return 0; 112 return 0;
111} 113}
112 114
@@ -120,7 +122,7 @@ static int genericbl_remove(struct platform_device *pdev)
120 122
121 backlight_device_unregister(bd); 123 backlight_device_unregister(bd);
122 124
123 printk("Generic Backlight Driver Unloaded\n"); 125 pr_info("Generic Backlight Driver Unloaded\n");
124 return 0; 126 return 0;
125} 127}
126 128
diff --git a/drivers/video/backlight/ili9320.c b/drivers/video/backlight/ili9320.c
index 5118a9f029ab..6c9399341bcf 100644
--- a/drivers/video/backlight/ili9320.c
+++ b/drivers/video/backlight/ili9320.c
@@ -220,7 +220,7 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
220 220
221 /* allocate and initialse our state */ 221 /* allocate and initialse our state */
222 222
223 ili = kzalloc(sizeof(struct ili9320), GFP_KERNEL); 223 ili = devm_kzalloc(&spi->dev, sizeof(struct ili9320), GFP_KERNEL);
224 if (ili == NULL) { 224 if (ili == NULL) {
225 dev_err(dev, "no memory for device\n"); 225 dev_err(dev, "no memory for device\n");
226 return -ENOMEM; 226 return -ENOMEM;
@@ -240,8 +240,7 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
240 lcd = lcd_device_register("ili9320", dev, ili, &ili9320_ops); 240 lcd = lcd_device_register("ili9320", dev, ili, &ili9320_ops);
241 if (IS_ERR(lcd)) { 241 if (IS_ERR(lcd)) {
242 dev_err(dev, "failed to register lcd device\n"); 242 dev_err(dev, "failed to register lcd device\n");
243 ret = PTR_ERR(lcd); 243 return PTR_ERR(lcd);
244 goto err_free;
245 } 244 }
246 245
247 ili->lcd = lcd; 246 ili->lcd = lcd;
@@ -259,9 +258,6 @@ int __devinit ili9320_probe_spi(struct spi_device *spi,
259 err_unregister: 258 err_unregister:
260 lcd_device_unregister(lcd); 259 lcd_device_unregister(lcd);
261 260
262 err_free:
263 kfree(ili);
264
265 return ret; 261 return ret;
266} 262}
267 263
@@ -272,7 +268,6 @@ int __devexit ili9320_remove(struct ili9320 *ili)
272 ili9320_power(ili, FB_BLANK_POWERDOWN); 268 ili9320_power(ili, FB_BLANK_POWERDOWN);
273 269
274 lcd_device_unregister(ili->lcd); 270 lcd_device_unregister(ili->lcd);
275 kfree(ili);
276 271
277 return 0; 272 return 0;
278} 273}
diff --git a/drivers/video/backlight/jornada720_bl.c b/drivers/video/backlight/jornada720_bl.c
index 2f8af5d786ab..16f593b64427 100644
--- a/drivers/video/backlight/jornada720_bl.c
+++ b/drivers/video/backlight/jornada720_bl.c
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/backlight.h> 14#include <linux/backlight.h>
13#include <linux/device.h> 15#include <linux/device.h>
14#include <linux/fb.h> 16#include <linux/fb.h>
@@ -38,7 +40,7 @@ static int jornada_bl_get_brightness(struct backlight_device *bd)
38 ret = jornada_ssp_byte(GETBRIGHTNESS); 40 ret = jornada_ssp_byte(GETBRIGHTNESS);
39 41
40 if (jornada_ssp_byte(GETBRIGHTNESS) != TXDUMMY) { 42 if (jornada_ssp_byte(GETBRIGHTNESS) != TXDUMMY) {
41 printk(KERN_ERR "bl : get brightness timeout\n"); 43 pr_err("get brightness timeout\n");
42 jornada_ssp_end(); 44 jornada_ssp_end();
43 return -ETIMEDOUT; 45 return -ETIMEDOUT;
44 } else /* exchange txdummy for value */ 46 } else /* exchange txdummy for value */
@@ -59,7 +61,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
59 if ((bd->props.power != FB_BLANK_UNBLANK) || (bd->props.fb_blank != FB_BLANK_UNBLANK)) { 61 if ((bd->props.power != FB_BLANK_UNBLANK) || (bd->props.fb_blank != FB_BLANK_UNBLANK)) {
60 ret = jornada_ssp_byte(BRIGHTNESSOFF); 62 ret = jornada_ssp_byte(BRIGHTNESSOFF);
61 if (ret != TXDUMMY) { 63 if (ret != TXDUMMY) {
62 printk(KERN_INFO "bl : brightness off timeout\n"); 64 pr_info("brightness off timeout\n");
63 /* turn off backlight */ 65 /* turn off backlight */
64 PPSR &= ~PPC_LDD1; 66 PPSR &= ~PPC_LDD1;
65 PPDR |= PPC_LDD1; 67 PPDR |= PPC_LDD1;
@@ -70,7 +72,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
70 72
71 /* send command to our mcu */ 73 /* send command to our mcu */
72 if (jornada_ssp_byte(SETBRIGHTNESS) != TXDUMMY) { 74 if (jornada_ssp_byte(SETBRIGHTNESS) != TXDUMMY) {
73 printk(KERN_INFO "bl : failed to set brightness\n"); 75 pr_info("failed to set brightness\n");
74 ret = -ETIMEDOUT; 76 ret = -ETIMEDOUT;
75 goto out; 77 goto out;
76 } 78 }
@@ -81,7 +83,7 @@ static int jornada_bl_update_status(struct backlight_device *bd)
81 but due to physical layout it is equal to 0, so we simply 83 but due to physical layout it is equal to 0, so we simply
82 invert the value (MAX VALUE - NEW VALUE). */ 84 invert the value (MAX VALUE - NEW VALUE). */
83 if (jornada_ssp_byte(BL_MAX_BRIGHT - bd->props.brightness) != TXDUMMY) { 85 if (jornada_ssp_byte(BL_MAX_BRIGHT - bd->props.brightness) != TXDUMMY) {
84 printk(KERN_ERR "bl : set brightness failed\n"); 86 pr_err("set brightness failed\n");
85 ret = -ETIMEDOUT; 87 ret = -ETIMEDOUT;
86 } 88 }
87 89
@@ -113,7 +115,7 @@ static int jornada_bl_probe(struct platform_device *pdev)
113 115
114 if (IS_ERR(bd)) { 116 if (IS_ERR(bd)) {
115 ret = PTR_ERR(bd); 117 ret = PTR_ERR(bd);
116 printk(KERN_ERR "bl : failed to register device, err=%x\n", ret); 118 pr_err("failed to register device, err=%x\n", ret);
117 return ret; 119 return ret;
118 } 120 }
119 121
@@ -125,7 +127,7 @@ static int jornada_bl_probe(struct platform_device *pdev)
125 jornada_bl_update_status(bd); 127 jornada_bl_update_status(bd);
126 128
127 platform_set_drvdata(pdev, bd); 129 platform_set_drvdata(pdev, bd);
128 printk(KERN_INFO "HP Jornada 700 series backlight driver\n"); 130 pr_info("HP Jornada 700 series backlight driver\n");
129 131
130 return 0; 132 return 0;
131} 133}
diff --git a/drivers/video/backlight/jornada720_lcd.c b/drivers/video/backlight/jornada720_lcd.c
index 22d231a17e3c..635b30523fd5 100644
--- a/drivers/video/backlight/jornada720_lcd.c
+++ b/drivers/video/backlight/jornada720_lcd.c
@@ -9,6 +9,8 @@
9 * 9 *
10 */ 10 */
11 11
12#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
13
12#include <linux/device.h> 14#include <linux/device.h>
13#include <linux/fb.h> 15#include <linux/fb.h>
14#include <linux/kernel.h> 16#include <linux/kernel.h>
@@ -44,7 +46,7 @@ static int jornada_lcd_get_contrast(struct lcd_device *dev)
44 jornada_ssp_start(); 46 jornada_ssp_start();
45 47
46 if (jornada_ssp_byte(GETCONTRAST) != TXDUMMY) { 48 if (jornada_ssp_byte(GETCONTRAST) != TXDUMMY) {
47 printk(KERN_ERR "lcd: get contrast failed\n"); 49 pr_err("get contrast failed\n");
48 jornada_ssp_end(); 50 jornada_ssp_end();
49 return -ETIMEDOUT; 51 return -ETIMEDOUT;
50 } else { 52 } else {
@@ -65,7 +67,7 @@ static int jornada_lcd_set_contrast(struct lcd_device *dev, int value)
65 67
66 /* push the new value */ 68 /* push the new value */
67 if (jornada_ssp_byte(value) != TXDUMMY) { 69 if (jornada_ssp_byte(value) != TXDUMMY) {
68 printk(KERN_ERR "lcd : set contrast failed\n"); 70 pr_err("set contrast failed\n");
69 jornada_ssp_end(); 71 jornada_ssp_end();
70 return -ETIMEDOUT; 72 return -ETIMEDOUT;
71 } 73 }
@@ -103,7 +105,7 @@ static int jornada_lcd_probe(struct platform_device *pdev)
103 105
104 if (IS_ERR(lcd_device)) { 106 if (IS_ERR(lcd_device)) {
105 ret = PTR_ERR(lcd_device); 107 ret = PTR_ERR(lcd_device);
106 printk(KERN_ERR "lcd : failed to register device\n"); 108 pr_err("failed to register device\n");
107 return ret; 109 return ret;
108 } 110 }
109 111
diff --git a/drivers/video/backlight/l4f00242t03.c b/drivers/video/backlight/l4f00242t03.c
index 6022b67285ec..40f606a86093 100644
--- a/drivers/video/backlight/l4f00242t03.c
+++ b/drivers/video/backlight/l4f00242t03.c
@@ -11,6 +11,8 @@
11 * published by the Free Software Foundation. 11 * published by the Free Software Foundation.
12 */ 12 */
13 13
14#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
15
14#include <linux/device.h> 16#include <linux/device.h>
15#include <linux/kernel.h> 17#include <linux/kernel.h>
16#include <linux/delay.h> 18#include <linux/delay.h>
@@ -159,7 +161,8 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
159 return -EINVAL; 161 return -EINVAL;
160 } 162 }
161 163
162 priv = kzalloc(sizeof(struct l4f00242t03_priv), GFP_KERNEL); 164 priv = devm_kzalloc(&spi->dev, sizeof(struct l4f00242t03_priv),
165 GFP_KERNEL);
163 166
164 if (priv == NULL) { 167 if (priv == NULL) {
165 dev_err(&spi->dev, "No memory for this device.\n"); 168 dev_err(&spi->dev, "No memory for this device.\n");
@@ -177,7 +180,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
177 if (ret) { 180 if (ret) {
178 dev_err(&spi->dev, 181 dev_err(&spi->dev,
179 "Unable to get the lcd l4f00242t03 reset gpio.\n"); 182 "Unable to get the lcd l4f00242t03 reset gpio.\n");
180 goto err; 183 return ret;
181 } 184 }
182 185
183 ret = gpio_request_one(pdata->data_enable_gpio, GPIOF_OUT_INIT_LOW, 186 ret = gpio_request_one(pdata->data_enable_gpio, GPIOF_OUT_INIT_LOW,
@@ -185,7 +188,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
185 if (ret) { 188 if (ret) {
186 dev_err(&spi->dev, 189 dev_err(&spi->dev,
187 "Unable to get the lcd l4f00242t03 data en gpio.\n"); 190 "Unable to get the lcd l4f00242t03 data en gpio.\n");
188 goto err2; 191 goto err;
189 } 192 }
190 193
191 priv->io_reg = regulator_get(&spi->dev, "vdd"); 194 priv->io_reg = regulator_get(&spi->dev, "vdd");
@@ -193,7 +196,7 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
193 ret = PTR_ERR(priv->io_reg); 196 ret = PTR_ERR(priv->io_reg);
194 dev_err(&spi->dev, "%s: Unable to get the IO regulator\n", 197 dev_err(&spi->dev, "%s: Unable to get the IO regulator\n",
195 __func__); 198 __func__);
196 goto err3; 199 goto err2;
197 } 200 }
198 201
199 priv->core_reg = regulator_get(&spi->dev, "vcore"); 202 priv->core_reg = regulator_get(&spi->dev, "vcore");
@@ -201,14 +204,14 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
201 ret = PTR_ERR(priv->core_reg); 204 ret = PTR_ERR(priv->core_reg);
202 dev_err(&spi->dev, "%s: Unable to get the core regulator\n", 205 dev_err(&spi->dev, "%s: Unable to get the core regulator\n",
203 __func__); 206 __func__);
204 goto err4; 207 goto err3;
205 } 208 }
206 209
207 priv->ld = lcd_device_register("l4f00242t03", 210 priv->ld = lcd_device_register("l4f00242t03",
208 &spi->dev, priv, &l4f_ops); 211 &spi->dev, priv, &l4f_ops);
209 if (IS_ERR(priv->ld)) { 212 if (IS_ERR(priv->ld)) {
210 ret = PTR_ERR(priv->ld); 213 ret = PTR_ERR(priv->ld);
211 goto err5; 214 goto err4;
212 } 215 }
213 216
214 /* Init the LCD */ 217 /* Init the LCD */
@@ -220,16 +223,14 @@ static int __devinit l4f00242t03_probe(struct spi_device *spi)
220 223
221 return 0; 224 return 0;
222 225
223err5:
224 regulator_put(priv->core_reg);
225err4: 226err4:
226 regulator_put(priv->io_reg); 227 regulator_put(priv->core_reg);
227err3: 228err3:
228 gpio_free(pdata->data_enable_gpio); 229 regulator_put(priv->io_reg);
229err2: 230err2:
230 gpio_free(pdata->reset_gpio); 231 gpio_free(pdata->data_enable_gpio);
231err: 232err:
232 kfree(priv); 233 gpio_free(pdata->reset_gpio);
233 234
234 return ret; 235 return ret;
235} 236}
@@ -250,8 +251,6 @@ static int __devexit l4f00242t03_remove(struct spi_device *spi)
250 regulator_put(priv->io_reg); 251 regulator_put(priv->io_reg);
251 regulator_put(priv->core_reg); 252 regulator_put(priv->core_reg);
252 253
253 kfree(priv);
254
255 return 0; 254 return 0;
256} 255}
257 256
diff --git a/drivers/video/backlight/lcd.c b/drivers/video/backlight/lcd.c
index 79c1b0d609a8..a5d0d024bb92 100644
--- a/drivers/video/backlight/lcd.c
+++ b/drivers/video/backlight/lcd.c
@@ -5,6 +5,8 @@
5 * 5 *
6 */ 6 */
7 7
8#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
9
8#include <linux/module.h> 10#include <linux/module.h>
9#include <linux/init.h> 11#include <linux/init.h>
10#include <linux/device.h> 12#include <linux/device.h>
@@ -32,6 +34,8 @@ static int fb_notifier_callback(struct notifier_block *self,
32 case FB_EVENT_BLANK: 34 case FB_EVENT_BLANK:
33 case FB_EVENT_MODE_CHANGE: 35 case FB_EVENT_MODE_CHANGE:
34 case FB_EVENT_MODE_CHANGE_ALL: 36 case FB_EVENT_MODE_CHANGE_ALL:
37 case FB_EARLY_EVENT_BLANK:
38 case FB_R_EARLY_EVENT_BLANK:
35 break; 39 break;
36 default: 40 default:
37 return 0; 41 return 0;
@@ -46,6 +50,14 @@ static int fb_notifier_callback(struct notifier_block *self,
46 if (event == FB_EVENT_BLANK) { 50 if (event == FB_EVENT_BLANK) {
47 if (ld->ops->set_power) 51 if (ld->ops->set_power)
48 ld->ops->set_power(ld, *(int *)evdata->data); 52 ld->ops->set_power(ld, *(int *)evdata->data);
53 } else if (event == FB_EARLY_EVENT_BLANK) {
54 if (ld->ops->early_set_power)
55 ld->ops->early_set_power(ld,
56 *(int *)evdata->data);
57 } else if (event == FB_R_EARLY_EVENT_BLANK) {
58 if (ld->ops->r_early_set_power)
59 ld->ops->r_early_set_power(ld,
60 *(int *)evdata->data);
49 } else { 61 } else {
50 if (ld->ops->set_mode) 62 if (ld->ops->set_mode)
51 ld->ops->set_mode(ld, evdata->data); 63 ld->ops->set_mode(ld, evdata->data);
@@ -106,7 +118,7 @@ static ssize_t lcd_store_power(struct device *dev,
106 118
107 mutex_lock(&ld->ops_lock); 119 mutex_lock(&ld->ops_lock);
108 if (ld->ops && ld->ops->set_power) { 120 if (ld->ops && ld->ops->set_power) {
109 pr_debug("lcd: set power to %lu\n", power); 121 pr_debug("set power to %lu\n", power);
110 ld->ops->set_power(ld, power); 122 ld->ops->set_power(ld, power);
111 rc = count; 123 rc = count;
112 } 124 }
@@ -142,7 +154,7 @@ static ssize_t lcd_store_contrast(struct device *dev,
142 154
143 mutex_lock(&ld->ops_lock); 155 mutex_lock(&ld->ops_lock);
144 if (ld->ops && ld->ops->set_contrast) { 156 if (ld->ops && ld->ops->set_contrast) {
145 pr_debug("lcd: set contrast to %lu\n", contrast); 157 pr_debug("set contrast to %lu\n", contrast);
146 ld->ops->set_contrast(ld, contrast); 158 ld->ops->set_contrast(ld, contrast);
147 rc = count; 159 rc = count;
148 } 160 }
@@ -253,8 +265,8 @@ static int __init lcd_class_init(void)
253{ 265{
254 lcd_class = class_create(THIS_MODULE, "lcd"); 266 lcd_class = class_create(THIS_MODULE, "lcd");
255 if (IS_ERR(lcd_class)) { 267 if (IS_ERR(lcd_class)) {
256 printk(KERN_WARNING "Unable to create backlight class; errno = %ld\n", 268 pr_warn("Unable to create backlight class; errno = %ld\n",
257 PTR_ERR(lcd_class)); 269 PTR_ERR(lcd_class));
258 return PTR_ERR(lcd_class); 270 return PTR_ERR(lcd_class);
259 } 271 }
260 272
diff --git a/drivers/video/backlight/ld9040.c b/drivers/video/backlight/ld9040.c
index efd352be21ae..58f517fb7d40 100644
--- a/drivers/video/backlight/ld9040.c
+++ b/drivers/video/backlight/ld9040.c
@@ -707,7 +707,7 @@ static int ld9040_probe(struct spi_device *spi)
707 struct backlight_device *bd = NULL; 707 struct backlight_device *bd = NULL;
708 struct backlight_properties props; 708 struct backlight_properties props;
709 709
710 lcd = kzalloc(sizeof(struct ld9040), GFP_KERNEL); 710 lcd = devm_kzalloc(&spi->dev, sizeof(struct ld9040), GFP_KERNEL);
711 if (!lcd) 711 if (!lcd)
712 return -ENOMEM; 712 return -ENOMEM;
713 713
@@ -717,7 +717,7 @@ static int ld9040_probe(struct spi_device *spi)
717 ret = spi_setup(spi); 717 ret = spi_setup(spi);
718 if (ret < 0) { 718 if (ret < 0) {
719 dev_err(&spi->dev, "spi setup failed.\n"); 719 dev_err(&spi->dev, "spi setup failed.\n");
720 goto out_free_lcd; 720 return ret;
721 } 721 }
722 722
723 lcd->spi = spi; 723 lcd->spi = spi;
@@ -726,7 +726,7 @@ static int ld9040_probe(struct spi_device *spi)
726 lcd->lcd_pd = spi->dev.platform_data; 726 lcd->lcd_pd = spi->dev.platform_data;
727 if (!lcd->lcd_pd) { 727 if (!lcd->lcd_pd) {
728 dev_err(&spi->dev, "platform data is NULL.\n"); 728 dev_err(&spi->dev, "platform data is NULL.\n");
729 goto out_free_lcd; 729 return -EFAULT;
730 } 730 }
731 731
732 mutex_init(&lcd->lock); 732 mutex_init(&lcd->lock);
@@ -734,13 +734,13 @@ static int ld9040_probe(struct spi_device *spi)
734 ret = regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies); 734 ret = regulator_bulk_get(lcd->dev, ARRAY_SIZE(supplies), supplies);
735 if (ret) { 735 if (ret) {
736 dev_err(lcd->dev, "Failed to get regulators: %d\n", ret); 736 dev_err(lcd->dev, "Failed to get regulators: %d\n", ret);
737 goto out_free_lcd; 737 return ret;
738 } 738 }
739 739
740 ld = lcd_device_register("ld9040", &spi->dev, lcd, &ld9040_lcd_ops); 740 ld = lcd_device_register("ld9040", &spi->dev, lcd, &ld9040_lcd_ops);
741 if (IS_ERR(ld)) { 741 if (IS_ERR(ld)) {
742 ret = PTR_ERR(ld); 742 ret = PTR_ERR(ld);
743 goto out_free_lcd; 743 goto out_free_regulator;
744 } 744 }
745 745
746 lcd->ld = ld; 746 lcd->ld = ld;
@@ -782,10 +782,9 @@ static int ld9040_probe(struct spi_device *spi)
782 782
783out_unregister_lcd: 783out_unregister_lcd:
784 lcd_device_unregister(lcd->ld); 784 lcd_device_unregister(lcd->ld);
785out_free_lcd: 785out_free_regulator:
786 regulator_bulk_free(ARRAY_SIZE(supplies), supplies); 786 regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
787 787
788 kfree(lcd);
789 return ret; 788 return ret;
790} 789}
791 790
@@ -797,7 +796,6 @@ static int __devexit ld9040_remove(struct spi_device *spi)
797 backlight_device_unregister(lcd->bd); 796 backlight_device_unregister(lcd->bd);
798 lcd_device_unregister(lcd->ld); 797 lcd_device_unregister(lcd->ld);
799 regulator_bulk_free(ARRAY_SIZE(supplies), supplies); 798 regulator_bulk_free(ARRAY_SIZE(supplies), supplies);
800 kfree(lcd);
801 799
802 return 0; 800 return 0;
803} 801}
@@ -846,7 +844,6 @@ static void ld9040_shutdown(struct spi_device *spi)
846static struct spi_driver ld9040_driver = { 844static struct spi_driver ld9040_driver = {
847 .driver = { 845 .driver = {
848 .name = "ld9040", 846 .name = "ld9040",
849 .bus = &spi_bus_type,
850 .owner = THIS_MODULE, 847 .owner = THIS_MODULE,
851 }, 848 },
852 .probe = ld9040_probe, 849 .probe = ld9040_probe,
diff --git a/drivers/video/backlight/lm3533_bl.c b/drivers/video/backlight/lm3533_bl.c
new file mode 100644
index 000000000000..bebeb63607db
--- /dev/null
+++ b/drivers/video/backlight/lm3533_bl.c
@@ -0,0 +1,423 @@
1/*
2 * lm3533-bl.c -- LM3533 Backlight driver
3 *
4 * Copyright (C) 2011-2012 Texas Instruments
5 *
6 * Author: Johan Hovold <jhovold@gmail.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/platform_device.h>
17#include <linux/backlight.h>
18#include <linux/fb.h>
19#include <linux/slab.h>
20
21#include <linux/mfd/lm3533.h>
22
23
#define LM3533_HVCTRLBANK_COUNT		2	/* high-voltage banks: A and B */
#define LM3533_BL_MAX_BRIGHTNESS	255

#define LM3533_REG_CTRLBANK_AB_BCONF	0x1a


/* Per-backlight driver state: one instance per high-voltage control bank. */
struct lm3533_bl {
	struct lm3533 *lm3533;		/* parent chip (from pdev->dev.parent drvdata) */
	struct lm3533_ctrlbank cb;	/* control bank backing this backlight */
	struct backlight_device *bd;	/* registered backlight class device */
	int id;				/* platform-device id; doubles as bank id */
};
36
37
38static inline int lm3533_bl_get_ctrlbank_id(struct lm3533_bl *bl)
39{
40 return bl->id;
41}
42
43static int lm3533_bl_update_status(struct backlight_device *bd)
44{
45 struct lm3533_bl *bl = bl_get_data(bd);
46 int brightness = bd->props.brightness;
47
48 if (bd->props.power != FB_BLANK_UNBLANK)
49 brightness = 0;
50 if (bd->props.fb_blank != FB_BLANK_UNBLANK)
51 brightness = 0;
52
53 return lm3533_ctrlbank_set_brightness(&bl->cb, (u8)brightness);
54}
55
56static int lm3533_bl_get_brightness(struct backlight_device *bd)
57{
58 struct lm3533_bl *bl = bl_get_data(bd);
59 u8 val;
60 int ret;
61
62 ret = lm3533_ctrlbank_get_brightness(&bl->cb, &val);
63 if (ret)
64 return ret;
65
66 return val;
67}
68
/* Backlight core callbacks; both delegate to the backing control bank. */
static const struct backlight_ops lm3533_bl_ops = {
	.get_brightness	= lm3533_bl_get_brightness,
	.update_status	= lm3533_bl_update_status,
};
73
74static ssize_t show_id(struct device *dev,
75 struct device_attribute *attr, char *buf)
76{
77 struct lm3533_bl *bl = dev_get_drvdata(dev);
78
79 return scnprintf(buf, PAGE_SIZE, "%d\n", bl->id);
80}
81
82static ssize_t show_als_channel(struct device *dev,
83 struct device_attribute *attr, char *buf)
84{
85 struct lm3533_bl *bl = dev_get_drvdata(dev);
86 unsigned channel = lm3533_bl_get_ctrlbank_id(bl);
87
88 return scnprintf(buf, PAGE_SIZE, "%u\n", channel);
89}
90
91static ssize_t show_als_en(struct device *dev,
92 struct device_attribute *attr, char *buf)
93{
94 struct lm3533_bl *bl = dev_get_drvdata(dev);
95 int ctrlbank = lm3533_bl_get_ctrlbank_id(bl);
96 u8 val;
97 u8 mask;
98 bool enable;
99 int ret;
100
101 ret = lm3533_read(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, &val);
102 if (ret)
103 return ret;
104
105 mask = 1 << (2 * ctrlbank);
106 enable = val & mask;
107
108 return scnprintf(buf, PAGE_SIZE, "%d\n", enable);
109}
110
111static ssize_t store_als_en(struct device *dev,
112 struct device_attribute *attr,
113 const char *buf, size_t len)
114{
115 struct lm3533_bl *bl = dev_get_drvdata(dev);
116 int ctrlbank = lm3533_bl_get_ctrlbank_id(bl);
117 int enable;
118 u8 val;
119 u8 mask;
120 int ret;
121
122 if (kstrtoint(buf, 0, &enable))
123 return -EINVAL;
124
125 mask = 1 << (2 * ctrlbank);
126
127 if (enable)
128 val = mask;
129 else
130 val = 0;
131
132 ret = lm3533_update(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, val,
133 mask);
134 if (ret)
135 return ret;
136
137 return len;
138}
139
140static ssize_t show_linear(struct device *dev,
141 struct device_attribute *attr, char *buf)
142{
143 struct lm3533_bl *bl = dev_get_drvdata(dev);
144 u8 val;
145 u8 mask;
146 int linear;
147 int ret;
148
149 ret = lm3533_read(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, &val);
150 if (ret)
151 return ret;
152
153 mask = 1 << (2 * lm3533_bl_get_ctrlbank_id(bl) + 1);
154
155 if (val & mask)
156 linear = 1;
157 else
158 linear = 0;
159
160 return scnprintf(buf, PAGE_SIZE, "%x\n", linear);
161}
162
163static ssize_t store_linear(struct device *dev,
164 struct device_attribute *attr,
165 const char *buf, size_t len)
166{
167 struct lm3533_bl *bl = dev_get_drvdata(dev);
168 unsigned long linear;
169 u8 mask;
170 u8 val;
171 int ret;
172
173 if (kstrtoul(buf, 0, &linear))
174 return -EINVAL;
175
176 mask = 1 << (2 * lm3533_bl_get_ctrlbank_id(bl) + 1);
177
178 if (linear)
179 val = mask;
180 else
181 val = 0;
182
183 ret = lm3533_update(bl->lm3533, LM3533_REG_CTRLBANK_AB_BCONF, val,
184 mask);
185 if (ret)
186 return ret;
187
188 return len;
189}
190
191static ssize_t show_pwm(struct device *dev,
192 struct device_attribute *attr,
193 char *buf)
194{
195 struct lm3533_bl *bl = dev_get_drvdata(dev);
196 u8 val;
197 int ret;
198
199 ret = lm3533_ctrlbank_get_pwm(&bl->cb, &val);
200 if (ret)
201 return ret;
202
203 return scnprintf(buf, PAGE_SIZE, "%u\n", val);
204}
205
206static ssize_t store_pwm(struct device *dev,
207 struct device_attribute *attr,
208 const char *buf, size_t len)
209{
210 struct lm3533_bl *bl = dev_get_drvdata(dev);
211 u8 val;
212 int ret;
213
214 if (kstrtou8(buf, 0, &val))
215 return -EINVAL;
216
217 ret = lm3533_ctrlbank_set_pwm(&bl->cb, val);
218 if (ret)
219 return ret;
220
221 return len;
222}
223
/* Device attributes created on the backlight class device in probe(). */
static LM3533_ATTR_RO(als_channel);
static LM3533_ATTR_RW(als_en);
static LM3533_ATTR_RO(id);
static LM3533_ATTR_RW(linear);
static LM3533_ATTR_RW(pwm);

static struct attribute *lm3533_bl_attributes[] = {
	&dev_attr_als_channel.attr,
	&dev_attr_als_en.attr,
	&dev_attr_id.attr,
	&dev_attr_linear.attr,
	&dev_attr_pwm.attr,
	NULL,
};
238
239static umode_t lm3533_bl_attr_is_visible(struct kobject *kobj,
240 struct attribute *attr, int n)
241{
242 struct device *dev = container_of(kobj, struct device, kobj);
243 struct lm3533_bl *bl = dev_get_drvdata(dev);
244 umode_t mode = attr->mode;
245
246 if (attr == &dev_attr_als_channel.attr ||
247 attr == &dev_attr_als_en.attr) {
248 if (!bl->lm3533->have_als)
249 mode = 0;
250 }
251
252 return mode;
253};
254
/* Attribute group; ALS attribute visibility is gated by have_als. */
static struct attribute_group lm3533_bl_attribute_group = {
	.is_visible	= lm3533_bl_attr_is_visible,
	.attrs		= lm3533_bl_attributes
};
259
/*
 * Apply the platform-provided control-bank configuration: the maximum
 * current, then the PWM setting. Returns 0 or the first error.
 */
static int __devinit lm3533_bl_setup(struct lm3533_bl *bl,
				     struct lm3533_bl_platform_data *pdata)
{
	int ret;

	ret = lm3533_ctrlbank_set_max_current(&bl->cb, pdata->max_current);
	if (ret)
		return ret;

	return lm3533_ctrlbank_set_pwm(&bl->cb, pdata->pwm);
}
271
/*
 * Register one LM3533 high-voltage control bank as a backlight device.
 *
 * Requires the parent device's drvdata (the lm3533 core) and platform
 * data; the platform-device id selects the control bank and must be in
 * [0, LM3533_HVCTRLBANK_COUNT). Registers the backlight class device,
 * creates the sysfs attribute group, pushes the default brightness,
 * applies the platform configuration and finally enables the bank.
 *
 * Returns 0 on success or a negative errno; on failure everything
 * already set up is unwound in reverse order via the err_* labels.
 */
static int __devinit lm3533_bl_probe(struct platform_device *pdev)
{
	struct lm3533 *lm3533;
	struct lm3533_bl_platform_data *pdata;
	struct lm3533_bl *bl;
	struct backlight_device *bd;
	struct backlight_properties props;
	int ret;

	dev_dbg(&pdev->dev, "%s\n", __func__);

	lm3533 = dev_get_drvdata(pdev->dev.parent);
	if (!lm3533)
		return -EINVAL;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data\n");
		return -EINVAL;
	}

	/* The device id doubles as the control-bank index. */
	if (pdev->id < 0 || pdev->id >= LM3533_HVCTRLBANK_COUNT) {
		dev_err(&pdev->dev, "illegal backlight id %d\n", pdev->id);
		return -EINVAL;
	}

	bl = kzalloc(sizeof(*bl), GFP_KERNEL);
	if (!bl) {
		dev_err(&pdev->dev,
				"failed to allocate memory for backlight\n");
		return -ENOMEM;
	}

	bl->lm3533 = lm3533;
	bl->id = pdev->id;

	bl->cb.lm3533 = lm3533;
	bl->cb.id = lm3533_bl_get_ctrlbank_id(bl);
	bl->cb.dev = NULL;		/* until registered */

	memset(&props, 0, sizeof(props));
	props.type = BACKLIGHT_RAW;
	props.max_brightness = LM3533_BL_MAX_BRIGHTNESS;
	props.brightness = pdata->default_brightness;
	bd = backlight_device_register(pdata->name, pdev->dev.parent, bl,
					&lm3533_bl_ops, &props);
	if (IS_ERR(bd)) {
		dev_err(&pdev->dev, "failed to register backlight device\n");
		ret = PTR_ERR(bd);
		goto err_free;
	}

	bl->bd = bd;
	bl->cb.dev = &bl->bd->dev;

	platform_set_drvdata(pdev, bl);

	ret = sysfs_create_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
	if (ret < 0) {
		dev_err(&pdev->dev, "failed to create sysfs attributes\n");
		goto err_unregister;
	}

	/* Push the default brightness before enabling the bank. */
	backlight_update_status(bd);

	ret = lm3533_bl_setup(bl, pdata);
	if (ret)
		goto err_sysfs_remove;

	ret = lm3533_ctrlbank_enable(&bl->cb);
	if (ret)
		goto err_sysfs_remove;

	return 0;

err_sysfs_remove:
	sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
err_unregister:
	backlight_device_unregister(bd);
err_free:
	kfree(bl);

	return ret;
}
356
/*
 * Tear down in reverse probe order: mark the device powered down with
 * zero brightness, disable the control bank, remove the sysfs group,
 * unregister the backlight device and free driver state.
 */
static int __devexit lm3533_bl_remove(struct platform_device *pdev)
{
	struct lm3533_bl *bl = platform_get_drvdata(pdev);
	struct backlight_device *bd = bl->bd;

	dev_dbg(&bd->dev, "%s\n", __func__);

	bd->props.power = FB_BLANK_POWERDOWN;
	bd->props.brightness = 0;

	lm3533_ctrlbank_disable(&bl->cb);
	sysfs_remove_group(&bd->dev.kobj, &lm3533_bl_attribute_group);
	backlight_device_unregister(bd);
	kfree(bl);

	return 0;
}
374
#ifdef CONFIG_PM
/* Legacy platform PM: disable the control bank across suspend ... */
static int lm3533_bl_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct lm3533_bl *bl = platform_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);

	return lm3533_ctrlbank_disable(&bl->cb);
}

/* ... and re-enable it on resume. */
static int lm3533_bl_resume(struct platform_device *pdev)
{
	struct lm3533_bl *bl = platform_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);

	return lm3533_ctrlbank_enable(&bl->cb);
}
#else
#define lm3533_bl_suspend	NULL
#define lm3533_bl_resume	NULL
#endif
397
/* Disable the control bank on system shutdown/reboot. */
static void lm3533_bl_shutdown(struct platform_device *pdev)
{
	struct lm3533_bl *bl = platform_get_drvdata(pdev);

	dev_dbg(&pdev->dev, "%s\n", __func__);

	lm3533_ctrlbank_disable(&bl->cb);
}
406
/* Platform driver bound by name; the parent lm3533 device provides drvdata. */
static struct platform_driver lm3533_bl_driver = {
	.driver = {
		.name = "lm3533-backlight",
		.owner = THIS_MODULE,
	},
	.probe = lm3533_bl_probe,
	.remove = __devexit_p(lm3533_bl_remove),
	.shutdown = lm3533_bl_shutdown,
	.suspend = lm3533_bl_suspend,
	.resume = lm3533_bl_resume,
};
module_platform_driver(lm3533_bl_driver);

MODULE_AUTHOR("Johan Hovold <jhovold@gmail.com>");
MODULE_DESCRIPTION("LM3533 Backlight driver");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:lm3533-backlight");
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
index 4161f9e3982a..a9f2c36966f1 100644
--- a/drivers/video/backlight/lms283gf05.c
+++ b/drivers/video/backlight/lms283gf05.c
@@ -168,7 +168,8 @@ static int __devinit lms283gf05_probe(struct spi_device *spi)
168 goto err; 168 goto err;
169 } 169 }
170 170
171 st = kzalloc(sizeof(struct lms283gf05_state), GFP_KERNEL); 171 st = devm_kzalloc(&spi->dev, sizeof(struct lms283gf05_state),
172 GFP_KERNEL);
172 if (st == NULL) { 173 if (st == NULL) {
173 dev_err(&spi->dev, "No memory for device state\n"); 174 dev_err(&spi->dev, "No memory for device state\n");
174 ret = -ENOMEM; 175 ret = -ENOMEM;
@@ -178,7 +179,7 @@ static int __devinit lms283gf05_probe(struct spi_device *spi)
178 ld = lcd_device_register("lms283gf05", &spi->dev, st, &lms_ops); 179 ld = lcd_device_register("lms283gf05", &spi->dev, st, &lms_ops);
179 if (IS_ERR(ld)) { 180 if (IS_ERR(ld)) {
180 ret = PTR_ERR(ld); 181 ret = PTR_ERR(ld);
181 goto err2; 182 goto err;
182 } 183 }
183 184
184 st->spi = spi; 185 st->spi = spi;
@@ -193,8 +194,6 @@ static int __devinit lms283gf05_probe(struct spi_device *spi)
193 194
194 return 0; 195 return 0;
195 196
196err2:
197 kfree(st);
198err: 197err:
199 if (pdata != NULL) 198 if (pdata != NULL)
200 gpio_free(pdata->reset_gpio); 199 gpio_free(pdata->reset_gpio);
@@ -212,8 +211,6 @@ static int __devexit lms283gf05_remove(struct spi_device *spi)
212 if (pdata != NULL) 211 if (pdata != NULL)
213 gpio_free(pdata->reset_gpio); 212 gpio_free(pdata->reset_gpio);
214 213
215 kfree(st);
216
217 return 0; 214 return 0;
218} 215}
219 216
diff --git a/drivers/video/backlight/ltv350qv.c b/drivers/video/backlight/ltv350qv.c
index 333949ff3265..6c0f1ac0d32a 100644
--- a/drivers/video/backlight/ltv350qv.c
+++ b/drivers/video/backlight/ltv350qv.c
@@ -232,23 +232,20 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
232 struct lcd_device *ld; 232 struct lcd_device *ld;
233 int ret; 233 int ret;
234 234
235 lcd = kzalloc(sizeof(struct ltv350qv), GFP_KERNEL); 235 lcd = devm_kzalloc(&spi->dev, sizeof(struct ltv350qv), GFP_KERNEL);
236 if (!lcd) 236 if (!lcd)
237 return -ENOMEM; 237 return -ENOMEM;
238 238
239 lcd->spi = spi; 239 lcd->spi = spi;
240 lcd->power = FB_BLANK_POWERDOWN; 240 lcd->power = FB_BLANK_POWERDOWN;
241 lcd->buffer = kzalloc(8, GFP_KERNEL); 241 lcd->buffer = devm_kzalloc(&spi->dev, 8, GFP_KERNEL);
242 if (!lcd->buffer) { 242 if (!lcd->buffer)
243 ret = -ENOMEM; 243 return -ENOMEM;
244 goto out_free_lcd;
245 }
246 244
247 ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops); 245 ld = lcd_device_register("ltv350qv", &spi->dev, lcd, &ltv_ops);
248 if (IS_ERR(ld)) { 246 if (IS_ERR(ld))
249 ret = PTR_ERR(ld); 247 return PTR_ERR(ld);
250 goto out_free_buffer; 248
251 }
252 lcd->ld = ld; 249 lcd->ld = ld;
253 250
254 ret = ltv350qv_power(lcd, FB_BLANK_UNBLANK); 251 ret = ltv350qv_power(lcd, FB_BLANK_UNBLANK);
@@ -261,10 +258,6 @@ static int __devinit ltv350qv_probe(struct spi_device *spi)
261 258
262out_unregister: 259out_unregister:
263 lcd_device_unregister(ld); 260 lcd_device_unregister(ld);
264out_free_buffer:
265 kfree(lcd->buffer);
266out_free_lcd:
267 kfree(lcd);
268 return ret; 261 return ret;
269} 262}
270 263
@@ -274,8 +267,6 @@ static int __devexit ltv350qv_remove(struct spi_device *spi)
274 267
275 ltv350qv_power(lcd, FB_BLANK_POWERDOWN); 268 ltv350qv_power(lcd, FB_BLANK_POWERDOWN);
276 lcd_device_unregister(lcd->ld); 269 lcd_device_unregister(lcd->ld);
277 kfree(lcd->buffer);
278 kfree(lcd);
279 270
280 return 0; 271 return 0;
281} 272}
@@ -310,7 +301,6 @@ static void ltv350qv_shutdown(struct spi_device *spi)
310static struct spi_driver ltv350qv_driver = { 301static struct spi_driver ltv350qv_driver = {
311 .driver = { 302 .driver = {
312 .name = "ltv350qv", 303 .name = "ltv350qv",
313 .bus = &spi_bus_type,
314 .owner = THIS_MODULE, 304 .owner = THIS_MODULE,
315 }, 305 },
316 306
diff --git a/drivers/video/backlight/omap1_bl.c b/drivers/video/backlight/omap1_bl.c
index 0175bfb08a1c..bfdc5fbeaa11 100644
--- a/drivers/video/backlight/omap1_bl.c
+++ b/drivers/video/backlight/omap1_bl.c
@@ -18,6 +18,8 @@
18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA 18 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
19 */ 19 */
20 20
21#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
22
21#include <linux/module.h> 23#include <linux/module.h>
22#include <linux/kernel.h> 24#include <linux/kernel.h>
23#include <linux/init.h> 25#include <linux/init.h>
@@ -168,7 +170,7 @@ static int omapbl_probe(struct platform_device *pdev)
168 dev->props.brightness = pdata->default_intensity; 170 dev->props.brightness = pdata->default_intensity;
169 omapbl_update_status(dev); 171 omapbl_update_status(dev);
170 172
171 printk(KERN_INFO "OMAP LCD backlight initialised\n"); 173 pr_info("OMAP LCD backlight initialised\n");
172 174
173 return 0; 175 return 0;
174} 176}
diff --git a/drivers/video/backlight/pcf50633-backlight.c b/drivers/video/backlight/pcf50633-backlight.c
index c65853cb9740..c092159f4383 100644
--- a/drivers/video/backlight/pcf50633-backlight.c
+++ b/drivers/video/backlight/pcf50633-backlight.c
@@ -111,6 +111,7 @@ static int __devinit pcf50633_bl_probe(struct platform_device *pdev)
111 if (!pcf_bl) 111 if (!pcf_bl)
112 return -ENOMEM; 112 return -ENOMEM;
113 113
114 memset(&bl_props, 0, sizeof(bl_props));
114 bl_props.type = BACKLIGHT_RAW; 115 bl_props.type = BACKLIGHT_RAW;
115 bl_props.max_brightness = 0x3f; 116 bl_props.max_brightness = 0x3f;
116 bl_props.power = FB_BLANK_UNBLANK; 117 bl_props.power = FB_BLANK_UNBLANK;
diff --git a/drivers/video/backlight/progear_bl.c b/drivers/video/backlight/progear_bl.c
index 6af183d6465e..69b35f02929e 100644
--- a/drivers/video/backlight/progear_bl.c
+++ b/drivers/video/backlight/progear_bl.c
@@ -15,6 +15,8 @@
15 * 15 *
16 */ 16 */
17 17
18#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
19
18#include <linux/module.h> 20#include <linux/module.h>
19#include <linux/kernel.h> 21#include <linux/kernel.h>
20#include <linux/init.h> 22#include <linux/init.h>
@@ -68,13 +70,13 @@ static int progearbl_probe(struct platform_device *pdev)
68 70
69 pmu_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, NULL); 71 pmu_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M7101, NULL);
70 if (!pmu_dev) { 72 if (!pmu_dev) {
71 printk("ALI M7101 PMU not found.\n"); 73 pr_err("ALI M7101 PMU not found.\n");
72 return -ENODEV; 74 return -ENODEV;
73 } 75 }
74 76
75 sb_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL); 77 sb_dev = pci_get_device(PCI_VENDOR_ID_AL, PCI_DEVICE_ID_AL_M1533, NULL);
76 if (!sb_dev) { 78 if (!sb_dev) {
77 printk("ALI 1533 SB not found.\n"); 79 pr_err("ALI 1533 SB not found.\n");
78 ret = -ENODEV; 80 ret = -ENODEV;
79 goto put_pmu; 81 goto put_pmu;
80 } 82 }
diff --git a/drivers/video/backlight/s6e63m0.c b/drivers/video/backlight/s6e63m0.c
index e264f55b2574..6437ae474cf2 100644
--- a/drivers/video/backlight/s6e63m0.c
+++ b/drivers/video/backlight/s6e63m0.c
@@ -741,7 +741,7 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
741 struct backlight_device *bd = NULL; 741 struct backlight_device *bd = NULL;
742 struct backlight_properties props; 742 struct backlight_properties props;
743 743
744 lcd = kzalloc(sizeof(struct s6e63m0), GFP_KERNEL); 744 lcd = devm_kzalloc(&spi->dev, sizeof(struct s6e63m0), GFP_KERNEL);
745 if (!lcd) 745 if (!lcd)
746 return -ENOMEM; 746 return -ENOMEM;
747 747
@@ -751,7 +751,7 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
751 ret = spi_setup(spi); 751 ret = spi_setup(spi);
752 if (ret < 0) { 752 if (ret < 0) {
753 dev_err(&spi->dev, "spi setup failed.\n"); 753 dev_err(&spi->dev, "spi setup failed.\n");
754 goto out_free_lcd; 754 return ret;
755 } 755 }
756 756
757 lcd->spi = spi; 757 lcd->spi = spi;
@@ -760,14 +760,12 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
760 lcd->lcd_pd = (struct lcd_platform_data *)spi->dev.platform_data; 760 lcd->lcd_pd = (struct lcd_platform_data *)spi->dev.platform_data;
761 if (!lcd->lcd_pd) { 761 if (!lcd->lcd_pd) {
762 dev_err(&spi->dev, "platform data is NULL.\n"); 762 dev_err(&spi->dev, "platform data is NULL.\n");
763 goto out_free_lcd; 763 return -EFAULT;
764 } 764 }
765 765
766 ld = lcd_device_register("s6e63m0", &spi->dev, lcd, &s6e63m0_lcd_ops); 766 ld = lcd_device_register("s6e63m0", &spi->dev, lcd, &s6e63m0_lcd_ops);
767 if (IS_ERR(ld)) { 767 if (IS_ERR(ld))
768 ret = PTR_ERR(ld); 768 return PTR_ERR(ld);
769 goto out_free_lcd;
770 }
771 769
772 lcd->ld = ld; 770 lcd->ld = ld;
773 771
@@ -824,8 +822,6 @@ static int __devinit s6e63m0_probe(struct spi_device *spi)
824 822
825out_lcd_unregister: 823out_lcd_unregister:
826 lcd_device_unregister(ld); 824 lcd_device_unregister(ld);
827out_free_lcd:
828 kfree(lcd);
829 return ret; 825 return ret;
830} 826}
831 827
@@ -838,7 +834,6 @@ static int __devexit s6e63m0_remove(struct spi_device *spi)
838 device_remove_file(&spi->dev, &dev_attr_gamma_mode); 834 device_remove_file(&spi->dev, &dev_attr_gamma_mode);
839 backlight_device_unregister(lcd->bd); 835 backlight_device_unregister(lcd->bd);
840 lcd_device_unregister(lcd->ld); 836 lcd_device_unregister(lcd->ld);
841 kfree(lcd);
842 837
843 return 0; 838 return 0;
844} 839}
@@ -899,7 +894,6 @@ static void s6e63m0_shutdown(struct spi_device *spi)
899static struct spi_driver s6e63m0_driver = { 894static struct spi_driver s6e63m0_driver = {
900 .driver = { 895 .driver = {
901 .name = "s6e63m0", 896 .name = "s6e63m0",
902 .bus = &spi_bus_type,
903 .owner = THIS_MODULE, 897 .owner = THIS_MODULE,
904 }, 898 },
905 .probe = s6e63m0_probe, 899 .probe = s6e63m0_probe,
diff --git a/drivers/video/backlight/tdo24m.c b/drivers/video/backlight/tdo24m.c
index 2368b8e5f89e..02444d042cd5 100644
--- a/drivers/video/backlight/tdo24m.c
+++ b/drivers/video/backlight/tdo24m.c
@@ -349,7 +349,7 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
349 if (err) 349 if (err)
350 return err; 350 return err;
351 351
352 lcd = kzalloc(sizeof(struct tdo24m), GFP_KERNEL); 352 lcd = devm_kzalloc(&spi->dev, sizeof(struct tdo24m), GFP_KERNEL);
353 if (!lcd) 353 if (!lcd)
354 return -ENOMEM; 354 return -ENOMEM;
355 355
@@ -357,11 +357,9 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
357 lcd->power = FB_BLANK_POWERDOWN; 357 lcd->power = FB_BLANK_POWERDOWN;
358 lcd->mode = MODE_VGA; /* default to VGA */ 358 lcd->mode = MODE_VGA; /* default to VGA */
359 359
360 lcd->buf = kmalloc(TDO24M_SPI_BUFF_SIZE, GFP_KERNEL); 360 lcd->buf = devm_kzalloc(&spi->dev, TDO24M_SPI_BUFF_SIZE, GFP_KERNEL);
361 if (lcd->buf == NULL) { 361 if (lcd->buf == NULL)
362 kfree(lcd);
363 return -ENOMEM; 362 return -ENOMEM;
364 }
365 363
366 m = &lcd->msg; 364 m = &lcd->msg;
367 x = &lcd->xfer; 365 x = &lcd->xfer;
@@ -383,15 +381,13 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
383 break; 381 break;
384 default: 382 default:
385 dev_err(&spi->dev, "Unsupported model"); 383 dev_err(&spi->dev, "Unsupported model");
386 goto out_free; 384 return -EINVAL;
387 } 385 }
388 386
389 lcd->lcd_dev = lcd_device_register("tdo24m", &spi->dev, 387 lcd->lcd_dev = lcd_device_register("tdo24m", &spi->dev,
390 lcd, &tdo24m_ops); 388 lcd, &tdo24m_ops);
391 if (IS_ERR(lcd->lcd_dev)) { 389 if (IS_ERR(lcd->lcd_dev))
392 err = PTR_ERR(lcd->lcd_dev); 390 return PTR_ERR(lcd->lcd_dev);
393 goto out_free;
394 }
395 391
396 dev_set_drvdata(&spi->dev, lcd); 392 dev_set_drvdata(&spi->dev, lcd);
397 err = tdo24m_power(lcd, FB_BLANK_UNBLANK); 393 err = tdo24m_power(lcd, FB_BLANK_UNBLANK);
@@ -402,9 +398,6 @@ static int __devinit tdo24m_probe(struct spi_device *spi)
402 398
403out_unregister: 399out_unregister:
404 lcd_device_unregister(lcd->lcd_dev); 400 lcd_device_unregister(lcd->lcd_dev);
405out_free:
406 kfree(lcd->buf);
407 kfree(lcd);
408 return err; 401 return err;
409} 402}
410 403
@@ -414,8 +407,6 @@ static int __devexit tdo24m_remove(struct spi_device *spi)
414 407
415 tdo24m_power(lcd, FB_BLANK_POWERDOWN); 408 tdo24m_power(lcd, FB_BLANK_POWERDOWN);
416 lcd_device_unregister(lcd->lcd_dev); 409 lcd_device_unregister(lcd->lcd_dev);
417 kfree(lcd->buf);
418 kfree(lcd);
419 410
420 return 0; 411 return 0;
421} 412}
diff --git a/drivers/video/backlight/tosa_bl.c b/drivers/video/backlight/tosa_bl.c
index 2b241abced43..0d54e607e82d 100644
--- a/drivers/video/backlight/tosa_bl.c
+++ b/drivers/video/backlight/tosa_bl.c
@@ -82,8 +82,11 @@ static int __devinit tosa_bl_probe(struct i2c_client *client,
82 const struct i2c_device_id *id) 82 const struct i2c_device_id *id)
83{ 83{
84 struct backlight_properties props; 84 struct backlight_properties props;
85 struct tosa_bl_data *data = kzalloc(sizeof(struct tosa_bl_data), GFP_KERNEL); 85 struct tosa_bl_data *data;
86 int ret = 0; 86 int ret = 0;
87
88 data = devm_kzalloc(&client->dev, sizeof(struct tosa_bl_data),
89 GFP_KERNEL);
87 if (!data) 90 if (!data)
88 return -ENOMEM; 91 return -ENOMEM;
89 92
@@ -92,7 +95,7 @@ static int __devinit tosa_bl_probe(struct i2c_client *client,
92 ret = gpio_request(TOSA_GPIO_BL_C20MA, "backlight"); 95 ret = gpio_request(TOSA_GPIO_BL_C20MA, "backlight");
93 if (ret) { 96 if (ret) {
94 dev_dbg(&data->bl->dev, "Unable to request gpio!\n"); 97 dev_dbg(&data->bl->dev, "Unable to request gpio!\n");
95 goto err_gpio_bl; 98 return ret;
96 } 99 }
97 ret = gpio_direction_output(TOSA_GPIO_BL_C20MA, 0); 100 ret = gpio_direction_output(TOSA_GPIO_BL_C20MA, 0);
98 if (ret) 101 if (ret)
@@ -122,8 +125,6 @@ err_reg:
122 data->bl = NULL; 125 data->bl = NULL;
123err_gpio_dir: 126err_gpio_dir:
124 gpio_free(TOSA_GPIO_BL_C20MA); 127 gpio_free(TOSA_GPIO_BL_C20MA);
125err_gpio_bl:
126 kfree(data);
127 return ret; 128 return ret;
128} 129}
129 130
@@ -136,8 +137,6 @@ static int __devexit tosa_bl_remove(struct i2c_client *client)
136 137
137 gpio_free(TOSA_GPIO_BL_C20MA); 138 gpio_free(TOSA_GPIO_BL_C20MA);
138 139
139 kfree(data);
140
141 return 0; 140 return 0;
142} 141}
143 142
diff --git a/drivers/video/backlight/tosa_lcd.c b/drivers/video/backlight/tosa_lcd.c
index 2231aec23918..47823b8efff0 100644
--- a/drivers/video/backlight/tosa_lcd.c
+++ b/drivers/video/backlight/tosa_lcd.c
@@ -174,7 +174,8 @@ static int __devinit tosa_lcd_probe(struct spi_device *spi)
174 int ret; 174 int ret;
175 struct tosa_lcd_data *data; 175 struct tosa_lcd_data *data;
176 176
177 data = kzalloc(sizeof(struct tosa_lcd_data), GFP_KERNEL); 177 data = devm_kzalloc(&spi->dev, sizeof(struct tosa_lcd_data),
178 GFP_KERNEL);
178 if (!data) 179 if (!data)
179 return -ENOMEM; 180 return -ENOMEM;
180 181
@@ -187,7 +188,7 @@ static int __devinit tosa_lcd_probe(struct spi_device *spi)
187 188
188 ret = spi_setup(spi); 189 ret = spi_setup(spi);
189 if (ret < 0) 190 if (ret < 0)
190 goto err_spi; 191 return ret;
191 192
192 data->spi = spi; 193 data->spi = spi;
193 dev_set_drvdata(&spi->dev, data); 194 dev_set_drvdata(&spi->dev, data);
@@ -224,8 +225,6 @@ err_gpio_dir:
224 gpio_free(TOSA_GPIO_TG_ON); 225 gpio_free(TOSA_GPIO_TG_ON);
225err_gpio_tg: 226err_gpio_tg:
226 dev_set_drvdata(&spi->dev, NULL); 227 dev_set_drvdata(&spi->dev, NULL);
227err_spi:
228 kfree(data);
229 return ret; 228 return ret;
230} 229}
231 230
@@ -242,7 +241,6 @@ static int __devexit tosa_lcd_remove(struct spi_device *spi)
242 241
243 gpio_free(TOSA_GPIO_TG_ON); 242 gpio_free(TOSA_GPIO_TG_ON);
244 dev_set_drvdata(&spi->dev, NULL); 243 dev_set_drvdata(&spi->dev, NULL);
245 kfree(data);
246 244
247 return 0; 245 return 0;
248} 246}
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
index 5d365deb5f82..9e5517a3a52b 100644
--- a/drivers/video/backlight/wm831x_bl.c
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -194,6 +194,7 @@ static int wm831x_backlight_probe(struct platform_device *pdev)
194 data->current_brightness = 0; 194 data->current_brightness = 0;
195 data->isink_reg = isink_reg; 195 data->isink_reg = isink_reg;
196 196
197 memset(&props, 0, sizeof(props));
197 props.type = BACKLIGHT_RAW; 198 props.type = BACKLIGHT_RAW;
198 props.max_brightness = max_isel; 199 props.max_brightness = max_isel;
199 bl = backlight_device_register("wm831x", &pdev->dev, data, 200 bl = backlight_device_register("wm831x", &pdev->dev, data,
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index c6ce416ab587..0dff12a1daef 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1046,20 +1046,29 @@ fb_set_var(struct fb_info *info, struct fb_var_screeninfo *var)
1046int 1046int
1047fb_blank(struct fb_info *info, int blank) 1047fb_blank(struct fb_info *info, int blank)
1048{ 1048{
1049 int ret = -EINVAL; 1049 struct fb_event event;
1050 int ret = -EINVAL, early_ret;
1050 1051
1051 if (blank > FB_BLANK_POWERDOWN) 1052 if (blank > FB_BLANK_POWERDOWN)
1052 blank = FB_BLANK_POWERDOWN; 1053 blank = FB_BLANK_POWERDOWN;
1053 1054
1055 event.info = info;
1056 event.data = &blank;
1057
1058 early_ret = fb_notifier_call_chain(FB_EARLY_EVENT_BLANK, &event);
1059
1054 if (info->fbops->fb_blank) 1060 if (info->fbops->fb_blank)
1055 ret = info->fbops->fb_blank(blank, info); 1061 ret = info->fbops->fb_blank(blank, info);
1056 1062
1057 if (!ret) { 1063 if (!ret)
1058 struct fb_event event;
1059
1060 event.info = info;
1061 event.data = &blank;
1062 fb_notifier_call_chain(FB_EVENT_BLANK, &event); 1064 fb_notifier_call_chain(FB_EVENT_BLANK, &event);
1065 else {
1066 /*
1067 * if fb_blank is failed then revert effects of
1068 * the early blank event.
1069 */
1070 if (!early_ret)
1071 fb_notifier_call_chain(FB_R_EARLY_EVENT_BLANK, &event);
1063 } 1072 }
1064 1073
1065 return ret; 1074 return ret;
diff --git a/drivers/video/imxfb.c b/drivers/video/imxfb.c
index f135dbead07d..caad3689b4e6 100644
--- a/drivers/video/imxfb.c
+++ b/drivers/video/imxfb.c
@@ -131,7 +131,9 @@ struct imxfb_rgb {
131struct imxfb_info { 131struct imxfb_info {
132 struct platform_device *pdev; 132 struct platform_device *pdev;
133 void __iomem *regs; 133 void __iomem *regs;
134 struct clk *clk; 134 struct clk *clk_ipg;
135 struct clk *clk_ahb;
136 struct clk *clk_per;
135 137
136 /* 138 /*
137 * These are the addresses we mapped 139 * These are the addresses we mapped
@@ -340,7 +342,7 @@ static int imxfb_check_var(struct fb_var_screeninfo *var, struct fb_info *info)
340 342
341 pr_debug("var->bits_per_pixel=%d\n", var->bits_per_pixel); 343 pr_debug("var->bits_per_pixel=%d\n", var->bits_per_pixel);
342 344
343 lcd_clk = clk_get_rate(fbi->clk); 345 lcd_clk = clk_get_rate(fbi->clk_per);
344 346
345 tmp = var->pixclock * (unsigned long long)lcd_clk; 347 tmp = var->pixclock * (unsigned long long)lcd_clk;
346 348
@@ -455,11 +457,17 @@ static int imxfb_bl_update_status(struct backlight_device *bl)
455 457
456 fbi->pwmr = (fbi->pwmr & ~0xFF) | brightness; 458 fbi->pwmr = (fbi->pwmr & ~0xFF) | brightness;
457 459
458 if (bl->props.fb_blank != FB_BLANK_UNBLANK) 460 if (bl->props.fb_blank != FB_BLANK_UNBLANK) {
459 clk_enable(fbi->clk); 461 clk_prepare_enable(fbi->clk_ipg);
462 clk_prepare_enable(fbi->clk_ahb);
463 clk_prepare_enable(fbi->clk_per);
464 }
460 writel(fbi->pwmr, fbi->regs + LCDC_PWMR); 465 writel(fbi->pwmr, fbi->regs + LCDC_PWMR);
461 if (bl->props.fb_blank != FB_BLANK_UNBLANK) 466 if (bl->props.fb_blank != FB_BLANK_UNBLANK) {
462 clk_disable(fbi->clk); 467 clk_disable_unprepare(fbi->clk_per);
468 clk_disable_unprepare(fbi->clk_ahb);
469 clk_disable_unprepare(fbi->clk_ipg);
470 }
463 471
464 return 0; 472 return 0;
465} 473}
@@ -522,7 +530,9 @@ static void imxfb_enable_controller(struct imxfb_info *fbi)
522 */ 530 */
523 writel(RMCR_LCDC_EN_MX1, fbi->regs + LCDC_RMCR); 531 writel(RMCR_LCDC_EN_MX1, fbi->regs + LCDC_RMCR);
524 532
525 clk_enable(fbi->clk); 533 clk_prepare_enable(fbi->clk_ipg);
534 clk_prepare_enable(fbi->clk_ahb);
535 clk_prepare_enable(fbi->clk_per);
526 536
527 if (fbi->backlight_power) 537 if (fbi->backlight_power)
528 fbi->backlight_power(1); 538 fbi->backlight_power(1);
@@ -539,7 +549,9 @@ static void imxfb_disable_controller(struct imxfb_info *fbi)
539 if (fbi->lcd_power) 549 if (fbi->lcd_power)
540 fbi->lcd_power(0); 550 fbi->lcd_power(0);
541 551
542 clk_disable(fbi->clk); 552 clk_disable_unprepare(fbi->clk_per);
553 clk_disable_unprepare(fbi->clk_ipg);
554 clk_disable_unprepare(fbi->clk_ahb);
543 555
544 writel(0, fbi->regs + LCDC_RMCR); 556 writel(0, fbi->regs + LCDC_RMCR);
545} 557}
@@ -770,10 +782,21 @@ static int __init imxfb_probe(struct platform_device *pdev)
770 goto failed_req; 782 goto failed_req;
771 } 783 }
772 784
773 fbi->clk = clk_get(&pdev->dev, NULL); 785 fbi->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
774 if (IS_ERR(fbi->clk)) { 786 if (IS_ERR(fbi->clk_ipg)) {
775 ret = PTR_ERR(fbi->clk); 787 ret = PTR_ERR(fbi->clk_ipg);
776 dev_err(&pdev->dev, "unable to get clock: %d\n", ret); 788 goto failed_getclock;
789 }
790
791 fbi->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
792 if (IS_ERR(fbi->clk_ahb)) {
793 ret = PTR_ERR(fbi->clk_ahb);
794 goto failed_getclock;
795 }
796
797 fbi->clk_per = devm_clk_get(&pdev->dev, "per");
798 if (IS_ERR(fbi->clk_per)) {
799 ret = PTR_ERR(fbi->clk_per);
777 goto failed_getclock; 800 goto failed_getclock;
778 } 801 }
779 802
@@ -858,7 +881,6 @@ failed_platform_init:
858failed_map: 881failed_map:
859 iounmap(fbi->regs); 882 iounmap(fbi->regs);
860failed_ioremap: 883failed_ioremap:
861 clk_put(fbi->clk);
862failed_getclock: 884failed_getclock:
863 release_mem_region(res->start, resource_size(res)); 885 release_mem_region(res->start, resource_size(res));
864failed_req: 886failed_req:
@@ -895,8 +917,6 @@ static int __devexit imxfb_remove(struct platform_device *pdev)
895 917
896 iounmap(fbi->regs); 918 iounmap(fbi->regs);
897 release_mem_region(res->start, resource_size(res)); 919 release_mem_region(res->start, resource_size(res));
898 clk_disable(fbi->clk);
899 clk_put(fbi->clk);
900 920
901 platform_set_drvdata(pdev, NULL); 921 platform_set_drvdata(pdev, NULL);
902 922
diff --git a/drivers/video/omap2/displays/panel-acx565akm.c b/drivers/video/omap2/displays/panel-acx565akm.c
index d26f37ac69d8..74e7cf078505 100644
--- a/drivers/video/omap2/displays/panel-acx565akm.c
+++ b/drivers/video/omap2/displays/panel-acx565akm.c
@@ -532,6 +532,7 @@ static int acx_panel_probe(struct omap_dss_device *dssdev)
532 532
533 /*------- Backlight control --------*/ 533 /*------- Backlight control --------*/
534 534
535 memset(&props, 0, sizeof(props));
535 props.fb_blank = FB_BLANK_UNBLANK; 536 props.fb_blank = FB_BLANK_UNBLANK;
536 props.power = FB_BLANK_UNBLANK; 537 props.power = FB_BLANK_UNBLANK;
537 props.type = BACKLIGHT_RAW; 538 props.type = BACKLIGHT_RAW;
diff --git a/drivers/w1/masters/mxc_w1.c b/drivers/w1/masters/mxc_w1.c
index a3b6a74c67a7..1cc61a700fa8 100644
--- a/drivers/w1/masters/mxc_w1.c
+++ b/drivers/w1/masters/mxc_w1.c
@@ -138,7 +138,7 @@ static int __devinit mxc_w1_probe(struct platform_device *pdev)
138 goto failed_ioremap; 138 goto failed_ioremap;
139 } 139 }
140 140
141 clk_enable(mdev->clk); 141 clk_prepare_enable(mdev->clk);
142 __raw_writeb(mdev->clkdiv, mdev->regs + MXC_W1_TIME_DIVIDER); 142 __raw_writeb(mdev->clkdiv, mdev->regs + MXC_W1_TIME_DIVIDER);
143 143
144 mdev->bus_master.data = mdev; 144 mdev->bus_master.data = mdev;
@@ -178,7 +178,7 @@ static int __devexit mxc_w1_remove(struct platform_device *pdev)
178 178
179 iounmap(mdev->regs); 179 iounmap(mdev->regs);
180 release_mem_region(res->start, resource_size(res)); 180 release_mem_region(res->start, resource_size(res));
181 clk_disable(mdev->clk); 181 clk_disable_unprepare(mdev->clk);
182 clk_put(mdev->clk); 182 clk_put(mdev->clk);
183 183
184 platform_set_drvdata(pdev, NULL); 184 platform_set_drvdata(pdev, NULL);
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index a18bf6358eb8..d92d7488be16 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -565,6 +565,7 @@ config INTEL_SCU_WATCHDOG
565config ITCO_WDT 565config ITCO_WDT
566 tristate "Intel TCO Timer/Watchdog" 566 tristate "Intel TCO Timer/Watchdog"
567 depends on (X86 || IA64) && PCI 567 depends on (X86 || IA64) && PCI
568 select LPC_ICH
568 ---help--- 569 ---help---
569 Hardware driver for the intel TCO timer based watchdog devices. 570 Hardware driver for the intel TCO timer based watchdog devices.
570 These drivers are included in the Intel 82801 I/O Controller 571 These drivers are included in the Intel 82801 I/O Controller
diff --git a/drivers/watchdog/iTCO_vendor.h b/drivers/watchdog/iTCO_vendor.h
index 9e27e6422f66..3c57b45537a2 100644
--- a/drivers/watchdog/iTCO_vendor.h
+++ b/drivers/watchdog/iTCO_vendor.h
@@ -1,8 +1,8 @@
1/* iTCO Vendor Specific Support hooks */ 1/* iTCO Vendor Specific Support hooks */
2#ifdef CONFIG_ITCO_VENDOR_SUPPORT 2#ifdef CONFIG_ITCO_VENDOR_SUPPORT
3extern void iTCO_vendor_pre_start(unsigned long, unsigned int); 3extern void iTCO_vendor_pre_start(struct resource *, unsigned int);
4extern void iTCO_vendor_pre_stop(unsigned long); 4extern void iTCO_vendor_pre_stop(struct resource *);
5extern void iTCO_vendor_pre_keepalive(unsigned long, unsigned int); 5extern void iTCO_vendor_pre_keepalive(struct resource *, unsigned int);
6extern void iTCO_vendor_pre_set_heartbeat(unsigned int); 6extern void iTCO_vendor_pre_set_heartbeat(unsigned int);
7extern int iTCO_vendor_check_noreboot_on(void); 7extern int iTCO_vendor_check_noreboot_on(void);
8#else 8#else
diff --git a/drivers/watchdog/iTCO_vendor_support.c b/drivers/watchdog/iTCO_vendor_support.c
index 2721d29ce243..b6b2f90b5d44 100644
--- a/drivers/watchdog/iTCO_vendor_support.c
+++ b/drivers/watchdog/iTCO_vendor_support.c
@@ -35,11 +35,6 @@
35 35
36#include "iTCO_vendor.h" 36#include "iTCO_vendor.h"
37 37
38/* iTCO defines */
39#define SMI_EN (acpibase + 0x30) /* SMI Control and Enable Register */
40#define TCOBASE (acpibase + 0x60) /* TCO base address */
41#define TCO1_STS (TCOBASE + 0x04) /* TCO1 Status Register */
42
43/* List of vendor support modes */ 38/* List of vendor support modes */
44/* SuperMicro Pentium 3 Era 370SSE+-OEM1/P3TSSE */ 39/* SuperMicro Pentium 3 Era 370SSE+-OEM1/P3TSSE */
45#define SUPERMICRO_OLD_BOARD 1 40#define SUPERMICRO_OLD_BOARD 1
@@ -82,24 +77,24 @@ MODULE_PARM_DESC(vendorsupport, "iTCO vendor specific support mode, default="
82 * 20.6 seconds. 77 * 20.6 seconds.
83 */ 78 */
84 79
85static void supermicro_old_pre_start(unsigned long acpibase) 80static void supermicro_old_pre_start(struct resource *smires)
86{ 81{
87 unsigned long val32; 82 unsigned long val32;
88 83
89 /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */ 84 /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */
90 val32 = inl(SMI_EN); 85 val32 = inl(smires->start);
91 val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */ 86 val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
92 outl(val32, SMI_EN); /* Needed to activate watchdog */ 87 outl(val32, smires->start); /* Needed to activate watchdog */
93} 88}
94 89
95static void supermicro_old_pre_stop(unsigned long acpibase) 90static void supermicro_old_pre_stop(struct resource *smires)
96{ 91{
97 unsigned long val32; 92 unsigned long val32;
98 93
99 /* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */ 94 /* Bit 13: TCO_EN -> 1 = Enables the TCO logic to generate SMI# */
100 val32 = inl(SMI_EN); 95 val32 = inl(smires->start);
101 val32 |= 0x00002000; /* Turn on SMI clearing watchdog */ 96 val32 |= 0x00002000; /* Turn on SMI clearing watchdog */
102 outl(val32, SMI_EN); /* Needed to deactivate watchdog */ 97 outl(val32, smires->start); /* Needed to deactivate watchdog */
103} 98}
104 99
105/* 100/*
@@ -270,66 +265,66 @@ static void supermicro_new_pre_set_heartbeat(unsigned int heartbeat)
270 * Don't use this fix if you don't need to!!! 265 * Don't use this fix if you don't need to!!!
271 */ 266 */
272 267
273static void broken_bios_start(unsigned long acpibase) 268static void broken_bios_start(struct resource *smires)
274{ 269{
275 unsigned long val32; 270 unsigned long val32;
276 271
277 val32 = inl(SMI_EN); 272 val32 = inl(smires->start);
278 /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# 273 /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI#
279 Bit 0: GBL_SMI_EN -> 0 = No SMI# will be generated by ICH. */ 274 Bit 0: GBL_SMI_EN -> 0 = No SMI# will be generated by ICH. */
280 val32 &= 0xffffdffe; 275 val32 &= 0xffffdffe;
281 outl(val32, SMI_EN); 276 outl(val32, smires->start);
282} 277}
283 278
284static void broken_bios_stop(unsigned long acpibase) 279static void broken_bios_stop(struct resource *smires)
285{ 280{
286 unsigned long val32; 281 unsigned long val32;
287 282
288 val32 = inl(SMI_EN); 283 val32 = inl(smires->start);
289 /* Bit 13: TCO_EN -> 1 = Enables TCO logic generating an SMI# 284 /* Bit 13: TCO_EN -> 1 = Enables TCO logic generating an SMI#
290 Bit 0: GBL_SMI_EN -> 1 = Turn global SMI on again. */ 285 Bit 0: GBL_SMI_EN -> 1 = Turn global SMI on again. */
291 val32 |= 0x00002001; 286 val32 |= 0x00002001;
292 outl(val32, SMI_EN); 287 outl(val32, smires->start);
293} 288}
294 289
295/* 290/*
296 * Generic Support Functions 291 * Generic Support Functions
297 */ 292 */
298 293
299void iTCO_vendor_pre_start(unsigned long acpibase, 294void iTCO_vendor_pre_start(struct resource *smires,
300 unsigned int heartbeat) 295 unsigned int heartbeat)
301{ 296{
302 switch (vendorsupport) { 297 switch (vendorsupport) {
303 case SUPERMICRO_OLD_BOARD: 298 case SUPERMICRO_OLD_BOARD:
304 supermicro_old_pre_start(acpibase); 299 supermicro_old_pre_start(smires);
305 break; 300 break;
306 case SUPERMICRO_NEW_BOARD: 301 case SUPERMICRO_NEW_BOARD:
307 supermicro_new_pre_start(heartbeat); 302 supermicro_new_pre_start(heartbeat);
308 break; 303 break;
309 case BROKEN_BIOS: 304 case BROKEN_BIOS:
310 broken_bios_start(acpibase); 305 broken_bios_start(smires);
311 break; 306 break;
312 } 307 }
313} 308}
314EXPORT_SYMBOL(iTCO_vendor_pre_start); 309EXPORT_SYMBOL(iTCO_vendor_pre_start);
315 310
316void iTCO_vendor_pre_stop(unsigned long acpibase) 311void iTCO_vendor_pre_stop(struct resource *smires)
317{ 312{
318 switch (vendorsupport) { 313 switch (vendorsupport) {
319 case SUPERMICRO_OLD_BOARD: 314 case SUPERMICRO_OLD_BOARD:
320 supermicro_old_pre_stop(acpibase); 315 supermicro_old_pre_stop(smires);
321 break; 316 break;
322 case SUPERMICRO_NEW_BOARD: 317 case SUPERMICRO_NEW_BOARD:
323 supermicro_new_pre_stop(); 318 supermicro_new_pre_stop();
324 break; 319 break;
325 case BROKEN_BIOS: 320 case BROKEN_BIOS:
326 broken_bios_stop(acpibase); 321 broken_bios_stop(smires);
327 break; 322 break;
328 } 323 }
329} 324}
330EXPORT_SYMBOL(iTCO_vendor_pre_stop); 325EXPORT_SYMBOL(iTCO_vendor_pre_stop);
331 326
332void iTCO_vendor_pre_keepalive(unsigned long acpibase, unsigned int heartbeat) 327void iTCO_vendor_pre_keepalive(struct resource *smires, unsigned int heartbeat)
333{ 328{
334 if (vendorsupport == SUPERMICRO_NEW_BOARD) 329 if (vendorsupport == SUPERMICRO_NEW_BOARD)
335 supermicro_new_pre_set_heartbeat(heartbeat); 330 supermicro_new_pre_set_heartbeat(heartbeat);
diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c
index 9fecb95645a3..741528b032e2 100644
--- a/drivers/watchdog/iTCO_wdt.c
+++ b/drivers/watchdog/iTCO_wdt.c
@@ -66,316 +66,16 @@
66#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */ 66#include <linux/spinlock.h> /* For spin_lock/spin_unlock/... */
67#include <linux/uaccess.h> /* For copy_to_user/put_user/... */ 67#include <linux/uaccess.h> /* For copy_to_user/put_user/... */
68#include <linux/io.h> /* For inb/outb/... */ 68#include <linux/io.h> /* For inb/outb/... */
69#include <linux/mfd/core.h>
70#include <linux/mfd/lpc_ich.h>
69 71
70#include "iTCO_vendor.h" 72#include "iTCO_vendor.h"
71 73
72/* TCO related info */
73enum iTCO_chipsets {
74 TCO_ICH = 0, /* ICH */
75 TCO_ICH0, /* ICH0 */
76 TCO_ICH2, /* ICH2 */
77 TCO_ICH2M, /* ICH2-M */
78 TCO_ICH3, /* ICH3-S */
79 TCO_ICH3M, /* ICH3-M */
80 TCO_ICH4, /* ICH4 */
81 TCO_ICH4M, /* ICH4-M */
82 TCO_CICH, /* C-ICH */
83 TCO_ICH5, /* ICH5 & ICH5R */
84 TCO_6300ESB, /* 6300ESB */
85 TCO_ICH6, /* ICH6 & ICH6R */
86 TCO_ICH6M, /* ICH6-M */
87 TCO_ICH6W, /* ICH6W & ICH6RW */
88 TCO_631XESB, /* 631xESB/632xESB */
89 TCO_ICH7, /* ICH7 & ICH7R */
90 TCO_ICH7DH, /* ICH7DH */
91 TCO_ICH7M, /* ICH7-M & ICH7-U */
92 TCO_ICH7MDH, /* ICH7-M DH */
93 TCO_NM10, /* NM10 */
94 TCO_ICH8, /* ICH8 & ICH8R */
95 TCO_ICH8DH, /* ICH8DH */
96 TCO_ICH8DO, /* ICH8DO */
97 TCO_ICH8M, /* ICH8M */
98 TCO_ICH8ME, /* ICH8M-E */
99 TCO_ICH9, /* ICH9 */
100 TCO_ICH9R, /* ICH9R */
101 TCO_ICH9DH, /* ICH9DH */
102 TCO_ICH9DO, /* ICH9DO */
103 TCO_ICH9M, /* ICH9M */
104 TCO_ICH9ME, /* ICH9M-E */
105 TCO_ICH10, /* ICH10 */
106 TCO_ICH10R, /* ICH10R */
107 TCO_ICH10D, /* ICH10D */
108 TCO_ICH10DO, /* ICH10DO */
109 TCO_PCH, /* PCH Desktop Full Featured */
110 TCO_PCHM, /* PCH Mobile Full Featured */
111 TCO_P55, /* P55 */
112 TCO_PM55, /* PM55 */
113 TCO_H55, /* H55 */
114 TCO_QM57, /* QM57 */
115 TCO_H57, /* H57 */
116 TCO_HM55, /* HM55 */
117 TCO_Q57, /* Q57 */
118 TCO_HM57, /* HM57 */
119 TCO_PCHMSFF, /* PCH Mobile SFF Full Featured */
120 TCO_QS57, /* QS57 */
121 TCO_3400, /* 3400 */
122 TCO_3420, /* 3420 */
123 TCO_3450, /* 3450 */
124 TCO_EP80579, /* EP80579 */
125 TCO_CPT, /* Cougar Point */
126 TCO_CPTD, /* Cougar Point Desktop */
127 TCO_CPTM, /* Cougar Point Mobile */
128 TCO_PBG, /* Patsburg */
129 TCO_DH89XXCC, /* DH89xxCC */
130 TCO_PPT, /* Panther Point */
131 TCO_LPT, /* Lynx Point */
132};
133
134static struct {
135 char *name;
136 unsigned int iTCO_version;
137} iTCO_chipset_info[] __devinitdata = {
138 {"ICH", 1},
139 {"ICH0", 1},
140 {"ICH2", 1},
141 {"ICH2-M", 1},
142 {"ICH3-S", 1},
143 {"ICH3-M", 1},
144 {"ICH4", 1},
145 {"ICH4-M", 1},
146 {"C-ICH", 1},
147 {"ICH5 or ICH5R", 1},
148 {"6300ESB", 1},
149 {"ICH6 or ICH6R", 2},
150 {"ICH6-M", 2},
151 {"ICH6W or ICH6RW", 2},
152 {"631xESB/632xESB", 2},
153 {"ICH7 or ICH7R", 2},
154 {"ICH7DH", 2},
155 {"ICH7-M or ICH7-U", 2},
156 {"ICH7-M DH", 2},
157 {"NM10", 2},
158 {"ICH8 or ICH8R", 2},
159 {"ICH8DH", 2},
160 {"ICH8DO", 2},
161 {"ICH8M", 2},
162 {"ICH8M-E", 2},
163 {"ICH9", 2},
164 {"ICH9R", 2},
165 {"ICH9DH", 2},
166 {"ICH9DO", 2},
167 {"ICH9M", 2},
168 {"ICH9M-E", 2},
169 {"ICH10", 2},
170 {"ICH10R", 2},
171 {"ICH10D", 2},
172 {"ICH10DO", 2},
173 {"PCH Desktop Full Featured", 2},
174 {"PCH Mobile Full Featured", 2},
175 {"P55", 2},
176 {"PM55", 2},
177 {"H55", 2},
178 {"QM57", 2},
179 {"H57", 2},
180 {"HM55", 2},
181 {"Q57", 2},
182 {"HM57", 2},
183 {"PCH Mobile SFF Full Featured", 2},
184 {"QS57", 2},
185 {"3400", 2},
186 {"3420", 2},
187 {"3450", 2},
188 {"EP80579", 2},
189 {"Cougar Point", 2},
190 {"Cougar Point Desktop", 2},
191 {"Cougar Point Mobile", 2},
192 {"Patsburg", 2},
193 {"DH89xxCC", 2},
194 {"Panther Point", 2},
195 {"Lynx Point", 2},
196 {NULL, 0}
197};
198
199/*
200 * This data only exists for exporting the supported PCI ids
201 * via MODULE_DEVICE_TABLE. We do not actually register a
202 * pci_driver, because the I/O Controller Hub has also other
203 * functions that probably will be registered by other drivers.
204 */
205static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = {
206 { PCI_VDEVICE(INTEL, 0x2410), TCO_ICH},
207 { PCI_VDEVICE(INTEL, 0x2420), TCO_ICH0},
208 { PCI_VDEVICE(INTEL, 0x2440), TCO_ICH2},
209 { PCI_VDEVICE(INTEL, 0x244c), TCO_ICH2M},
210 { PCI_VDEVICE(INTEL, 0x2480), TCO_ICH3},
211 { PCI_VDEVICE(INTEL, 0x248c), TCO_ICH3M},
212 { PCI_VDEVICE(INTEL, 0x24c0), TCO_ICH4},
213 { PCI_VDEVICE(INTEL, 0x24cc), TCO_ICH4M},
214 { PCI_VDEVICE(INTEL, 0x2450), TCO_CICH},
215 { PCI_VDEVICE(INTEL, 0x24d0), TCO_ICH5},
216 { PCI_VDEVICE(INTEL, 0x25a1), TCO_6300ESB},
217 { PCI_VDEVICE(INTEL, 0x2640), TCO_ICH6},
218 { PCI_VDEVICE(INTEL, 0x2641), TCO_ICH6M},
219 { PCI_VDEVICE(INTEL, 0x2642), TCO_ICH6W},
220 { PCI_VDEVICE(INTEL, 0x2670), TCO_631XESB},
221 { PCI_VDEVICE(INTEL, 0x2671), TCO_631XESB},
222 { PCI_VDEVICE(INTEL, 0x2672), TCO_631XESB},
223 { PCI_VDEVICE(INTEL, 0x2673), TCO_631XESB},
224 { PCI_VDEVICE(INTEL, 0x2674), TCO_631XESB},
225 { PCI_VDEVICE(INTEL, 0x2675), TCO_631XESB},
226 { PCI_VDEVICE(INTEL, 0x2676), TCO_631XESB},
227 { PCI_VDEVICE(INTEL, 0x2677), TCO_631XESB},
228 { PCI_VDEVICE(INTEL, 0x2678), TCO_631XESB},
229 { PCI_VDEVICE(INTEL, 0x2679), TCO_631XESB},
230 { PCI_VDEVICE(INTEL, 0x267a), TCO_631XESB},
231 { PCI_VDEVICE(INTEL, 0x267b), TCO_631XESB},
232 { PCI_VDEVICE(INTEL, 0x267c), TCO_631XESB},
233 { PCI_VDEVICE(INTEL, 0x267d), TCO_631XESB},
234 { PCI_VDEVICE(INTEL, 0x267e), TCO_631XESB},
235 { PCI_VDEVICE(INTEL, 0x267f), TCO_631XESB},
236 { PCI_VDEVICE(INTEL, 0x27b8), TCO_ICH7},
237 { PCI_VDEVICE(INTEL, 0x27b0), TCO_ICH7DH},
238 { PCI_VDEVICE(INTEL, 0x27b9), TCO_ICH7M},
239 { PCI_VDEVICE(INTEL, 0x27bd), TCO_ICH7MDH},
240 { PCI_VDEVICE(INTEL, 0x27bc), TCO_NM10},
241 { PCI_VDEVICE(INTEL, 0x2810), TCO_ICH8},
242 { PCI_VDEVICE(INTEL, 0x2812), TCO_ICH8DH},
243 { PCI_VDEVICE(INTEL, 0x2814), TCO_ICH8DO},
244 { PCI_VDEVICE(INTEL, 0x2815), TCO_ICH8M},
245 { PCI_VDEVICE(INTEL, 0x2811), TCO_ICH8ME},
246 { PCI_VDEVICE(INTEL, 0x2918), TCO_ICH9},
247 { PCI_VDEVICE(INTEL, 0x2916), TCO_ICH9R},
248 { PCI_VDEVICE(INTEL, 0x2912), TCO_ICH9DH},
249 { PCI_VDEVICE(INTEL, 0x2914), TCO_ICH9DO},
250 { PCI_VDEVICE(INTEL, 0x2919), TCO_ICH9M},
251 { PCI_VDEVICE(INTEL, 0x2917), TCO_ICH9ME},
252 { PCI_VDEVICE(INTEL, 0x3a18), TCO_ICH10},
253 { PCI_VDEVICE(INTEL, 0x3a16), TCO_ICH10R},
254 { PCI_VDEVICE(INTEL, 0x3a1a), TCO_ICH10D},
255 { PCI_VDEVICE(INTEL, 0x3a14), TCO_ICH10DO},
256 { PCI_VDEVICE(INTEL, 0x3b00), TCO_PCH},
257 { PCI_VDEVICE(INTEL, 0x3b01), TCO_PCHM},
258 { PCI_VDEVICE(INTEL, 0x3b02), TCO_P55},
259 { PCI_VDEVICE(INTEL, 0x3b03), TCO_PM55},
260 { PCI_VDEVICE(INTEL, 0x3b06), TCO_H55},
261 { PCI_VDEVICE(INTEL, 0x3b07), TCO_QM57},
262 { PCI_VDEVICE(INTEL, 0x3b08), TCO_H57},
263 { PCI_VDEVICE(INTEL, 0x3b09), TCO_HM55},
264 { PCI_VDEVICE(INTEL, 0x3b0a), TCO_Q57},
265 { PCI_VDEVICE(INTEL, 0x3b0b), TCO_HM57},
266 { PCI_VDEVICE(INTEL, 0x3b0d), TCO_PCHMSFF},
267 { PCI_VDEVICE(INTEL, 0x3b0f), TCO_QS57},
268 { PCI_VDEVICE(INTEL, 0x3b12), TCO_3400},
269 { PCI_VDEVICE(INTEL, 0x3b14), TCO_3420},
270 { PCI_VDEVICE(INTEL, 0x3b16), TCO_3450},
271 { PCI_VDEVICE(INTEL, 0x5031), TCO_EP80579},
272 { PCI_VDEVICE(INTEL, 0x1c41), TCO_CPT},
273 { PCI_VDEVICE(INTEL, 0x1c42), TCO_CPTD},
274 { PCI_VDEVICE(INTEL, 0x1c43), TCO_CPTM},
275 { PCI_VDEVICE(INTEL, 0x1c44), TCO_CPT},
276 { PCI_VDEVICE(INTEL, 0x1c45), TCO_CPT},
277 { PCI_VDEVICE(INTEL, 0x1c46), TCO_CPT},
278 { PCI_VDEVICE(INTEL, 0x1c47), TCO_CPT},
279 { PCI_VDEVICE(INTEL, 0x1c48), TCO_CPT},
280 { PCI_VDEVICE(INTEL, 0x1c49), TCO_CPT},
281 { PCI_VDEVICE(INTEL, 0x1c4a), TCO_CPT},
282 { PCI_VDEVICE(INTEL, 0x1c4b), TCO_CPT},
283 { PCI_VDEVICE(INTEL, 0x1c4c), TCO_CPT},
284 { PCI_VDEVICE(INTEL, 0x1c4d), TCO_CPT},
285 { PCI_VDEVICE(INTEL, 0x1c4e), TCO_CPT},
286 { PCI_VDEVICE(INTEL, 0x1c4f), TCO_CPT},
287 { PCI_VDEVICE(INTEL, 0x1c50), TCO_CPT},
288 { PCI_VDEVICE(INTEL, 0x1c51), TCO_CPT},
289 { PCI_VDEVICE(INTEL, 0x1c52), TCO_CPT},
290 { PCI_VDEVICE(INTEL, 0x1c53), TCO_CPT},
291 { PCI_VDEVICE(INTEL, 0x1c54), TCO_CPT},
292 { PCI_VDEVICE(INTEL, 0x1c55), TCO_CPT},
293 { PCI_VDEVICE(INTEL, 0x1c56), TCO_CPT},
294 { PCI_VDEVICE(INTEL, 0x1c57), TCO_CPT},
295 { PCI_VDEVICE(INTEL, 0x1c58), TCO_CPT},
296 { PCI_VDEVICE(INTEL, 0x1c59), TCO_CPT},
297 { PCI_VDEVICE(INTEL, 0x1c5a), TCO_CPT},
298 { PCI_VDEVICE(INTEL, 0x1c5b), TCO_CPT},
299 { PCI_VDEVICE(INTEL, 0x1c5c), TCO_CPT},
300 { PCI_VDEVICE(INTEL, 0x1c5d), TCO_CPT},
301 { PCI_VDEVICE(INTEL, 0x1c5e), TCO_CPT},
302 { PCI_VDEVICE(INTEL, 0x1c5f), TCO_CPT},
303 { PCI_VDEVICE(INTEL, 0x1d40), TCO_PBG},
304 { PCI_VDEVICE(INTEL, 0x1d41), TCO_PBG},
305 { PCI_VDEVICE(INTEL, 0x2310), TCO_DH89XXCC},
306 { PCI_VDEVICE(INTEL, 0x1e40), TCO_PPT},
307 { PCI_VDEVICE(INTEL, 0x1e41), TCO_PPT},
308 { PCI_VDEVICE(INTEL, 0x1e42), TCO_PPT},
309 { PCI_VDEVICE(INTEL, 0x1e43), TCO_PPT},
310 { PCI_VDEVICE(INTEL, 0x1e44), TCO_PPT},
311 { PCI_VDEVICE(INTEL, 0x1e45), TCO_PPT},
312 { PCI_VDEVICE(INTEL, 0x1e46), TCO_PPT},
313 { PCI_VDEVICE(INTEL, 0x1e47), TCO_PPT},
314 { PCI_VDEVICE(INTEL, 0x1e48), TCO_PPT},
315 { PCI_VDEVICE(INTEL, 0x1e49), TCO_PPT},
316 { PCI_VDEVICE(INTEL, 0x1e4a), TCO_PPT},
317 { PCI_VDEVICE(INTEL, 0x1e4b), TCO_PPT},
318 { PCI_VDEVICE(INTEL, 0x1e4c), TCO_PPT},
319 { PCI_VDEVICE(INTEL, 0x1e4d), TCO_PPT},
320 { PCI_VDEVICE(INTEL, 0x1e4e), TCO_PPT},
321 { PCI_VDEVICE(INTEL, 0x1e4f), TCO_PPT},
322 { PCI_VDEVICE(INTEL, 0x1e50), TCO_PPT},
323 { PCI_VDEVICE(INTEL, 0x1e51), TCO_PPT},
324 { PCI_VDEVICE(INTEL, 0x1e52), TCO_PPT},
325 { PCI_VDEVICE(INTEL, 0x1e53), TCO_PPT},
326 { PCI_VDEVICE(INTEL, 0x1e54), TCO_PPT},
327 { PCI_VDEVICE(INTEL, 0x1e55), TCO_PPT},
328 { PCI_VDEVICE(INTEL, 0x1e56), TCO_PPT},
329 { PCI_VDEVICE(INTEL, 0x1e57), TCO_PPT},
330 { PCI_VDEVICE(INTEL, 0x1e58), TCO_PPT},
331 { PCI_VDEVICE(INTEL, 0x1e59), TCO_PPT},
332 { PCI_VDEVICE(INTEL, 0x1e5a), TCO_PPT},
333 { PCI_VDEVICE(INTEL, 0x1e5b), TCO_PPT},
334 { PCI_VDEVICE(INTEL, 0x1e5c), TCO_PPT},
335 { PCI_VDEVICE(INTEL, 0x1e5d), TCO_PPT},
336 { PCI_VDEVICE(INTEL, 0x1e5e), TCO_PPT},
337 { PCI_VDEVICE(INTEL, 0x1e5f), TCO_PPT},
338 { PCI_VDEVICE(INTEL, 0x8c40), TCO_LPT},
339 { PCI_VDEVICE(INTEL, 0x8c41), TCO_LPT},
340 { PCI_VDEVICE(INTEL, 0x8c42), TCO_LPT},
341 { PCI_VDEVICE(INTEL, 0x8c43), TCO_LPT},
342 { PCI_VDEVICE(INTEL, 0x8c44), TCO_LPT},
343 { PCI_VDEVICE(INTEL, 0x8c45), TCO_LPT},
344 { PCI_VDEVICE(INTEL, 0x8c46), TCO_LPT},
345 { PCI_VDEVICE(INTEL, 0x8c47), TCO_LPT},
346 { PCI_VDEVICE(INTEL, 0x8c48), TCO_LPT},
347 { PCI_VDEVICE(INTEL, 0x8c49), TCO_LPT},
348 { PCI_VDEVICE(INTEL, 0x8c4a), TCO_LPT},
349 { PCI_VDEVICE(INTEL, 0x8c4b), TCO_LPT},
350 { PCI_VDEVICE(INTEL, 0x8c4c), TCO_LPT},
351 { PCI_VDEVICE(INTEL, 0x8c4d), TCO_LPT},
352 { PCI_VDEVICE(INTEL, 0x8c4e), TCO_LPT},
353 { PCI_VDEVICE(INTEL, 0x8c4f), TCO_LPT},
354 { PCI_VDEVICE(INTEL, 0x8c50), TCO_LPT},
355 { PCI_VDEVICE(INTEL, 0x8c51), TCO_LPT},
356 { PCI_VDEVICE(INTEL, 0x8c52), TCO_LPT},
357 { PCI_VDEVICE(INTEL, 0x8c53), TCO_LPT},
358 { PCI_VDEVICE(INTEL, 0x8c54), TCO_LPT},
359 { PCI_VDEVICE(INTEL, 0x8c55), TCO_LPT},
360 { PCI_VDEVICE(INTEL, 0x8c56), TCO_LPT},
361 { PCI_VDEVICE(INTEL, 0x8c57), TCO_LPT},
362 { PCI_VDEVICE(INTEL, 0x8c58), TCO_LPT},
363 { PCI_VDEVICE(INTEL, 0x8c59), TCO_LPT},
364 { PCI_VDEVICE(INTEL, 0x8c5a), TCO_LPT},
365 { PCI_VDEVICE(INTEL, 0x8c5b), TCO_LPT},
366 { PCI_VDEVICE(INTEL, 0x8c5c), TCO_LPT},
367 { PCI_VDEVICE(INTEL, 0x8c5d), TCO_LPT},
368 { PCI_VDEVICE(INTEL, 0x8c5e), TCO_LPT},
369 { PCI_VDEVICE(INTEL, 0x8c5f), TCO_LPT},
370 { 0, }, /* End of list */
371};
372MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl);
373
374/* Address definitions for the TCO */ 74/* Address definitions for the TCO */
375/* TCO base address */ 75/* TCO base address */
376#define TCOBASE (iTCO_wdt_private.ACPIBASE + 0x60) 76#define TCOBASE (iTCO_wdt_private.tco_res->start)
377/* SMI Control and Enable Register */ 77/* SMI Control and Enable Register */
378#define SMI_EN (iTCO_wdt_private.ACPIBASE + 0x30) 78#define SMI_EN (iTCO_wdt_private.smi_res->start)
379 79
380#define TCO_RLD (TCOBASE + 0x00) /* TCO Timer Reload and Curr. Value */ 80#define TCO_RLD (TCOBASE + 0x00) /* TCO Timer Reload and Curr. Value */
381#define TCOv1_TMR (TCOBASE + 0x01) /* TCOv1 Timer Initial Value */ 81#define TCOv1_TMR (TCOBASE + 0x01) /* TCOv1 Timer Initial Value */
@@ -393,19 +93,18 @@ static char expect_release;
393static struct { /* this is private data for the iTCO_wdt device */ 93static struct { /* this is private data for the iTCO_wdt device */
394 /* TCO version/generation */ 94 /* TCO version/generation */
395 unsigned int iTCO_version; 95 unsigned int iTCO_version;
396 /* The device's ACPIBASE address (TCOBASE = ACPIBASE+0x60) */ 96 struct resource *tco_res;
397 unsigned long ACPIBASE; 97 struct resource *smi_res;
98 struct resource *gcs_res;
398 /* NO_REBOOT flag is Memory-Mapped GCS register bit 5 (TCO version 2)*/ 99 /* NO_REBOOT flag is Memory-Mapped GCS register bit 5 (TCO version 2)*/
399 unsigned long __iomem *gcs; 100 unsigned long __iomem *gcs;
400 /* the lock for io operations */ 101 /* the lock for io operations */
401 spinlock_t io_lock; 102 spinlock_t io_lock;
103 struct platform_device *dev;
402 /* the PCI-device */ 104 /* the PCI-device */
403 struct pci_dev *pdev; 105 struct pci_dev *pdev;
404} iTCO_wdt_private; 106} iTCO_wdt_private;
405 107
406/* the watchdog platform device */
407static struct platform_device *iTCO_wdt_platform_device;
408
409/* module parameters */ 108/* module parameters */
410#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat */ 109#define WATCHDOG_HEARTBEAT 30 /* 30 sec default heartbeat */
411static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */ 110static int heartbeat = WATCHDOG_HEARTBEAT; /* in seconds */
@@ -485,7 +184,7 @@ static int iTCO_wdt_start(void)
485 184
486 spin_lock(&iTCO_wdt_private.io_lock); 185 spin_lock(&iTCO_wdt_private.io_lock);
487 186
488 iTCO_vendor_pre_start(iTCO_wdt_private.ACPIBASE, heartbeat); 187 iTCO_vendor_pre_start(iTCO_wdt_private.smi_res, heartbeat);
489 188
490 /* disable chipset's NO_REBOOT bit */ 189 /* disable chipset's NO_REBOOT bit */
491 if (iTCO_wdt_unset_NO_REBOOT_bit()) { 190 if (iTCO_wdt_unset_NO_REBOOT_bit()) {
@@ -519,7 +218,7 @@ static int iTCO_wdt_stop(void)
519 218
520 spin_lock(&iTCO_wdt_private.io_lock); 219 spin_lock(&iTCO_wdt_private.io_lock);
521 220
522 iTCO_vendor_pre_stop(iTCO_wdt_private.ACPIBASE); 221 iTCO_vendor_pre_stop(iTCO_wdt_private.smi_res);
523 222
524 /* Bit 11: TCO Timer Halt -> 1 = The TCO timer is disabled */ 223 /* Bit 11: TCO Timer Halt -> 1 = The TCO timer is disabled */
525 val = inw(TCO1_CNT); 224 val = inw(TCO1_CNT);
@@ -541,7 +240,7 @@ static int iTCO_wdt_keepalive(void)
541{ 240{
542 spin_lock(&iTCO_wdt_private.io_lock); 241 spin_lock(&iTCO_wdt_private.io_lock);
543 242
544 iTCO_vendor_pre_keepalive(iTCO_wdt_private.ACPIBASE, heartbeat); 243 iTCO_vendor_pre_keepalive(iTCO_wdt_private.smi_res, heartbeat);
545 244
546 /* Reload the timer by writing to the TCO Timer Counter register */ 245 /* Reload the timer by writing to the TCO Timer Counter register */
547 if (iTCO_wdt_private.iTCO_version == 2) 246 if (iTCO_wdt_private.iTCO_version == 2)
@@ -786,83 +485,120 @@ static struct miscdevice iTCO_wdt_miscdev = {
786 * Init & exit routines 485 * Init & exit routines
787 */ 486 */
788 487
789static int __devinit iTCO_wdt_init(struct pci_dev *pdev, 488static void __devexit iTCO_wdt_cleanup(void)
790 const struct pci_device_id *ent, struct platform_device *dev) 489{
490 /* Stop the timer before we leave */
491 if (!nowayout)
492 iTCO_wdt_stop();
493
494 /* Deregister */
495 misc_deregister(&iTCO_wdt_miscdev);
496
497 /* release resources */
498 release_region(iTCO_wdt_private.tco_res->start,
499 resource_size(iTCO_wdt_private.tco_res));
500 release_region(iTCO_wdt_private.smi_res->start,
501 resource_size(iTCO_wdt_private.smi_res));
502 if (iTCO_wdt_private.iTCO_version == 2) {
503 iounmap(iTCO_wdt_private.gcs);
504 release_mem_region(iTCO_wdt_private.gcs_res->start,
505 resource_size(iTCO_wdt_private.gcs_res));
506 }
507
508 iTCO_wdt_private.tco_res = NULL;
509 iTCO_wdt_private.smi_res = NULL;
510 iTCO_wdt_private.gcs_res = NULL;
511 iTCO_wdt_private.gcs = NULL;
512}
513
514static int __devinit iTCO_wdt_probe(struct platform_device *dev)
791{ 515{
792 int ret; 516 int ret = -ENODEV;
793 u32 base_address;
794 unsigned long RCBA;
795 unsigned long val32; 517 unsigned long val32;
518 struct lpc_ich_info *ich_info = dev->dev.platform_data;
519
520 if (!ich_info)
521 goto out;
522
523 spin_lock_init(&iTCO_wdt_private.io_lock);
524
525 iTCO_wdt_private.tco_res =
526 platform_get_resource(dev, IORESOURCE_IO, ICH_RES_IO_TCO);
527 if (!iTCO_wdt_private.tco_res)
528 goto out;
529
530 iTCO_wdt_private.smi_res =
531 platform_get_resource(dev, IORESOURCE_IO, ICH_RES_IO_SMI);
532 if (!iTCO_wdt_private.smi_res)
533 goto out;
534
535 iTCO_wdt_private.iTCO_version = ich_info->iTCO_version;
536 iTCO_wdt_private.dev = dev;
537 iTCO_wdt_private.pdev = to_pci_dev(dev->dev.parent);
796 538
797 /* 539 /*
798 * Find the ACPI/PM base I/O address which is the base 540 * Get the Memory-Mapped GCS register, we need it for the
799 * for the TCO registers (TCOBASE=ACPIBASE + 0x60) 541 * NO_REBOOT flag (TCO v2).
800 * ACPIBASE is bits [15:7] from 0x40-0x43
801 */ 542 */
802 pci_read_config_dword(pdev, 0x40, &base_address);
803 base_address &= 0x0000ff80;
804 if (base_address == 0x00000000) {
805 /* Something's wrong here, ACPIBASE has to be set */
806 pr_err("failed to get TCOBASE address, device disabled by hardware/BIOS\n");
807 return -ENODEV;
808 }
809 iTCO_wdt_private.iTCO_version =
810 iTCO_chipset_info[ent->driver_data].iTCO_version;
811 iTCO_wdt_private.ACPIBASE = base_address;
812 iTCO_wdt_private.pdev = pdev;
813
814 /* Get the Memory-Mapped GCS register, we need it for the
815 NO_REBOOT flag (TCO v2). To get access to it you have to
816 read RCBA from PCI Config space 0xf0 and use it as base.
817 GCS = RCBA + ICH6_GCS(0x3410). */
818 if (iTCO_wdt_private.iTCO_version == 2) { 543 if (iTCO_wdt_private.iTCO_version == 2) {
819 pci_read_config_dword(pdev, 0xf0, &base_address); 544 iTCO_wdt_private.gcs_res = platform_get_resource(dev,
820 if ((base_address & 1) == 0) { 545 IORESOURCE_MEM,
821 pr_err("RCBA is disabled by hardware/BIOS, device disabled\n"); 546 ICH_RES_MEM_GCS);
822 ret = -ENODEV; 547
548 if (!iTCO_wdt_private.gcs_res)
549 goto out;
550
551 if (!request_mem_region(iTCO_wdt_private.gcs_res->start,
552 resource_size(iTCO_wdt_private.gcs_res), dev->name)) {
553 ret = -EBUSY;
823 goto out; 554 goto out;
824 } 555 }
825 RCBA = base_address & 0xffffc000; 556 iTCO_wdt_private.gcs = ioremap(iTCO_wdt_private.gcs_res->start,
826 iTCO_wdt_private.gcs = ioremap((RCBA + 0x3410), 4); 557 resource_size(iTCO_wdt_private.gcs_res));
558 if (!iTCO_wdt_private.gcs) {
559 ret = -EIO;
560 goto unreg_gcs;
561 }
827 } 562 }
828 563
829 /* Check chipset's NO_REBOOT bit */ 564 /* Check chipset's NO_REBOOT bit */
830 if (iTCO_wdt_unset_NO_REBOOT_bit() && iTCO_vendor_check_noreboot_on()) { 565 if (iTCO_wdt_unset_NO_REBOOT_bit() && iTCO_vendor_check_noreboot_on()) {
831 pr_info("unable to reset NO_REBOOT flag, device disabled by hardware/BIOS\n"); 566 pr_info("unable to reset NO_REBOOT flag, device disabled by hardware/BIOS\n");
832 ret = -ENODEV; /* Cannot reset NO_REBOOT bit */ 567 ret = -ENODEV; /* Cannot reset NO_REBOOT bit */
833 goto out_unmap; 568 goto unmap_gcs;
834 } 569 }
835 570
836 /* Set the NO_REBOOT bit to prevent later reboots, just for sure */ 571 /* Set the NO_REBOOT bit to prevent later reboots, just for sure */
837 iTCO_wdt_set_NO_REBOOT_bit(); 572 iTCO_wdt_set_NO_REBOOT_bit();
838 573
839 /* The TCO logic uses the TCO_EN bit in the SMI_EN register */ 574 /* The TCO logic uses the TCO_EN bit in the SMI_EN register */
840 if (!request_region(SMI_EN, 4, "iTCO_wdt")) { 575 if (!request_region(iTCO_wdt_private.smi_res->start,
841 pr_err("I/O address 0x%04lx already in use, device disabled\n", 576 resource_size(iTCO_wdt_private.smi_res), dev->name)) {
577 pr_err("I/O address 0x%04llx already in use, device disabled\n",
842 SMI_EN); 578 SMI_EN);
843 ret = -EIO; 579 ret = -EBUSY;
844 goto out_unmap; 580 goto unmap_gcs;
845 } 581 }
846 if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version) { 582 if (turn_SMI_watchdog_clear_off >= iTCO_wdt_private.iTCO_version) {
847 /* Bit 13: TCO_EN -> 0 = Disables TCO logic generating an SMI# */ 583 /*
584 * Bit 13: TCO_EN -> 0
585 * Disables TCO logic generating an SMI#
586 */
848 val32 = inl(SMI_EN); 587 val32 = inl(SMI_EN);
849 val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */ 588 val32 &= 0xffffdfff; /* Turn off SMI clearing watchdog */
850 outl(val32, SMI_EN); 589 outl(val32, SMI_EN);
851 } 590 }
852 591
853 /* The TCO I/O registers reside in a 32-byte range pointed to 592 if (!request_region(iTCO_wdt_private.tco_res->start,
854 by the TCOBASE value */ 593 resource_size(iTCO_wdt_private.tco_res), dev->name)) {
855 if (!request_region(TCOBASE, 0x20, "iTCO_wdt")) { 594 pr_err("I/O address 0x%04llx already in use, device disabled\n",
856 pr_err("I/O address 0x%04lx already in use, device disabled\n",
857 TCOBASE); 595 TCOBASE);
858 ret = -EIO; 596 ret = -EBUSY;
859 goto unreg_smi_en; 597 goto unreg_smi;
860 } 598 }
861 599
862 pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04lx)\n", 600 pr_info("Found a %s TCO device (Version=%d, TCOBASE=0x%04llx)\n",
863 iTCO_chipset_info[ent->driver_data].name, 601 ich_info->name, ich_info->iTCO_version, TCOBASE);
864 iTCO_chipset_info[ent->driver_data].iTCO_version,
865 TCOBASE);
866 602
867 /* Clear out the (probably old) status */ 603 /* Clear out the (probably old) status */
868 outw(0x0008, TCO1_STS); /* Clear the Time Out Status bit */ 604 outw(0x0008, TCO1_STS); /* Clear the Time Out Status bit */
@@ -883,7 +619,7 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
883 if (ret != 0) { 619 if (ret != 0) {
884 pr_err("cannot register miscdev on minor=%d (err=%d)\n", 620 pr_err("cannot register miscdev on minor=%d (err=%d)\n",
885 WATCHDOG_MINOR, ret); 621 WATCHDOG_MINOR, ret);
886 goto unreg_region; 622 goto unreg_tco;
887 } 623 }
888 624
889 pr_info("initialized. heartbeat=%d sec (nowayout=%d)\n", 625 pr_info("initialized. heartbeat=%d sec (nowayout=%d)\n",
@@ -891,62 +627,31 @@ static int __devinit iTCO_wdt_init(struct pci_dev *pdev,
891 627
892 return 0; 628 return 0;
893 629
894unreg_region: 630unreg_tco:
895 release_region(TCOBASE, 0x20); 631 release_region(iTCO_wdt_private.tco_res->start,
896unreg_smi_en: 632 resource_size(iTCO_wdt_private.tco_res));
897 release_region(SMI_EN, 4); 633unreg_smi:
898out_unmap: 634 release_region(iTCO_wdt_private.smi_res->start,
635 resource_size(iTCO_wdt_private.smi_res));
636unmap_gcs:
899 if (iTCO_wdt_private.iTCO_version == 2) 637 if (iTCO_wdt_private.iTCO_version == 2)
900 iounmap(iTCO_wdt_private.gcs); 638 iounmap(iTCO_wdt_private.gcs);
901out: 639unreg_gcs:
902 iTCO_wdt_private.ACPIBASE = 0;
903 return ret;
904}
905
906static void __devexit iTCO_wdt_cleanup(void)
907{
908 /* Stop the timer before we leave */
909 if (!nowayout)
910 iTCO_wdt_stop();
911
912 /* Deregister */
913 misc_deregister(&iTCO_wdt_miscdev);
914 release_region(TCOBASE, 0x20);
915 release_region(SMI_EN, 4);
916 if (iTCO_wdt_private.iTCO_version == 2) 640 if (iTCO_wdt_private.iTCO_version == 2)
917 iounmap(iTCO_wdt_private.gcs); 641 release_mem_region(iTCO_wdt_private.gcs_res->start,
918 pci_dev_put(iTCO_wdt_private.pdev); 642 resource_size(iTCO_wdt_private.gcs_res));
919 iTCO_wdt_private.ACPIBASE = 0; 643out:
920} 644 iTCO_wdt_private.tco_res = NULL;
921 645 iTCO_wdt_private.smi_res = NULL;
922static int __devinit iTCO_wdt_probe(struct platform_device *dev) 646 iTCO_wdt_private.gcs_res = NULL;
923{ 647 iTCO_wdt_private.gcs = NULL;
924 int ret = -ENODEV;
925 int found = 0;
926 struct pci_dev *pdev = NULL;
927 const struct pci_device_id *ent;
928
929 spin_lock_init(&iTCO_wdt_private.io_lock);
930
931 for_each_pci_dev(pdev) {
932 ent = pci_match_id(iTCO_wdt_pci_tbl, pdev);
933 if (ent) {
934 found++;
935 ret = iTCO_wdt_init(pdev, ent, dev);
936 if (!ret)
937 break;
938 }
939 }
940
941 if (!found)
942 pr_info("No device detected\n");
943 648
944 return ret; 649 return ret;
945} 650}
946 651
947static int __devexit iTCO_wdt_remove(struct platform_device *dev) 652static int __devexit iTCO_wdt_remove(struct platform_device *dev)
948{ 653{
949 if (iTCO_wdt_private.ACPIBASE) 654 if (iTCO_wdt_private.tco_res || iTCO_wdt_private.smi_res)
950 iTCO_wdt_cleanup(); 655 iTCO_wdt_cleanup();
951 656
952 return 0; 657 return 0;
@@ -977,23 +682,11 @@ static int __init iTCO_wdt_init_module(void)
977 if (err) 682 if (err)
978 return err; 683 return err;
979 684
980 iTCO_wdt_platform_device = platform_device_register_simple(DRV_NAME,
981 -1, NULL, 0);
982 if (IS_ERR(iTCO_wdt_platform_device)) {
983 err = PTR_ERR(iTCO_wdt_platform_device);
984 goto unreg_platform_driver;
985 }
986
987 return 0; 685 return 0;
988
989unreg_platform_driver:
990 platform_driver_unregister(&iTCO_wdt_driver);
991 return err;
992} 686}
993 687
994static void __exit iTCO_wdt_cleanup_module(void) 688static void __exit iTCO_wdt_cleanup_module(void)
995{ 689{
996 platform_device_unregister(iTCO_wdt_platform_device);
997 platform_driver_unregister(&iTCO_wdt_driver); 690 platform_driver_unregister(&iTCO_wdt_driver);
998 pr_info("Watchdog Module Unloaded\n"); 691 pr_info("Watchdog Module Unloaded\n");
999} 692}
diff --git a/drivers/watchdog/imx2_wdt.c b/drivers/watchdog/imx2_wdt.c
index 7a2b734fcdc7..bcfab2b00ad2 100644
--- a/drivers/watchdog/imx2_wdt.c
+++ b/drivers/watchdog/imx2_wdt.c
@@ -121,7 +121,7 @@ static void imx2_wdt_start(void)
121{ 121{
122 if (!test_and_set_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) { 122 if (!test_and_set_bit(IMX2_WDT_STATUS_STARTED, &imx2_wdt.status)) {
123 /* at our first start we enable clock and do initialisations */ 123 /* at our first start we enable clock and do initialisations */
124 clk_enable(imx2_wdt.clk); 124 clk_prepare_enable(imx2_wdt.clk);
125 125
126 imx2_wdt_setup(); 126 imx2_wdt_setup();
127 } else /* delete the timer that pings the watchdog after close */ 127 } else /* delete the timer that pings the watchdog after close */
diff --git a/drivers/watchdog/lantiq_wdt.c b/drivers/watchdog/lantiq_wdt.c
index a9593a3a32a0..2e74c3a8ee58 100644
--- a/drivers/watchdog/lantiq_wdt.c
+++ b/drivers/watchdog/lantiq_wdt.c
@@ -13,14 +13,15 @@
13#include <linux/fs.h> 13#include <linux/fs.h>
14#include <linux/miscdevice.h> 14#include <linux/miscdevice.h>
15#include <linux/watchdog.h> 15#include <linux/watchdog.h>
16#include <linux/platform_device.h> 16#include <linux/of_platform.h>
17#include <linux/uaccess.h> 17#include <linux/uaccess.h>
18#include <linux/clk.h> 18#include <linux/clk.h>
19#include <linux/io.h> 19#include <linux/io.h>
20 20
21#include <lantiq.h> 21#include <lantiq_soc.h>
22 22
23/* Section 3.4 of the datasheet 23/*
24 * Section 3.4 of the datasheet
24 * The password sequence protects the WDT control register from unintended 25 * The password sequence protects the WDT control register from unintended
25 * write actions, which might cause malfunction of the WDT. 26 * write actions, which might cause malfunction of the WDT.
26 * 27 *
@@ -70,7 +71,8 @@ ltq_wdt_disable(void)
70{ 71{
71 /* write the first password magic */ 72 /* write the first password magic */
72 ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR); 73 ltq_w32(LTQ_WDT_PW1, ltq_wdt_membase + LTQ_WDT_CR);
73 /* write the second password magic with no config 74 /*
75 * write the second password magic with no config
74 * this turns the watchdog off 76 * this turns the watchdog off
75 */ 77 */
76 ltq_w32(LTQ_WDT_PW2, ltq_wdt_membase + LTQ_WDT_CR); 78 ltq_w32(LTQ_WDT_PW2, ltq_wdt_membase + LTQ_WDT_CR);
@@ -184,7 +186,7 @@ static struct miscdevice ltq_wdt_miscdev = {
184 .fops = &ltq_wdt_fops, 186 .fops = &ltq_wdt_fops,
185}; 187};
186 188
187static int __init 189static int __devinit
188ltq_wdt_probe(struct platform_device *pdev) 190ltq_wdt_probe(struct platform_device *pdev)
189{ 191{
190 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 192 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -194,28 +196,27 @@ ltq_wdt_probe(struct platform_device *pdev)
194 dev_err(&pdev->dev, "cannot obtain I/O memory region"); 196 dev_err(&pdev->dev, "cannot obtain I/O memory region");
195 return -ENOENT; 197 return -ENOENT;
196 } 198 }
197 res = devm_request_mem_region(&pdev->dev, res->start, 199
198 resource_size(res), dev_name(&pdev->dev)); 200 ltq_wdt_membase = devm_request_and_ioremap(&pdev->dev, res);
199 if (!res) {
200 dev_err(&pdev->dev, "cannot request I/O memory region");
201 return -EBUSY;
202 }
203 ltq_wdt_membase = devm_ioremap_nocache(&pdev->dev, res->start,
204 resource_size(res));
205 if (!ltq_wdt_membase) { 201 if (!ltq_wdt_membase) {
206 dev_err(&pdev->dev, "cannot remap I/O memory region\n"); 202 dev_err(&pdev->dev, "cannot remap I/O memory region\n");
207 return -ENOMEM; 203 return -ENOMEM;
208 } 204 }
209 205
210 /* we do not need to enable the clock as it is always running */ 206 /* we do not need to enable the clock as it is always running */
211 clk = clk_get(&pdev->dev, "io"); 207 clk = clk_get_io();
212 WARN_ON(!clk); 208 if (IS_ERR(clk)) {
209 dev_err(&pdev->dev, "Failed to get clock\n");
210 return -ENOENT;
211 }
213 ltq_io_region_clk_rate = clk_get_rate(clk); 212 ltq_io_region_clk_rate = clk_get_rate(clk);
214 clk_put(clk); 213 clk_put(clk);
215 214
215 /* find out if the watchdog caused the last reboot */
216 if (ltq_reset_cause() == LTQ_RST_CAUSE_WDTRST) 216 if (ltq_reset_cause() == LTQ_RST_CAUSE_WDTRST)
217 ltq_wdt_bootstatus = WDIOF_CARDRESET; 217 ltq_wdt_bootstatus = WDIOF_CARDRESET;
218 218
219 dev_info(&pdev->dev, "Init done\n");
219 return misc_register(&ltq_wdt_miscdev); 220 return misc_register(&ltq_wdt_miscdev);
220} 221}
221 222
@@ -227,33 +228,26 @@ ltq_wdt_remove(struct platform_device *pdev)
227 return 0; 228 return 0;
228} 229}
229 230
231static const struct of_device_id ltq_wdt_match[] = {
232 { .compatible = "lantiq,wdt" },
233 {},
234};
235MODULE_DEVICE_TABLE(of, ltq_wdt_match);
230 236
231static struct platform_driver ltq_wdt_driver = { 237static struct platform_driver ltq_wdt_driver = {
238 .probe = ltq_wdt_probe,
232 .remove = __devexit_p(ltq_wdt_remove), 239 .remove = __devexit_p(ltq_wdt_remove),
233 .driver = { 240 .driver = {
234 .name = "ltq_wdt", 241 .name = "wdt",
235 .owner = THIS_MODULE, 242 .owner = THIS_MODULE,
243 .of_match_table = ltq_wdt_match,
236 }, 244 },
237}; 245};
238 246
239static int __init 247module_platform_driver(ltq_wdt_driver);
240init_ltq_wdt(void)
241{
242 return platform_driver_probe(&ltq_wdt_driver, ltq_wdt_probe);
243}
244
245static void __exit
246exit_ltq_wdt(void)
247{
248 return platform_driver_unregister(&ltq_wdt_driver);
249}
250
251module_init(init_ltq_wdt);
252module_exit(exit_ltq_wdt);
253 248
254module_param(nowayout, bool, 0); 249module_param(nowayout, bool, 0);
255MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started"); 250MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started");
256
257MODULE_AUTHOR("John Crispin <blogic@openwrt.org>"); 251MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
258MODULE_DESCRIPTION("Lantiq SoC Watchdog"); 252MODULE_DESCRIPTION("Lantiq SoC Watchdog");
259MODULE_LICENSE("GPL"); 253MODULE_LICENSE("GPL");
diff --git a/drivers/watchdog/orion_wdt.c b/drivers/watchdog/orion_wdt.c
index 788aa158e78c..0f5736949c61 100644
--- a/drivers/watchdog/orion_wdt.c
+++ b/drivers/watchdog/orion_wdt.c
@@ -24,8 +24,8 @@
24#include <linux/uaccess.h> 24#include <linux/uaccess.h>
25#include <linux/io.h> 25#include <linux/io.h>
26#include <linux/spinlock.h> 26#include <linux/spinlock.h>
27#include <linux/clk.h>
27#include <mach/bridge-regs.h> 28#include <mach/bridge-regs.h>
28#include <plat/orion_wdt.h>
29 29
30/* 30/*
31 * Watchdog timer block registers. 31 * Watchdog timer block registers.
@@ -41,6 +41,7 @@
41static bool nowayout = WATCHDOG_NOWAYOUT; 41static bool nowayout = WATCHDOG_NOWAYOUT;
42static int heartbeat = -1; /* module parameter (seconds) */ 42static int heartbeat = -1; /* module parameter (seconds) */
43static unsigned int wdt_max_duration; /* (seconds) */ 43static unsigned int wdt_max_duration; /* (seconds) */
44static struct clk *clk;
44static unsigned int wdt_tclk; 45static unsigned int wdt_tclk;
45static void __iomem *wdt_reg; 46static void __iomem *wdt_reg;
46static unsigned long wdt_status; 47static unsigned long wdt_status;
@@ -237,16 +238,16 @@ static struct miscdevice orion_wdt_miscdev = {
237 238
238static int __devinit orion_wdt_probe(struct platform_device *pdev) 239static int __devinit orion_wdt_probe(struct platform_device *pdev)
239{ 240{
240 struct orion_wdt_platform_data *pdata = pdev->dev.platform_data;
241 struct resource *res; 241 struct resource *res;
242 int ret; 242 int ret;
243 243
244 if (pdata) { 244 clk = clk_get(&pdev->dev, NULL);
245 wdt_tclk = pdata->tclk; 245 if (IS_ERR(clk)) {
246 } else { 246 printk(KERN_ERR "Orion Watchdog missing clock\n");
247 pr_err("misses platform data\n");
248 return -ENODEV; 247 return -ENODEV;
249 } 248 }
249 clk_prepare_enable(clk);
250 wdt_tclk = clk_get_rate(clk);
250 251
251 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 252 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
252 253
@@ -282,6 +283,9 @@ static int __devexit orion_wdt_remove(struct platform_device *pdev)
282 if (!ret) 283 if (!ret)
283 orion_wdt_miscdev.parent = NULL; 284 orion_wdt_miscdev.parent = NULL;
284 285
286 clk_disable_unprepare(clk);
287 clk_put(clk);
288
285 return ret; 289 return ret;
286} 290}
287 291
diff --git a/drivers/xen/Makefile b/drivers/xen/Makefile
index 9adc5be57b13..fc3488631136 100644
--- a/drivers/xen/Makefile
+++ b/drivers/xen/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_XEN_SYS_HYPERVISOR) += sys-hypervisor.o
17obj-$(CONFIG_XEN_PVHVM) += platform-pci.o 17obj-$(CONFIG_XEN_PVHVM) += platform-pci.o
18obj-$(CONFIG_XEN_TMEM) += tmem.o 18obj-$(CONFIG_XEN_TMEM) += tmem.o
19obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o 19obj-$(CONFIG_SWIOTLB_XEN) += swiotlb-xen.o
20obj-$(CONFIG_XEN_DOM0) += pci.o 20obj-$(CONFIG_XEN_DOM0) += pci.o acpi.o
21obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/ 21obj-$(CONFIG_XEN_PCIDEV_BACKEND) += xen-pciback/
22obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o 22obj-$(CONFIG_XEN_PRIVCMD) += xen-privcmd.o
23obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o 23obj-$(CONFIG_XEN_ACPI_PROCESSOR) += xen-acpi-processor.o
diff --git a/drivers/xen/acpi.c b/drivers/xen/acpi.c
new file mode 100644
index 000000000000..119d42a2bf57
--- /dev/null
+++ b/drivers/xen/acpi.c
@@ -0,0 +1,62 @@
1/******************************************************************************
2 * acpi.c
3 * acpi file for domain 0 kernel
4 *
5 * Copyright (c) 2011 Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
6 * Copyright (c) 2011 Yu Ke ke.yu@intel.com
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32
33#include <xen/acpi.h>
34#include <xen/interface/platform.h>
35#include <asm/xen/hypercall.h>
36#include <asm/xen/hypervisor.h>
37
38int xen_acpi_notify_hypervisor_state(u8 sleep_state,
39 u32 pm1a_cnt, u32 pm1b_cnt)
40{
41 struct xen_platform_op op = {
42 .cmd = XENPF_enter_acpi_sleep,
43 .interface_version = XENPF_INTERFACE_VERSION,
44 .u = {
45 .enter_acpi_sleep = {
46 .pm1a_cnt_val = (u16)pm1a_cnt,
47 .pm1b_cnt_val = (u16)pm1b_cnt,
48 .sleep_state = sleep_state,
49 },
50 },
51 };
52
53 if ((pm1a_cnt & 0xffff0000) || (pm1b_cnt & 0xffff0000)) {
54 WARN(1, "Using more than 16bits of PM1A/B 0x%x/0x%x!"
55 "Email xen-devel@lists.xensource.com Thank you.\n", \
56 pm1a_cnt, pm1b_cnt);
57 return -1;
58 }
59
60 HYPERVISOR_dom0_op(&op);
61 return 1;
62}
diff --git a/drivers/xen/events.c b/drivers/xen/events.c
index 0a8a17cd80be..6908e4ce2a0d 100644
--- a/drivers/xen/events.c
+++ b/drivers/xen/events.c
@@ -611,7 +611,7 @@ static void disable_pirq(struct irq_data *data)
611 disable_dynirq(data); 611 disable_dynirq(data);
612} 612}
613 613
614static int find_irq_by_gsi(unsigned gsi) 614int xen_irq_from_gsi(unsigned gsi)
615{ 615{
616 struct irq_info *info; 616 struct irq_info *info;
617 617
@@ -625,6 +625,7 @@ static int find_irq_by_gsi(unsigned gsi)
625 625
626 return -1; 626 return -1;
627} 627}
628EXPORT_SYMBOL_GPL(xen_irq_from_gsi);
628 629
629/* 630/*
630 * Do not make any assumptions regarding the relationship between the 631 * Do not make any assumptions regarding the relationship between the
@@ -644,7 +645,7 @@ int xen_bind_pirq_gsi_to_irq(unsigned gsi,
644 645
645 mutex_lock(&irq_mapping_update_lock); 646 mutex_lock(&irq_mapping_update_lock);
646 647
647 irq = find_irq_by_gsi(gsi); 648 irq = xen_irq_from_gsi(gsi);
648 if (irq != -1) { 649 if (irq != -1) {
649 printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n", 650 printk(KERN_INFO "xen_map_pirq_gsi: returning irq %d for gsi %u\n",
650 irq, gsi); 651 irq, gsi);
diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c
index f100ce20b16b..0bfc1ef11259 100644
--- a/drivers/xen/grant-table.c
+++ b/drivers/xen/grant-table.c
@@ -38,6 +38,7 @@
38#include <linux/vmalloc.h> 38#include <linux/vmalloc.h>
39#include <linux/uaccess.h> 39#include <linux/uaccess.h>
40#include <linux/io.h> 40#include <linux/io.h>
41#include <linux/hardirq.h>
41 42
42#include <xen/xen.h> 43#include <xen/xen.h>
43#include <xen/interface/xen.h> 44#include <xen/interface/xen.h>
@@ -426,10 +427,8 @@ static int gnttab_end_foreign_access_ref_v1(grant_ref_t ref, int readonly)
426 nflags = *pflags; 427 nflags = *pflags;
427 do { 428 do {
428 flags = nflags; 429 flags = nflags;
429 if (flags & (GTF_reading|GTF_writing)) { 430 if (flags & (GTF_reading|GTF_writing))
430 printk(KERN_ALERT "WARNING: g.e. still in use!\n");
431 return 0; 431 return 0;
432 }
433 } while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags); 432 } while ((nflags = sync_cmpxchg(pflags, flags, 0)) != flags);
434 433
435 return 1; 434 return 1;
@@ -458,12 +457,103 @@ static int gnttab_end_foreign_access_ref_v2(grant_ref_t ref, int readonly)
458 return 1; 457 return 1;
459} 458}
460 459
461int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly) 460static inline int _gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
462{ 461{
463 return gnttab_interface->end_foreign_access_ref(ref, readonly); 462 return gnttab_interface->end_foreign_access_ref(ref, readonly);
464} 463}
464
465int gnttab_end_foreign_access_ref(grant_ref_t ref, int readonly)
466{
467 if (_gnttab_end_foreign_access_ref(ref, readonly))
468 return 1;
469 pr_warn("WARNING: g.e. %#x still in use!\n", ref);
470 return 0;
471}
465EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref); 472EXPORT_SYMBOL_GPL(gnttab_end_foreign_access_ref);
466 473
474struct deferred_entry {
475 struct list_head list;
476 grant_ref_t ref;
477 bool ro;
478 uint16_t warn_delay;
479 struct page *page;
480};
481static LIST_HEAD(deferred_list);
482static void gnttab_handle_deferred(unsigned long);
483static DEFINE_TIMER(deferred_timer, gnttab_handle_deferred, 0, 0);
484
485static void gnttab_handle_deferred(unsigned long unused)
486{
487 unsigned int nr = 10;
488 struct deferred_entry *first = NULL;
489 unsigned long flags;
490
491 spin_lock_irqsave(&gnttab_list_lock, flags);
492 while (nr--) {
493 struct deferred_entry *entry
494 = list_first_entry(&deferred_list,
495 struct deferred_entry, list);
496
497 if (entry == first)
498 break;
499 list_del(&entry->list);
500 spin_unlock_irqrestore(&gnttab_list_lock, flags);
501 if (_gnttab_end_foreign_access_ref(entry->ref, entry->ro)) {
502 put_free_entry(entry->ref);
503 if (entry->page) {
504 pr_debug("freeing g.e. %#x (pfn %#lx)\n",
505 entry->ref, page_to_pfn(entry->page));
506 __free_page(entry->page);
507 } else
508 pr_info("freeing g.e. %#x\n", entry->ref);
509 kfree(entry);
510 entry = NULL;
511 } else {
512 if (!--entry->warn_delay)
513 pr_info("g.e. %#x still pending\n",
514 entry->ref);
515 if (!first)
516 first = entry;
517 }
518 spin_lock_irqsave(&gnttab_list_lock, flags);
519 if (entry)
520 list_add_tail(&entry->list, &deferred_list);
521 else if (list_empty(&deferred_list))
522 break;
523 }
524 if (!list_empty(&deferred_list) && !timer_pending(&deferred_timer)) {
525 deferred_timer.expires = jiffies + HZ;
526 add_timer(&deferred_timer);
527 }
528 spin_unlock_irqrestore(&gnttab_list_lock, flags);
529}
530
531static void gnttab_add_deferred(grant_ref_t ref, bool readonly,
532 struct page *page)
533{
534 struct deferred_entry *entry = kmalloc(sizeof(*entry), GFP_ATOMIC);
535 const char *what = KERN_WARNING "leaking";
536
537 if (entry) {
538 unsigned long flags;
539
540 entry->ref = ref;
541 entry->ro = readonly;
542 entry->page = page;
543 entry->warn_delay = 60;
544 spin_lock_irqsave(&gnttab_list_lock, flags);
545 list_add_tail(&entry->list, &deferred_list);
546 if (!timer_pending(&deferred_timer)) {
547 deferred_timer.expires = jiffies + HZ;
548 add_timer(&deferred_timer);
549 }
550 spin_unlock_irqrestore(&gnttab_list_lock, flags);
551 what = KERN_DEBUG "deferring";
552 }
553 printk("%s g.e. %#x (pfn %#lx)\n",
554 what, ref, page ? page_to_pfn(page) : -1);
555}
556
467void gnttab_end_foreign_access(grant_ref_t ref, int readonly, 557void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
468 unsigned long page) 558 unsigned long page)
469{ 559{
@@ -471,12 +561,9 @@ void gnttab_end_foreign_access(grant_ref_t ref, int readonly,
471 put_free_entry(ref); 561 put_free_entry(ref);
472 if (page != 0) 562 if (page != 0)
473 free_page(page); 563 free_page(page);
474 } else { 564 } else
475 /* XXX This needs to be fixed so that the ref and page are 565 gnttab_add_deferred(ref, readonly,
476 placed on a list to be freed up later. */ 566 page ? virt_to_page(page) : NULL);
477 printk(KERN_WARNING
478 "WARNING: leaking g.e. and page still in use!\n");
479 }
480} 567}
481EXPORT_SYMBOL_GPL(gnttab_end_foreign_access); 568EXPORT_SYMBOL_GPL(gnttab_end_foreign_access);
482 569
@@ -741,6 +828,7 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
741 struct page **pages, unsigned int count) 828 struct page **pages, unsigned int count)
742{ 829{
743 int i, ret; 830 int i, ret;
831 bool lazy = false;
744 pte_t *pte; 832 pte_t *pte;
745 unsigned long mfn; 833 unsigned long mfn;
746 834
@@ -751,6 +839,11 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
751 if (xen_feature(XENFEAT_auto_translated_physmap)) 839 if (xen_feature(XENFEAT_auto_translated_physmap))
752 return ret; 840 return ret;
753 841
842 if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
843 arch_enter_lazy_mmu_mode();
844 lazy = true;
845 }
846
754 for (i = 0; i < count; i++) { 847 for (i = 0; i < count; i++) {
755 /* Do not add to override if the map failed. */ 848 /* Do not add to override if the map failed. */
756 if (map_ops[i].status) 849 if (map_ops[i].status)
@@ -769,6 +862,9 @@ int gnttab_map_refs(struct gnttab_map_grant_ref *map_ops,
769 return ret; 862 return ret;
770 } 863 }
771 864
865 if (lazy)
866 arch_leave_lazy_mmu_mode();
867
772 return ret; 868 return ret;
773} 869}
774EXPORT_SYMBOL_GPL(gnttab_map_refs); 870EXPORT_SYMBOL_GPL(gnttab_map_refs);
@@ -777,6 +873,7 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
777 struct page **pages, unsigned int count, bool clear_pte) 873 struct page **pages, unsigned int count, bool clear_pte)
778{ 874{
779 int i, ret; 875 int i, ret;
876 bool lazy = false;
780 877
781 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count); 878 ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, unmap_ops, count);
782 if (ret) 879 if (ret)
@@ -785,12 +882,20 @@ int gnttab_unmap_refs(struct gnttab_unmap_grant_ref *unmap_ops,
785 if (xen_feature(XENFEAT_auto_translated_physmap)) 882 if (xen_feature(XENFEAT_auto_translated_physmap))
786 return ret; 883 return ret;
787 884
885 if (!in_interrupt() && paravirt_get_lazy_mode() == PARAVIRT_LAZY_NONE) {
886 arch_enter_lazy_mmu_mode();
887 lazy = true;
888 }
889
788 for (i = 0; i < count; i++) { 890 for (i = 0; i < count; i++) {
789 ret = m2p_remove_override(pages[i], clear_pte); 891 ret = m2p_remove_override(pages[i], clear_pte);
790 if (ret) 892 if (ret)
791 return ret; 893 return ret;
792 } 894 }
793 895
896 if (lazy)
897 arch_leave_lazy_mmu_mode();
898
794 return ret; 899 return ret;
795} 900}
796EXPORT_SYMBOL_GPL(gnttab_unmap_refs); 901EXPORT_SYMBOL_GPL(gnttab_unmap_refs);
diff --git a/drivers/xen/xen-acpi-processor.c b/drivers/xen/xen-acpi-processor.c
index 0b48579a9cd6..7ff2569e17ae 100644
--- a/drivers/xen/xen-acpi-processor.c
+++ b/drivers/xen/xen-acpi-processor.c
@@ -29,6 +29,7 @@
29#include <acpi/acpi_drivers.h> 29#include <acpi/acpi_drivers.h>
30#include <acpi/processor.h> 30#include <acpi/processor.h>
31 31
32#include <xen/xen.h>
32#include <xen/interface/platform.h> 33#include <xen/interface/platform.h>
33#include <asm/xen/hypercall.h> 34#include <asm/xen/hypercall.h>
34 35
diff --git a/drivers/xen/xen-selfballoon.c b/drivers/xen/xen-selfballoon.c
index 146c94897016..7d041cb6da26 100644
--- a/drivers/xen/xen-selfballoon.c
+++ b/drivers/xen/xen-selfballoon.c
@@ -105,6 +105,12 @@ static unsigned int selfballoon_interval __read_mostly = 5;
105 */ 105 */
106static unsigned int selfballoon_min_usable_mb; 106static unsigned int selfballoon_min_usable_mb;
107 107
108/*
109 * Amount of RAM in MB to add to the target number of pages.
110 * Can be used to reserve some more room for caches and the like.
111 */
112static unsigned int selfballoon_reserved_mb;
113
108static void selfballoon_process(struct work_struct *work); 114static void selfballoon_process(struct work_struct *work);
109static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process); 115static DECLARE_DELAYED_WORK(selfballoon_worker, selfballoon_process);
110 116
@@ -217,7 +223,8 @@ static void selfballoon_process(struct work_struct *work)
217 cur_pages = totalram_pages; 223 cur_pages = totalram_pages;
218 tgt_pages = cur_pages; /* default is no change */ 224 tgt_pages = cur_pages; /* default is no change */
219 goal_pages = percpu_counter_read_positive(&vm_committed_as) + 225 goal_pages = percpu_counter_read_positive(&vm_committed_as) +
220 totalreserve_pages; 226 totalreserve_pages +
227 MB2PAGES(selfballoon_reserved_mb);
221#ifdef CONFIG_FRONTSWAP 228#ifdef CONFIG_FRONTSWAP
222 /* allow space for frontswap pages to be repatriated */ 229 /* allow space for frontswap pages to be repatriated */
223 if (frontswap_selfshrinking && frontswap_enabled) 230 if (frontswap_selfshrinking && frontswap_enabled)
@@ -397,6 +404,30 @@ static DEVICE_ATTR(selfballoon_min_usable_mb, S_IRUGO | S_IWUSR,
397 show_selfballoon_min_usable_mb, 404 show_selfballoon_min_usable_mb,
398 store_selfballoon_min_usable_mb); 405 store_selfballoon_min_usable_mb);
399 406
407SELFBALLOON_SHOW(selfballoon_reserved_mb, "%d\n",
408 selfballoon_reserved_mb);
409
410static ssize_t store_selfballoon_reserved_mb(struct device *dev,
411 struct device_attribute *attr,
412 const char *buf,
413 size_t count)
414{
415 unsigned long val;
416 int err;
417
418 if (!capable(CAP_SYS_ADMIN))
419 return -EPERM;
420 err = strict_strtoul(buf, 10, &val);
421 if (err || val == 0)
422 return -EINVAL;
423 selfballoon_reserved_mb = val;
424 return count;
425}
426
427static DEVICE_ATTR(selfballoon_reserved_mb, S_IRUGO | S_IWUSR,
428 show_selfballoon_reserved_mb,
429 store_selfballoon_reserved_mb);
430
400 431
401#ifdef CONFIG_FRONTSWAP 432#ifdef CONFIG_FRONTSWAP
402SELFBALLOON_SHOW(frontswap_selfshrinking, "%d\n", frontswap_selfshrinking); 433SELFBALLOON_SHOW(frontswap_selfshrinking, "%d\n", frontswap_selfshrinking);
@@ -480,6 +511,7 @@ static struct attribute *selfballoon_attrs[] = {
480 &dev_attr_selfballoon_downhysteresis.attr, 511 &dev_attr_selfballoon_downhysteresis.attr,
481 &dev_attr_selfballoon_uphysteresis.attr, 512 &dev_attr_selfballoon_uphysteresis.attr,
482 &dev_attr_selfballoon_min_usable_mb.attr, 513 &dev_attr_selfballoon_min_usable_mb.attr,
514 &dev_attr_selfballoon_reserved_mb.attr,
483#ifdef CONFIG_FRONTSWAP 515#ifdef CONFIG_FRONTSWAP
484 &dev_attr_frontswap_selfshrinking.attr, 516 &dev_attr_frontswap_selfshrinking.attr,
485 &dev_attr_frontswap_hysteresis.attr, 517 &dev_attr_frontswap_hysteresis.attr,
diff --git a/drivers/xen/xenbus/xenbus_comms.c b/drivers/xen/xenbus/xenbus_comms.c
index 2eff7a6aaa20..52fe7ad07666 100644
--- a/drivers/xen/xenbus/xenbus_comms.c
+++ b/drivers/xen/xenbus/xenbus_comms.c
@@ -234,3 +234,9 @@ int xb_init_comms(void)
234 234
235 return 0; 235 return 0;
236} 236}
237
238void xb_deinit_comms(void)
239{
240 unbind_from_irqhandler(xenbus_irq, &xb_waitq);
241 xenbus_irq = 0;
242}
diff --git a/drivers/xen/xenbus/xenbus_comms.h b/drivers/xen/xenbus/xenbus_comms.h
index 6e42800fa499..c8abd3b8a6c4 100644
--- a/drivers/xen/xenbus/xenbus_comms.h
+++ b/drivers/xen/xenbus/xenbus_comms.h
@@ -35,6 +35,7 @@
35 35
36int xs_init(void); 36int xs_init(void);
37int xb_init_comms(void); 37int xb_init_comms(void);
38void xb_deinit_comms(void);
38 39
39/* Low level routines. */ 40/* Low level routines. */
40int xb_write(const void *data, unsigned len); 41int xb_write(const void *data, unsigned len);
diff --git a/drivers/xen/xenbus/xenbus_dev_backend.c b/drivers/xen/xenbus/xenbus_dev_backend.c
index 3d3be78c1093..be738c43104b 100644
--- a/drivers/xen/xenbus/xenbus_dev_backend.c
+++ b/drivers/xen/xenbus/xenbus_dev_backend.c
@@ -8,7 +8,11 @@
8 8
9#include <xen/xen.h> 9#include <xen/xen.h>
10#include <xen/page.h> 10#include <xen/page.h>
11#include <xen/xenbus.h>
11#include <xen/xenbus_dev.h> 12#include <xen/xenbus_dev.h>
13#include <xen/grant_table.h>
14#include <xen/events.h>
15#include <asm/xen/hypervisor.h>
12 16
13#include "xenbus_comms.h" 17#include "xenbus_comms.h"
14 18
@@ -22,6 +26,50 @@ static int xenbus_backend_open(struct inode *inode, struct file *filp)
22 return nonseekable_open(inode, filp); 26 return nonseekable_open(inode, filp);
23} 27}
24 28
29static long xenbus_alloc(domid_t domid)
30{
31 struct evtchn_alloc_unbound arg;
32 int err = -EEXIST;
33
34 xs_suspend();
35
36 /* If xenstored_ready is nonzero, that means we have already talked to
37 * xenstore and set up watches. These watches will be restored by
38 * xs_resume, but that requires communication over the port established
39 * below that is not visible to anyone until the ioctl returns.
40 *
41 * This can be resolved by splitting the ioctl into two parts
42 * (postponing the resume until xenstored is active) but this is
43 * unnecessarily complex for the intended use where xenstored is only
44 * started once - so return -EEXIST if it's already running.
45 */
46 if (xenstored_ready)
47 goto out_err;
48
49 gnttab_grant_foreign_access_ref(GNTTAB_RESERVED_XENSTORE, domid,
50 virt_to_mfn(xen_store_interface), 0 /* writable */);
51
52 arg.dom = DOMID_SELF;
53 arg.remote_dom = domid;
54
55 err = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &arg);
56 if (err)
57 goto out_err;
58
59 if (xen_store_evtchn > 0)
60 xb_deinit_comms();
61
62 xen_store_evtchn = arg.port;
63
64 xs_resume();
65
66 return arg.port;
67
68 out_err:
69 xs_suspend_cancel();
70 return err;
71}
72
25static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data) 73static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned long data)
26{ 74{
27 if (!capable(CAP_SYS_ADMIN)) 75 if (!capable(CAP_SYS_ADMIN))
@@ -33,6 +81,9 @@ static long xenbus_backend_ioctl(struct file *file, unsigned int cmd, unsigned l
33 return xen_store_evtchn; 81 return xen_store_evtchn;
34 return -ENODEV; 82 return -ENODEV;
35 83
84 case IOCTL_XENBUS_BACKEND_SETUP:
85 return xenbus_alloc(data);
86
36 default: 87 default:
37 return -ENOTTY; 88 return -ENOTTY;
38 } 89 }
diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
index 014c8dd62962..57ccb7537dae 100644
--- a/fs/9p/vfs_inode.c
+++ b/fs/9p/vfs_inode.c
@@ -448,7 +448,7 @@ void v9fs_evict_inode(struct inode *inode)
448 struct v9fs_inode *v9inode = V9FS_I(inode); 448 struct v9fs_inode *v9inode = V9FS_I(inode);
449 449
450 truncate_inode_pages(inode->i_mapping, 0); 450 truncate_inode_pages(inode->i_mapping, 0);
451 end_writeback(inode); 451 clear_inode(inode);
452 filemap_fdatawrite(inode->i_mapping); 452 filemap_fdatawrite(inode->i_mapping);
453 453
454#ifdef CONFIG_9P_FSCACHE 454#ifdef CONFIG_9P_FSCACHE
diff --git a/fs/affs/inode.c b/fs/affs/inode.c
index 88a4b0b50058..8bc4a59f4e7e 100644
--- a/fs/affs/inode.c
+++ b/fs/affs/inode.c
@@ -264,7 +264,7 @@ affs_evict_inode(struct inode *inode)
264 } 264 }
265 265
266 invalidate_inode_buffers(inode); 266 invalidate_inode_buffers(inode);
267 end_writeback(inode); 267 clear_inode(inode);
268 affs_free_prealloc(inode); 268 affs_free_prealloc(inode);
269 cache_page = (unsigned long)AFFS_I(inode)->i_lc; 269 cache_page = (unsigned long)AFFS_I(inode)->i_lc;
270 if (cache_page) { 270 if (cache_page) {
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index d890ae3b2ce6..95cffd38239f 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -423,7 +423,7 @@ void afs_evict_inode(struct inode *inode)
423 ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode); 423 ASSERTCMP(inode->i_ino, ==, vnode->fid.vnode);
424 424
425 truncate_inode_pages(&inode->i_data, 0); 425 truncate_inode_pages(&inode->i_data, 0);
426 end_writeback(inode); 426 clear_inode(inode);
427 427
428 afs_give_up_callback(vnode); 428 afs_give_up_callback(vnode);
429 429
diff --git a/fs/autofs4/inode.c b/fs/autofs4/inode.c
index 6e488ebe7784..8a4fed8ead30 100644
--- a/fs/autofs4/inode.c
+++ b/fs/autofs4/inode.c
@@ -100,7 +100,7 @@ static int autofs4_show_options(struct seq_file *m, struct dentry *root)
100 100
101static void autofs4_evict_inode(struct inode *inode) 101static void autofs4_evict_inode(struct inode *inode)
102{ 102{
103 end_writeback(inode); 103 clear_inode(inode);
104 kfree(inode->i_private); 104 kfree(inode->i_private);
105} 105}
106 106
diff --git a/fs/bad_inode.c b/fs/bad_inode.c
index 37268c5bb98b..1b35d6bd06b0 100644
--- a/fs/bad_inode.c
+++ b/fs/bad_inode.c
@@ -292,7 +292,6 @@ static const struct inode_operations bad_inode_ops =
292 .getxattr = bad_inode_getxattr, 292 .getxattr = bad_inode_getxattr,
293 .listxattr = bad_inode_listxattr, 293 .listxattr = bad_inode_listxattr,
294 .removexattr = bad_inode_removexattr, 294 .removexattr = bad_inode_removexattr,
295 /* truncate_range returns void */
296}; 295};
297 296
298 297
diff --git a/fs/bfs/inode.c b/fs/bfs/inode.c
index e23dc7c8b884..9870417c26e7 100644
--- a/fs/bfs/inode.c
+++ b/fs/bfs/inode.c
@@ -174,7 +174,7 @@ static void bfs_evict_inode(struct inode *inode)
174 174
175 truncate_inode_pages(&inode->i_data, 0); 175 truncate_inode_pages(&inode->i_data, 0);
176 invalidate_inode_buffers(inode); 176 invalidate_inode_buffers(inode);
177 end_writeback(inode); 177 clear_inode(inode);
178 178
179 if (inode->i_nlink) 179 if (inode->i_nlink)
180 return; 180 return;
diff --git a/fs/binfmt_misc.c b/fs/binfmt_misc.c
index 613aa0618235..790b3cddca67 100644
--- a/fs/binfmt_misc.c
+++ b/fs/binfmt_misc.c
@@ -505,7 +505,7 @@ static struct inode *bm_get_inode(struct super_block *sb, int mode)
505 505
506static void bm_evict_inode(struct inode *inode) 506static void bm_evict_inode(struct inode *inode)
507{ 507{
508 end_writeback(inode); 508 clear_inode(inode);
509 kfree(inode->i_private); 509 kfree(inode->i_private);
510} 510}
511 511
diff --git a/fs/block_dev.c b/fs/block_dev.c
index ba11c30f302d..c2bbe1fb1326 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -487,7 +487,7 @@ static void bdev_evict_inode(struct inode *inode)
487 struct list_head *p; 487 struct list_head *p;
488 truncate_inode_pages(&inode->i_data, 0); 488 truncate_inode_pages(&inode->i_data, 0);
489 invalidate_inode_buffers(inode); /* is it needed here? */ 489 invalidate_inode_buffers(inode); /* is it needed here? */
490 end_writeback(inode); 490 clear_inode(inode);
491 spin_lock(&bdev_lock); 491 spin_lock(&bdev_lock);
492 while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) { 492 while ( (p = bdev->bd_inodes.next) != &bdev->bd_inodes ) {
493 __bd_forget(list_entry(p, struct inode, i_devices)); 493 __bd_forget(list_entry(p, struct inode, i_devices));
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index 61b16c641ce0..ceb7b9c9edcc 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -3756,7 +3756,7 @@ void btrfs_evict_inode(struct inode *inode)
3756 btrfs_end_transaction(trans, root); 3756 btrfs_end_transaction(trans, root);
3757 btrfs_btree_balance_dirty(root, nr); 3757 btrfs_btree_balance_dirty(root, nr);
3758no_delete: 3758no_delete:
3759 end_writeback(inode); 3759 clear_inode(inode);
3760 return; 3760 return;
3761} 3761}
3762 3762
diff --git a/fs/cifs/Kconfig b/fs/cifs/Kconfig
index 2b243af70aa3..a08306a8bec9 100644
--- a/fs/cifs/Kconfig
+++ b/fs/cifs/Kconfig
@@ -158,3 +158,23 @@ config CIFS_NFSD_EXPORT
158 depends on CIFS && EXPERIMENTAL && BROKEN 158 depends on CIFS && EXPERIMENTAL && BROKEN
159 help 159 help
160 Allows NFS server to export a CIFS mounted share (nfsd over cifs) 160 Allows NFS server to export a CIFS mounted share (nfsd over cifs)
161
162config CIFS_SMB2
163 bool "SMB2 network file system support (EXPERIMENTAL)"
164 depends on EXPERIMENTAL && INET && BROKEN
165 select NLS
166 select KEYS
167 select FSCACHE
168 select DNS_RESOLVER
169
170 help
171 This enables experimental support for the SMB2 (Server Message Block
172 version 2) protocol. The SMB2 protocol is the successor to the
173 popular CIFS and SMB network file sharing protocols. SMB2 is the
174 native file sharing mechanism for recent versions of Windows
175 operating systems (since Vista). SMB2 enablement will eventually
176 allow users better performance, security and features, than would be
177 possible with cifs. Note that smb2 mount options also are simpler
178 (compared to cifs) due to protocol improvements.
179
180 Unless you are a developer or tester, say N.
diff --git a/fs/cifs/Makefile b/fs/cifs/Makefile
index 005d524c3a4a..4b4127544349 100644
--- a/fs/cifs/Makefile
+++ b/fs/cifs/Makefile
@@ -6,7 +6,7 @@ obj-$(CONFIG_CIFS) += cifs.o
6cifs-y := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o \ 6cifs-y := cifsfs.o cifssmb.o cifs_debug.o connect.o dir.o file.o inode.o \
7 link.o misc.o netmisc.o smbencrypt.o transport.o asn1.o \ 7 link.o misc.o netmisc.o smbencrypt.o transport.o asn1.o \
8 cifs_unicode.o nterr.o xattr.o cifsencrypt.o \ 8 cifs_unicode.o nterr.o xattr.o cifsencrypt.o \
9 readdir.o ioctl.o sess.o export.o 9 readdir.o ioctl.o sess.o export.o smb1ops.o
10 10
11cifs-$(CONFIG_CIFS_ACL) += cifsacl.o 11cifs-$(CONFIG_CIFS_ACL) += cifsacl.o
12 12
@@ -15,3 +15,5 @@ cifs-$(CONFIG_CIFS_UPCALL) += cifs_spnego.o
15cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o 15cifs-$(CONFIG_CIFS_DFS_UPCALL) += dns_resolve.o cifs_dfs_ref.o
16 16
17cifs-$(CONFIG_CIFS_FSCACHE) += fscache.o cache.o 17cifs-$(CONFIG_CIFS_FSCACHE) += fscache.o cache.o
18
19cifs-$(CONFIG_CIFS_SMB2) += smb2ops.o
diff --git a/fs/cifs/README b/fs/cifs/README
index b7d782bab797..22ab7b5b8da7 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -608,11 +608,6 @@ Stats Lists summary resource usage information as well as per
608 in the kernel configuration. 608 in the kernel configuration.
609 609
610Configuration pseudo-files: 610Configuration pseudo-files:
611MultiuserMount If set to one, more than one CIFS session to
612 the same server ip address can be established
613 if more than one uid accesses the same mount
614 point and if the uids user/password mapping
615 information is available. (default is 0)
616PacketSigningEnabled If set to one, cifs packet signing is enabled 611PacketSigningEnabled If set to one, cifs packet signing is enabled
617 and will be used if the server requires 612 and will be used if the server requires
618 it. If set to two, cifs packet signing is 613 it. If set to two, cifs packet signing is
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 270464629416..e8140528ca5c 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -57,19 +57,21 @@ cifs_dump_mem(char *label, void *data, int length)
57 } 57 }
58} 58}
59 59
60#ifdef CONFIG_CIFS_DEBUG2
61void cifs_dump_detail(void *buf) 60void cifs_dump_detail(void *buf)
62{ 61{
62#ifdef CONFIG_CIFS_DEBUG2
63 struct smb_hdr *smb = (struct smb_hdr *)buf; 63 struct smb_hdr *smb = (struct smb_hdr *)buf;
64 64
65 cERROR(1, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d", 65 cERROR(1, "Cmd: %d Err: 0x%x Flags: 0x%x Flgs2: 0x%x Mid: %d Pid: %d",
66 smb->Command, smb->Status.CifsError, 66 smb->Command, smb->Status.CifsError,
67 smb->Flags, smb->Flags2, smb->Mid, smb->Pid); 67 smb->Flags, smb->Flags2, smb->Mid, smb->Pid);
68 cERROR(1, "smb buf %p len %d", smb, smbCalcSize(smb)); 68 cERROR(1, "smb buf %p len %d", smb, smbCalcSize(smb));
69#endif /* CONFIG_CIFS_DEBUG2 */
69} 70}
70 71
71void cifs_dump_mids(struct TCP_Server_Info *server) 72void cifs_dump_mids(struct TCP_Server_Info *server)
72{ 73{
74#ifdef CONFIG_CIFS_DEBUG2
73 struct list_head *tmp; 75 struct list_head *tmp;
74 struct mid_q_entry *mid_entry; 76 struct mid_q_entry *mid_entry;
75 77
@@ -102,8 +104,8 @@ void cifs_dump_mids(struct TCP_Server_Info *server)
102 } 104 }
103 } 105 }
104 spin_unlock(&GlobalMid_Lock); 106 spin_unlock(&GlobalMid_Lock);
105}
106#endif /* CONFIG_CIFS_DEBUG2 */ 107#endif /* CONFIG_CIFS_DEBUG2 */
108}
107 109
108#ifdef CONFIG_PROC_FS 110#ifdef CONFIG_PROC_FS
109static int cifs_debug_data_proc_show(struct seq_file *m, void *v) 111static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
@@ -420,7 +422,6 @@ static struct proc_dir_entry *proc_fs_cifs;
420static const struct file_operations cifsFYI_proc_fops; 422static const struct file_operations cifsFYI_proc_fops;
421static const struct file_operations cifs_lookup_cache_proc_fops; 423static const struct file_operations cifs_lookup_cache_proc_fops;
422static const struct file_operations traceSMB_proc_fops; 424static const struct file_operations traceSMB_proc_fops;
423static const struct file_operations cifs_multiuser_mount_proc_fops;
424static const struct file_operations cifs_security_flags_proc_fops; 425static const struct file_operations cifs_security_flags_proc_fops;
425static const struct file_operations cifs_linux_ext_proc_fops; 426static const struct file_operations cifs_linux_ext_proc_fops;
426 427
@@ -440,8 +441,6 @@ cifs_proc_init(void)
440 proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops); 441 proc_create("traceSMB", 0, proc_fs_cifs, &traceSMB_proc_fops);
441 proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs, 442 proc_create("LinuxExtensionsEnabled", 0, proc_fs_cifs,
442 &cifs_linux_ext_proc_fops); 443 &cifs_linux_ext_proc_fops);
443 proc_create("MultiuserMount", 0, proc_fs_cifs,
444 &cifs_multiuser_mount_proc_fops);
445 proc_create("SecurityFlags", 0, proc_fs_cifs, 444 proc_create("SecurityFlags", 0, proc_fs_cifs,
446 &cifs_security_flags_proc_fops); 445 &cifs_security_flags_proc_fops);
447 proc_create("LookupCacheEnabled", 0, proc_fs_cifs, 446 proc_create("LookupCacheEnabled", 0, proc_fs_cifs,
@@ -460,7 +459,6 @@ cifs_proc_clean(void)
460#ifdef CONFIG_CIFS_STATS 459#ifdef CONFIG_CIFS_STATS
461 remove_proc_entry("Stats", proc_fs_cifs); 460 remove_proc_entry("Stats", proc_fs_cifs);
462#endif 461#endif
463 remove_proc_entry("MultiuserMount", proc_fs_cifs);
464 remove_proc_entry("SecurityFlags", proc_fs_cifs); 462 remove_proc_entry("SecurityFlags", proc_fs_cifs);
465 remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs); 463 remove_proc_entry("LinuxExtensionsEnabled", proc_fs_cifs);
466 remove_proc_entry("LookupCacheEnabled", proc_fs_cifs); 464 remove_proc_entry("LookupCacheEnabled", proc_fs_cifs);
@@ -617,52 +615,6 @@ static const struct file_operations traceSMB_proc_fops = {
617 .write = traceSMB_proc_write, 615 .write = traceSMB_proc_write,
618}; 616};
619 617
620static int cifs_multiuser_mount_proc_show(struct seq_file *m, void *v)
621{
622 seq_printf(m, "%d\n", multiuser_mount);
623 return 0;
624}
625
626static int cifs_multiuser_mount_proc_open(struct inode *inode, struct file *fh)
627{
628 return single_open(fh, cifs_multiuser_mount_proc_show, NULL);
629}
630
631static ssize_t cifs_multiuser_mount_proc_write(struct file *file,
632 const char __user *buffer, size_t count, loff_t *ppos)
633{
634 char c;
635 int rc;
636 static bool warned;
637
638 rc = get_user(c, buffer);
639 if (rc)
640 return rc;
641 if (c == '0' || c == 'n' || c == 'N')
642 multiuser_mount = 0;
643 else if (c == '1' || c == 'y' || c == 'Y') {
644 multiuser_mount = 1;
645 if (!warned) {
646 warned = true;
647 printk(KERN_WARNING "CIFS VFS: The legacy multiuser "
648 "mount code is scheduled to be deprecated in "
649 "3.5. Please switch to using the multiuser "
650 "mount option.");
651 }
652 }
653
654 return count;
655}
656
657static const struct file_operations cifs_multiuser_mount_proc_fops = {
658 .owner = THIS_MODULE,
659 .open = cifs_multiuser_mount_proc_open,
660 .read = seq_read,
661 .llseek = seq_lseek,
662 .release = single_release,
663 .write = cifs_multiuser_mount_proc_write,
664};
665
666static int cifs_security_flags_proc_show(struct seq_file *m, void *v) 618static int cifs_security_flags_proc_show(struct seq_file *m, void *v)
667{ 619{
668 seq_printf(m, "0x%x\n", global_secflags); 620 seq_printf(m, "0x%x\n", global_secflags);
diff --git a/fs/cifs/cifs_debug.h b/fs/cifs/cifs_debug.h
index 566e0ae8dc2c..c0c68bb492d7 100644
--- a/fs/cifs/cifs_debug.h
+++ b/fs/cifs/cifs_debug.h
@@ -24,10 +24,10 @@
24#define _H_CIFS_DEBUG 24#define _H_CIFS_DEBUG
25 25
26void cifs_dump_mem(char *label, void *data, int length); 26void cifs_dump_mem(char *label, void *data, int length);
27#ifdef CONFIG_CIFS_DEBUG2
28#define DBG2 2
29void cifs_dump_detail(void *); 27void cifs_dump_detail(void *);
30void cifs_dump_mids(struct TCP_Server_Info *); 28void cifs_dump_mids(struct TCP_Server_Info *);
29#ifdef CONFIG_CIFS_DEBUG2
30#define DBG2 2
31#else 31#else
32#define DBG2 0 32#define DBG2 0
33#endif 33#endif
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 541ef81f6ae8..8b6e344eb0ba 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -56,7 +56,6 @@ int traceSMB = 0;
56bool enable_oplocks = true; 56bool enable_oplocks = true;
57unsigned int linuxExtEnabled = 1; 57unsigned int linuxExtEnabled = 1;
58unsigned int lookupCacheEnabled = 1; 58unsigned int lookupCacheEnabled = 1;
59unsigned int multiuser_mount = 0;
60unsigned int global_secflags = CIFSSEC_DEF; 59unsigned int global_secflags = CIFSSEC_DEF;
61/* unsigned int ntlmv2_support = 0; */ 60/* unsigned int ntlmv2_support = 0; */
62unsigned int sign_CIFS_PDUs = 1; 61unsigned int sign_CIFS_PDUs = 1;
@@ -125,7 +124,7 @@ cifs_read_super(struct super_block *sb)
125 goto out_no_root; 124 goto out_no_root;
126 } 125 }
127 126
128 /* do that *after* d_alloc_root() - we want NULL ->d_op for root here */ 127 /* do that *after* d_make_root() - we want NULL ->d_op for root here */
129 if (cifs_sb_master_tcon(cifs_sb)->nocase) 128 if (cifs_sb_master_tcon(cifs_sb)->nocase)
130 sb->s_d_op = &cifs_ci_dentry_ops; 129 sb->s_d_op = &cifs_ci_dentry_ops;
131 else 130 else
@@ -272,7 +271,7 @@ static void
272cifs_evict_inode(struct inode *inode) 271cifs_evict_inode(struct inode *inode)
273{ 272{
274 truncate_inode_pages(&inode->i_data, 0); 273 truncate_inode_pages(&inode->i_data, 0);
275 end_writeback(inode); 274 clear_inode(inode);
276 cifs_fscache_release_inode_cookie(inode); 275 cifs_fscache_release_inode_cookie(inode);
277} 276}
278 277
@@ -329,6 +328,19 @@ cifs_show_security(struct seq_file *s, struct TCP_Server_Info *server)
329 seq_printf(s, "i"); 328 seq_printf(s, "i");
330} 329}
331 330
331static void
332cifs_show_cache_flavor(struct seq_file *s, struct cifs_sb_info *cifs_sb)
333{
334 seq_printf(s, ",cache=");
335
336 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
337 seq_printf(s, "strict");
338 else if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
339 seq_printf(s, "none");
340 else
341 seq_printf(s, "loose");
342}
343
332/* 344/*
333 * cifs_show_options() is for displaying mount options in /proc/mounts. 345 * cifs_show_options() is for displaying mount options in /proc/mounts.
334 * Not all settable options are displayed but most of the important 346 * Not all settable options are displayed but most of the important
@@ -342,7 +354,9 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
342 struct sockaddr *srcaddr; 354 struct sockaddr *srcaddr;
343 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr; 355 srcaddr = (struct sockaddr *)&tcon->ses->server->srcaddr;
344 356
357 seq_printf(s, ",vers=%s", tcon->ses->server->vals->version_string);
345 cifs_show_security(s, tcon->ses->server); 358 cifs_show_security(s, tcon->ses->server);
359 cifs_show_cache_flavor(s, cifs_sb);
346 360
347 seq_printf(s, ",unc=%s", tcon->treeName); 361 seq_printf(s, ",unc=%s", tcon->treeName);
348 362
@@ -408,8 +422,6 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
408 seq_printf(s, ",rwpidforward"); 422 seq_printf(s, ",rwpidforward");
409 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL) 423 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOPOSIXBRL)
410 seq_printf(s, ",forcemand"); 424 seq_printf(s, ",forcemand");
411 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
412 seq_printf(s, ",directio");
413 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR) 425 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
414 seq_printf(s, ",nouser_xattr"); 426 seq_printf(s, ",nouser_xattr");
415 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR) 427 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
@@ -432,8 +444,6 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
432 seq_printf(s, ",nostrictsync"); 444 seq_printf(s, ",nostrictsync");
433 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM) 445 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_PERM)
434 seq_printf(s, ",noperm"); 446 seq_printf(s, ",noperm");
435 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_STRICT_IO)
436 seq_printf(s, ",strictcache");
437 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID) 447 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPUID)
438 seq_printf(s, ",backupuid=%u", cifs_sb->mnt_backupuid); 448 seq_printf(s, ",backupuid=%u", cifs_sb->mnt_backupuid);
439 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID) 449 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_BACKUPGID)
@@ -945,7 +955,6 @@ cifs_init_once(void *inode)
945 struct cifsInodeInfo *cifsi = inode; 955 struct cifsInodeInfo *cifsi = inode;
946 956
947 inode_init_once(&cifsi->vfs_inode); 957 inode_init_once(&cifsi->vfs_inode);
948 INIT_LIST_HEAD(&cifsi->llist);
949 mutex_init(&cifsi->lock_mutex); 958 mutex_init(&cifsi->lock_mutex);
950} 959}
951 960
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index 4ff6313f0a91..20350a93ed99 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -43,6 +43,7 @@
43 43
44#define CIFS_MIN_RCV_POOL 4 44#define CIFS_MIN_RCV_POOL 4
45 45
46#define MAX_REOPEN_ATT 5 /* these many maximum attempts to reopen a file */
46/* 47/*
47 * default attribute cache timeout (jiffies) 48 * default attribute cache timeout (jiffies)
48 */ 49 */
@@ -150,6 +151,57 @@ struct cifs_cred {
150 ***************************************************************** 151 *****************************************************************
151 */ 152 */
152 153
154enum smb_version {
155 Smb_1 = 1,
156 Smb_21,
157};
158
159struct mid_q_entry;
160struct TCP_Server_Info;
161struct cifsFileInfo;
162struct cifs_ses;
163
164struct smb_version_operations {
165 int (*send_cancel)(struct TCP_Server_Info *, void *,
166 struct mid_q_entry *);
167 bool (*compare_fids)(struct cifsFileInfo *, struct cifsFileInfo *);
168 /* setup request: allocate mid, sign message */
169 int (*setup_request)(struct cifs_ses *, struct kvec *, unsigned int,
170 struct mid_q_entry **);
171 /* check response: verify signature, map error */
172 int (*check_receive)(struct mid_q_entry *, struct TCP_Server_Info *,
173 bool);
174 void (*add_credits)(struct TCP_Server_Info *, const unsigned int);
175 void (*set_credits)(struct TCP_Server_Info *, const int);
176 int * (*get_credits_field)(struct TCP_Server_Info *);
177 /* data offset from read response message */
178 unsigned int (*read_data_offset)(char *);
179 /* data length from read response message */
180 unsigned int (*read_data_length)(char *);
181 /* map smb to linux error */
182 int (*map_error)(char *, bool);
183 /* find mid corresponding to the response message */
184 struct mid_q_entry * (*find_mid)(struct TCP_Server_Info *, char *);
185 void (*dump_detail)(void *);
186 /* verify the message */
187 int (*check_message)(char *, unsigned int);
188 bool (*is_oplock_break)(char *, struct TCP_Server_Info *);
189};
190
191struct smb_version_values {
192 char *version_string;
193 __u32 large_lock_type;
194 __u32 exclusive_lock_type;
195 __u32 shared_lock_type;
196 __u32 unlock_lock_type;
197 size_t header_size;
198 size_t max_header_size;
199 size_t read_rsp_size;
200};
201
202#define HEADER_SIZE(server) (server->vals->header_size)
203#define MAX_HEADER_SIZE(server) (server->vals->max_header_size)
204
153struct smb_vol { 205struct smb_vol {
154 char *username; 206 char *username;
155 char *password; 207 char *password;
@@ -205,6 +257,8 @@ struct smb_vol {
205 bool sockopt_tcp_nodelay:1; 257 bool sockopt_tcp_nodelay:1;
206 unsigned short int port; 258 unsigned short int port;
207 unsigned long actimeo; /* attribute cache timeout (jiffies) */ 259 unsigned long actimeo; /* attribute cache timeout (jiffies) */
260 struct smb_version_operations *ops;
261 struct smb_version_values *vals;
208 char *prepath; 262 char *prepath;
209 struct sockaddr_storage srcaddr; /* allow binding to a local IP */ 263 struct sockaddr_storage srcaddr; /* allow binding to a local IP */
210 struct nls_table *local_nls; 264 struct nls_table *local_nls;
@@ -242,6 +296,8 @@ struct TCP_Server_Info {
242 int srv_count; /* reference counter */ 296 int srv_count; /* reference counter */
243 /* 15 character server name + 0x20 16th byte indicating type = srv */ 297 /* 15 character server name + 0x20 16th byte indicating type = srv */
244 char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL]; 298 char server_RFC1001_name[RFC1001_NAME_LEN_WITH_NULL];
299 struct smb_version_operations *ops;
300 struct smb_version_values *vals;
245 enum statusEnum tcpStatus; /* what we think the status is */ 301 enum statusEnum tcpStatus; /* what we think the status is */
246 char *hostname; /* hostname portion of UNC string */ 302 char *hostname; /* hostname portion of UNC string */
247 struct socket *ssocket; 303 struct socket *ssocket;
@@ -321,16 +377,6 @@ in_flight(struct TCP_Server_Info *server)
321 return num; 377 return num;
322} 378}
323 379
324static inline int*
325get_credits_field(struct TCP_Server_Info *server)
326{
327 /*
328 * This will change to switch statement when we reserve slots for echos
329 * and oplock breaks.
330 */
331 return &server->credits;
332}
333
334static inline bool 380static inline bool
335has_credits(struct TCP_Server_Info *server, int *credits) 381has_credits(struct TCP_Server_Info *server, int *credits)
336{ 382{
@@ -341,16 +387,16 @@ has_credits(struct TCP_Server_Info *server, int *credits)
341 return num > 0; 387 return num > 0;
342} 388}
343 389
344static inline size_t 390static inline void
345header_size(void) 391add_credits(struct TCP_Server_Info *server, const unsigned int add)
346{ 392{
347 return sizeof(struct smb_hdr); 393 server->ops->add_credits(server, add);
348} 394}
349 395
350static inline size_t 396static inline void
351max_header_size(void) 397set_credits(struct TCP_Server_Info *server, const int val)
352{ 398{
353 return MAX_CIFS_HDR_SIZE; 399 server->ops->set_credits(server, val);
354} 400}
355 401
356/* 402/*
@@ -547,8 +593,7 @@ struct cifsLockInfo {
547 __u64 offset; 593 __u64 offset;
548 __u64 length; 594 __u64 length;
549 __u32 pid; 595 __u32 pid;
550 __u8 type; 596 __u32 type;
551 __u16 netfid;
552}; 597};
553 598
554/* 599/*
@@ -573,6 +618,10 @@ struct cifs_search_info {
573struct cifsFileInfo { 618struct cifsFileInfo {
574 struct list_head tlist; /* pointer to next fid owned by tcon */ 619 struct list_head tlist; /* pointer to next fid owned by tcon */
575 struct list_head flist; /* next fid (file instance) for this inode */ 620 struct list_head flist; /* next fid (file instance) for this inode */
621 struct list_head llist; /*
622 * brlocks held by this fid, protected by
623 * lock_mutex from cifsInodeInfo structure
624 */
576 unsigned int uid; /* allows finding which FileInfo structure */ 625 unsigned int uid; /* allows finding which FileInfo structure */
577 __u32 pid; /* process id who opened file */ 626 __u32 pid; /* process id who opened file */
578 __u16 netfid; /* file id from remote */ 627 __u16 netfid; /* file id from remote */
@@ -615,9 +664,12 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
615 */ 664 */
616 665
617struct cifsInodeInfo { 666struct cifsInodeInfo {
618 struct list_head llist; /* brlocks for this inode */
619 bool can_cache_brlcks; 667 bool can_cache_brlcks;
620 struct mutex lock_mutex; /* protect two fields above */ 668 struct mutex lock_mutex; /*
669 * protect the field above and llist
670 * from every cifsFileInfo structure
671 * from openFileList
672 */
621 /* BB add in lists for dirty pages i.e. write caching info for oplock */ 673 /* BB add in lists for dirty pages i.e. write caching info for oplock */
622 struct list_head openFileList; 674 struct list_head openFileList;
623 __u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */ 675 __u32 cifsAttrs; /* e.g. DOS archive bit, sparse, compressed, system */
@@ -703,7 +755,6 @@ static inline void cifs_stats_bytes_read(struct cifs_tcon *tcon,
703 755
704#endif 756#endif
705 757
706struct mid_q_entry;
707 758
708/* 759/*
709 * This is the prototype for the mid receive function. This function is for 760 * This is the prototype for the mid receive function. This function is for
@@ -1042,12 +1093,7 @@ GLOBAL_EXTERN atomic_t smBufAllocCount;
1042GLOBAL_EXTERN atomic_t midCount; 1093GLOBAL_EXTERN atomic_t midCount;
1043 1094
1044/* Misc globals */ 1095/* Misc globals */
1045GLOBAL_EXTERN unsigned int multiuser_mount; /* if enabled allows new sessions 1096GLOBAL_EXTERN bool enable_oplocks; /* enable or disable oplocks */
1046 to be established on existing mount if we
1047 have the uid/password or Kerberos credential
1048 or equivalent for current user */
1049/* enable or disable oplocks */
1050GLOBAL_EXTERN bool enable_oplocks;
1051GLOBAL_EXTERN unsigned int lookupCacheEnabled; 1097GLOBAL_EXTERN unsigned int lookupCacheEnabled;
1052GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent 1098GLOBAL_EXTERN unsigned int global_secflags; /* if on, session setup sent
1053 with more secure ntlmssp2 challenge/resp */ 1099 with more secure ntlmssp2 challenge/resp */
@@ -1074,4 +1120,11 @@ void cifs_oplock_break(struct work_struct *work);
1074extern const struct slow_work_ops cifs_oplock_break_ops; 1120extern const struct slow_work_ops cifs_oplock_break_ops;
1075extern struct workqueue_struct *cifsiod_wq; 1121extern struct workqueue_struct *cifsiod_wq;
1076 1122
1123/* Operations for different SMB versions */
1124#define SMB1_VERSION_STRING "1.0"
1125extern struct smb_version_operations smb1_operations;
1126extern struct smb_version_values smb1_values;
1127#define SMB21_VERSION_STRING "2.1"
1128extern struct smb_version_operations smb21_operations;
1129extern struct smb_version_values smb21_values;
1077#endif /* _CIFS_GLOB_H */ 1130#endif /* _CIFS_GLOB_H */
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 96192c1e380a..5ec21ecf7980 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -78,6 +78,8 @@ extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
78 int * /* bytes returned */ , const int long_op); 78 int * /* bytes returned */ , const int long_op);
79extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses, 79extern int SendReceiveNoRsp(const unsigned int xid, struct cifs_ses *ses,
80 char *in_buf, int flags); 80 char *in_buf, int flags);
81extern int cifs_setup_request(struct cifs_ses *, struct kvec *, unsigned int,
82 struct mid_q_entry **);
81extern int cifs_check_receive(struct mid_q_entry *mid, 83extern int cifs_check_receive(struct mid_q_entry *mid,
82 struct TCP_Server_Info *server, bool log_error); 84 struct TCP_Server_Info *server, bool log_error);
83extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *, 85extern int SendReceive2(const unsigned int /* xid */ , struct cifs_ses *,
@@ -88,9 +90,6 @@ extern int SendReceiveBlockingLock(const unsigned int xid,
88 struct smb_hdr *in_buf , 90 struct smb_hdr *in_buf ,
89 struct smb_hdr *out_buf, 91 struct smb_hdr *out_buf,
90 int *bytes_returned); 92 int *bytes_returned);
91extern void cifs_add_credits(struct TCP_Server_Info *server,
92 const unsigned int add);
93extern void cifs_set_credits(struct TCP_Server_Info *server, const int val);
94extern int checkSMB(char *buf, unsigned int length); 93extern int checkSMB(char *buf, unsigned int length);
95extern bool is_valid_oplock_break(char *, struct TCP_Server_Info *); 94extern bool is_valid_oplock_break(char *, struct TCP_Server_Info *);
96extern bool backup_cred(struct cifs_sb_info *); 95extern bool backup_cred(struct cifs_sb_info *);
@@ -192,11 +191,13 @@ extern int CIFSTCon(unsigned int xid, struct cifs_ses *ses,
192 191
193extern int CIFSFindFirst(const int xid, struct cifs_tcon *tcon, 192extern int CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
194 const char *searchName, const struct nls_table *nls_codepage, 193 const char *searchName, const struct nls_table *nls_codepage,
195 __u16 *searchHandle, struct cifs_search_info *psrch_inf, 194 __u16 *searchHandle, __u16 search_flags,
195 struct cifs_search_info *psrch_inf,
196 int map, const char dirsep); 196 int map, const char dirsep);
197 197
198extern int CIFSFindNext(const int xid, struct cifs_tcon *tcon, 198extern int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
199 __u16 searchHandle, struct cifs_search_info *psrch_inf); 199 __u16 searchHandle, __u16 search_flags,
200 struct cifs_search_info *psrch_inf);
200 201
201extern int CIFSFindClose(const int, struct cifs_tcon *tcon, 202extern int CIFSFindClose(const int, struct cifs_tcon *tcon,
202 const __u16 search_handle); 203 const __u16 search_handle);
@@ -464,6 +465,9 @@ extern int SMBencrypt(unsigned char *passwd, const unsigned char *c8,
464 465
465/* asynchronous read support */ 466/* asynchronous read support */
466struct cifs_readdata { 467struct cifs_readdata {
468 struct kref refcount;
469 struct list_head list;
470 struct completion done;
467 struct cifsFileInfo *cfile; 471 struct cifsFileInfo *cfile;
468 struct address_space *mapping; 472 struct address_space *mapping;
469 __u64 offset; 473 __u64 offset;
@@ -472,12 +476,13 @@ struct cifs_readdata {
472 int result; 476 int result;
473 struct list_head pages; 477 struct list_head pages;
474 struct work_struct work; 478 struct work_struct work;
479 int (*marshal_iov) (struct cifs_readdata *rdata,
480 unsigned int remaining);
475 unsigned int nr_iov; 481 unsigned int nr_iov;
476 struct kvec iov[1]; 482 struct kvec iov[1];
477}; 483};
478 484
479struct cifs_readdata *cifs_readdata_alloc(unsigned int nr_pages); 485void cifs_readdata_release(struct kref *refcount);
480void cifs_readdata_free(struct cifs_readdata *rdata);
481int cifs_async_readv(struct cifs_readdata *rdata); 486int cifs_async_readv(struct cifs_readdata *rdata);
482 487
483/* asynchronous write support */ 488/* asynchronous write support */
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index da2f5446fa7a..b5ad716b2642 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -87,7 +87,6 @@ static struct {
87#endif /* CIFS_POSIX */ 87#endif /* CIFS_POSIX */
88 88
89/* Forward declarations */ 89/* Forward declarations */
90static void cifs_readv_complete(struct work_struct *work);
91 90
92/* Mark as invalid, all open files on tree connections since they 91/* Mark as invalid, all open files on tree connections since they
93 were closed when session to server was lost */ 92 were closed when session to server was lost */
@@ -461,7 +460,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
461 server->maxReq = min_t(unsigned int, 460 server->maxReq = min_t(unsigned int,
462 le16_to_cpu(rsp->MaxMpxCount), 461 le16_to_cpu(rsp->MaxMpxCount),
463 cifs_max_pending); 462 cifs_max_pending);
464 cifs_set_credits(server, server->maxReq); 463 set_credits(server, server->maxReq);
465 server->maxBuf = le16_to_cpu(rsp->MaxBufSize); 464 server->maxBuf = le16_to_cpu(rsp->MaxBufSize);
466 server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs); 465 server->max_vcs = le16_to_cpu(rsp->MaxNumberVcs);
467 /* even though we do not use raw we might as well set this 466 /* even though we do not use raw we might as well set this
@@ -569,7 +568,7 @@ CIFSSMBNegotiate(unsigned int xid, struct cifs_ses *ses)
569 little endian */ 568 little endian */
570 server->maxReq = min_t(unsigned int, le16_to_cpu(pSMBr->MaxMpxCount), 569 server->maxReq = min_t(unsigned int, le16_to_cpu(pSMBr->MaxMpxCount),
571 cifs_max_pending); 570 cifs_max_pending);
572 cifs_set_credits(server, server->maxReq); 571 set_credits(server, server->maxReq);
573 /* probably no need to store and check maxvcs */ 572 /* probably no need to store and check maxvcs */
574 server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize); 573 server->maxBuf = le32_to_cpu(pSMBr->MaxBufferSize);
575 server->max_rw = le32_to_cpu(pSMBr->MaxRawSize); 574 server->max_rw = le32_to_cpu(pSMBr->MaxRawSize);
@@ -721,7 +720,7 @@ cifs_echo_callback(struct mid_q_entry *mid)
721 struct TCP_Server_Info *server = mid->callback_data; 720 struct TCP_Server_Info *server = mid->callback_data;
722 721
723 DeleteMidQEntry(mid); 722 DeleteMidQEntry(mid);
724 cifs_add_credits(server, 1); 723 add_credits(server, 1);
725} 724}
726 725
727int 726int
@@ -1385,28 +1384,6 @@ openRetry:
1385 return rc; 1384 return rc;
1386} 1385}
1387 1386
1388struct cifs_readdata *
1389cifs_readdata_alloc(unsigned int nr_pages)
1390{
1391 struct cifs_readdata *rdata;
1392
1393 /* readdata + 1 kvec for each page */
1394 rdata = kzalloc(sizeof(*rdata) +
1395 sizeof(struct kvec) * nr_pages, GFP_KERNEL);
1396 if (rdata != NULL) {
1397 INIT_WORK(&rdata->work, cifs_readv_complete);
1398 INIT_LIST_HEAD(&rdata->pages);
1399 }
1400 return rdata;
1401}
1402
1403void
1404cifs_readdata_free(struct cifs_readdata *rdata)
1405{
1406 cifsFileInfo_put(rdata->cfile);
1407 kfree(rdata);
1408}
1409
1410/* 1387/*
1411 * Discard any remaining data in the current SMB. To do this, we borrow the 1388 * Discard any remaining data in the current SMB. To do this, we borrow the
1412 * current bigbuf. 1389 * current bigbuf.
@@ -1423,7 +1400,7 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1423 1400
1424 length = cifs_read_from_socket(server, server->bigbuf, 1401 length = cifs_read_from_socket(server, server->bigbuf,
1425 min_t(unsigned int, remaining, 1402 min_t(unsigned int, remaining,
1426 CIFSMaxBufSize + max_header_size())); 1403 CIFSMaxBufSize + MAX_HEADER_SIZE(server)));
1427 if (length < 0) 1404 if (length < 0)
1428 return length; 1405 return length;
1429 server->total_read += length; 1406 server->total_read += length;
@@ -1434,38 +1411,14 @@ cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1434 return 0; 1411 return 0;
1435} 1412}
1436 1413
1437static inline size_t
1438read_rsp_size(void)
1439{
1440 return sizeof(READ_RSP);
1441}
1442
1443static inline unsigned int
1444read_data_offset(char *buf)
1445{
1446 READ_RSP *rsp = (READ_RSP *)buf;
1447 return le16_to_cpu(rsp->DataOffset);
1448}
1449
1450static inline unsigned int
1451read_data_length(char *buf)
1452{
1453 READ_RSP *rsp = (READ_RSP *)buf;
1454 return (le16_to_cpu(rsp->DataLengthHigh) << 16) +
1455 le16_to_cpu(rsp->DataLength);
1456}
1457
1458static int 1414static int
1459cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid) 1415cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1460{ 1416{
1461 int length, len; 1417 int length, len;
1462 unsigned int data_offset, remaining, data_len; 1418 unsigned int data_offset, data_len;
1463 struct cifs_readdata *rdata = mid->callback_data; 1419 struct cifs_readdata *rdata = mid->callback_data;
1464 char *buf = server->smallbuf; 1420 char *buf = server->smallbuf;
1465 unsigned int buflen = get_rfc1002_length(buf) + 4; 1421 unsigned int buflen = get_rfc1002_length(buf) + 4;
1466 u64 eof;
1467 pgoff_t eof_index;
1468 struct page *page, *tpage;
1469 1422
1470 cFYI(1, "%s: mid=%llu offset=%llu bytes=%u", __func__, 1423 cFYI(1, "%s: mid=%llu offset=%llu bytes=%u", __func__,
1471 mid->mid, rdata->offset, rdata->bytes); 1424 mid->mid, rdata->offset, rdata->bytes);
@@ -1475,9 +1428,10 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1475 * can if there's not enough data. At this point, we've read down to 1428 * can if there's not enough data. At this point, we've read down to
1476 * the Mid. 1429 * the Mid.
1477 */ 1430 */
1478 len = min_t(unsigned int, buflen, read_rsp_size()) - header_size() + 1; 1431 len = min_t(unsigned int, buflen, server->vals->read_rsp_size) -
1432 HEADER_SIZE(server) + 1;
1479 1433
1480 rdata->iov[0].iov_base = buf + header_size() - 1; 1434 rdata->iov[0].iov_base = buf + HEADER_SIZE(server) - 1;
1481 rdata->iov[0].iov_len = len; 1435 rdata->iov[0].iov_len = len;
1482 1436
1483 length = cifs_readv_from_socket(server, rdata->iov, 1, len); 1437 length = cifs_readv_from_socket(server, rdata->iov, 1, len);
@@ -1486,7 +1440,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1486 server->total_read += length; 1440 server->total_read += length;
1487 1441
1488 /* Was the SMB read successful? */ 1442 /* Was the SMB read successful? */
1489 rdata->result = map_smb_to_linux_error(buf, false); 1443 rdata->result = server->ops->map_error(buf, false);
1490 if (rdata->result != 0) { 1444 if (rdata->result != 0) {
1491 cFYI(1, "%s: server returned error %d", __func__, 1445 cFYI(1, "%s: server returned error %d", __func__,
1492 rdata->result); 1446 rdata->result);
@@ -1494,14 +1448,15 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1494 } 1448 }
1495 1449
1496 /* Is there enough to get to the rest of the READ_RSP header? */ 1450 /* Is there enough to get to the rest of the READ_RSP header? */
1497 if (server->total_read < read_rsp_size()) { 1451 if (server->total_read < server->vals->read_rsp_size) {
1498 cFYI(1, "%s: server returned short header. got=%u expected=%zu", 1452 cFYI(1, "%s: server returned short header. got=%u expected=%zu",
1499 __func__, server->total_read, read_rsp_size()); 1453 __func__, server->total_read,
1454 server->vals->read_rsp_size);
1500 rdata->result = -EIO; 1455 rdata->result = -EIO;
1501 return cifs_readv_discard(server, mid); 1456 return cifs_readv_discard(server, mid);
1502 } 1457 }
1503 1458
1504 data_offset = read_data_offset(buf) + 4; 1459 data_offset = server->ops->read_data_offset(buf) + 4;
1505 if (data_offset < server->total_read) { 1460 if (data_offset < server->total_read) {
1506 /* 1461 /*
1507 * win2k8 sometimes sends an offset of 0 when the read 1462 * win2k8 sometimes sends an offset of 0 when the read
@@ -1540,7 +1495,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1540 rdata->iov[0].iov_base, rdata->iov[0].iov_len); 1495 rdata->iov[0].iov_base, rdata->iov[0].iov_len);
1541 1496
1542 /* how much data is in the response? */ 1497 /* how much data is in the response? */
1543 data_len = read_data_length(buf); 1498 data_len = server->ops->read_data_length(buf);
1544 if (data_offset + data_len > buflen) { 1499 if (data_offset + data_len > buflen) {
1545 /* data_len is corrupt -- discard frame */ 1500 /* data_len is corrupt -- discard frame */
1546 rdata->result = -EIO; 1501 rdata->result = -EIO;
@@ -1548,64 +1503,8 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1548 } 1503 }
1549 1504
1550 /* marshal up the page array */ 1505 /* marshal up the page array */
1551 len = 0; 1506 len = rdata->marshal_iov(rdata, data_len);
1552 remaining = data_len; 1507 data_len -= len;
1553 rdata->nr_iov = 1;
1554
1555 /* determine the eof that the server (probably) has */
1556 eof = CIFS_I(rdata->mapping->host)->server_eof;
1557 eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
1558 cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
1559
1560 list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
1561 if (remaining >= PAGE_CACHE_SIZE) {
1562 /* enough data to fill the page */
1563 rdata->iov[rdata->nr_iov].iov_base = kmap(page);
1564 rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
1565 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
1566 rdata->nr_iov, page->index,
1567 rdata->iov[rdata->nr_iov].iov_base,
1568 rdata->iov[rdata->nr_iov].iov_len);
1569 ++rdata->nr_iov;
1570 len += PAGE_CACHE_SIZE;
1571 remaining -= PAGE_CACHE_SIZE;
1572 } else if (remaining > 0) {
1573 /* enough for partial page, fill and zero the rest */
1574 rdata->iov[rdata->nr_iov].iov_base = kmap(page);
1575 rdata->iov[rdata->nr_iov].iov_len = remaining;
1576 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
1577 rdata->nr_iov, page->index,
1578 rdata->iov[rdata->nr_iov].iov_base,
1579 rdata->iov[rdata->nr_iov].iov_len);
1580 memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
1581 '\0', PAGE_CACHE_SIZE - remaining);
1582 ++rdata->nr_iov;
1583 len += remaining;
1584 remaining = 0;
1585 } else if (page->index > eof_index) {
1586 /*
1587 * The VFS will not try to do readahead past the
1588 * i_size, but it's possible that we have outstanding
1589 * writes with gaps in the middle and the i_size hasn't
1590 * caught up yet. Populate those with zeroed out pages
1591 * to prevent the VFS from repeatedly attempting to
1592 * fill them until the writes are flushed.
1593 */
1594 zero_user(page, 0, PAGE_CACHE_SIZE);
1595 list_del(&page->lru);
1596 lru_cache_add_file(page);
1597 flush_dcache_page(page);
1598 SetPageUptodate(page);
1599 unlock_page(page);
1600 page_cache_release(page);
1601 } else {
1602 /* no need to hold page hostage */
1603 list_del(&page->lru);
1604 lru_cache_add_file(page);
1605 unlock_page(page);
1606 page_cache_release(page);
1607 }
1608 }
1609 1508
1610 /* issue the read if we have any iovecs left to fill */ 1509 /* issue the read if we have any iovecs left to fill */
1611 if (rdata->nr_iov > 1) { 1510 if (rdata->nr_iov > 1) {
@@ -1621,7 +1520,7 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1621 rdata->bytes = length; 1520 rdata->bytes = length;
1622 1521
1623 cFYI(1, "total_read=%u buflen=%u remaining=%u", server->total_read, 1522 cFYI(1, "total_read=%u buflen=%u remaining=%u", server->total_read,
1624 buflen, remaining); 1523 buflen, data_len);
1625 1524
1626 /* discard anything left over */ 1525 /* discard anything left over */
1627 if (server->total_read < buflen) 1526 if (server->total_read < buflen)
@@ -1632,33 +1531,6 @@ cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
1632} 1531}
1633 1532
1634static void 1533static void
1635cifs_readv_complete(struct work_struct *work)
1636{
1637 struct cifs_readdata *rdata = container_of(work,
1638 struct cifs_readdata, work);
1639 struct page *page, *tpage;
1640
1641 list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
1642 list_del(&page->lru);
1643 lru_cache_add_file(page);
1644
1645 if (rdata->result == 0) {
1646 kunmap(page);
1647 flush_dcache_page(page);
1648 SetPageUptodate(page);
1649 }
1650
1651 unlock_page(page);
1652
1653 if (rdata->result == 0)
1654 cifs_readpage_to_fscache(rdata->mapping->host, page);
1655
1656 page_cache_release(page);
1657 }
1658 cifs_readdata_free(rdata);
1659}
1660
1661static void
1662cifs_readv_callback(struct mid_q_entry *mid) 1534cifs_readv_callback(struct mid_q_entry *mid)
1663{ 1535{
1664 struct cifs_readdata *rdata = mid->callback_data; 1536 struct cifs_readdata *rdata = mid->callback_data;
@@ -1691,7 +1563,7 @@ cifs_readv_callback(struct mid_q_entry *mid)
1691 1563
1692 queue_work(cifsiod_wq, &rdata->work); 1564 queue_work(cifsiod_wq, &rdata->work);
1693 DeleteMidQEntry(mid); 1565 DeleteMidQEntry(mid);
1694 cifs_add_credits(server, 1); 1566 add_credits(server, 1);
1695} 1567}
1696 1568
1697/* cifs_async_readv - send an async write, and set up mid to handle result */ 1569/* cifs_async_readv - send an async write, and set up mid to handle result */
@@ -1744,12 +1616,15 @@ cifs_async_readv(struct cifs_readdata *rdata)
1744 rdata->iov[0].iov_base = smb; 1616 rdata->iov[0].iov_base = smb;
1745 rdata->iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4; 1617 rdata->iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;
1746 1618
1619 kref_get(&rdata->refcount);
1747 rc = cifs_call_async(tcon->ses->server, rdata->iov, 1, 1620 rc = cifs_call_async(tcon->ses->server, rdata->iov, 1,
1748 cifs_readv_receive, cifs_readv_callback, 1621 cifs_readv_receive, cifs_readv_callback,
1749 rdata, false); 1622 rdata, false);
1750 1623
1751 if (rc == 0) 1624 if (rc == 0)
1752 cifs_stats_inc(&tcon->num_reads); 1625 cifs_stats_inc(&tcon->num_reads);
1626 else
1627 kref_put(&rdata->refcount, cifs_readdata_release);
1753 1628
1754 cifs_small_buf_release(smb); 1629 cifs_small_buf_release(smb);
1755 return rc; 1630 return rc;
@@ -2135,7 +2010,7 @@ cifs_writev_callback(struct mid_q_entry *mid)
2135 2010
2136 queue_work(cifsiod_wq, &wdata->work); 2011 queue_work(cifsiod_wq, &wdata->work);
2137 DeleteMidQEntry(mid); 2012 DeleteMidQEntry(mid);
2138 cifs_add_credits(tcon->ses->server, 1); 2013 add_credits(tcon->ses->server, 1);
2139} 2014}
2140 2015
2141/* cifs_async_writev - send an async write, and set up mid to handle result */ 2016/* cifs_async_writev - send an async write, and set up mid to handle result */
@@ -4344,7 +4219,7 @@ int
4344CIFSFindFirst(const int xid, struct cifs_tcon *tcon, 4219CIFSFindFirst(const int xid, struct cifs_tcon *tcon,
4345 const char *searchName, 4220 const char *searchName,
4346 const struct nls_table *nls_codepage, 4221 const struct nls_table *nls_codepage,
4347 __u16 *pnetfid, 4222 __u16 *pnetfid, __u16 search_flags,
4348 struct cifs_search_info *psrch_inf, int remap, const char dirsep) 4223 struct cifs_search_info *psrch_inf, int remap, const char dirsep)
4349{ 4224{
4350/* level 257 SMB_ */ 4225/* level 257 SMB_ */
@@ -4416,8 +4291,7 @@ findFirstRetry:
4416 cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM | 4291 cpu_to_le16(ATTR_READONLY | ATTR_HIDDEN | ATTR_SYSTEM |
4417 ATTR_DIRECTORY); 4292 ATTR_DIRECTORY);
4418 pSMB->SearchCount = cpu_to_le16(CIFSMaxBufSize/sizeof(FILE_UNIX_INFO)); 4293 pSMB->SearchCount = cpu_to_le16(CIFSMaxBufSize/sizeof(FILE_UNIX_INFO));
4419 pSMB->SearchFlags = cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END | 4294 pSMB->SearchFlags = cpu_to_le16(search_flags);
4420 CIFS_SEARCH_RETURN_RESUME);
4421 pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level); 4295 pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
4422 4296
4423 /* BB what should we set StorageType to? Does it matter? BB */ 4297 /* BB what should we set StorageType to? Does it matter? BB */
@@ -4487,8 +4361,8 @@ findFirstRetry:
4487 return rc; 4361 return rc;
4488} 4362}
4489 4363
4490int CIFSFindNext(const int xid, struct cifs_tcon *tcon, 4364int CIFSFindNext(const int xid, struct cifs_tcon *tcon, __u16 searchHandle,
4491 __u16 searchHandle, struct cifs_search_info *psrch_inf) 4365 __u16 search_flags, struct cifs_search_info *psrch_inf)
4492{ 4366{
4493 TRANSACTION2_FNEXT_REQ *pSMB = NULL; 4367 TRANSACTION2_FNEXT_REQ *pSMB = NULL;
4494 TRANSACTION2_FNEXT_RSP *pSMBr = NULL; 4368 TRANSACTION2_FNEXT_RSP *pSMBr = NULL;
@@ -4531,8 +4405,7 @@ int CIFSFindNext(const int xid, struct cifs_tcon *tcon,
4531 cpu_to_le16(CIFSMaxBufSize / sizeof(FILE_UNIX_INFO)); 4405 cpu_to_le16(CIFSMaxBufSize / sizeof(FILE_UNIX_INFO));
4532 pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level); 4406 pSMB->InformationLevel = cpu_to_le16(psrch_inf->info_level);
4533 pSMB->ResumeKey = psrch_inf->resume_key; 4407 pSMB->ResumeKey = psrch_inf->resume_key;
4534 pSMB->SearchFlags = 4408 pSMB->SearchFlags = cpu_to_le16(search_flags);
4535 cpu_to_le16(CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME);
4536 4409
4537 name_len = psrch_inf->resume_name_len; 4410 name_len = psrch_inf->resume_name_len;
4538 params += name_len; 4411 params += name_len;
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index e0b56d7a19c5..ccafdedd0dbc 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -1,7 +1,7 @@
1/* 1/*
2 * fs/cifs/connect.c 2 * fs/cifs/connect.c
3 * 3 *
4 * Copyright (C) International Business Machines Corp., 2002,2009 4 * Copyright (C) International Business Machines Corp., 2002,2011
5 * Author(s): Steve French (sfrench@us.ibm.com) 5 * Author(s): Steve French (sfrench@us.ibm.com)
6 * 6 *
7 * This library is free software; you can redistribute it and/or modify 7 * This library is free software; you can redistribute it and/or modify
@@ -102,7 +102,7 @@ enum {
102 Opt_srcaddr, Opt_prefixpath, 102 Opt_srcaddr, Opt_prefixpath,
103 Opt_iocharset, Opt_sockopt, 103 Opt_iocharset, Opt_sockopt,
104 Opt_netbiosname, Opt_servern, 104 Opt_netbiosname, Opt_servern,
105 Opt_ver, Opt_sec, 105 Opt_ver, Opt_vers, Opt_sec, Opt_cache,
106 106
107 /* Mount options to be ignored */ 107 /* Mount options to be ignored */
108 Opt_ignore, 108 Opt_ignore,
@@ -210,9 +210,9 @@ static const match_table_t cifs_mount_option_tokens = {
210 { Opt_netbiosname, "netbiosname=%s" }, 210 { Opt_netbiosname, "netbiosname=%s" },
211 { Opt_servern, "servern=%s" }, 211 { Opt_servern, "servern=%s" },
212 { Opt_ver, "ver=%s" }, 212 { Opt_ver, "ver=%s" },
213 { Opt_ver, "vers=%s" }, 213 { Opt_vers, "vers=%s" },
214 { Opt_ver, "version=%s" },
215 { Opt_sec, "sec=%s" }, 214 { Opt_sec, "sec=%s" },
215 { Opt_cache, "cache=%s" },
216 216
217 { Opt_ignore, "cred" }, 217 { Opt_ignore, "cred" },
218 { Opt_ignore, "credentials" }, 218 { Opt_ignore, "credentials" },
@@ -261,6 +261,26 @@ static const match_table_t cifs_secflavor_tokens = {
261 { Opt_sec_err, NULL } 261 { Opt_sec_err, NULL }
262}; 262};
263 263
264/* cache flavors */
265enum {
266 Opt_cache_loose,
267 Opt_cache_strict,
268 Opt_cache_none,
269 Opt_cache_err
270};
271
272static const match_table_t cifs_cacheflavor_tokens = {
273 { Opt_cache_loose, "loose" },
274 { Opt_cache_strict, "strict" },
275 { Opt_cache_none, "none" },
276 { Opt_cache_err, NULL }
277};
278
279static const match_table_t cifs_smb_version_tokens = {
280 { Smb_1, SMB1_VERSION_STRING },
281 { Smb_21, SMB21_VERSION_STRING },
282};
283
264static int ip_connect(struct TCP_Server_Info *server); 284static int ip_connect(struct TCP_Server_Info *server);
265static int generic_ip_connect(struct TCP_Server_Info *server); 285static int generic_ip_connect(struct TCP_Server_Info *server);
266static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink); 286static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
@@ -549,7 +569,7 @@ allocate_buffers(struct TCP_Server_Info *server)
549 } 569 }
550 } else if (server->large_buf) { 570 } else if (server->large_buf) {
551 /* we are reusing a dirty large buf, clear its start */ 571 /* we are reusing a dirty large buf, clear its start */
552 memset(server->bigbuf, 0, header_size()); 572 memset(server->bigbuf, 0, HEADER_SIZE(server));
553 } 573 }
554 574
555 if (!server->smallbuf) { 575 if (!server->smallbuf) {
@@ -563,7 +583,7 @@ allocate_buffers(struct TCP_Server_Info *server)
563 /* beginning of smb buffer is cleared in our buf_get */ 583 /* beginning of smb buffer is cleared in our buf_get */
564 } else { 584 } else {
565 /* if existing small buf clear beginning */ 585 /* if existing small buf clear beginning */
566 memset(server->smallbuf, 0, header_size()); 586 memset(server->smallbuf, 0, HEADER_SIZE(server));
567 } 587 }
568 588
569 return true; 589 return true;
@@ -764,25 +784,6 @@ is_smb_response(struct TCP_Server_Info *server, unsigned char type)
764 return false; 784 return false;
765} 785}
766 786
767static struct mid_q_entry *
768find_mid(struct TCP_Server_Info *server, char *buffer)
769{
770 struct smb_hdr *buf = (struct smb_hdr *)buffer;
771 struct mid_q_entry *mid;
772
773 spin_lock(&GlobalMid_Lock);
774 list_for_each_entry(mid, &server->pending_mid_q, qhead) {
775 if (mid->mid == buf->Mid &&
776 mid->mid_state == MID_REQUEST_SUBMITTED &&
777 le16_to_cpu(mid->command) == buf->Command) {
778 spin_unlock(&GlobalMid_Lock);
779 return mid;
780 }
781 }
782 spin_unlock(&GlobalMid_Lock);
783 return NULL;
784}
785
786void 787void
787dequeue_mid(struct mid_q_entry *mid, bool malformed) 788dequeue_mid(struct mid_q_entry *mid, bool malformed)
788{ 789{
@@ -934,7 +935,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
934 unsigned int pdu_length = get_rfc1002_length(buf); 935 unsigned int pdu_length = get_rfc1002_length(buf);
935 936
936 /* make sure this will fit in a large buffer */ 937 /* make sure this will fit in a large buffer */
937 if (pdu_length > CIFSMaxBufSize + max_header_size() - 4) { 938 if (pdu_length > CIFSMaxBufSize + MAX_HEADER_SIZE(server) - 4) {
938 cERROR(1, "SMB response too long (%u bytes)", 939 cERROR(1, "SMB response too long (%u bytes)",
939 pdu_length); 940 pdu_length);
940 cifs_reconnect(server); 941 cifs_reconnect(server);
@@ -950,8 +951,8 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
950 } 951 }
951 952
952 /* now read the rest */ 953 /* now read the rest */
953 length = cifs_read_from_socket(server, buf + header_size() - 1, 954 length = cifs_read_from_socket(server, buf + HEADER_SIZE(server) - 1,
954 pdu_length - header_size() + 1 + 4); 955 pdu_length - HEADER_SIZE(server) + 1 + 4);
955 if (length < 0) 956 if (length < 0)
956 return length; 957 return length;
957 server->total_read += length; 958 server->total_read += length;
@@ -967,7 +968,7 @@ standard_receive3(struct TCP_Server_Info *server, struct mid_q_entry *mid)
967 * 48 bytes is enough to display the header and a little bit 968 * 48 bytes is enough to display the header and a little bit
968 * into the payload for debugging purposes. 969 * into the payload for debugging purposes.
969 */ 970 */
970 length = checkSMB(buf, server->total_read); 971 length = server->ops->check_message(buf, server->total_read);
971 if (length != 0) 972 if (length != 0)
972 cifs_dump_mem("Bad SMB: ", buf, 973 cifs_dump_mem("Bad SMB: ", buf,
973 min_t(unsigned int, server->total_read, 48)); 974 min_t(unsigned int, server->total_read, 48));
@@ -1025,7 +1026,7 @@ cifs_demultiplex_thread(void *p)
1025 continue; 1026 continue;
1026 1027
1027 /* make sure we have enough to get to the MID */ 1028 /* make sure we have enough to get to the MID */
1028 if (pdu_length < header_size() - 1 - 4) { 1029 if (pdu_length < HEADER_SIZE(server) - 1 - 4) {
1029 cERROR(1, "SMB response too short (%u bytes)", 1030 cERROR(1, "SMB response too short (%u bytes)",
1030 pdu_length); 1031 pdu_length);
1031 cifs_reconnect(server); 1032 cifs_reconnect(server);
@@ -1035,12 +1036,12 @@ cifs_demultiplex_thread(void *p)
1035 1036
1036 /* read down to the MID */ 1037 /* read down to the MID */
1037 length = cifs_read_from_socket(server, buf + 4, 1038 length = cifs_read_from_socket(server, buf + 4,
1038 header_size() - 1 - 4); 1039 HEADER_SIZE(server) - 1 - 4);
1039 if (length < 0) 1040 if (length < 0)
1040 continue; 1041 continue;
1041 server->total_read += length; 1042 server->total_read += length;
1042 1043
1043 mid_entry = find_mid(server, buf); 1044 mid_entry = server->ops->find_mid(server, buf);
1044 1045
1045 if (!mid_entry || !mid_entry->receive) 1046 if (!mid_entry || !mid_entry->receive)
1046 length = standard_receive3(server, mid_entry); 1047 length = standard_receive3(server, mid_entry);
@@ -1057,12 +1058,13 @@ cifs_demultiplex_thread(void *p)
1057 if (mid_entry != NULL) { 1058 if (mid_entry != NULL) {
1058 if (!mid_entry->multiRsp || mid_entry->multiEnd) 1059 if (!mid_entry->multiRsp || mid_entry->multiEnd)
1059 mid_entry->callback(mid_entry); 1060 mid_entry->callback(mid_entry);
1060 } else if (!is_valid_oplock_break(buf, server)) { 1061 } else if (!server->ops->is_oplock_break(buf, server)) {
1061 cERROR(1, "No task to wake, unknown frame received! " 1062 cERROR(1, "No task to wake, unknown frame received! "
1062 "NumMids %d", atomic_read(&midCount)); 1063 "NumMids %d", atomic_read(&midCount));
1063 cifs_dump_mem("Received Data is: ", buf, header_size()); 1064 cifs_dump_mem("Received Data is: ", buf,
1065 HEADER_SIZE(server));
1064#ifdef CONFIG_CIFS_DEBUG2 1066#ifdef CONFIG_CIFS_DEBUG2
1065 cifs_dump_detail(buf); 1067 server->ops->dump_detail(buf);
1066 cifs_dump_mids(server); 1068 cifs_dump_mids(server);
1067#endif /* CIFS_DEBUG2 */ 1069#endif /* CIFS_DEBUG2 */
1068 1070
@@ -1186,6 +1188,54 @@ static int cifs_parse_security_flavors(char *value,
1186} 1188}
1187 1189
1188static int 1190static int
1191cifs_parse_cache_flavor(char *value, struct smb_vol *vol)
1192{
1193 substring_t args[MAX_OPT_ARGS];
1194
1195 switch (match_token(value, cifs_cacheflavor_tokens, args)) {
1196 case Opt_cache_loose:
1197 vol->direct_io = false;
1198 vol->strict_io = false;
1199 break;
1200 case Opt_cache_strict:
1201 vol->direct_io = false;
1202 vol->strict_io = true;
1203 break;
1204 case Opt_cache_none:
1205 vol->direct_io = true;
1206 vol->strict_io = false;
1207 break;
1208 default:
1209 cERROR(1, "bad cache= option: %s", value);
1210 return 1;
1211 }
1212 return 0;
1213}
1214
1215static int
1216cifs_parse_smb_version(char *value, struct smb_vol *vol)
1217{
1218 substring_t args[MAX_OPT_ARGS];
1219
1220 switch (match_token(value, cifs_smb_version_tokens, args)) {
1221 case Smb_1:
1222 vol->ops = &smb1_operations;
1223 vol->vals = &smb1_values;
1224 break;
1225#ifdef CONFIG_CIFS_SMB2
1226 case Smb_21:
1227 vol->ops = &smb21_operations;
1228 vol->vals = &smb21_values;
1229 break;
1230#endif
1231 default:
1232 cERROR(1, "Unknown vers= option specified: %s", value);
1233 return 1;
1234 }
1235 return 0;
1236}
1237
1238static int
1189cifs_parse_mount_options(const char *mountdata, const char *devname, 1239cifs_parse_mount_options(const char *mountdata, const char *devname,
1190 struct smb_vol *vol) 1240 struct smb_vol *vol)
1191{ 1241{
@@ -1203,6 +1253,8 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1203 char *string = NULL; 1253 char *string = NULL;
1204 char *tmp_end, *value; 1254 char *tmp_end, *value;
1205 char delim; 1255 char delim;
1256 bool cache_specified = false;
1257 static bool cache_warned = false;
1206 1258
1207 separator[0] = ','; 1259 separator[0] = ',';
1208 separator[1] = 0; 1260 separator[1] = 0;
@@ -1236,6 +1288,10 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1236 1288
1237 vol->actimeo = CIFS_DEF_ACTIMEO; 1289 vol->actimeo = CIFS_DEF_ACTIMEO;
1238 1290
1291 /* FIXME: add autonegotiation -- for now, SMB1 is default */
1292 vol->ops = &smb1_operations;
1293 vol->vals = &smb1_values;
1294
1239 if (!mountdata) 1295 if (!mountdata)
1240 goto cifs_parse_mount_err; 1296 goto cifs_parse_mount_err;
1241 1297
@@ -1414,10 +1470,20 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1414 vol->seal = 1; 1470 vol->seal = 1;
1415 break; 1471 break;
1416 case Opt_direct: 1472 case Opt_direct:
1417 vol->direct_io = 1; 1473 cache_specified = true;
1474 vol->direct_io = true;
1475 vol->strict_io = false;
1476 cERROR(1, "The \"directio\" option will be removed in "
1477 "3.7. Please switch to the \"cache=none\" "
1478 "option.");
1418 break; 1479 break;
1419 case Opt_strictcache: 1480 case Opt_strictcache:
1420 vol->strict_io = 1; 1481 cache_specified = true;
1482 vol->direct_io = false;
1483 vol->strict_io = true;
1484 cERROR(1, "The \"strictcache\" option will be removed "
1485 "in 3.7. Please switch to the \"cache=strict\" "
1486 "option.");
1421 break; 1487 break;
1422 case Opt_noac: 1488 case Opt_noac:
1423 printk(KERN_WARNING "CIFS: Mount option noac not " 1489 printk(KERN_WARNING "CIFS: Mount option noac not "
@@ -1821,8 +1887,7 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1821 if (string == NULL) 1887 if (string == NULL)
1822 goto out_nomem; 1888 goto out_nomem;
1823 1889
1824 if (strnicmp(string, "cifs", 4) == 0 || 1890 if (strnicmp(string, "1", 1) == 0) {
1825 strnicmp(string, "1", 1) == 0) {
1826 /* This is the default */ 1891 /* This is the default */
1827 break; 1892 break;
1828 } 1893 }
@@ -1830,6 +1895,14 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1830 printk(KERN_WARNING "CIFS: Invalid version" 1895 printk(KERN_WARNING "CIFS: Invalid version"
1831 " specified\n"); 1896 " specified\n");
1832 goto cifs_parse_mount_err; 1897 goto cifs_parse_mount_err;
1898 case Opt_vers:
1899 string = match_strdup(args);
1900 if (string == NULL)
1901 goto out_nomem;
1902
1903 if (cifs_parse_smb_version(string, vol) != 0)
1904 goto cifs_parse_mount_err;
1905 break;
1833 case Opt_sec: 1906 case Opt_sec:
1834 string = match_strdup(args); 1907 string = match_strdup(args);
1835 if (string == NULL) 1908 if (string == NULL)
@@ -1838,6 +1911,15 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1838 if (cifs_parse_security_flavors(string, vol) != 0) 1911 if (cifs_parse_security_flavors(string, vol) != 0)
1839 goto cifs_parse_mount_err; 1912 goto cifs_parse_mount_err;
1840 break; 1913 break;
1914 case Opt_cache:
1915 cache_specified = true;
1916 string = match_strdup(args);
1917 if (string == NULL)
1918 goto out_nomem;
1919
1920 if (cifs_parse_cache_flavor(string, vol) != 0)
1921 goto cifs_parse_mount_err;
1922 break;
1841 default: 1923 default:
1842 /* 1924 /*
1843 * An option we don't recognize. Save it off for later 1925 * An option we don't recognize. Save it off for later
@@ -1881,6 +1963,14 @@ cifs_parse_mount_options(const char *mountdata, const char *devname,
1881 printk(KERN_NOTICE "CIFS: ignoring forcegid mount option " 1963 printk(KERN_NOTICE "CIFS: ignoring forcegid mount option "
1882 "specified with no gid= option.\n"); 1964 "specified with no gid= option.\n");
1883 1965
1966 /* FIXME: remove this block in 3.7 */
1967 if (!cache_specified && !cache_warned) {
1968 cache_warned = true;
1969 printk(KERN_NOTICE "CIFS: no cache= option specified, using "
1970 "\"cache=loose\". This default will change "
1971 "to \"cache=strict\" in 3.7.\n");
1972 }
1973
1884 kfree(mountdata_copy); 1974 kfree(mountdata_copy);
1885 return 0; 1975 return 0;
1886 1976
@@ -2041,6 +2131,9 @@ match_security(struct TCP_Server_Info *server, struct smb_vol *vol)
2041static int match_server(struct TCP_Server_Info *server, struct sockaddr *addr, 2131static int match_server(struct TCP_Server_Info *server, struct sockaddr *addr,
2042 struct smb_vol *vol) 2132 struct smb_vol *vol)
2043{ 2133{
2134 if ((server->vals != vol->vals) || (server->ops != vol->ops))
2135 return 0;
2136
2044 if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns)) 2137 if (!net_eq(cifs_net_ns(server), current->nsproxy->net_ns))
2045 return 0; 2138 return 0;
2046 2139
@@ -2163,6 +2256,8 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
2163 goto out_err; 2256 goto out_err;
2164 } 2257 }
2165 2258
2259 tcp_ses->ops = volume_info->ops;
2260 tcp_ses->vals = volume_info->vals;
2166 cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns)); 2261 cifs_set_net_ns(tcp_ses, get_net(current->nsproxy->net_ns));
2167 tcp_ses->hostname = extract_hostname(volume_info->UNC); 2262 tcp_ses->hostname = extract_hostname(volume_info->UNC);
2168 if (IS_ERR(tcp_ses->hostname)) { 2263 if (IS_ERR(tcp_ses->hostname)) {
@@ -3569,6 +3664,7 @@ cifs_setup_volume_info(struct smb_vol *volume_info, char *mount_data,
3569 if (cifs_parse_mount_options(mount_data, devname, volume_info)) 3664 if (cifs_parse_mount_options(mount_data, devname, volume_info))
3570 return -EINVAL; 3665 return -EINVAL;
3571 3666
3667
3572 if (volume_info->nullauth) { 3668 if (volume_info->nullauth) {
3573 cFYI(1, "Anonymous login"); 3669 cFYI(1, "Anonymous login");
3574 kfree(volume_info->username); 3670 kfree(volume_info->username);
@@ -4010,11 +4106,11 @@ int cifs_negotiate_protocol(unsigned int xid, struct cifs_ses *ses)
4010 if (server->maxBuf != 0) 4106 if (server->maxBuf != 0)
4011 return 0; 4107 return 0;
4012 4108
4013 cifs_set_credits(server, 1); 4109 set_credits(server, 1);
4014 rc = CIFSSMBNegotiate(xid, ses); 4110 rc = CIFSSMBNegotiate(xid, ses);
4015 if (rc == -EAGAIN) { 4111 if (rc == -EAGAIN) {
4016 /* retry only once on 1st time connection */ 4112 /* retry only once on 1st time connection */
4017 cifs_set_credits(server, 1); 4113 set_credits(server, 1);
4018 rc = CIFSSMBNegotiate(xid, ses); 4114 rc = CIFSSMBNegotiate(xid, ses);
4019 if (rc == -EAGAIN) 4115 if (rc == -EAGAIN)
4020 rc = -EHOSTDOWN; 4116 rc = -EHOSTDOWN;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 81725e9286e9..253170dfa716 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -264,6 +264,7 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
264 pCifsFile->tlink = cifs_get_tlink(tlink); 264 pCifsFile->tlink = cifs_get_tlink(tlink);
265 mutex_init(&pCifsFile->fh_mutex); 265 mutex_init(&pCifsFile->fh_mutex);
266 INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break); 266 INIT_WORK(&pCifsFile->oplock_break, cifs_oplock_break);
267 INIT_LIST_HEAD(&pCifsFile->llist);
267 268
268 spin_lock(&cifs_file_list_lock); 269 spin_lock(&cifs_file_list_lock);
269 list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList)); 270 list_add(&pCifsFile->tlist, &(tlink_tcon(tlink)->openFileList));
@@ -334,9 +335,7 @@ void cifsFileInfo_put(struct cifsFileInfo *cifs_file)
334 * is closed anyway. 335 * is closed anyway.
335 */ 336 */
336 mutex_lock(&cifsi->lock_mutex); 337 mutex_lock(&cifsi->lock_mutex);
337 list_for_each_entry_safe(li, tmp, &cifsi->llist, llist) { 338 list_for_each_entry_safe(li, tmp, &cifs_file->llist, llist) {
338 if (li->netfid != cifs_file->netfid)
339 continue;
340 list_del(&li->llist); 339 list_del(&li->llist);
341 cifs_del_lock_waiters(li); 340 cifs_del_lock_waiters(li);
342 kfree(li); 341 kfree(li);
@@ -645,7 +644,7 @@ int cifs_closedir(struct inode *inode, struct file *file)
645} 644}
646 645
647static struct cifsLockInfo * 646static struct cifsLockInfo *
648cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 netfid) 647cifs_lock_init(__u64 offset, __u64 length, __u8 type)
649{ 648{
650 struct cifsLockInfo *lock = 649 struct cifsLockInfo *lock =
651 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL); 650 kmalloc(sizeof(struct cifsLockInfo), GFP_KERNEL);
@@ -654,7 +653,6 @@ cifs_lock_init(__u64 offset, __u64 length, __u8 type, __u16 netfid)
654 lock->offset = offset; 653 lock->offset = offset;
655 lock->length = length; 654 lock->length = length;
656 lock->type = type; 655 lock->type = type;
657 lock->netfid = netfid;
658 lock->pid = current->tgid; 656 lock->pid = current->tgid;
659 INIT_LIST_HEAD(&lock->blist); 657 INIT_LIST_HEAD(&lock->blist);
660 init_waitqueue_head(&lock->block_q); 658 init_waitqueue_head(&lock->block_q);
@@ -672,19 +670,20 @@ cifs_del_lock_waiters(struct cifsLockInfo *lock)
672} 670}
673 671
674static bool 672static bool
675__cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset, 673cifs_find_fid_lock_conflict(struct cifsFileInfo *cfile, __u64 offset,
676 __u64 length, __u8 type, __u16 netfid, 674 __u64 length, __u8 type, struct cifsFileInfo *cur,
677 struct cifsLockInfo **conf_lock) 675 struct cifsLockInfo **conf_lock)
678{ 676{
679 struct cifsLockInfo *li, *tmp; 677 struct cifsLockInfo *li;
678 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
680 679
681 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) { 680 list_for_each_entry(li, &cfile->llist, llist) {
682 if (offset + length <= li->offset || 681 if (offset + length <= li->offset ||
683 offset >= li->offset + li->length) 682 offset >= li->offset + li->length)
684 continue; 683 continue;
685 else if ((type & LOCKING_ANDX_SHARED_LOCK) && 684 else if ((type & server->vals->shared_lock_type) &&
686 ((netfid == li->netfid && current->tgid == li->pid) || 685 ((server->ops->compare_fids(cur, cfile) &&
687 type == li->type)) 686 current->tgid == li->pid) || type == li->type))
688 continue; 687 continue;
689 else { 688 else {
690 *conf_lock = li; 689 *conf_lock = li;
@@ -695,11 +694,23 @@ __cifs_find_lock_conflict(struct cifsInodeInfo *cinode, __u64 offset,
695} 694}
696 695
697static bool 696static bool
698cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock, 697cifs_find_lock_conflict(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
699 struct cifsLockInfo **conf_lock) 698 __u8 type, struct cifsLockInfo **conf_lock)
700{ 699{
701 return __cifs_find_lock_conflict(cinode, lock->offset, lock->length, 700 bool rc = false;
702 lock->type, lock->netfid, conf_lock); 701 struct cifsFileInfo *fid, *tmp;
702 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
703
704 spin_lock(&cifs_file_list_lock);
705 list_for_each_entry_safe(fid, tmp, &cinode->openFileList, flist) {
706 rc = cifs_find_fid_lock_conflict(fid, offset, length, type,
707 cfile, conf_lock);
708 if (rc)
709 break;
710 }
711 spin_unlock(&cifs_file_list_lock);
712
713 return rc;
703} 714}
704 715
705/* 716/*
@@ -710,22 +721,24 @@ cifs_find_lock_conflict(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock,
710 * the server or 1 otherwise. 721 * the server or 1 otherwise.
711 */ 722 */
712static int 723static int
713cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length, 724cifs_lock_test(struct cifsFileInfo *cfile, __u64 offset, __u64 length,
714 __u8 type, __u16 netfid, struct file_lock *flock) 725 __u8 type, struct file_lock *flock)
715{ 726{
716 int rc = 0; 727 int rc = 0;
717 struct cifsLockInfo *conf_lock; 728 struct cifsLockInfo *conf_lock;
729 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
730 struct TCP_Server_Info *server = tlink_tcon(cfile->tlink)->ses->server;
718 bool exist; 731 bool exist;
719 732
720 mutex_lock(&cinode->lock_mutex); 733 mutex_lock(&cinode->lock_mutex);
721 734
722 exist = __cifs_find_lock_conflict(cinode, offset, length, type, netfid, 735 exist = cifs_find_lock_conflict(cfile, offset, length, type,
723 &conf_lock); 736 &conf_lock);
724 if (exist) { 737 if (exist) {
725 flock->fl_start = conf_lock->offset; 738 flock->fl_start = conf_lock->offset;
726 flock->fl_end = conf_lock->offset + conf_lock->length - 1; 739 flock->fl_end = conf_lock->offset + conf_lock->length - 1;
727 flock->fl_pid = conf_lock->pid; 740 flock->fl_pid = conf_lock->pid;
728 if (conf_lock->type & LOCKING_ANDX_SHARED_LOCK) 741 if (conf_lock->type & server->vals->shared_lock_type)
729 flock->fl_type = F_RDLCK; 742 flock->fl_type = F_RDLCK;
730 else 743 else
731 flock->fl_type = F_WRLCK; 744 flock->fl_type = F_WRLCK;
@@ -739,10 +752,11 @@ cifs_lock_test(struct cifsInodeInfo *cinode, __u64 offset, __u64 length,
739} 752}
740 753
741static void 754static void
742cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock) 755cifs_lock_add(struct cifsFileInfo *cfile, struct cifsLockInfo *lock)
743{ 756{
757 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
744 mutex_lock(&cinode->lock_mutex); 758 mutex_lock(&cinode->lock_mutex);
745 list_add_tail(&lock->llist, &cinode->llist); 759 list_add_tail(&lock->llist, &cfile->llist);
746 mutex_unlock(&cinode->lock_mutex); 760 mutex_unlock(&cinode->lock_mutex);
747} 761}
748 762
@@ -753,10 +767,11 @@ cifs_lock_add(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock)
753 * 3) -EACCESS, if there is a lock that prevents us and wait is false. 767 * 3) -EACCESS, if there is a lock that prevents us and wait is false.
754 */ 768 */
755static int 769static int
756cifs_lock_add_if(struct cifsInodeInfo *cinode, struct cifsLockInfo *lock, 770cifs_lock_add_if(struct cifsFileInfo *cfile, struct cifsLockInfo *lock,
757 bool wait) 771 bool wait)
758{ 772{
759 struct cifsLockInfo *conf_lock; 773 struct cifsLockInfo *conf_lock;
774 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode);
760 bool exist; 775 bool exist;
761 int rc = 0; 776 int rc = 0;
762 777
@@ -764,9 +779,10 @@ try_again:
764 exist = false; 779 exist = false;
765 mutex_lock(&cinode->lock_mutex); 780 mutex_lock(&cinode->lock_mutex);
766 781
767 exist = cifs_find_lock_conflict(cinode, lock, &conf_lock); 782 exist = cifs_find_lock_conflict(cfile, lock->offset, lock->length,
783 lock->type, &conf_lock);
768 if (!exist && cinode->can_cache_brlcks) { 784 if (!exist && cinode->can_cache_brlcks) {
769 list_add_tail(&lock->llist, &cinode->llist); 785 list_add_tail(&lock->llist, &cfile->llist);
770 mutex_unlock(&cinode->lock_mutex); 786 mutex_unlock(&cinode->lock_mutex);
771 return rc; 787 return rc;
772 } 788 }
@@ -888,7 +904,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
888 for (i = 0; i < 2; i++) { 904 for (i = 0; i < 2; i++) {
889 cur = buf; 905 cur = buf;
890 num = 0; 906 num = 0;
891 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) { 907 list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
892 if (li->type != types[i]) 908 if (li->type != types[i])
893 continue; 909 continue;
894 cur->Pid = cpu_to_le16(li->pid); 910 cur->Pid = cpu_to_le16(li->pid);
@@ -898,7 +914,8 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
898 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32)); 914 cur->OffsetHigh = cpu_to_le32((u32)(li->offset>>32));
899 if (++num == max_num) { 915 if (++num == max_num) {
900 stored_rc = cifs_lockv(xid, tcon, cfile->netfid, 916 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
901 li->type, 0, num, buf); 917 (__u8)li->type, 0, num,
918 buf);
902 if (stored_rc) 919 if (stored_rc)
903 rc = stored_rc; 920 rc = stored_rc;
904 cur = buf; 921 cur = buf;
@@ -909,7 +926,7 @@ cifs_push_mandatory_locks(struct cifsFileInfo *cfile)
909 926
910 if (num) { 927 if (num) {
911 stored_rc = cifs_lockv(xid, tcon, cfile->netfid, 928 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
912 types[i], 0, num, buf); 929 (__u8)types[i], 0, num, buf);
913 if (stored_rc) 930 if (stored_rc)
914 rc = stored_rc; 931 rc = stored_rc;
915 } 932 }
@@ -1053,8 +1070,8 @@ cifs_push_locks(struct cifsFileInfo *cfile)
1053} 1070}
1054 1071
1055static void 1072static void
1056cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock, 1073cifs_read_flock(struct file_lock *flock, __u32 *type, int *lock, int *unlock,
1057 bool *wait_flag) 1074 bool *wait_flag, struct TCP_Server_Info *server)
1058{ 1075{
1059 if (flock->fl_flags & FL_POSIX) 1076 if (flock->fl_flags & FL_POSIX)
1060 cFYI(1, "Posix"); 1077 cFYI(1, "Posix");
@@ -1073,38 +1090,50 @@ cifs_read_flock(struct file_lock *flock, __u8 *type, int *lock, int *unlock,
1073 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE))) 1090 (~(FL_POSIX | FL_FLOCK | FL_SLEEP | FL_ACCESS | FL_LEASE)))
1074 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags); 1091 cFYI(1, "Unknown lock flags 0x%x", flock->fl_flags);
1075 1092
1076 *type = LOCKING_ANDX_LARGE_FILES; 1093 *type = server->vals->large_lock_type;
1077 if (flock->fl_type == F_WRLCK) { 1094 if (flock->fl_type == F_WRLCK) {
1078 cFYI(1, "F_WRLCK "); 1095 cFYI(1, "F_WRLCK ");
1096 *type |= server->vals->exclusive_lock_type;
1079 *lock = 1; 1097 *lock = 1;
1080 } else if (flock->fl_type == F_UNLCK) { 1098 } else if (flock->fl_type == F_UNLCK) {
1081 cFYI(1, "F_UNLCK"); 1099 cFYI(1, "F_UNLCK");
1100 *type |= server->vals->unlock_lock_type;
1082 *unlock = 1; 1101 *unlock = 1;
1083 /* Check if unlock includes more than one lock range */ 1102 /* Check if unlock includes more than one lock range */
1084 } else if (flock->fl_type == F_RDLCK) { 1103 } else if (flock->fl_type == F_RDLCK) {
1085 cFYI(1, "F_RDLCK"); 1104 cFYI(1, "F_RDLCK");
1086 *type |= LOCKING_ANDX_SHARED_LOCK; 1105 *type |= server->vals->shared_lock_type;
1087 *lock = 1; 1106 *lock = 1;
1088 } else if (flock->fl_type == F_EXLCK) { 1107 } else if (flock->fl_type == F_EXLCK) {
1089 cFYI(1, "F_EXLCK"); 1108 cFYI(1, "F_EXLCK");
1109 *type |= server->vals->exclusive_lock_type;
1090 *lock = 1; 1110 *lock = 1;
1091 } else if (flock->fl_type == F_SHLCK) { 1111 } else if (flock->fl_type == F_SHLCK) {
1092 cFYI(1, "F_SHLCK"); 1112 cFYI(1, "F_SHLCK");
1093 *type |= LOCKING_ANDX_SHARED_LOCK; 1113 *type |= server->vals->shared_lock_type;
1094 *lock = 1; 1114 *lock = 1;
1095 } else 1115 } else
1096 cFYI(1, "Unknown type of lock"); 1116 cFYI(1, "Unknown type of lock");
1097} 1117}
1098 1118
1099static int 1119static int
1100cifs_getlk(struct file *file, struct file_lock *flock, __u8 type, 1120cifs_mandatory_lock(int xid, struct cifsFileInfo *cfile, __u64 offset,
1121 __u64 length, __u32 type, int lock, int unlock, bool wait)
1122{
1123 return CIFSSMBLock(xid, tlink_tcon(cfile->tlink), cfile->netfid,
1124 current->tgid, length, offset, unlock, lock,
1125 (__u8)type, wait, 0);
1126}
1127
1128static int
1129cifs_getlk(struct file *file, struct file_lock *flock, __u32 type,
1101 bool wait_flag, bool posix_lck, int xid) 1130 bool wait_flag, bool posix_lck, int xid)
1102{ 1131{
1103 int rc = 0; 1132 int rc = 0;
1104 __u64 length = 1 + flock->fl_end - flock->fl_start; 1133 __u64 length = 1 + flock->fl_end - flock->fl_start;
1105 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data; 1134 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1106 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 1135 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1107 struct cifsInodeInfo *cinode = CIFS_I(cfile->dentry->d_inode); 1136 struct TCP_Server_Info *server = tcon->ses->server;
1108 __u16 netfid = cfile->netfid; 1137 __u16 netfid = cfile->netfid;
1109 1138
1110 if (posix_lck) { 1139 if (posix_lck) {
@@ -1114,7 +1143,7 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
1114 if (!rc) 1143 if (!rc)
1115 return rc; 1144 return rc;
1116 1145
1117 if (type & LOCKING_ANDX_SHARED_LOCK) 1146 if (type & server->vals->shared_lock_type)
1118 posix_lock_type = CIFS_RDLCK; 1147 posix_lock_type = CIFS_RDLCK;
1119 else 1148 else
1120 posix_lock_type = CIFS_WRLCK; 1149 posix_lock_type = CIFS_WRLCK;
@@ -1124,38 +1153,35 @@ cifs_getlk(struct file *file, struct file_lock *flock, __u8 type,
1124 return rc; 1153 return rc;
1125 } 1154 }
1126 1155
1127 rc = cifs_lock_test(cinode, flock->fl_start, length, type, netfid, 1156 rc = cifs_lock_test(cfile, flock->fl_start, length, type, flock);
1128 flock);
1129 if (!rc) 1157 if (!rc)
1130 return rc; 1158 return rc;
1131 1159
1132 /* BB we could chain these into one lock request BB */ 1160 /* BB we could chain these into one lock request BB */
1133 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length, 1161 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length, type,
1134 flock->fl_start, 0, 1, type, 0, 0); 1162 1, 0, false);
1135 if (rc == 0) { 1163 if (rc == 0) {
1136 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, 1164 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
1137 length, flock->fl_start, 1, 0, 1165 type, 0, 1, false);
1138 type, 0, 0);
1139 flock->fl_type = F_UNLCK; 1166 flock->fl_type = F_UNLCK;
1140 if (rc != 0) 1167 if (rc != 0)
1141 cERROR(1, "Error unlocking previously locked " 1168 cERROR(1, "Error unlocking previously locked "
1142 "range %d during test of lock", rc); 1169 "range %d during test of lock", rc);
1143 return 0; 1170 return 0;
1144 } 1171 }
1145 1172
1146 if (type & LOCKING_ANDX_SHARED_LOCK) { 1173 if (type & server->vals->shared_lock_type) {
1147 flock->fl_type = F_WRLCK; 1174 flock->fl_type = F_WRLCK;
1148 return 0; 1175 return 0;
1149 } 1176 }
1150 1177
1151 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length, 1178 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
1152 flock->fl_start, 0, 1, 1179 type | server->vals->shared_lock_type, 1, 0,
1153 type | LOCKING_ANDX_SHARED_LOCK, 0, 0); 1180 false);
1154 if (rc == 0) { 1181 if (rc == 0) {
1155 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, 1182 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
1156 length, flock->fl_start, 1, 0, 1183 type | server->vals->shared_lock_type,
1157 type | LOCKING_ANDX_SHARED_LOCK, 1184 0, 1, false);
1158 0, 0);
1159 flock->fl_type = F_RDLCK; 1185 flock->fl_type = F_RDLCK;
1160 if (rc != 0) 1186 if (rc != 0)
1161 cERROR(1, "Error unlocking previously locked " 1187 cERROR(1, "Error unlocking previously locked "
@@ -1212,15 +1238,13 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
1212 for (i = 0; i < 2; i++) { 1238 for (i = 0; i < 2; i++) {
1213 cur = buf; 1239 cur = buf;
1214 num = 0; 1240 num = 0;
1215 list_for_each_entry_safe(li, tmp, &cinode->llist, llist) { 1241 list_for_each_entry_safe(li, tmp, &cfile->llist, llist) {
1216 if (flock->fl_start > li->offset || 1242 if (flock->fl_start > li->offset ||
1217 (flock->fl_start + length) < 1243 (flock->fl_start + length) <
1218 (li->offset + li->length)) 1244 (li->offset + li->length))
1219 continue; 1245 continue;
1220 if (current->tgid != li->pid) 1246 if (current->tgid != li->pid)
1221 continue; 1247 continue;
1222 if (cfile->netfid != li->netfid)
1223 continue;
1224 if (types[i] != li->type) 1248 if (types[i] != li->type)
1225 continue; 1249 continue;
1226 if (!cinode->can_cache_brlcks) { 1250 if (!cinode->can_cache_brlcks) {
@@ -1233,7 +1257,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
1233 cpu_to_le32((u32)(li->offset>>32)); 1257 cpu_to_le32((u32)(li->offset>>32));
1234 /* 1258 /*
1235 * We need to save a lock here to let us add 1259 * We need to save a lock here to let us add
1236 * it again to the inode list if the unlock 1260 * it again to the file's list if the unlock
1237 * range request fails on the server. 1261 * range request fails on the server.
1238 */ 1262 */
1239 list_move(&li->llist, &tmp_llist); 1263 list_move(&li->llist, &tmp_llist);
@@ -1247,10 +1271,10 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
1247 * We failed on the unlock range 1271 * We failed on the unlock range
1248 * request - add all locks from 1272 * request - add all locks from
1249 * the tmp list to the head of 1273 * the tmp list to the head of
1250 * the inode list. 1274 * the file's list.
1251 */ 1275 */
1252 cifs_move_llist(&tmp_llist, 1276 cifs_move_llist(&tmp_llist,
1253 &cinode->llist); 1277 &cfile->llist);
1254 rc = stored_rc; 1278 rc = stored_rc;
1255 } else 1279 } else
1256 /* 1280 /*
@@ -1265,7 +1289,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
1265 } else { 1289 } else {
1266 /* 1290 /*
1267 * We can cache brlock requests - simply remove 1291 * We can cache brlock requests - simply remove
1268 * a lock from the inode list. 1292 * a lock from the file's list.
1269 */ 1293 */
1270 list_del(&li->llist); 1294 list_del(&li->llist);
1271 cifs_del_lock_waiters(li); 1295 cifs_del_lock_waiters(li);
@@ -1276,7 +1300,7 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
1276 stored_rc = cifs_lockv(xid, tcon, cfile->netfid, 1300 stored_rc = cifs_lockv(xid, tcon, cfile->netfid,
1277 types[i], num, 0, buf); 1301 types[i], num, 0, buf);
1278 if (stored_rc) { 1302 if (stored_rc) {
1279 cifs_move_llist(&tmp_llist, &cinode->llist); 1303 cifs_move_llist(&tmp_llist, &cfile->llist);
1280 rc = stored_rc; 1304 rc = stored_rc;
1281 } else 1305 } else
1282 cifs_free_llist(&tmp_llist); 1306 cifs_free_llist(&tmp_llist);
@@ -1289,14 +1313,14 @@ cifs_unlock_range(struct cifsFileInfo *cfile, struct file_lock *flock, int xid)
1289} 1313}
1290 1314
1291static int 1315static int
1292cifs_setlk(struct file *file, struct file_lock *flock, __u8 type, 1316cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
1293 bool wait_flag, bool posix_lck, int lock, int unlock, int xid) 1317 bool wait_flag, bool posix_lck, int lock, int unlock, int xid)
1294{ 1318{
1295 int rc = 0; 1319 int rc = 0;
1296 __u64 length = 1 + flock->fl_end - flock->fl_start; 1320 __u64 length = 1 + flock->fl_end - flock->fl_start;
1297 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data; 1321 struct cifsFileInfo *cfile = (struct cifsFileInfo *)file->private_data;
1298 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink); 1322 struct cifs_tcon *tcon = tlink_tcon(cfile->tlink);
1299 struct cifsInodeInfo *cinode = CIFS_I(file->f_path.dentry->d_inode); 1323 struct TCP_Server_Info *server = tcon->ses->server;
1300 __u16 netfid = cfile->netfid; 1324 __u16 netfid = cfile->netfid;
1301 1325
1302 if (posix_lck) { 1326 if (posix_lck) {
@@ -1306,7 +1330,7 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
1306 if (!rc || rc < 0) 1330 if (!rc || rc < 0)
1307 return rc; 1331 return rc;
1308 1332
1309 if (type & LOCKING_ANDX_SHARED_LOCK) 1333 if (type & server->vals->shared_lock_type)
1310 posix_lock_type = CIFS_RDLCK; 1334 posix_lock_type = CIFS_RDLCK;
1311 else 1335 else
1312 posix_lock_type = CIFS_WRLCK; 1336 posix_lock_type = CIFS_WRLCK;
@@ -1323,24 +1347,24 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u8 type,
1323 if (lock) { 1347 if (lock) {
1324 struct cifsLockInfo *lock; 1348 struct cifsLockInfo *lock;
1325 1349
1326 lock = cifs_lock_init(flock->fl_start, length, type, netfid); 1350 lock = cifs_lock_init(flock->fl_start, length, type);
1327 if (!lock) 1351 if (!lock)
1328 return -ENOMEM; 1352 return -ENOMEM;
1329 1353
1330 rc = cifs_lock_add_if(cinode, lock, wait_flag); 1354 rc = cifs_lock_add_if(cfile, lock, wait_flag);
1331 if (rc < 0) 1355 if (rc < 0)
1332 kfree(lock); 1356 kfree(lock);
1333 if (rc <= 0) 1357 if (rc <= 0)
1334 goto out; 1358 goto out;
1335 1359
1336 rc = CIFSSMBLock(xid, tcon, netfid, current->tgid, length, 1360 rc = cifs_mandatory_lock(xid, cfile, flock->fl_start, length,
1337 flock->fl_start, 0, 1, type, wait_flag, 0); 1361 type, 1, 0, wait_flag);
1338 if (rc) { 1362 if (rc) {
1339 kfree(lock); 1363 kfree(lock);
1340 goto out; 1364 goto out;
1341 } 1365 }
1342 1366
1343 cifs_lock_add(cinode, lock); 1367 cifs_lock_add(cfile, lock);
1344 } else if (unlock) 1368 } else if (unlock)
1345 rc = cifs_unlock_range(cfile, flock, xid); 1369 rc = cifs_unlock_range(cfile, flock, xid);
1346 1370
@@ -1361,7 +1385,7 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1361 struct cifsInodeInfo *cinode; 1385 struct cifsInodeInfo *cinode;
1362 struct cifsFileInfo *cfile; 1386 struct cifsFileInfo *cfile;
1363 __u16 netfid; 1387 __u16 netfid;
1364 __u8 type; 1388 __u32 type;
1365 1389
1366 rc = -EACCES; 1390 rc = -EACCES;
1367 xid = GetXid(); 1391 xid = GetXid();
@@ -1370,11 +1394,13 @@ int cifs_lock(struct file *file, int cmd, struct file_lock *flock)
1370 "end: %lld", cmd, flock->fl_flags, flock->fl_type, 1394 "end: %lld", cmd, flock->fl_flags, flock->fl_type,
1371 flock->fl_start, flock->fl_end); 1395 flock->fl_start, flock->fl_end);
1372 1396
1373 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag);
1374
1375 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1376 cfile = (struct cifsFileInfo *)file->private_data; 1397 cfile = (struct cifsFileInfo *)file->private_data;
1377 tcon = tlink_tcon(cfile->tlink); 1398 tcon = tlink_tcon(cfile->tlink);
1399
1400 cifs_read_flock(flock, &type, &lock, &unlock, &wait_flag,
1401 tcon->ses->server);
1402
1403 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
1378 netfid = cfile->netfid; 1404 netfid = cfile->netfid;
1379 cinode = CIFS_I(file->f_path.dentry->d_inode); 1405 cinode = CIFS_I(file->f_path.dentry->d_inode);
1380 1406
@@ -1539,10 +1565,11 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
1539struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode, 1565struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1540 bool fsuid_only) 1566 bool fsuid_only)
1541{ 1567{
1542 struct cifsFileInfo *open_file; 1568 struct cifsFileInfo *open_file, *inv_file = NULL;
1543 struct cifs_sb_info *cifs_sb; 1569 struct cifs_sb_info *cifs_sb;
1544 bool any_available = false; 1570 bool any_available = false;
1545 int rc; 1571 int rc;
1572 unsigned int refind = 0;
1546 1573
1547 /* Having a null inode here (because mapping->host was set to zero by 1574 /* Having a null inode here (because mapping->host was set to zero by
1548 the VFS or MM) should not happen but we had reports of on oops (due to 1575 the VFS or MM) should not happen but we had reports of on oops (due to
@@ -1562,40 +1589,25 @@ struct cifsFileInfo *find_writable_file(struct cifsInodeInfo *cifs_inode,
1562 1589
1563 spin_lock(&cifs_file_list_lock); 1590 spin_lock(&cifs_file_list_lock);
1564refind_writable: 1591refind_writable:
1592 if (refind > MAX_REOPEN_ATT) {
1593 spin_unlock(&cifs_file_list_lock);
1594 return NULL;
1595 }
1565 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) { 1596 list_for_each_entry(open_file, &cifs_inode->openFileList, flist) {
1566 if (!any_available && open_file->pid != current->tgid) 1597 if (!any_available && open_file->pid != current->tgid)
1567 continue; 1598 continue;
1568 if (fsuid_only && open_file->uid != current_fsuid()) 1599 if (fsuid_only && open_file->uid != current_fsuid())
1569 continue; 1600 continue;
1570 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) { 1601 if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
1571 cifsFileInfo_get(open_file);
1572
1573 if (!open_file->invalidHandle) { 1602 if (!open_file->invalidHandle) {
1574 /* found a good writable file */ 1603 /* found a good writable file */
1604 cifsFileInfo_get(open_file);
1575 spin_unlock(&cifs_file_list_lock); 1605 spin_unlock(&cifs_file_list_lock);
1576 return open_file; 1606 return open_file;
1607 } else {
1608 if (!inv_file)
1609 inv_file = open_file;
1577 } 1610 }
1578
1579 spin_unlock(&cifs_file_list_lock);
1580
1581 /* Had to unlock since following call can block */
1582 rc = cifs_reopen_file(open_file, false);
1583 if (!rc)
1584 return open_file;
1585
1586 /* if it fails, try another handle if possible */
1587 cFYI(1, "wp failed on reopen file");
1588 cifsFileInfo_put(open_file);
1589
1590 spin_lock(&cifs_file_list_lock);
1591
1592 /* else we simply continue to the next entry. Thus
1593 we do not loop on reopen errors. If we
1594 can not reopen the file, for example if we
1595 reconnected to a server with another client
1596 racing to delete or lock the file we would not
1597 make progress if we restarted before the beginning
1598 of the loop here. */
1599 } 1611 }
1600 } 1612 }
1601 /* couldn't find useable FH with same pid, try any available */ 1613 /* couldn't find useable FH with same pid, try any available */
@@ -1603,7 +1615,30 @@ refind_writable:
1603 any_available = true; 1615 any_available = true;
1604 goto refind_writable; 1616 goto refind_writable;
1605 } 1617 }
1618
1619 if (inv_file) {
1620 any_available = false;
1621 cifsFileInfo_get(inv_file);
1622 }
1623
1606 spin_unlock(&cifs_file_list_lock); 1624 spin_unlock(&cifs_file_list_lock);
1625
1626 if (inv_file) {
1627 rc = cifs_reopen_file(inv_file, false);
1628 if (!rc)
1629 return inv_file;
1630 else {
1631 spin_lock(&cifs_file_list_lock);
1632 list_move_tail(&inv_file->flist,
1633 &cifs_inode->openFileList);
1634 spin_unlock(&cifs_file_list_lock);
1635 cifsFileInfo_put(inv_file);
1636 spin_lock(&cifs_file_list_lock);
1637 ++refind;
1638 goto refind_writable;
1639 }
1640 }
1641
1607 return NULL; 1642 return NULL;
1608} 1643}
1609 1644
@@ -2339,24 +2374,224 @@ ssize_t cifs_strict_writev(struct kiocb *iocb, const struct iovec *iov,
2339 return cifs_user_writev(iocb, iov, nr_segs, pos); 2374 return cifs_user_writev(iocb, iov, nr_segs, pos);
2340} 2375}
2341 2376
2377static struct cifs_readdata *
2378cifs_readdata_alloc(unsigned int nr_vecs, work_func_t complete)
2379{
2380 struct cifs_readdata *rdata;
2381
2382 rdata = kzalloc(sizeof(*rdata) +
2383 sizeof(struct kvec) * nr_vecs, GFP_KERNEL);
2384 if (rdata != NULL) {
2385 kref_init(&rdata->refcount);
2386 INIT_LIST_HEAD(&rdata->list);
2387 init_completion(&rdata->done);
2388 INIT_WORK(&rdata->work, complete);
2389 INIT_LIST_HEAD(&rdata->pages);
2390 }
2391 return rdata;
2392}
2393
2394void
2395cifs_readdata_release(struct kref *refcount)
2396{
2397 struct cifs_readdata *rdata = container_of(refcount,
2398 struct cifs_readdata, refcount);
2399
2400 if (rdata->cfile)
2401 cifsFileInfo_put(rdata->cfile);
2402
2403 kfree(rdata);
2404}
2405
2406static int
2407cifs_read_allocate_pages(struct list_head *list, unsigned int npages)
2408{
2409 int rc = 0;
2410 struct page *page, *tpage;
2411 unsigned int i;
2412
2413 for (i = 0; i < npages; i++) {
2414 page = alloc_page(GFP_KERNEL|__GFP_HIGHMEM);
2415 if (!page) {
2416 rc = -ENOMEM;
2417 break;
2418 }
2419 list_add(&page->lru, list);
2420 }
2421
2422 if (rc) {
2423 list_for_each_entry_safe(page, tpage, list, lru) {
2424 list_del(&page->lru);
2425 put_page(page);
2426 }
2427 }
2428 return rc;
2429}
2430
2431static void
2432cifs_uncached_readdata_release(struct kref *refcount)
2433{
2434 struct page *page, *tpage;
2435 struct cifs_readdata *rdata = container_of(refcount,
2436 struct cifs_readdata, refcount);
2437
2438 list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
2439 list_del(&page->lru);
2440 put_page(page);
2441 }
2442 cifs_readdata_release(refcount);
2443}
2444
2445static int
2446cifs_retry_async_readv(struct cifs_readdata *rdata)
2447{
2448 int rc;
2449
2450 do {
2451 if (rdata->cfile->invalidHandle) {
2452 rc = cifs_reopen_file(rdata->cfile, true);
2453 if (rc != 0)
2454 continue;
2455 }
2456 rc = cifs_async_readv(rdata);
2457 } while (rc == -EAGAIN);
2458
2459 return rc;
2460}
2461
2462/**
2463 * cifs_readdata_to_iov - copy data from pages in response to an iovec
2464 * @rdata: the readdata response with list of pages holding data
2465 * @iov: vector in which we should copy the data
2466 * @nr_segs: number of segments in vector
2467 * @offset: offset into file of the first iovec
2468 * @copied: used to return the amount of data copied to the iov
2469 *
2470 * This function copies data from a list of pages in a readdata response into
2471 * an array of iovecs. It will first calculate where the data should go
2472 * based on the info in the readdata and then copy the data into that spot.
2473 */
2474static ssize_t
2475cifs_readdata_to_iov(struct cifs_readdata *rdata, const struct iovec *iov,
2476 unsigned long nr_segs, loff_t offset, ssize_t *copied)
2477{
2478 int rc = 0;
2479 struct iov_iter ii;
2480 size_t pos = rdata->offset - offset;
2481 struct page *page, *tpage;
2482 ssize_t remaining = rdata->bytes;
2483 unsigned char *pdata;
2484
2485 /* set up iov_iter and advance to the correct offset */
2486 iov_iter_init(&ii, iov, nr_segs, iov_length(iov, nr_segs), 0);
2487 iov_iter_advance(&ii, pos);
2488
2489 *copied = 0;
2490 list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
2491 ssize_t copy;
2492
2493 /* copy a whole page or whatever's left */
2494 copy = min_t(ssize_t, remaining, PAGE_SIZE);
2495
2496 /* ...but limit it to whatever space is left in the iov */
2497 copy = min_t(ssize_t, copy, iov_iter_count(&ii));
2498
2499 /* go while there's data to be copied and no errors */
2500 if (copy && !rc) {
2501 pdata = kmap(page);
2502 rc = memcpy_toiovecend(ii.iov, pdata, ii.iov_offset,
2503 (int)copy);
2504 kunmap(page);
2505 if (!rc) {
2506 *copied += copy;
2507 remaining -= copy;
2508 iov_iter_advance(&ii, copy);
2509 }
2510 }
2511
2512 list_del(&page->lru);
2513 put_page(page);
2514 }
2515
2516 return rc;
2517}
2518
2519static void
2520cifs_uncached_readv_complete(struct work_struct *work)
2521{
2522 struct cifs_readdata *rdata = container_of(work,
2523 struct cifs_readdata, work);
2524
2525 /* if the result is non-zero then the pages weren't kmapped */
2526 if (rdata->result == 0) {
2527 struct page *page;
2528
2529 list_for_each_entry(page, &rdata->pages, lru)
2530 kunmap(page);
2531 }
2532
2533 complete(&rdata->done);
2534 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2535}
2536
2537static int
2538cifs_uncached_read_marshal_iov(struct cifs_readdata *rdata,
2539 unsigned int remaining)
2540{
2541 int len = 0;
2542 struct page *page, *tpage;
2543
2544 rdata->nr_iov = 1;
2545 list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
2546 if (remaining >= PAGE_SIZE) {
2547 /* enough data to fill the page */
2548 rdata->iov[rdata->nr_iov].iov_base = kmap(page);
2549 rdata->iov[rdata->nr_iov].iov_len = PAGE_SIZE;
2550 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
2551 rdata->nr_iov, page->index,
2552 rdata->iov[rdata->nr_iov].iov_base,
2553 rdata->iov[rdata->nr_iov].iov_len);
2554 ++rdata->nr_iov;
2555 len += PAGE_SIZE;
2556 remaining -= PAGE_SIZE;
2557 } else if (remaining > 0) {
2558 /* enough for partial page, fill and zero the rest */
2559 rdata->iov[rdata->nr_iov].iov_base = kmap(page);
2560 rdata->iov[rdata->nr_iov].iov_len = remaining;
2561 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
2562 rdata->nr_iov, page->index,
2563 rdata->iov[rdata->nr_iov].iov_base,
2564 rdata->iov[rdata->nr_iov].iov_len);
2565 memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
2566 '\0', PAGE_SIZE - remaining);
2567 ++rdata->nr_iov;
2568 len += remaining;
2569 remaining = 0;
2570 } else {
2571 /* no need to hold page hostage */
2572 list_del(&page->lru);
2573 put_page(page);
2574 }
2575 }
2576
2577 return len;
2578}
2579
2342static ssize_t 2580static ssize_t
2343cifs_iovec_read(struct file *file, const struct iovec *iov, 2581cifs_iovec_read(struct file *file, const struct iovec *iov,
2344 unsigned long nr_segs, loff_t *poffset) 2582 unsigned long nr_segs, loff_t *poffset)
2345{ 2583{
2346 int rc; 2584 ssize_t rc;
2347 int xid;
2348 ssize_t total_read;
2349 unsigned int bytes_read = 0;
2350 size_t len, cur_len; 2585 size_t len, cur_len;
2351 int iov_offset = 0; 2586 ssize_t total_read = 0;
2587 loff_t offset = *poffset;
2588 unsigned int npages;
2352 struct cifs_sb_info *cifs_sb; 2589 struct cifs_sb_info *cifs_sb;
2353 struct cifs_tcon *pTcon; 2590 struct cifs_tcon *tcon;
2354 struct cifsFileInfo *open_file; 2591 struct cifsFileInfo *open_file;
2355 struct smb_com_read_rsp *pSMBr; 2592 struct cifs_readdata *rdata, *tmp;
2356 struct cifs_io_parms io_parms; 2593 struct list_head rdata_list;
2357 char *read_data; 2594 pid_t pid;
2358 unsigned int rsize;
2359 __u32 pid;
2360 2595
2361 if (!nr_segs) 2596 if (!nr_segs)
2362 return 0; 2597 return 0;
@@ -2365,14 +2600,10 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
2365 if (!len) 2600 if (!len)
2366 return 0; 2601 return 0;
2367 2602
2368 xid = GetXid(); 2603 INIT_LIST_HEAD(&rdata_list);
2369 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb); 2604 cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
2370
2371 /* FIXME: set up handlers for larger reads and/or convert to async */
2372 rsize = min_t(unsigned int, cifs_sb->rsize, CIFSMaxBufSize);
2373
2374 open_file = file->private_data; 2605 open_file = file->private_data;
2375 pTcon = tlink_tcon(open_file->tlink); 2606 tcon = tlink_tcon(open_file->tlink);
2376 2607
2377 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD) 2608 if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
2378 pid = open_file->pid; 2609 pid = open_file->pid;
@@ -2382,56 +2613,78 @@ cifs_iovec_read(struct file *file, const struct iovec *iov,
2382 if ((file->f_flags & O_ACCMODE) == O_WRONLY) 2613 if ((file->f_flags & O_ACCMODE) == O_WRONLY)
2383 cFYI(1, "attempting read on write only file instance"); 2614 cFYI(1, "attempting read on write only file instance");
2384 2615
2385 for (total_read = 0; total_read < len; total_read += bytes_read) { 2616 do {
2386 cur_len = min_t(const size_t, len - total_read, rsize); 2617 cur_len = min_t(const size_t, len - total_read, cifs_sb->rsize);
2387 rc = -EAGAIN; 2618 npages = DIV_ROUND_UP(cur_len, PAGE_SIZE);
2388 read_data = NULL;
2389 2619
2390 while (rc == -EAGAIN) { 2620 /* allocate a readdata struct */
2391 int buf_type = CIFS_NO_BUFFER; 2621 rdata = cifs_readdata_alloc(npages,
2392 if (open_file->invalidHandle) { 2622 cifs_uncached_readv_complete);
2393 rc = cifs_reopen_file(open_file, true); 2623 if (!rdata) {
2394 if (rc != 0) 2624 rc = -ENOMEM;
2395 break; 2625 goto error;
2396 }
2397 io_parms.netfid = open_file->netfid;
2398 io_parms.pid = pid;
2399 io_parms.tcon = pTcon;
2400 io_parms.offset = *poffset;
2401 io_parms.length = cur_len;
2402 rc = CIFSSMBRead(xid, &io_parms, &bytes_read,
2403 &read_data, &buf_type);
2404 pSMBr = (struct smb_com_read_rsp *)read_data;
2405 if (read_data) {
2406 char *data_offset = read_data + 4 +
2407 le16_to_cpu(pSMBr->DataOffset);
2408 if (memcpy_toiovecend(iov, data_offset,
2409 iov_offset, bytes_read))
2410 rc = -EFAULT;
2411 if (buf_type == CIFS_SMALL_BUFFER)
2412 cifs_small_buf_release(read_data);
2413 else if (buf_type == CIFS_LARGE_BUFFER)
2414 cifs_buf_release(read_data);
2415 read_data = NULL;
2416 iov_offset += bytes_read;
2417 }
2418 } 2626 }
2419 2627
2420 if (rc || (bytes_read == 0)) { 2628 rc = cifs_read_allocate_pages(&rdata->pages, npages);
2421 if (total_read) { 2629 if (rc)
2422 break; 2630 goto error;
2423 } else { 2631
2424 FreeXid(xid); 2632 rdata->cfile = cifsFileInfo_get(open_file);
2425 return rc; 2633 rdata->offset = offset;
2634 rdata->bytes = cur_len;
2635 rdata->pid = pid;
2636 rdata->marshal_iov = cifs_uncached_read_marshal_iov;
2637
2638 rc = cifs_retry_async_readv(rdata);
2639error:
2640 if (rc) {
2641 kref_put(&rdata->refcount,
2642 cifs_uncached_readdata_release);
2643 break;
2644 }
2645
2646 list_add_tail(&rdata->list, &rdata_list);
2647 offset += cur_len;
2648 len -= cur_len;
2649 } while (len > 0);
2650
2651 /* if at least one read request send succeeded, then reset rc */
2652 if (!list_empty(&rdata_list))
2653 rc = 0;
2654
2655 /* the loop below should proceed in the order of increasing offsets */
2656restart_loop:
2657 list_for_each_entry_safe(rdata, tmp, &rdata_list, list) {
2658 if (!rc) {
2659 ssize_t copied;
2660
2661 /* FIXME: freezable sleep too? */
2662 rc = wait_for_completion_killable(&rdata->done);
2663 if (rc)
2664 rc = -EINTR;
2665 else if (rdata->result)
2666 rc = rdata->result;
2667 else {
2668 rc = cifs_readdata_to_iov(rdata, iov,
2669 nr_segs, *poffset,
2670 &copied);
2671 total_read += copied;
2672 }
2673
2674 /* resend call if it's a retryable error */
2675 if (rc == -EAGAIN) {
2676 rc = cifs_retry_async_readv(rdata);
2677 goto restart_loop;
2426 } 2678 }
2427 } else {
2428 cifs_stats_bytes_read(pTcon, bytes_read);
2429 *poffset += bytes_read;
2430 } 2679 }
2680 list_del_init(&rdata->list);
2681 kref_put(&rdata->refcount, cifs_uncached_readdata_release);
2431 } 2682 }
2432 2683
2433 FreeXid(xid); 2684 cifs_stats_bytes_read(tcon, total_read);
2434 return total_read; 2685 *poffset += total_read;
2686
2687 return total_read ? total_read : rc;
2435} 2688}
2436 2689
2437ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov, 2690ssize_t cifs_user_readv(struct kiocb *iocb, const struct iovec *iov,
@@ -2606,6 +2859,100 @@ int cifs_file_mmap(struct file *file, struct vm_area_struct *vma)
2606 return rc; 2859 return rc;
2607} 2860}
2608 2861
2862static void
2863cifs_readv_complete(struct work_struct *work)
2864{
2865 struct cifs_readdata *rdata = container_of(work,
2866 struct cifs_readdata, work);
2867 struct page *page, *tpage;
2868
2869 list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
2870 list_del(&page->lru);
2871 lru_cache_add_file(page);
2872
2873 if (rdata->result == 0) {
2874 kunmap(page);
2875 flush_dcache_page(page);
2876 SetPageUptodate(page);
2877 }
2878
2879 unlock_page(page);
2880
2881 if (rdata->result == 0)
2882 cifs_readpage_to_fscache(rdata->mapping->host, page);
2883
2884 page_cache_release(page);
2885 }
2886 kref_put(&rdata->refcount, cifs_readdata_release);
2887}
2888
2889static int
2890cifs_readpages_marshal_iov(struct cifs_readdata *rdata, unsigned int remaining)
2891{
2892 int len = 0;
2893 struct page *page, *tpage;
2894 u64 eof;
2895 pgoff_t eof_index;
2896
2897 /* determine the eof that the server (probably) has */
2898 eof = CIFS_I(rdata->mapping->host)->server_eof;
2899 eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
2900 cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
2901
2902 rdata->nr_iov = 1;
2903 list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
2904 if (remaining >= PAGE_CACHE_SIZE) {
2905 /* enough data to fill the page */
2906 rdata->iov[rdata->nr_iov].iov_base = kmap(page);
2907 rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
2908 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
2909 rdata->nr_iov, page->index,
2910 rdata->iov[rdata->nr_iov].iov_base,
2911 rdata->iov[rdata->nr_iov].iov_len);
2912 ++rdata->nr_iov;
2913 len += PAGE_CACHE_SIZE;
2914 remaining -= PAGE_CACHE_SIZE;
2915 } else if (remaining > 0) {
2916 /* enough for partial page, fill and zero the rest */
2917 rdata->iov[rdata->nr_iov].iov_base = kmap(page);
2918 rdata->iov[rdata->nr_iov].iov_len = remaining;
2919 cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
2920 rdata->nr_iov, page->index,
2921 rdata->iov[rdata->nr_iov].iov_base,
2922 rdata->iov[rdata->nr_iov].iov_len);
2923 memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
2924 '\0', PAGE_CACHE_SIZE - remaining);
2925 ++rdata->nr_iov;
2926 len += remaining;
2927 remaining = 0;
2928 } else if (page->index > eof_index) {
2929 /*
2930 * The VFS will not try to do readahead past the
2931 * i_size, but it's possible that we have outstanding
2932 * writes with gaps in the middle and the i_size hasn't
2933 * caught up yet. Populate those with zeroed out pages
2934 * to prevent the VFS from repeatedly attempting to
2935 * fill them until the writes are flushed.
2936 */
2937 zero_user(page, 0, PAGE_CACHE_SIZE);
2938 list_del(&page->lru);
2939 lru_cache_add_file(page);
2940 flush_dcache_page(page);
2941 SetPageUptodate(page);
2942 unlock_page(page);
2943 page_cache_release(page);
2944 } else {
2945 /* no need to hold page hostage */
2946 list_del(&page->lru);
2947 lru_cache_add_file(page);
2948 unlock_page(page);
2949 page_cache_release(page);
2950 }
2951 }
2952
2953 return len;
2954}
2955
2609static int cifs_readpages(struct file *file, struct address_space *mapping, 2956static int cifs_readpages(struct file *file, struct address_space *mapping,
2610 struct list_head *page_list, unsigned num_pages) 2957 struct list_head *page_list, unsigned num_pages)
2611{ 2958{
@@ -2708,7 +3055,7 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
2708 nr_pages++; 3055 nr_pages++;
2709 } 3056 }
2710 3057
2711 rdata = cifs_readdata_alloc(nr_pages); 3058 rdata = cifs_readdata_alloc(nr_pages, cifs_readv_complete);
2712 if (!rdata) { 3059 if (!rdata) {
2713 /* best to give up if we're out of mem */ 3060 /* best to give up if we're out of mem */
2714 list_for_each_entry_safe(page, tpage, &tmplist, lru) { 3061 list_for_each_entry_safe(page, tpage, &tmplist, lru) {
@@ -2722,24 +3069,16 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
2722 } 3069 }
2723 3070
2724 spin_lock(&cifs_file_list_lock); 3071 spin_lock(&cifs_file_list_lock);
2725 cifsFileInfo_get(open_file);
2726 spin_unlock(&cifs_file_list_lock); 3072 spin_unlock(&cifs_file_list_lock);
2727 rdata->cfile = open_file; 3073 rdata->cfile = cifsFileInfo_get(open_file);
2728 rdata->mapping = mapping; 3074 rdata->mapping = mapping;
2729 rdata->offset = offset; 3075 rdata->offset = offset;
2730 rdata->bytes = bytes; 3076 rdata->bytes = bytes;
2731 rdata->pid = pid; 3077 rdata->pid = pid;
3078 rdata->marshal_iov = cifs_readpages_marshal_iov;
2732 list_splice_init(&tmplist, &rdata->pages); 3079 list_splice_init(&tmplist, &rdata->pages);
2733 3080
2734 do { 3081 rc = cifs_retry_async_readv(rdata);
2735 if (open_file->invalidHandle) {
2736 rc = cifs_reopen_file(open_file, true);
2737 if (rc != 0)
2738 continue;
2739 }
2740 rc = cifs_async_readv(rdata);
2741 } while (rc == -EAGAIN);
2742
2743 if (rc != 0) { 3082 if (rc != 0) {
2744 list_for_each_entry_safe(page, tpage, &rdata->pages, 3083 list_for_each_entry_safe(page, tpage, &rdata->pages,
2745 lru) { 3084 lru) {
@@ -2748,9 +3087,11 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
2748 unlock_page(page); 3087 unlock_page(page);
2749 page_cache_release(page); 3088 page_cache_release(page);
2750 } 3089 }
2751 cifs_readdata_free(rdata); 3090 kref_put(&rdata->refcount, cifs_readdata_release);
2752 break; 3091 break;
2753 } 3092 }
3093
3094 kref_put(&rdata->refcount, cifs_readdata_release);
2754 } 3095 }
2755 3096
2756 return rc; 3097 return rc;
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 4221b5e48a42..6d2667f0c98c 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -51,7 +51,15 @@ long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
51 cifs_sb = CIFS_SB(inode->i_sb); 51 cifs_sb = CIFS_SB(inode->i_sb);
52 52
53 switch (command) { 53 switch (command) {
54 static bool warned = false;
54 case CIFS_IOC_CHECKUMOUNT: 55 case CIFS_IOC_CHECKUMOUNT:
56 if (!warned) {
57 warned = true;
58 cERROR(1, "the CIFS_IOC_CHECKMOUNT ioctl will "
59 "be deprecated in 3.7. Please "
60 "migrate away from the use of "
61 "umount.cifs");
62 }
55 cFYI(1, "User unmount attempted"); 63 cFYI(1, "User unmount attempted");
56 if (cifs_sb->mnt_uid == current_uid()) 64 if (cifs_sb->mnt_uid == current_uid())
57 rc = 0; 65 rc = 0;
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index c29d1aa2c54f..e2552d2b2e42 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -306,8 +306,6 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
306 const struct cifs_tcon *treeCon, int word_count 306 const struct cifs_tcon *treeCon, int word_count
307 /* length of fixed section (word count) in two byte units */) 307 /* length of fixed section (word count) in two byte units */)
308{ 308{
309 struct list_head *temp_item;
310 struct cifs_ses *ses;
311 char *temp = (char *) buffer; 309 char *temp = (char *) buffer;
312 310
313 memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */ 311 memset(temp, 0, 256); /* bigger than MAX_CIFS_HDR_SIZE */
@@ -337,51 +335,6 @@ header_assemble(struct smb_hdr *buffer, char smb_command /* command */ ,
337 /* Uid is not converted */ 335 /* Uid is not converted */
338 buffer->Uid = treeCon->ses->Suid; 336 buffer->Uid = treeCon->ses->Suid;
339 buffer->Mid = GetNextMid(treeCon->ses->server); 337 buffer->Mid = GetNextMid(treeCon->ses->server);
340 if (multiuser_mount != 0) {
341 /* For the multiuser case, there are few obvious technically */
342 /* possible mechanisms to match the local linux user (uid) */
343 /* to a valid remote smb user (smb_uid): */
344 /* 1) Query Winbind (or other local pam/nss daemon */
345 /* for userid/password/logon_domain or credential */
346 /* 2) Query Winbind for uid to sid to username mapping */
347 /* and see if we have a matching password for existing*/
348 /* session for that user perhas getting password by */
349 /* adding a new pam_cifs module that stores passwords */
350 /* so that the cifs vfs can get at that for all logged*/
351 /* on users */
352 /* 3) (Which is the mechanism we have chosen) */
353 /* Search through sessions to the same server for a */
354 /* a match on the uid that was passed in on mount */
355 /* with the current processes uid (or euid?) and use */
356 /* that smb uid. If no existing smb session for */
357 /* that uid found, use the default smb session ie */
358 /* the smb session for the volume mounted which is */
359 /* the same as would be used if the multiuser mount */
360 /* flag were disabled. */
361
362 /* BB Add support for establishing new tCon and SMB Session */
363 /* with userid/password pairs found on the smb session */
364 /* for other target tcp/ip addresses BB */
365 if (current_fsuid() != treeCon->ses->linux_uid) {
366 cFYI(1, "Multiuser mode and UID "
367 "did not match tcon uid");
368 spin_lock(&cifs_tcp_ses_lock);
369 list_for_each(temp_item, &treeCon->ses->server->smb_ses_list) {
370 ses = list_entry(temp_item, struct cifs_ses, smb_ses_list);
371 if (ses->linux_uid == current_fsuid()) {
372 if (ses->server == treeCon->ses->server) {
373 cFYI(1, "found matching uid substitute right smb_uid");
374 buffer->Uid = ses->Suid;
375 break;
376 } else {
377 /* BB eventually call cifs_setup_session here */
378 cFYI(1, "local UID found but no smb sess with this server exists");
379 }
380 }
381 }
382 spin_unlock(&cifs_tcp_ses_lock);
383 }
384 }
385 } 338 }
386 if (treeCon->Flags & SMB_SHARE_IS_IN_DFS) 339 if (treeCon->Flags & SMB_SHARE_IS_IN_DFS)
387 buffer->Flags2 |= SMBFLG2_DFS; 340 buffer->Flags2 |= SMBFLG2_DFS;
@@ -700,22 +653,3 @@ backup_cred(struct cifs_sb_info *cifs_sb)
700 653
701 return false; 654 return false;
702} 655}
703
704void
705cifs_add_credits(struct TCP_Server_Info *server, const unsigned int add)
706{
707 spin_lock(&server->req_lock);
708 server->credits += add;
709 server->in_flight--;
710 spin_unlock(&server->req_lock);
711 wake_up(&server->request_q);
712}
713
714void
715cifs_set_credits(struct TCP_Server_Info *server, const int val)
716{
717 spin_lock(&server->req_lock);
718 server->credits = val;
719 server->oplocks = val > 1 ? enable_oplocks : false;
720 spin_unlock(&server->req_lock);
721}
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index e2bbc683e018..0a8224d1c4c5 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -219,6 +219,7 @@ int get_symlink_reparse_path(char *full_path, struct cifs_sb_info *cifs_sb,
219 219
220static int initiate_cifs_search(const int xid, struct file *file) 220static int initiate_cifs_search(const int xid, struct file *file)
221{ 221{
222 __u16 search_flags;
222 int rc = 0; 223 int rc = 0;
223 char *full_path = NULL; 224 char *full_path = NULL;
224 struct cifsFileInfo *cifsFile; 225 struct cifsFileInfo *cifsFile;
@@ -270,8 +271,12 @@ ffirst_retry:
270 cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO; 271 cifsFile->srch_inf.info_level = SMB_FIND_FILE_DIRECTORY_INFO;
271 } 272 }
272 273
274 search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
275 if (backup_cred(cifs_sb))
276 search_flags |= CIFS_SEARCH_BACKUP_SEARCH;
277
273 rc = CIFSFindFirst(xid, pTcon, full_path, cifs_sb->local_nls, 278 rc = CIFSFindFirst(xid, pTcon, full_path, cifs_sb->local_nls,
274 &cifsFile->netfid, &cifsFile->srch_inf, 279 &cifsFile->netfid, search_flags, &cifsFile->srch_inf,
275 cifs_sb->mnt_cifs_flags & 280 cifs_sb->mnt_cifs_flags &
276 CIFS_MOUNT_MAP_SPECIAL_CHR, CIFS_DIR_SEP(cifs_sb)); 281 CIFS_MOUNT_MAP_SPECIAL_CHR, CIFS_DIR_SEP(cifs_sb));
277 if (rc == 0) 282 if (rc == 0)
@@ -502,11 +507,13 @@ static int cifs_save_resume_key(const char *current_entry,
502static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon, 507static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
503 struct file *file, char **ppCurrentEntry, int *num_to_ret) 508 struct file *file, char **ppCurrentEntry, int *num_to_ret)
504{ 509{
510 __u16 search_flags;
505 int rc = 0; 511 int rc = 0;
506 int pos_in_buf = 0; 512 int pos_in_buf = 0;
507 loff_t first_entry_in_buffer; 513 loff_t first_entry_in_buffer;
508 loff_t index_to_find = file->f_pos; 514 loff_t index_to_find = file->f_pos;
509 struct cifsFileInfo *cifsFile = file->private_data; 515 struct cifsFileInfo *cifsFile = file->private_data;
516 struct cifs_sb_info *cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
510 /* check if index in the buffer */ 517 /* check if index in the buffer */
511 518
512 if ((cifsFile == NULL) || (ppCurrentEntry == NULL) || 519 if ((cifsFile == NULL) || (ppCurrentEntry == NULL) ||
@@ -560,10 +567,14 @@ static int find_cifs_entry(const int xid, struct cifs_tcon *pTcon,
560 cifsFile); 567 cifsFile);
561 } 568 }
562 569
570 search_flags = CIFS_SEARCH_CLOSE_AT_END | CIFS_SEARCH_RETURN_RESUME;
571 if (backup_cred(cifs_sb))
572 search_flags |= CIFS_SEARCH_BACKUP_SEARCH;
573
563 while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) && 574 while ((index_to_find >= cifsFile->srch_inf.index_of_last_entry) &&
564 (rc == 0) && !cifsFile->srch_inf.endOfSearch) { 575 (rc == 0) && !cifsFile->srch_inf.endOfSearch) {
565 cFYI(1, "calling findnext2"); 576 cFYI(1, "calling findnext2");
566 rc = CIFSFindNext(xid, pTcon, cifsFile->netfid, 577 rc = CIFSFindNext(xid, pTcon, cifsFile->netfid, search_flags,
567 &cifsFile->srch_inf); 578 &cifsFile->srch_inf);
568 /* FindFirst/Next set last_entry to NULL on malformed reply */ 579 /* FindFirst/Next set last_entry to NULL on malformed reply */
569 if (cifsFile->srch_inf.last_entry) 580 if (cifsFile->srch_inf.last_entry)
diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
new file mode 100644
index 000000000000..d9d615fbed3f
--- /dev/null
+++ b/fs/cifs/smb1ops.c
@@ -0,0 +1,154 @@
1/*
2 * SMB1 (CIFS) version specific operations
3 *
4 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
5 *
6 * This library is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License v2 as published
8 * by the Free Software Foundation.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
13 * the GNU Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public License
16 * along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include "cifsglob.h"
21#include "cifsproto.h"
22#include "cifs_debug.h"
23#include "cifspdu.h"
24
25/*
26 * An NT cancel request header looks just like the original request except:
27 *
28 * The Command is SMB_COM_NT_CANCEL
29 * The WordCount is zeroed out
30 * The ByteCount is zeroed out
31 *
32 * This function mangles an existing request buffer into a
33 * SMB_COM_NT_CANCEL request and then sends it.
34 */
35static int
36send_nt_cancel(struct TCP_Server_Info *server, void *buf,
37 struct mid_q_entry *mid)
38{
39 int rc = 0;
40 struct smb_hdr *in_buf = (struct smb_hdr *)buf;
41
42 /* -4 for RFC1001 length and +2 for BCC field */
43 in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2);
44 in_buf->Command = SMB_COM_NT_CANCEL;
45 in_buf->WordCount = 0;
46 put_bcc(0, in_buf);
47
48 mutex_lock(&server->srv_mutex);
49 rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
50 if (rc) {
51 mutex_unlock(&server->srv_mutex);
52 return rc;
53 }
54 rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
55 mutex_unlock(&server->srv_mutex);
56
57 cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
58 in_buf->Mid, rc);
59
60 return rc;
61}
62
63static bool
64cifs_compare_fids(struct cifsFileInfo *ob1, struct cifsFileInfo *ob2)
65{
66 return ob1->netfid == ob2->netfid;
67}
68
69static unsigned int
70cifs_read_data_offset(char *buf)
71{
72 READ_RSP *rsp = (READ_RSP *)buf;
73 return le16_to_cpu(rsp->DataOffset);
74}
75
76static unsigned int
77cifs_read_data_length(char *buf)
78{
79 READ_RSP *rsp = (READ_RSP *)buf;
80 return (le16_to_cpu(rsp->DataLengthHigh) << 16) +
81 le16_to_cpu(rsp->DataLength);
82}
83
84static struct mid_q_entry *
85cifs_find_mid(struct TCP_Server_Info *server, char *buffer)
86{
87 struct smb_hdr *buf = (struct smb_hdr *)buffer;
88 struct mid_q_entry *mid;
89
90 spin_lock(&GlobalMid_Lock);
91 list_for_each_entry(mid, &server->pending_mid_q, qhead) {
92 if (mid->mid == buf->Mid &&
93 mid->mid_state == MID_REQUEST_SUBMITTED &&
94 le16_to_cpu(mid->command) == buf->Command) {
95 spin_unlock(&GlobalMid_Lock);
96 return mid;
97 }
98 }
99 spin_unlock(&GlobalMid_Lock);
100 return NULL;
101}
102
103static void
104cifs_add_credits(struct TCP_Server_Info *server, const unsigned int add)
105{
106 spin_lock(&server->req_lock);
107 server->credits += add;
108 server->in_flight--;
109 spin_unlock(&server->req_lock);
110 wake_up(&server->request_q);
111}
112
113static void
114cifs_set_credits(struct TCP_Server_Info *server, const int val)
115{
116 spin_lock(&server->req_lock);
117 server->credits = val;
118 server->oplocks = val > 1 ? enable_oplocks : false;
119 spin_unlock(&server->req_lock);
120}
121
122static int *
123cifs_get_credits_field(struct TCP_Server_Info *server)
124{
125 return &server->credits;
126}
127
128struct smb_version_operations smb1_operations = {
129 .send_cancel = send_nt_cancel,
130 .compare_fids = cifs_compare_fids,
131 .setup_request = cifs_setup_request,
132 .check_receive = cifs_check_receive,
133 .add_credits = cifs_add_credits,
134 .set_credits = cifs_set_credits,
135 .get_credits_field = cifs_get_credits_field,
136 .read_data_offset = cifs_read_data_offset,
137 .read_data_length = cifs_read_data_length,
138 .map_error = map_smb_to_linux_error,
139 .find_mid = cifs_find_mid,
140 .check_message = checkSMB,
141 .dump_detail = cifs_dump_detail,
142 .is_oplock_break = is_valid_oplock_break,
143};
144
145struct smb_version_values smb1_values = {
146 .version_string = SMB1_VERSION_STRING,
147 .large_lock_type = LOCKING_ANDX_LARGE_FILES,
148 .exclusive_lock_type = 0,
149 .shared_lock_type = LOCKING_ANDX_SHARED_LOCK,
150 .unlock_lock_type = 0,
151 .header_size = sizeof(struct smb_hdr),
152 .max_header_size = MAX_CIFS_HDR_SIZE,
153 .read_rsp_size = sizeof(READ_RSP),
154};
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
new file mode 100644
index 000000000000..f065e89756a1
--- /dev/null
+++ b/fs/cifs/smb2ops.c
@@ -0,0 +1,27 @@
1/*
2 * SMB2 version specific operations
3 *
4 * Copyright (c) 2012, Jeff Layton <jlayton@redhat.com>
5 *
6 * This library is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License v2 as published
8 * by the Free Software Foundation.
9 *
10 * This library is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
13 * the GNU Lesser General Public License for more details.
14 *
15 * You should have received a copy of the GNU Lesser General Public License
16 * along with this library; if not, write to the Free Software
17 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
18 */
19
20#include "cifsglob.h"
21
22struct smb_version_operations smb21_operations = {
23};
24
25struct smb_version_values smb21_values = {
26 .version_string = SMB21_VERSION_STRING,
27};
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 0961336513d5..1b36ffe6a47b 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -304,7 +304,8 @@ wait_for_free_credits(struct TCP_Server_Info *server, const int optype,
304static int 304static int
305wait_for_free_request(struct TCP_Server_Info *server, const int optype) 305wait_for_free_request(struct TCP_Server_Info *server, const int optype)
306{ 306{
307 return wait_for_free_credits(server, optype, get_credits_field(server)); 307 return wait_for_free_credits(server, optype,
308 server->ops->get_credits_field(server));
308} 309}
309 310
310static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf, 311static int allocate_mid(struct cifs_ses *ses, struct smb_hdr *in_buf,
@@ -396,7 +397,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
396 rc = cifs_setup_async_request(server, iov, nvec, &mid); 397 rc = cifs_setup_async_request(server, iov, nvec, &mid);
397 if (rc) { 398 if (rc) {
398 mutex_unlock(&server->srv_mutex); 399 mutex_unlock(&server->srv_mutex);
399 cifs_add_credits(server, 1); 400 add_credits(server, 1);
400 wake_up(&server->request_q); 401 wake_up(&server->request_q);
401 return rc; 402 return rc;
402 } 403 }
@@ -418,7 +419,7 @@ cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
418 return rc; 419 return rc;
419out_err: 420out_err:
420 delete_mid(mid); 421 delete_mid(mid);
421 cifs_add_credits(server, 1); 422 add_credits(server, 1);
422 wake_up(&server->request_q); 423 wake_up(&server->request_q);
423 return rc; 424 return rc;
424} 425}
@@ -483,41 +484,11 @@ cifs_sync_mid_result(struct mid_q_entry *mid, struct TCP_Server_Info *server)
483 return rc; 484 return rc;
484} 485}
485 486
486/* 487static inline int
487 * An NT cancel request header looks just like the original request except: 488send_cancel(struct TCP_Server_Info *server, void *buf, struct mid_q_entry *mid)
488 *
489 * The Command is SMB_COM_NT_CANCEL
490 * The WordCount is zeroed out
491 * The ByteCount is zeroed out
492 *
493 * This function mangles an existing request buffer into a
494 * SMB_COM_NT_CANCEL request and then sends it.
495 */
496static int
497send_nt_cancel(struct TCP_Server_Info *server, struct smb_hdr *in_buf,
498 struct mid_q_entry *mid)
499{ 489{
500 int rc = 0; 490 return server->ops->send_cancel ?
501 491 server->ops->send_cancel(server, buf, mid) : 0;
502 /* -4 for RFC1001 length and +2 for BCC field */
503 in_buf->smb_buf_length = cpu_to_be32(sizeof(struct smb_hdr) - 4 + 2);
504 in_buf->Command = SMB_COM_NT_CANCEL;
505 in_buf->WordCount = 0;
506 put_bcc(0, in_buf);
507
508 mutex_lock(&server->srv_mutex);
509 rc = cifs_sign_smb(in_buf, server, &mid->sequence_number);
510 if (rc) {
511 mutex_unlock(&server->srv_mutex);
512 return rc;
513 }
514 rc = smb_send(server, in_buf, be32_to_cpu(in_buf->smb_buf_length));
515 mutex_unlock(&server->srv_mutex);
516
517 cFYI(1, "issued NT_CANCEL for mid %u, rc = %d",
518 in_buf->Mid, rc);
519
520 return rc;
521} 492}
522 493
523int 494int
@@ -544,7 +515,7 @@ cifs_check_receive(struct mid_q_entry *mid, struct TCP_Server_Info *server,
544 return map_smb_to_linux_error(mid->resp_buf, log_error); 515 return map_smb_to_linux_error(mid->resp_buf, log_error);
545} 516}
546 517
547static int 518int
548cifs_setup_request(struct cifs_ses *ses, struct kvec *iov, 519cifs_setup_request(struct cifs_ses *ses, struct kvec *iov,
549 unsigned int nvec, struct mid_q_entry **ret_mid) 520 unsigned int nvec, struct mid_q_entry **ret_mid)
550{ 521{
@@ -607,12 +578,12 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
607 578
608 mutex_lock(&ses->server->srv_mutex); 579 mutex_lock(&ses->server->srv_mutex);
609 580
610 rc = cifs_setup_request(ses, iov, n_vec, &midQ); 581 rc = ses->server->ops->setup_request(ses, iov, n_vec, &midQ);
611 if (rc) { 582 if (rc) {
612 mutex_unlock(&ses->server->srv_mutex); 583 mutex_unlock(&ses->server->srv_mutex);
613 cifs_small_buf_release(buf); 584 cifs_small_buf_release(buf);
614 /* Update # of requests on wire to server */ 585 /* Update # of requests on wire to server */
615 cifs_add_credits(ses->server, 1); 586 add_credits(ses->server, 1);
616 return rc; 587 return rc;
617 } 588 }
618 589
@@ -636,13 +607,13 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
636 607
637 rc = wait_for_response(ses->server, midQ); 608 rc = wait_for_response(ses->server, midQ);
638 if (rc != 0) { 609 if (rc != 0) {
639 send_nt_cancel(ses->server, (struct smb_hdr *)buf, midQ); 610 send_cancel(ses->server, buf, midQ);
640 spin_lock(&GlobalMid_Lock); 611 spin_lock(&GlobalMid_Lock);
641 if (midQ->mid_state == MID_REQUEST_SUBMITTED) { 612 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
642 midQ->callback = DeleteMidQEntry; 613 midQ->callback = DeleteMidQEntry;
643 spin_unlock(&GlobalMid_Lock); 614 spin_unlock(&GlobalMid_Lock);
644 cifs_small_buf_release(buf); 615 cifs_small_buf_release(buf);
645 cifs_add_credits(ses->server, 1); 616 add_credits(ses->server, 1);
646 return rc; 617 return rc;
647 } 618 }
648 spin_unlock(&GlobalMid_Lock); 619 spin_unlock(&GlobalMid_Lock);
@@ -652,7 +623,7 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
652 623
653 rc = cifs_sync_mid_result(midQ, ses->server); 624 rc = cifs_sync_mid_result(midQ, ses->server);
654 if (rc != 0) { 625 if (rc != 0) {
655 cifs_add_credits(ses->server, 1); 626 add_credits(ses->server, 1);
656 return rc; 627 return rc;
657 } 628 }
658 629
@@ -670,14 +641,15 @@ SendReceive2(const unsigned int xid, struct cifs_ses *ses,
670 else 641 else
671 *pRespBufType = CIFS_SMALL_BUFFER; 642 *pRespBufType = CIFS_SMALL_BUFFER;
672 643
673 rc = cifs_check_receive(midQ, ses->server, flags & CIFS_LOG_ERROR); 644 rc = ses->server->ops->check_receive(midQ, ses->server,
645 flags & CIFS_LOG_ERROR);
674 646
675 /* mark it so buf will not be freed by delete_mid */ 647 /* mark it so buf will not be freed by delete_mid */
676 if ((flags & CIFS_NO_RESP) == 0) 648 if ((flags & CIFS_NO_RESP) == 0)
677 midQ->resp_buf = NULL; 649 midQ->resp_buf = NULL;
678out: 650out:
679 delete_mid(midQ); 651 delete_mid(midQ);
680 cifs_add_credits(ses->server, 1); 652 add_credits(ses->server, 1);
681 653
682 return rc; 654 return rc;
683} 655}
@@ -727,7 +699,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
727 if (rc) { 699 if (rc) {
728 mutex_unlock(&ses->server->srv_mutex); 700 mutex_unlock(&ses->server->srv_mutex);
729 /* Update # of requests on wire to server */ 701 /* Update # of requests on wire to server */
730 cifs_add_credits(ses->server, 1); 702 add_credits(ses->server, 1);
731 return rc; 703 return rc;
732 } 704 }
733 705
@@ -753,13 +725,13 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
753 725
754 rc = wait_for_response(ses->server, midQ); 726 rc = wait_for_response(ses->server, midQ);
755 if (rc != 0) { 727 if (rc != 0) {
756 send_nt_cancel(ses->server, in_buf, midQ); 728 send_cancel(ses->server, in_buf, midQ);
757 spin_lock(&GlobalMid_Lock); 729 spin_lock(&GlobalMid_Lock);
758 if (midQ->mid_state == MID_REQUEST_SUBMITTED) { 730 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
759 /* no longer considered to be "in-flight" */ 731 /* no longer considered to be "in-flight" */
760 midQ->callback = DeleteMidQEntry; 732 midQ->callback = DeleteMidQEntry;
761 spin_unlock(&GlobalMid_Lock); 733 spin_unlock(&GlobalMid_Lock);
762 cifs_add_credits(ses->server, 1); 734 add_credits(ses->server, 1);
763 return rc; 735 return rc;
764 } 736 }
765 spin_unlock(&GlobalMid_Lock); 737 spin_unlock(&GlobalMid_Lock);
@@ -767,7 +739,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
767 739
768 rc = cifs_sync_mid_result(midQ, ses->server); 740 rc = cifs_sync_mid_result(midQ, ses->server);
769 if (rc != 0) { 741 if (rc != 0) {
770 cifs_add_credits(ses->server, 1); 742 add_credits(ses->server, 1);
771 return rc; 743 return rc;
772 } 744 }
773 745
@@ -783,7 +755,7 @@ SendReceive(const unsigned int xid, struct cifs_ses *ses,
783 rc = cifs_check_receive(midQ, ses->server, 0); 755 rc = cifs_check_receive(midQ, ses->server, 0);
784out: 756out:
785 delete_mid(midQ); 757 delete_mid(midQ);
786 cifs_add_credits(ses->server, 1); 758 add_credits(ses->server, 1);
787 759
788 return rc; 760 return rc;
789} 761}
@@ -898,7 +870,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
898 if (in_buf->Command == SMB_COM_TRANSACTION2) { 870 if (in_buf->Command == SMB_COM_TRANSACTION2) {
899 /* POSIX lock. We send a NT_CANCEL SMB to cause the 871 /* POSIX lock. We send a NT_CANCEL SMB to cause the
900 blocking lock to return. */ 872 blocking lock to return. */
901 rc = send_nt_cancel(ses->server, in_buf, midQ); 873 rc = send_cancel(ses->server, in_buf, midQ);
902 if (rc) { 874 if (rc) {
903 delete_mid(midQ); 875 delete_mid(midQ);
904 return rc; 876 return rc;
@@ -919,7 +891,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifs_tcon *tcon,
919 891
920 rc = wait_for_response(ses->server, midQ); 892 rc = wait_for_response(ses->server, midQ);
921 if (rc) { 893 if (rc) {
922 send_nt_cancel(ses->server, in_buf, midQ); 894 send_cancel(ses->server, in_buf, midQ);
923 spin_lock(&GlobalMid_Lock); 895 spin_lock(&GlobalMid_Lock);
924 if (midQ->mid_state == MID_REQUEST_SUBMITTED) { 896 if (midQ->mid_state == MID_REQUEST_SUBMITTED) {
925 /* no longer considered to be "in-flight" */ 897 /* no longer considered to be "in-flight" */
diff --git a/fs/coda/inode.c b/fs/coda/inode.c
index 2870597b5c9d..f1813120d753 100644
--- a/fs/coda/inode.c
+++ b/fs/coda/inode.c
@@ -244,7 +244,7 @@ static void coda_put_super(struct super_block *sb)
244static void coda_evict_inode(struct inode *inode) 244static void coda_evict_inode(struct inode *inode)
245{ 245{
246 truncate_inode_pages(&inode->i_data, 0); 246 truncate_inode_pages(&inode->i_data, 0);
247 end_writeback(inode); 247 clear_inode(inode);
248 coda_cache_clear_inode(inode); 248 coda_cache_clear_inode(inode);
249} 249}
250 250
diff --git a/fs/debugfs/file.c b/fs/debugfs/file.c
index 5dfafdd1dbd3..2340f6978d6e 100644
--- a/fs/debugfs/file.c
+++ b/fs/debugfs/file.c
@@ -20,6 +20,7 @@
20#include <linux/namei.h> 20#include <linux/namei.h>
21#include <linux/debugfs.h> 21#include <linux/debugfs.h>
22#include <linux/io.h> 22#include <linux/io.h>
23#include <linux/slab.h>
23 24
24static ssize_t default_read_file(struct file *file, char __user *buf, 25static ssize_t default_read_file(struct file *file, char __user *buf,
25 size_t count, loff_t *ppos) 26 size_t count, loff_t *ppos)
@@ -520,6 +521,133 @@ struct dentry *debugfs_create_blob(const char *name, umode_t mode,
520} 521}
521EXPORT_SYMBOL_GPL(debugfs_create_blob); 522EXPORT_SYMBOL_GPL(debugfs_create_blob);
522 523
524struct array_data {
525 void *array;
526 u32 elements;
527};
528
529static int u32_array_open(struct inode *inode, struct file *file)
530{
531 file->private_data = NULL;
532 return nonseekable_open(inode, file);
533}
534
535static size_t format_array(char *buf, size_t bufsize, const char *fmt,
536 u32 *array, u32 array_size)
537{
538 size_t ret = 0;
539 u32 i;
540
541 for (i = 0; i < array_size; i++) {
542 size_t len;
543
544 len = snprintf(buf, bufsize, fmt, array[i]);
545 len++; /* ' ' or '\n' */
546 ret += len;
547
548 if (buf) {
549 buf += len;
550 bufsize -= len;
551 buf[-1] = (i == array_size-1) ? '\n' : ' ';
552 }
553 }
554
555 ret++; /* \0 */
556 if (buf)
557 *buf = '\0';
558
559 return ret;
560}
561
562static char *format_array_alloc(const char *fmt, u32 *array,
563 u32 array_size)
564{
565 size_t len = format_array(NULL, 0, fmt, array, array_size);
566 char *ret;
567
568 ret = kmalloc(len, GFP_KERNEL);
569 if (ret == NULL)
570 return NULL;
571
572 format_array(ret, len, fmt, array, array_size);
573 return ret;
574}
575
576static ssize_t u32_array_read(struct file *file, char __user *buf, size_t len,
577 loff_t *ppos)
578{
579 struct inode *inode = file->f_path.dentry->d_inode;
580 struct array_data *data = inode->i_private;
581 size_t size;
582
583 if (*ppos == 0) {
584 if (file->private_data) {
585 kfree(file->private_data);
586 file->private_data = NULL;
587 }
588
589 file->private_data = format_array_alloc("%u", data->array,
590 data->elements);
591 }
592
593 size = 0;
594 if (file->private_data)
595 size = strlen(file->private_data);
596
597 return simple_read_from_buffer(buf, len, ppos,
598 file->private_data, size);
599}
600
601static int u32_array_release(struct inode *inode, struct file *file)
602{
603 kfree(file->private_data);
604
605 return 0;
606}
607
608static const struct file_operations u32_array_fops = {
609 .owner = THIS_MODULE,
610 .open = u32_array_open,
611 .release = u32_array_release,
612 .read = u32_array_read,
613 .llseek = no_llseek,
614};
615
616/**
617 * debugfs_create_u32_array - create a debugfs file that is used to read u32
618 * array.
619 * @name: a pointer to a string containing the name of the file to create.
620 * @mode: the permission that the file should have.
621 * @parent: a pointer to the parent dentry for this file. This should be a
622 * directory dentry if set. If this parameter is %NULL, then the
623 * file will be created in the root of the debugfs filesystem.
624 * @array: u32 array that provides data.
625 * @elements: total number of elements in the array.
626 *
627 * This function creates a file in debugfs with the given name that exports
628 * @array as data. If the @mode variable is so set it can be read from.
629 * Writing is not supported. Seek within the file is also not supported.
630 * Once array is created its size can not be changed.
631 *
632 * The function returns a pointer to dentry on success. If debugfs is not
633 * enabled in the kernel, the value -%ENODEV will be returned.
634 */
635struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
636 struct dentry *parent,
637 u32 *array, u32 elements)
638{
639 struct array_data *data = kmalloc(sizeof(*data), GFP_KERNEL);
640
641 if (data == NULL)
642 return NULL;
643
644 data->array = array;
645 data->elements = elements;
646
647 return debugfs_create_file(name, mode, parent, data, &u32_array_fops);
648}
649EXPORT_SYMBOL_GPL(debugfs_create_u32_array);
650
523#ifdef CONFIG_HAS_IOMEM 651#ifdef CONFIG_HAS_IOMEM
524 652
525/* 653/*
diff --git a/fs/ecryptfs/super.c b/fs/ecryptfs/super.c
index 2dd946b636d2..e879cf8ff0b1 100644
--- a/fs/ecryptfs/super.c
+++ b/fs/ecryptfs/super.c
@@ -133,7 +133,7 @@ static int ecryptfs_statfs(struct dentry *dentry, struct kstatfs *buf)
133static void ecryptfs_evict_inode(struct inode *inode) 133static void ecryptfs_evict_inode(struct inode *inode)
134{ 134{
135 truncate_inode_pages(&inode->i_data, 0); 135 truncate_inode_pages(&inode->i_data, 0);
136 end_writeback(inode); 136 clear_inode(inode);
137 iput(ecryptfs_inode_to_lower(inode)); 137 iput(ecryptfs_inode_to_lower(inode));
138} 138}
139 139
diff --git a/fs/exofs/Kbuild b/fs/exofs/Kbuild
index 352ba149d23e..389ba8312d5d 100644
--- a/fs/exofs/Kbuild
+++ b/fs/exofs/Kbuild
@@ -16,5 +16,5 @@
16libore-y := ore.o ore_raid.o 16libore-y := ore.o ore_raid.o
17obj-$(CONFIG_ORE) += libore.o 17obj-$(CONFIG_ORE) += libore.o
18 18
19exofs-y := inode.o file.o symlink.o namei.o dir.o super.o 19exofs-y := inode.o file.o symlink.o namei.o dir.o super.o sys.o
20obj-$(CONFIG_EXOFS_FS) += exofs.o 20obj-$(CONFIG_EXOFS_FS) += exofs.o
diff --git a/fs/exofs/exofs.h b/fs/exofs/exofs.h
index ca9d49665ef6..fffe86fd7a42 100644
--- a/fs/exofs/exofs.h
+++ b/fs/exofs/exofs.h
@@ -56,6 +56,9 @@
56struct exofs_dev { 56struct exofs_dev {
57 struct ore_dev ored; 57 struct ore_dev ored;
58 unsigned did; 58 unsigned did;
59 unsigned urilen;
60 uint8_t *uri;
61 struct kobject ed_kobj;
59}; 62};
60/* 63/*
61 * our extension to the in-memory superblock 64 * our extension to the in-memory superblock
@@ -73,6 +76,7 @@ struct exofs_sb_info {
73 struct ore_layout layout; /* Default files layout */ 76 struct ore_layout layout; /* Default files layout */
74 struct ore_comp one_comp; /* id & cred of partition id=0*/ 77 struct ore_comp one_comp; /* id & cred of partition id=0*/
75 struct ore_components oc; /* comps for the partition */ 78 struct ore_components oc; /* comps for the partition */
79 struct kobject s_kobj; /* holds per-sbi kobject */
76}; 80};
77 81
78/* 82/*
@@ -176,6 +180,16 @@ void exofs_make_credential(u8 cred_a[OSD_CAP_LEN],
176 const struct osd_obj_id *obj); 180 const struct osd_obj_id *obj);
177int exofs_sbi_write_stats(struct exofs_sb_info *sbi); 181int exofs_sbi_write_stats(struct exofs_sb_info *sbi);
178 182
183/* sys.c */
184int exofs_sysfs_init(void);
185void exofs_sysfs_uninit(void);
186int exofs_sysfs_sb_add(struct exofs_sb_info *sbi,
187 struct exofs_dt_device_info *dt_dev);
188void exofs_sysfs_sb_del(struct exofs_sb_info *sbi);
189int exofs_sysfs_odev_add(struct exofs_dev *edev,
190 struct exofs_sb_info *sbi);
191void exofs_sysfs_dbg_print(void);
192
179/********************* 193/*********************
180 * operation vectors * 194 * operation vectors *
181 *********************/ 195 *********************/
diff --git a/fs/exofs/inode.c b/fs/exofs/inode.c
index ea5e1f97806a..5badb0c039de 100644
--- a/fs/exofs/inode.c
+++ b/fs/exofs/inode.c
@@ -1473,7 +1473,7 @@ void exofs_evict_inode(struct inode *inode)
1473 goto no_delete; 1473 goto no_delete;
1474 1474
1475 inode->i_size = 0; 1475 inode->i_size = 0;
1476 end_writeback(inode); 1476 clear_inode(inode);
1477 1477
1478 /* if we are deleting an obj that hasn't been created yet, wait. 1478 /* if we are deleting an obj that hasn't been created yet, wait.
1479 * This also makes sure that create_done cannot be called with an 1479 * This also makes sure that create_done cannot be called with an
@@ -1503,5 +1503,5 @@ void exofs_evict_inode(struct inode *inode)
1503 return; 1503 return;
1504 1504
1505no_delete: 1505no_delete:
1506 end_writeback(inode); 1506 clear_inode(inode);
1507} 1507}
diff --git a/fs/exofs/super.c b/fs/exofs/super.c
index 735ca06430ac..433783624d10 100644
--- a/fs/exofs/super.c
+++ b/fs/exofs/super.c
@@ -472,6 +472,7 @@ static void exofs_put_super(struct super_block *sb)
472 _exofs_print_device("Unmounting", NULL, ore_comp_dev(&sbi->oc, 0), 472 _exofs_print_device("Unmounting", NULL, ore_comp_dev(&sbi->oc, 0),
473 sbi->one_comp.obj.partition); 473 sbi->one_comp.obj.partition);
474 474
475 exofs_sysfs_sb_del(sbi);
475 bdi_destroy(&sbi->bdi); 476 bdi_destroy(&sbi->bdi);
476 exofs_free_sbi(sbi); 477 exofs_free_sbi(sbi);
477 sb->s_fs_info = NULL; 478 sb->s_fs_info = NULL;
@@ -632,6 +633,12 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info *sbi,
632 memcpy(&sbi->oc.ods[numdevs], &sbi->oc.ods[0], 633 memcpy(&sbi->oc.ods[numdevs], &sbi->oc.ods[0],
633 (numdevs - 1) * sizeof(sbi->oc.ods[0])); 634 (numdevs - 1) * sizeof(sbi->oc.ods[0]));
634 635
636 /* create sysfs subdir under which we put the device table
637 * And cluster layout. A Superblock is identified by the string:
638 * "dev[0].osdname"_"pid"
639 */
640 exofs_sysfs_sb_add(sbi, &dt->dt_dev_table[0]);
641
635 for (i = 0; i < numdevs; i++) { 642 for (i = 0; i < numdevs; i++) {
636 struct exofs_fscb fscb; 643 struct exofs_fscb fscb;
637 struct osd_dev_info odi; 644 struct osd_dev_info odi;
@@ -657,6 +664,7 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info *sbi,
657 eds[i].ored.od = fscb_od; 664 eds[i].ored.od = fscb_od;
658 ++sbi->oc.numdevs; 665 ++sbi->oc.numdevs;
659 fscb_od = NULL; 666 fscb_od = NULL;
667 exofs_sysfs_odev_add(&eds[i], sbi);
660 continue; 668 continue;
661 } 669 }
662 670
@@ -682,6 +690,7 @@ static int exofs_read_lookup_dev_table(struct exofs_sb_info *sbi,
682 odi.osdname); 690 odi.osdname);
683 goto out; 691 goto out;
684 } 692 }
693 exofs_sysfs_odev_add(&eds[i], sbi);
685 694
686 /* TODO: verify other information is correct and FS-uuid 695 /* TODO: verify other information is correct and FS-uuid
687 * matches. Benny what did you say about device table 696 * matches. Benny what did you say about device table
@@ -745,7 +754,6 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
745 sbi->one_comp.obj.partition = opts->pid; 754 sbi->one_comp.obj.partition = opts->pid;
746 sbi->one_comp.obj.id = 0; 755 sbi->one_comp.obj.id = 0;
747 exofs_make_credential(sbi->one_comp.cred, &sbi->one_comp.obj); 756 exofs_make_credential(sbi->one_comp.cred, &sbi->one_comp.obj);
748 sbi->oc.numdevs = 1;
749 sbi->oc.single_comp = EC_SINGLE_COMP; 757 sbi->oc.single_comp = EC_SINGLE_COMP;
750 sbi->oc.comps = &sbi->one_comp; 758 sbi->oc.comps = &sbi->one_comp;
751 759
@@ -804,6 +812,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
804 goto free_sbi; 812 goto free_sbi;
805 813
806 ore_comp_set_dev(&sbi->oc, 0, od); 814 ore_comp_set_dev(&sbi->oc, 0, od);
815 sbi->oc.numdevs = 1;
807 } 816 }
808 817
809 __sbi_read_stats(sbi); 818 __sbi_read_stats(sbi);
@@ -844,6 +853,7 @@ static int exofs_fill_super(struct super_block *sb, void *data, int silent)
844 goto free_sbi; 853 goto free_sbi;
845 } 854 }
846 855
856 exofs_sysfs_dbg_print();
847 _exofs_print_device("Mounting", opts->dev_name, 857 _exofs_print_device("Mounting", opts->dev_name,
848 ore_comp_dev(&sbi->oc, 0), 858 ore_comp_dev(&sbi->oc, 0),
849 sbi->one_comp.obj.partition); 859 sbi->one_comp.obj.partition);
@@ -1023,6 +1033,9 @@ static int __init init_exofs(void)
1023 if (err) 1033 if (err)
1024 goto out_d; 1034 goto out_d;
1025 1035
1036 /* We don't fail if sysfs creation failed */
1037 exofs_sysfs_init();
1038
1026 return 0; 1039 return 0;
1027out_d: 1040out_d:
1028 destroy_inodecache(); 1041 destroy_inodecache();
@@ -1032,6 +1045,7 @@ out:
1032 1045
1033static void __exit exit_exofs(void) 1046static void __exit exit_exofs(void)
1034{ 1047{
1048 exofs_sysfs_uninit();
1035 unregister_filesystem(&exofs_type); 1049 unregister_filesystem(&exofs_type);
1036 destroy_inodecache(); 1050 destroy_inodecache();
1037} 1051}
diff --git a/fs/exofs/sys.c b/fs/exofs/sys.c
new file mode 100644
index 000000000000..e32bc919e4e3
--- /dev/null
+++ b/fs/exofs/sys.c
@@ -0,0 +1,200 @@
1/*
2 * Copyright (C) 2012
3 * Sachin Bhamare <sbhamare@panasas.com>
4 * Boaz Harrosh <bharrosh@panasas.com>
5 *
6 * This file is part of exofs.
7 *
8 * exofs is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License 2 as published by
10 * the Free Software Foundation.
11 *
12 * exofs is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15 * GNU General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with exofs; if not, write to the:
19 * Free Software Foundation <licensing@fsf.org>
20 */
21
22#include <linux/kobject.h>
23#include <linux/device.h>
24
25#include "exofs.h"
26
27struct odev_attr {
28 struct attribute attr;
29 ssize_t (*show)(struct exofs_dev *, char *);
30 ssize_t (*store)(struct exofs_dev *, const char *, size_t);
31};
32
33static ssize_t odev_attr_show(struct kobject *kobj, struct attribute *attr,
34 char *buf)
35{
36 struct exofs_dev *edp = container_of(kobj, struct exofs_dev, ed_kobj);
37 struct odev_attr *a = container_of(attr, struct odev_attr, attr);
38
39 return a->show ? a->show(edp, buf) : 0;
40}
41
42static ssize_t odev_attr_store(struct kobject *kobj, struct attribute *attr,
43 const char *buf, size_t len)
44{
45 struct exofs_dev *edp = container_of(kobj, struct exofs_dev, ed_kobj);
46 struct odev_attr *a = container_of(attr, struct odev_attr, attr);
47
48 return a->store ? a->store(edp, buf, len) : len;
49}
50
51static const struct sysfs_ops odev_attr_ops = {
52 .show = odev_attr_show,
53 .store = odev_attr_store,
54};
55
56
57static struct kset *exofs_kset;
58
59static ssize_t osdname_show(struct exofs_dev *edp, char *buf)
60{
61 struct osd_dev *odev = edp->ored.od;
62 const struct osd_dev_info *odi = osduld_device_info(odev);
63
64 return snprintf(buf, odi->osdname_len + 1, "%s", odi->osdname);
65}
66
67static ssize_t systemid_show(struct exofs_dev *edp, char *buf)
68{
69 struct osd_dev *odev = edp->ored.od;
70 const struct osd_dev_info *odi = osduld_device_info(odev);
71
72 memcpy(buf, odi->systemid, odi->systemid_len);
73 return odi->systemid_len;
74}
75
76static ssize_t uri_show(struct exofs_dev *edp, char *buf)
77{
78 return snprintf(buf, edp->urilen, "%s", edp->uri);
79}
80
81static ssize_t uri_store(struct exofs_dev *edp, const char *buf, size_t len)
82{
83 edp->urilen = strlen(buf) + 1;
84 edp->uri = krealloc(edp->uri, edp->urilen, GFP_KERNEL);
85 strncpy(edp->uri, buf, edp->urilen);
86 return edp->urilen;
87}
88
89#define OSD_ATTR(name, mode, show, store) \
90 static struct odev_attr odev_attr_##name = \
91 __ATTR(name, mode, show, store)
92
93OSD_ATTR(osdname, S_IRUGO, osdname_show, NULL);
94OSD_ATTR(systemid, S_IRUGO, systemid_show, NULL);
95OSD_ATTR(uri, S_IRWXU, uri_show, uri_store);
96
97static struct attribute *odev_attrs[] = {
98 &odev_attr_osdname.attr,
99 &odev_attr_systemid.attr,
100 &odev_attr_uri.attr,
101 NULL,
102};
103
104static struct kobj_type odev_ktype = {
105 .default_attrs = odev_attrs,
106 .sysfs_ops = &odev_attr_ops,
107};
108
109static struct kobj_type uuid_ktype = {
110};
111
112void exofs_sysfs_dbg_print()
113{
114#ifdef CONFIG_EXOFS_DEBUG
115 struct kobject *k_name, *k_tmp;
116
117 list_for_each_entry_safe(k_name, k_tmp, &exofs_kset->list, entry) {
118 printk(KERN_INFO "%s: name %s ref %d\n",
119 __func__, kobject_name(k_name),
120 (int)atomic_read(&k_name->kref.refcount));
121 }
122#endif
123}
124/*
125 * This function removes all kobjects under exofs_kset
126 * At the end of it, exofs_kset kobject will have a refcount
127 * of 1 which gets decremented only on exofs module unload
128 */
129void exofs_sysfs_sb_del(struct exofs_sb_info *sbi)
130{
131 struct kobject *k_name, *k_tmp;
132 struct kobject *s_kobj = &sbi->s_kobj;
133
134 list_for_each_entry_safe(k_name, k_tmp, &exofs_kset->list, entry) {
135 /* Remove all that are children of this SBI */
136 if (k_name->parent == s_kobj)
137 kobject_put(k_name);
138 }
139 kobject_put(s_kobj);
140}
141
142/*
143 * This function creates sysfs entries to hold the current exofs cluster
144 * instance (uniquely identified by osdname,pid tuple).
145 * This function gets called once per exofs mount instance.
146 */
147int exofs_sysfs_sb_add(struct exofs_sb_info *sbi,
148 struct exofs_dt_device_info *dt_dev)
149{
150 struct kobject *s_kobj;
151 int retval = 0;
152 uint64_t pid = sbi->one_comp.obj.partition;
153
154 /* allocate new uuid dirent */
155 s_kobj = &sbi->s_kobj;
156 s_kobj->kset = exofs_kset;
157 retval = kobject_init_and_add(s_kobj, &uuid_ktype,
158 &exofs_kset->kobj, "%s_%llx", dt_dev->osdname, pid);
159 if (retval) {
160 EXOFS_ERR("ERROR: Failed to create sysfs entry for "
161 "uuid-%s_%llx => %d\n", dt_dev->osdname, pid, retval);
162 return -ENOMEM;
163 }
164 return 0;
165}
166
167int exofs_sysfs_odev_add(struct exofs_dev *edev, struct exofs_sb_info *sbi)
168{
169 struct kobject *d_kobj;
170 int retval = 0;
171
172 /* create osd device group which contains following attributes
173 * osdname, systemid & uri
174 */
175 d_kobj = &edev->ed_kobj;
176 d_kobj->kset = exofs_kset;
177 retval = kobject_init_and_add(d_kobj, &odev_ktype,
178 &sbi->s_kobj, "dev%u", edev->did);
179 if (retval) {
180 EXOFS_ERR("ERROR: Failed to create sysfs entry for "
181 "device dev%u\n", edev->did);
182 return retval;
183 }
184 return 0;
185}
186
187int exofs_sysfs_init(void)
188{
189 exofs_kset = kset_create_and_add("exofs", NULL, fs_kobj);
190 if (!exofs_kset) {
191 EXOFS_ERR("ERROR: kset_create_and_add exofs failed\n");
192 return -ENOMEM;
193 }
194 return 0;
195}
196
197void exofs_sysfs_uninit(void)
198{
199 kset_unregister(exofs_kset);
200}
diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
index 030c6d277e14..1c3613998862 100644
--- a/fs/ext2/balloc.c
+++ b/fs/ext2/balloc.c
@@ -165,7 +165,6 @@ static void release_blocks(struct super_block *sb, int count)
165 struct ext2_sb_info *sbi = EXT2_SB(sb); 165 struct ext2_sb_info *sbi = EXT2_SB(sb);
166 166
167 percpu_counter_add(&sbi->s_freeblocks_counter, count); 167 percpu_counter_add(&sbi->s_freeblocks_counter, count);
168 sb->s_dirt = 1;
169 } 168 }
170} 169}
171 170
@@ -180,7 +179,6 @@ static void group_adjust_blocks(struct super_block *sb, int group_no,
180 free_blocks = le16_to_cpu(desc->bg_free_blocks_count); 179 free_blocks = le16_to_cpu(desc->bg_free_blocks_count);
181 desc->bg_free_blocks_count = cpu_to_le16(free_blocks + count); 180 desc->bg_free_blocks_count = cpu_to_le16(free_blocks + count);
182 spin_unlock(sb_bgl_lock(sbi, group_no)); 181 spin_unlock(sb_bgl_lock(sbi, group_no));
183 sb->s_dirt = 1;
184 mark_buffer_dirty(bh); 182 mark_buffer_dirty(bh);
185 } 183 }
186} 184}
@@ -479,7 +477,7 @@ void ext2_discard_reservation(struct inode *inode)
479} 477}
480 478
481/** 479/**
482 * ext2_free_blocks_sb() -- Free given blocks and update quota and i_blocks 480 * ext2_free_blocks() -- Free given blocks and update quota and i_blocks
483 * @inode: inode 481 * @inode: inode
484 * @block: start physcial block to free 482 * @block: start physcial block to free
485 * @count: number of blocks to free 483 * @count: number of blocks to free
diff --git a/fs/ext2/ialloc.c b/fs/ext2/ialloc.c
index 8b15cf8cef37..c13eb7b91a11 100644
--- a/fs/ext2/ialloc.c
+++ b/fs/ext2/ialloc.c
@@ -81,7 +81,6 @@ static void ext2_release_inode(struct super_block *sb, int group, int dir)
81 spin_unlock(sb_bgl_lock(EXT2_SB(sb), group)); 81 spin_unlock(sb_bgl_lock(EXT2_SB(sb), group));
82 if (dir) 82 if (dir)
83 percpu_counter_dec(&EXT2_SB(sb)->s_dirs_counter); 83 percpu_counter_dec(&EXT2_SB(sb)->s_dirs_counter);
84 sb->s_dirt = 1;
85 mark_buffer_dirty(bh); 84 mark_buffer_dirty(bh);
86} 85}
87 86
@@ -543,7 +542,6 @@ got:
543 } 542 }
544 spin_unlock(sb_bgl_lock(sbi, group)); 543 spin_unlock(sb_bgl_lock(sbi, group));
545 544
546 sb->s_dirt = 1;
547 mark_buffer_dirty(bh2); 545 mark_buffer_dirty(bh2);
548 if (test_opt(sb, GRPID)) { 546 if (test_opt(sb, GRPID)) {
549 inode->i_mode = mode; 547 inode->i_mode = mode;
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index f9fa95f8443d..264d315f6c47 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -90,7 +90,7 @@ void ext2_evict_inode(struct inode * inode)
90 } 90 }
91 91
92 invalidate_inode_buffers(inode); 92 invalidate_inode_buffers(inode);
93 end_writeback(inode); 93 clear_inode(inode);
94 94
95 ext2_discard_reservation(inode); 95 ext2_discard_reservation(inode);
96 rsv = EXT2_I(inode)->i_block_alloc_info; 96 rsv = EXT2_I(inode)->i_block_alloc_info;
diff --git a/fs/ext2/super.c b/fs/ext2/super.c
index 38f816071ddb..b3621cb7ea31 100644
--- a/fs/ext2/super.c
+++ b/fs/ext2/super.c
@@ -130,9 +130,6 @@ static void ext2_put_super (struct super_block * sb)
130 130
131 dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED); 131 dquot_disable(sb, -1, DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED);
132 132
133 if (sb->s_dirt)
134 ext2_write_super(sb);
135
136 ext2_xattr_put_super(sb); 133 ext2_xattr_put_super(sb);
137 if (!(sb->s_flags & MS_RDONLY)) { 134 if (!(sb->s_flags & MS_RDONLY)) {
138 struct ext2_super_block *es = sbi->s_es; 135 struct ext2_super_block *es = sbi->s_es;
@@ -307,7 +304,6 @@ static const struct super_operations ext2_sops = {
307 .write_inode = ext2_write_inode, 304 .write_inode = ext2_write_inode,
308 .evict_inode = ext2_evict_inode, 305 .evict_inode = ext2_evict_inode,
309 .put_super = ext2_put_super, 306 .put_super = ext2_put_super,
310 .write_super = ext2_write_super,
311 .sync_fs = ext2_sync_fs, 307 .sync_fs = ext2_sync_fs,
312 .statfs = ext2_statfs, 308 .statfs = ext2_statfs,
313 .remount_fs = ext2_remount, 309 .remount_fs = ext2_remount,
@@ -358,11 +354,6 @@ static struct dentry *ext2_fh_to_parent(struct super_block *sb, struct fid *fid,
358 ext2_nfs_get_inode); 354 ext2_nfs_get_inode);
359} 355}
360 356
361/* Yes, most of these are left as NULL!!
362 * A NULL value implies the default, which works with ext2-like file
363 * systems, but can be improved upon.
364 * Currently only get_parent is required.
365 */
366static const struct export_operations ext2_export_ops = { 357static const struct export_operations ext2_export_ops = {
367 .fh_to_dentry = ext2_fh_to_dentry, 358 .fh_to_dentry = ext2_fh_to_dentry,
368 .fh_to_parent = ext2_fh_to_parent, 359 .fh_to_parent = ext2_fh_to_parent,
@@ -1176,7 +1167,6 @@ static void ext2_sync_super(struct super_block *sb, struct ext2_super_block *es,
1176 mark_buffer_dirty(EXT2_SB(sb)->s_sbh); 1167 mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
1177 if (wait) 1168 if (wait)
1178 sync_dirty_buffer(EXT2_SB(sb)->s_sbh); 1169 sync_dirty_buffer(EXT2_SB(sb)->s_sbh);
1179 sb->s_dirt = 0;
1180} 1170}
1181 1171
1182/* 1172/*
@@ -1209,8 +1199,6 @@ void ext2_write_super(struct super_block *sb)
1209{ 1199{
1210 if (!(sb->s_flags & MS_RDONLY)) 1200 if (!(sb->s_flags & MS_RDONLY))
1211 ext2_sync_fs(sb, 1); 1201 ext2_sync_fs(sb, 1);
1212 else
1213 sb->s_dirt = 0;
1214} 1202}
1215 1203
1216static int ext2_remount (struct super_block * sb, int * flags, char * data) 1204static int ext2_remount (struct super_block * sb, int * flags, char * data)
@@ -1456,7 +1444,6 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
1456 struct buffer_head tmp_bh; 1444 struct buffer_head tmp_bh;
1457 struct buffer_head *bh; 1445 struct buffer_head *bh;
1458 1446
1459 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
1460 while (towrite > 0) { 1447 while (towrite > 0) {
1461 tocopy = sb->s_blocksize - offset < towrite ? 1448 tocopy = sb->s_blocksize - offset < towrite ?
1462 sb->s_blocksize - offset : towrite; 1449 sb->s_blocksize - offset : towrite;
@@ -1486,16 +1473,13 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type,
1486 blk++; 1473 blk++;
1487 } 1474 }
1488out: 1475out:
1489 if (len == towrite) { 1476 if (len == towrite)
1490 mutex_unlock(&inode->i_mutex);
1491 return err; 1477 return err;
1492 }
1493 if (inode->i_size < off+len-towrite) 1478 if (inode->i_size < off+len-towrite)
1494 i_size_write(inode, off+len-towrite); 1479 i_size_write(inode, off+len-towrite);
1495 inode->i_version++; 1480 inode->i_version++;
1496 inode->i_mtime = inode->i_ctime = CURRENT_TIME; 1481 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
1497 mark_inode_dirty(inode); 1482 mark_inode_dirty(inode);
1498 mutex_unlock(&inode->i_mutex);
1499 return len - towrite; 1483 return len - towrite;
1500} 1484}
1501 1485
diff --git a/fs/ext2/xattr.c b/fs/ext2/xattr.c
index 6dcafc7efdfd..b6754dbbce3c 100644
--- a/fs/ext2/xattr.c
+++ b/fs/ext2/xattr.c
@@ -339,7 +339,6 @@ static void ext2_xattr_update_super_block(struct super_block *sb)
339 spin_lock(&EXT2_SB(sb)->s_lock); 339 spin_lock(&EXT2_SB(sb)->s_lock);
340 EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR); 340 EXT2_SET_COMPAT_FEATURE(sb, EXT2_FEATURE_COMPAT_EXT_ATTR);
341 spin_unlock(&EXT2_SB(sb)->s_lock); 341 spin_unlock(&EXT2_SB(sb)->s_lock);
342 sb->s_dirt = 1;
343 mark_buffer_dirty(EXT2_SB(sb)->s_sbh); 342 mark_buffer_dirty(EXT2_SB(sb)->s_sbh);
344} 343}
345 344
diff --git a/fs/ext3/dir.c b/fs/ext3/dir.c
index cc761ad8fa57..92490e9f85ca 100644
--- a/fs/ext3/dir.c
+++ b/fs/ext3/dir.c
@@ -21,30 +21,15 @@
21 * 21 *
22 */ 22 */
23 23
24#include <linux/compat.h>
24#include "ext3.h" 25#include "ext3.h"
25 26
26static unsigned char ext3_filetype_table[] = { 27static unsigned char ext3_filetype_table[] = {
27 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK 28 DT_UNKNOWN, DT_REG, DT_DIR, DT_CHR, DT_BLK, DT_FIFO, DT_SOCK, DT_LNK
28}; 29};
29 30
30static int ext3_readdir(struct file *, void *, filldir_t);
31static int ext3_dx_readdir(struct file * filp, 31static int ext3_dx_readdir(struct file * filp,
32 void * dirent, filldir_t filldir); 32 void * dirent, filldir_t filldir);
33static int ext3_release_dir (struct inode * inode,
34 struct file * filp);
35
36const struct file_operations ext3_dir_operations = {
37 .llseek = generic_file_llseek,
38 .read = generic_read_dir,
39 .readdir = ext3_readdir, /* we take BKL. needed?*/
40 .unlocked_ioctl = ext3_ioctl,
41#ifdef CONFIG_COMPAT
42 .compat_ioctl = ext3_compat_ioctl,
43#endif
44 .fsync = ext3_sync_file, /* BKL held */
45 .release = ext3_release_dir,
46};
47
48 33
49static unsigned char get_dtype(struct super_block *sb, int filetype) 34static unsigned char get_dtype(struct super_block *sb, int filetype)
50{ 35{
@@ -55,6 +40,25 @@ static unsigned char get_dtype(struct super_block *sb, int filetype)
55 return (ext3_filetype_table[filetype]); 40 return (ext3_filetype_table[filetype]);
56} 41}
57 42
43/**
44 * Check if the given dir-inode refers to an htree-indexed directory
45 * (or a directory which chould potentially get coverted to use htree
46 * indexing).
47 *
48 * Return 1 if it is a dx dir, 0 if not
49 */
50static int is_dx_dir(struct inode *inode)
51{
52 struct super_block *sb = inode->i_sb;
53
54 if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
55 EXT3_FEATURE_COMPAT_DIR_INDEX) &&
56 ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
57 ((inode->i_size >> sb->s_blocksize_bits) == 1)))
58 return 1;
59
60 return 0;
61}
58 62
59int ext3_check_dir_entry (const char * function, struct inode * dir, 63int ext3_check_dir_entry (const char * function, struct inode * dir,
60 struct ext3_dir_entry_2 * de, 64 struct ext3_dir_entry_2 * de,
@@ -94,18 +98,13 @@ static int ext3_readdir(struct file * filp,
94 unsigned long offset; 98 unsigned long offset;
95 int i, stored; 99 int i, stored;
96 struct ext3_dir_entry_2 *de; 100 struct ext3_dir_entry_2 *de;
97 struct super_block *sb;
98 int err; 101 int err;
99 struct inode *inode = filp->f_path.dentry->d_inode; 102 struct inode *inode = filp->f_path.dentry->d_inode;
103 struct super_block *sb = inode->i_sb;
100 int ret = 0; 104 int ret = 0;
101 int dir_has_error = 0; 105 int dir_has_error = 0;
102 106
103 sb = inode->i_sb; 107 if (is_dx_dir(inode)) {
104
105 if (EXT3_HAS_COMPAT_FEATURE(inode->i_sb,
106 EXT3_FEATURE_COMPAT_DIR_INDEX) &&
107 ((EXT3_I(inode)->i_flags & EXT3_INDEX_FL) ||
108 ((inode->i_size >> sb->s_blocksize_bits) == 1))) {
109 err = ext3_dx_readdir(filp, dirent, filldir); 108 err = ext3_dx_readdir(filp, dirent, filldir);
110 if (err != ERR_BAD_DX_DIR) { 109 if (err != ERR_BAD_DX_DIR) {
111 ret = err; 110 ret = err;
@@ -227,22 +226,87 @@ out:
227 return ret; 226 return ret;
228} 227}
229 228
229static inline int is_32bit_api(void)
230{
231#ifdef CONFIG_COMPAT
232 return is_compat_task();
233#else
234 return (BITS_PER_LONG == 32);
235#endif
236}
237
230/* 238/*
231 * These functions convert from the major/minor hash to an f_pos 239 * These functions convert from the major/minor hash to an f_pos
232 * value. 240 * value for dx directories
233 * 241 *
234 * Currently we only use major hash numer. This is unfortunate, but 242 * Upper layer (for example NFS) should specify FMODE_32BITHASH or
235 * on 32-bit machines, the same VFS interface is used for lseek and 243 * FMODE_64BITHASH explicitly. On the other hand, we allow ext3 to be mounted
236 * llseek, so if we use the 64 bit offset, then the 32-bit versions of 244 * directly on both 32-bit and 64-bit nodes, under such case, neither
237 * lseek/telldir/seekdir will blow out spectacularly, and from within 245 * FMODE_32BITHASH nor FMODE_64BITHASH is specified.
238 * the ext2 low-level routine, we don't know if we're being called by
239 * a 64-bit version of the system call or the 32-bit version of the
240 * system call. Worse yet, NFSv2 only allows for a 32-bit readdir
241 * cookie. Sigh.
242 */ 246 */
243#define hash2pos(major, minor) (major >> 1) 247static inline loff_t hash2pos(struct file *filp, __u32 major, __u32 minor)
244#define pos2maj_hash(pos) ((pos << 1) & 0xffffffff) 248{
245#define pos2min_hash(pos) (0) 249 if ((filp->f_mode & FMODE_32BITHASH) ||
250 (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
251 return major >> 1;
252 else
253 return ((__u64)(major >> 1) << 32) | (__u64)minor;
254}
255
256static inline __u32 pos2maj_hash(struct file *filp, loff_t pos)
257{
258 if ((filp->f_mode & FMODE_32BITHASH) ||
259 (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
260 return (pos << 1) & 0xffffffff;
261 else
262 return ((pos >> 32) << 1) & 0xffffffff;
263}
264
265static inline __u32 pos2min_hash(struct file *filp, loff_t pos)
266{
267 if ((filp->f_mode & FMODE_32BITHASH) ||
268 (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
269 return 0;
270 else
271 return pos & 0xffffffff;
272}
273
274/*
275 * Return 32- or 64-bit end-of-file for dx directories
276 */
277static inline loff_t ext3_get_htree_eof(struct file *filp)
278{
279 if ((filp->f_mode & FMODE_32BITHASH) ||
280 (!(filp->f_mode & FMODE_64BITHASH) && is_32bit_api()))
281 return EXT3_HTREE_EOF_32BIT;
282 else
283 return EXT3_HTREE_EOF_64BIT;
284}
285
286
287/*
288 * ext3_dir_llseek() calls generic_file_llseek[_size]() to handle both
289 * non-htree and htree directories, where the "offset" is in terms
290 * of the filename hash value instead of the byte offset.
291 *
292 * Because we may return a 64-bit hash that is well beyond s_maxbytes,
293 * we need to pass the max hash as the maximum allowable offset in
294 * the htree directory case.
295 *
296 * NOTE: offsets obtained *before* ext3_set_inode_flag(dir, EXT3_INODE_INDEX)
297 * will be invalid once the directory was converted into a dx directory
298 */
299loff_t ext3_dir_llseek(struct file *file, loff_t offset, int origin)
300{
301 struct inode *inode = file->f_mapping->host;
302 int dx_dir = is_dx_dir(inode);
303
304 if (likely(dx_dir))
305 return generic_file_llseek_size(file, offset, origin,
306 ext3_get_htree_eof(file));
307 else
308 return generic_file_llseek(file, offset, origin);
309}
246 310
247/* 311/*
248 * This structure holds the nodes of the red-black tree used to store 312 * This structure holds the nodes of the red-black tree used to store
@@ -303,15 +367,16 @@ static void free_rb_tree_fname(struct rb_root *root)
303} 367}
304 368
305 369
306static struct dir_private_info *ext3_htree_create_dir_info(loff_t pos) 370static struct dir_private_info *ext3_htree_create_dir_info(struct file *filp,
371 loff_t pos)
307{ 372{
308 struct dir_private_info *p; 373 struct dir_private_info *p;
309 374
310 p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL); 375 p = kzalloc(sizeof(struct dir_private_info), GFP_KERNEL);
311 if (!p) 376 if (!p)
312 return NULL; 377 return NULL;
313 p->curr_hash = pos2maj_hash(pos); 378 p->curr_hash = pos2maj_hash(filp, pos);
314 p->curr_minor_hash = pos2min_hash(pos); 379 p->curr_minor_hash = pos2min_hash(filp, pos);
315 return p; 380 return p;
316} 381}
317 382
@@ -401,7 +466,7 @@ static int call_filldir(struct file * filp, void * dirent,
401 printk("call_filldir: called with null fname?!?\n"); 466 printk("call_filldir: called with null fname?!?\n");
402 return 0; 467 return 0;
403 } 468 }
404 curr_pos = hash2pos(fname->hash, fname->minor_hash); 469 curr_pos = hash2pos(filp, fname->hash, fname->minor_hash);
405 while (fname) { 470 while (fname) {
406 error = filldir(dirent, fname->name, 471 error = filldir(dirent, fname->name,
407 fname->name_len, curr_pos, 472 fname->name_len, curr_pos,
@@ -426,13 +491,13 @@ static int ext3_dx_readdir(struct file * filp,
426 int ret; 491 int ret;
427 492
428 if (!info) { 493 if (!info) {
429 info = ext3_htree_create_dir_info(filp->f_pos); 494 info = ext3_htree_create_dir_info(filp, filp->f_pos);
430 if (!info) 495 if (!info)
431 return -ENOMEM; 496 return -ENOMEM;
432 filp->private_data = info; 497 filp->private_data = info;
433 } 498 }
434 499
435 if (filp->f_pos == EXT3_HTREE_EOF) 500 if (filp->f_pos == ext3_get_htree_eof(filp))
436 return 0; /* EOF */ 501 return 0; /* EOF */
437 502
438 /* Some one has messed with f_pos; reset the world */ 503 /* Some one has messed with f_pos; reset the world */
@@ -440,8 +505,8 @@ static int ext3_dx_readdir(struct file * filp,
440 free_rb_tree_fname(&info->root); 505 free_rb_tree_fname(&info->root);
441 info->curr_node = NULL; 506 info->curr_node = NULL;
442 info->extra_fname = NULL; 507 info->extra_fname = NULL;
443 info->curr_hash = pos2maj_hash(filp->f_pos); 508 info->curr_hash = pos2maj_hash(filp, filp->f_pos);
444 info->curr_minor_hash = pos2min_hash(filp->f_pos); 509 info->curr_minor_hash = pos2min_hash(filp, filp->f_pos);
445 } 510 }
446 511
447 /* 512 /*
@@ -473,7 +538,7 @@ static int ext3_dx_readdir(struct file * filp,
473 if (ret < 0) 538 if (ret < 0)
474 return ret; 539 return ret;
475 if (ret == 0) { 540 if (ret == 0) {
476 filp->f_pos = EXT3_HTREE_EOF; 541 filp->f_pos = ext3_get_htree_eof(filp);
477 break; 542 break;
478 } 543 }
479 info->curr_node = rb_first(&info->root); 544 info->curr_node = rb_first(&info->root);
@@ -493,7 +558,7 @@ static int ext3_dx_readdir(struct file * filp,
493 info->curr_minor_hash = fname->minor_hash; 558 info->curr_minor_hash = fname->minor_hash;
494 } else { 559 } else {
495 if (info->next_hash == ~0) { 560 if (info->next_hash == ~0) {
496 filp->f_pos = EXT3_HTREE_EOF; 561 filp->f_pos = ext3_get_htree_eof(filp);
497 break; 562 break;
498 } 563 }
499 info->curr_hash = info->next_hash; 564 info->curr_hash = info->next_hash;
@@ -512,3 +577,15 @@ static int ext3_release_dir (struct inode * inode, struct file * filp)
512 577
513 return 0; 578 return 0;
514} 579}
580
581const struct file_operations ext3_dir_operations = {
582 .llseek = ext3_dir_llseek,
583 .read = generic_read_dir,
584 .readdir = ext3_readdir,
585 .unlocked_ioctl = ext3_ioctl,
586#ifdef CONFIG_COMPAT
587 .compat_ioctl = ext3_compat_ioctl,
588#endif
589 .fsync = ext3_sync_file,
590 .release = ext3_release_dir,
591};
diff --git a/fs/ext3/ext3.h b/fs/ext3/ext3.h
index 7977973a24f0..e85ff15a060e 100644
--- a/fs/ext3/ext3.h
+++ b/fs/ext3/ext3.h
@@ -920,7 +920,11 @@ struct dx_hash_info
920 u32 *seed; 920 u32 *seed;
921}; 921};
922 922
923#define EXT3_HTREE_EOF 0x7fffffff 923
924/* 32 and 64 bit signed EOF for dx directories */
925#define EXT3_HTREE_EOF_32BIT ((1UL << (32 - 1)) - 1)
926#define EXT3_HTREE_EOF_64BIT ((1ULL << (64 - 1)) - 1)
927
924 928
925/* 929/*
926 * Control parameters used by ext3_htree_next_block 930 * Control parameters used by ext3_htree_next_block
diff --git a/fs/ext3/hash.c b/fs/ext3/hash.c
index d10231ddcf8a..ede315cdf126 100644
--- a/fs/ext3/hash.c
+++ b/fs/ext3/hash.c
@@ -198,8 +198,8 @@ int ext3fs_dirhash(const char *name, int len, struct dx_hash_info *hinfo)
198 return -1; 198 return -1;
199 } 199 }
200 hash = hash & ~1; 200 hash = hash & ~1;
201 if (hash == (EXT3_HTREE_EOF << 1)) 201 if (hash == (EXT3_HTREE_EOF_32BIT << 1))
202 hash = (EXT3_HTREE_EOF-1) << 1; 202 hash = (EXT3_HTREE_EOF_32BIT - 1) << 1;
203 hinfo->hash = hash; 203 hinfo->hash = hash;
204 hinfo->minor_hash = minor_hash; 204 hinfo->minor_hash = minor_hash;
205 return 0; 205 return 0;
diff --git a/fs/ext3/ialloc.c b/fs/ext3/ialloc.c
index e3c39e4cec19..082afd78b107 100644
--- a/fs/ext3/ialloc.c
+++ b/fs/ext3/ialloc.c
@@ -180,8 +180,7 @@ error_return:
180 * It's OK to put directory into a group unless 180 * It's OK to put directory into a group unless
181 * it has too many directories already (max_dirs) or 181 * it has too many directories already (max_dirs) or
182 * it has too few free inodes left (min_inodes) or 182 * it has too few free inodes left (min_inodes) or
183 * it has too few free blocks left (min_blocks) or 183 * it has too few free blocks left (min_blocks).
184 * it's already running too large debt (max_debt).
185 * Parent's group is preferred, if it doesn't satisfy these 184 * Parent's group is preferred, if it doesn't satisfy these
186 * conditions we search cyclically through the rest. If none 185 * conditions we search cyclically through the rest. If none
187 * of the groups look good we just look for a group with more 186 * of the groups look good we just look for a group with more
@@ -191,21 +190,16 @@ error_return:
191 * when we allocate an inode, within 0--255. 190 * when we allocate an inode, within 0--255.
192 */ 191 */
193 192
194#define INODE_COST 64
195#define BLOCK_COST 256
196
197static int find_group_orlov(struct super_block *sb, struct inode *parent) 193static int find_group_orlov(struct super_block *sb, struct inode *parent)
198{ 194{
199 int parent_group = EXT3_I(parent)->i_block_group; 195 int parent_group = EXT3_I(parent)->i_block_group;
200 struct ext3_sb_info *sbi = EXT3_SB(sb); 196 struct ext3_sb_info *sbi = EXT3_SB(sb);
201 struct ext3_super_block *es = sbi->s_es;
202 int ngroups = sbi->s_groups_count; 197 int ngroups = sbi->s_groups_count;
203 int inodes_per_group = EXT3_INODES_PER_GROUP(sb); 198 int inodes_per_group = EXT3_INODES_PER_GROUP(sb);
204 unsigned int freei, avefreei; 199 unsigned int freei, avefreei;
205 ext3_fsblk_t freeb, avefreeb; 200 ext3_fsblk_t freeb, avefreeb;
206 ext3_fsblk_t blocks_per_dir;
207 unsigned int ndirs; 201 unsigned int ndirs;
208 int max_debt, max_dirs, min_inodes; 202 int max_dirs, min_inodes;
209 ext3_grpblk_t min_blocks; 203 ext3_grpblk_t min_blocks;
210 int group = -1, i; 204 int group = -1, i;
211 struct ext3_group_desc *desc; 205 struct ext3_group_desc *desc;
@@ -242,20 +236,10 @@ static int find_group_orlov(struct super_block *sb, struct inode *parent)
242 goto fallback; 236 goto fallback;
243 } 237 }
244 238
245 blocks_per_dir = (le32_to_cpu(es->s_blocks_count) - freeb) / ndirs;
246
247 max_dirs = ndirs / ngroups + inodes_per_group / 16; 239 max_dirs = ndirs / ngroups + inodes_per_group / 16;
248 min_inodes = avefreei - inodes_per_group / 4; 240 min_inodes = avefreei - inodes_per_group / 4;
249 min_blocks = avefreeb - EXT3_BLOCKS_PER_GROUP(sb) / 4; 241 min_blocks = avefreeb - EXT3_BLOCKS_PER_GROUP(sb) / 4;
250 242
251 max_debt = EXT3_BLOCKS_PER_GROUP(sb) / max(blocks_per_dir, (ext3_fsblk_t)BLOCK_COST);
252 if (max_debt * INODE_COST > inodes_per_group)
253 max_debt = inodes_per_group / INODE_COST;
254 if (max_debt > 255)
255 max_debt = 255;
256 if (max_debt == 0)
257 max_debt = 1;
258
259 for (i = 0; i < ngroups; i++) { 243 for (i = 0; i < ngroups; i++) {
260 group = (parent_group + i) % ngroups; 244 group = (parent_group + i) % ngroups;
261 desc = ext3_get_group_desc (sb, group, NULL); 245 desc = ext3_get_group_desc (sb, group, NULL);
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index a09790a412b1..9a4a5c48b1c9 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -272,18 +272,18 @@ void ext3_evict_inode (struct inode *inode)
272 if (ext3_mark_inode_dirty(handle, inode)) { 272 if (ext3_mark_inode_dirty(handle, inode)) {
273 /* If that failed, just dquot_drop() and be done with that */ 273 /* If that failed, just dquot_drop() and be done with that */
274 dquot_drop(inode); 274 dquot_drop(inode);
275 end_writeback(inode); 275 clear_inode(inode);
276 } else { 276 } else {
277 ext3_xattr_delete_inode(handle, inode); 277 ext3_xattr_delete_inode(handle, inode);
278 dquot_free_inode(inode); 278 dquot_free_inode(inode);
279 dquot_drop(inode); 279 dquot_drop(inode);
280 end_writeback(inode); 280 clear_inode(inode);
281 ext3_free_inode(handle, inode); 281 ext3_free_inode(handle, inode);
282 } 282 }
283 ext3_journal_stop(handle); 283 ext3_journal_stop(handle);
284 return; 284 return;
285no_delete: 285no_delete:
286 end_writeback(inode); 286 clear_inode(inode);
287 dquot_drop(inode); 287 dquot_drop(inode);
288} 288}
289 289
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index 94ef7e616129..8c3a44b7c375 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -3015,7 +3015,6 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
3015 (unsigned long long)off, (unsigned long long)len); 3015 (unsigned long long)off, (unsigned long long)len);
3016 return -EIO; 3016 return -EIO;
3017 } 3017 }
3018 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
3019 bh = ext3_bread(handle, inode, blk, 1, &err); 3018 bh = ext3_bread(handle, inode, blk, 1, &err);
3020 if (!bh) 3019 if (!bh)
3021 goto out; 3020 goto out;
@@ -3039,10 +3038,8 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
3039 } 3038 }
3040 brelse(bh); 3039 brelse(bh);
3041out: 3040out:
3042 if (err) { 3041 if (err)
3043 mutex_unlock(&inode->i_mutex);
3044 return err; 3042 return err;
3045 }
3046 if (inode->i_size < off + len) { 3043 if (inode->i_size < off + len) {
3047 i_size_write(inode, off + len); 3044 i_size_write(inode, off + len);
3048 EXT3_I(inode)->i_disksize = inode->i_size; 3045 EXT3_I(inode)->i_disksize = inode->i_size;
@@ -3050,7 +3047,6 @@ out:
3050 inode->i_version++; 3047 inode->i_version++;
3051 inode->i_mtime = inode->i_ctime = CURRENT_TIME; 3048 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
3052 ext3_mark_inode_dirty(handle, inode); 3049 ext3_mark_inode_dirty(handle, inode);
3053 mutex_unlock(&inode->i_mutex);
3054 return len; 3050 return len;
3055} 3051}
3056 3052
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 436b4223df66..35b5954489ee 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -1007,7 +1007,7 @@ static void destroy_inodecache(void)
1007void ext4_clear_inode(struct inode *inode) 1007void ext4_clear_inode(struct inode *inode)
1008{ 1008{
1009 invalidate_inode_buffers(inode); 1009 invalidate_inode_buffers(inode);
1010 end_writeback(inode); 1010 clear_inode(inode);
1011 dquot_drop(inode); 1011 dquot_drop(inode);
1012 ext4_discard_preallocations(inode); 1012 ext4_discard_preallocations(inode);
1013 if (EXT4_I(inode)->jinode) { 1013 if (EXT4_I(inode)->jinode) {
@@ -4758,7 +4758,6 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
4758 return -EIO; 4758 return -EIO;
4759 } 4759 }
4760 4760
4761 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
4762 bh = ext4_bread(handle, inode, blk, 1, &err); 4761 bh = ext4_bread(handle, inode, blk, 1, &err);
4763 if (!bh) 4762 if (!bh)
4764 goto out; 4763 goto out;
@@ -4774,16 +4773,13 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
4774 err = ext4_handle_dirty_metadata(handle, NULL, bh); 4773 err = ext4_handle_dirty_metadata(handle, NULL, bh);
4775 brelse(bh); 4774 brelse(bh);
4776out: 4775out:
4777 if (err) { 4776 if (err)
4778 mutex_unlock(&inode->i_mutex);
4779 return err; 4777 return err;
4780 }
4781 if (inode->i_size < off + len) { 4778 if (inode->i_size < off + len) {
4782 i_size_write(inode, off + len); 4779 i_size_write(inode, off + len);
4783 EXT4_I(inode)->i_disksize = inode->i_size; 4780 EXT4_I(inode)->i_disksize = inode->i_size;
4784 ext4_mark_inode_dirty(handle, inode); 4781 ext4_mark_inode_dirty(handle, inode);
4785 } 4782 }
4786 mutex_unlock(&inode->i_mutex);
4787 return len; 4783 return len;
4788} 4784}
4789 4785
diff --git a/fs/fat/inode.c b/fs/fat/inode.c
index 21687e31acc0..b3d290c1b513 100644
--- a/fs/fat/inode.c
+++ b/fs/fat/inode.c
@@ -454,7 +454,7 @@ static void fat_evict_inode(struct inode *inode)
454 fat_truncate_blocks(inode, 0); 454 fat_truncate_blocks(inode, 0);
455 } 455 }
456 invalidate_inode_buffers(inode); 456 invalidate_inode_buffers(inode);
457 end_writeback(inode); 457 clear_inode(inode);
458 fat_cache_inval_inode(inode); 458 fat_cache_inval_inode(inode);
459 fat_detach(inode); 459 fat_detach(inode);
460} 460}
diff --git a/fs/freevxfs/vxfs_inode.c b/fs/freevxfs/vxfs_inode.c
index cf9ef918a2a9..ef67c95f12d4 100644
--- a/fs/freevxfs/vxfs_inode.c
+++ b/fs/freevxfs/vxfs_inode.c
@@ -355,6 +355,6 @@ void
355vxfs_evict_inode(struct inode *ip) 355vxfs_evict_inode(struct inode *ip)
356{ 356{
357 truncate_inode_pages(&ip->i_data, 0); 357 truncate_inode_pages(&ip->i_data, 0);
358 end_writeback(ip); 358 clear_inode(ip);
359 call_rcu(&ip->i_rcu, vxfs_i_callback); 359 call_rcu(&ip->i_rcu, vxfs_i_callback);
360} 360}
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index 539f36cf3e4a..8d2fb8c88cf3 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -231,11 +231,8 @@ static void requeue_io(struct inode *inode, struct bdi_writeback *wb)
231 231
232static void inode_sync_complete(struct inode *inode) 232static void inode_sync_complete(struct inode *inode)
233{ 233{
234 /* 234 inode->i_state &= ~I_SYNC;
235 * Prevent speculative execution through 235 /* Waiters must see I_SYNC cleared before being woken up */
236 * spin_unlock(&wb->list_lock);
237 */
238
239 smp_mb(); 236 smp_mb();
240 wake_up_bit(&inode->i_state, __I_SYNC); 237 wake_up_bit(&inode->i_state, __I_SYNC);
241} 238}
@@ -329,10 +326,12 @@ static int write_inode(struct inode *inode, struct writeback_control *wbc)
329} 326}
330 327
331/* 328/*
332 * Wait for writeback on an inode to complete. 329 * Wait for writeback on an inode to complete. Called with i_lock held.
330 * Caller must make sure inode cannot go away when we drop i_lock.
333 */ 331 */
334static void inode_wait_for_writeback(struct inode *inode, 332static void __inode_wait_for_writeback(struct inode *inode)
335 struct bdi_writeback *wb) 333 __releases(inode->i_lock)
334 __acquires(inode->i_lock)
336{ 335{
337 DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC); 336 DEFINE_WAIT_BIT(wq, &inode->i_state, __I_SYNC);
338 wait_queue_head_t *wqh; 337 wait_queue_head_t *wqh;
@@ -340,70 +339,119 @@ static void inode_wait_for_writeback(struct inode *inode,
340 wqh = bit_waitqueue(&inode->i_state, __I_SYNC); 339 wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
341 while (inode->i_state & I_SYNC) { 340 while (inode->i_state & I_SYNC) {
342 spin_unlock(&inode->i_lock); 341 spin_unlock(&inode->i_lock);
343 spin_unlock(&wb->list_lock);
344 __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE); 342 __wait_on_bit(wqh, &wq, inode_wait, TASK_UNINTERRUPTIBLE);
345 spin_lock(&wb->list_lock);
346 spin_lock(&inode->i_lock); 343 spin_lock(&inode->i_lock);
347 } 344 }
348} 345}
349 346
350/* 347/*
351 * Write out an inode's dirty pages. Called under wb->list_lock and 348 * Wait for writeback on an inode to complete. Caller must have inode pinned.
352 * inode->i_lock. Either the caller has an active reference on the inode or
353 * the inode has I_WILL_FREE set.
354 *
355 * If `wait' is set, wait on the writeout.
356 *
357 * The whole writeout design is quite complex and fragile. We want to avoid
358 * starvation of particular inodes when others are being redirtied, prevent
359 * livelocks, etc.
360 */ 349 */
361static int 350void inode_wait_for_writeback(struct inode *inode)
362writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
363 struct writeback_control *wbc)
364{ 351{
365 struct address_space *mapping = inode->i_mapping; 352 spin_lock(&inode->i_lock);
366 long nr_to_write = wbc->nr_to_write; 353 __inode_wait_for_writeback(inode);
367 unsigned dirty; 354 spin_unlock(&inode->i_lock);
368 int ret; 355}
369 356
370 assert_spin_locked(&wb->list_lock); 357/*
371 assert_spin_locked(&inode->i_lock); 358 * Sleep until I_SYNC is cleared. This function must be called with i_lock
359 * held and drops it. It is aimed for callers not holding any inode reference
360 * so once i_lock is dropped, inode can go away.
361 */
362static void inode_sleep_on_writeback(struct inode *inode)
363 __releases(inode->i_lock)
364{
365 DEFINE_WAIT(wait);
366 wait_queue_head_t *wqh = bit_waitqueue(&inode->i_state, __I_SYNC);
367 int sleep;
372 368
373 if (!atomic_read(&inode->i_count)) 369 prepare_to_wait(wqh, &wait, TASK_UNINTERRUPTIBLE);
374 WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING))); 370 sleep = inode->i_state & I_SYNC;
375 else 371 spin_unlock(&inode->i_lock);
376 WARN_ON(inode->i_state & I_WILL_FREE); 372 if (sleep)
373 schedule();
374 finish_wait(wqh, &wait);
375}
377 376
378 if (inode->i_state & I_SYNC) { 377/*
378 * Find proper writeback list for the inode depending on its current state and
379 * possibly also change of its state while we were doing writeback. Here we
380 * handle things such as livelock prevention or fairness of writeback among
381 * inodes. This function can be called only by flusher thread - noone else
382 * processes all inodes in writeback lists and requeueing inodes behind flusher
383 * thread's back can have unexpected consequences.
384 */
385static void requeue_inode(struct inode *inode, struct bdi_writeback *wb,
386 struct writeback_control *wbc)
387{
388 if (inode->i_state & I_FREEING)
389 return;
390
391 /*
392 * Sync livelock prevention. Each inode is tagged and synced in one
393 * shot. If still dirty, it will be redirty_tail()'ed below. Update
394 * the dirty time to prevent enqueue and sync it again.
395 */
396 if ((inode->i_state & I_DIRTY) &&
397 (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
398 inode->dirtied_when = jiffies;
399
400 if (wbc->pages_skipped) {
379 /* 401 /*
380 * If this inode is locked for writeback and we are not doing 402 * writeback is not making progress due to locked
381 * writeback-for-data-integrity, move it to b_more_io so that 403 * buffers. Skip this inode for now.
382 * writeback can proceed with the other inodes on s_io.
383 *
384 * We'll have another go at writing back this inode when we
385 * completed a full scan of b_io.
386 */ 404 */
387 if (wbc->sync_mode != WB_SYNC_ALL) { 405 redirty_tail(inode, wb);
406 return;
407 }
408
409 if (mapping_tagged(inode->i_mapping, PAGECACHE_TAG_DIRTY)) {
410 /*
411 * We didn't write back all the pages. nfs_writepages()
412 * sometimes bales out without doing anything.
413 */
414 if (wbc->nr_to_write <= 0) {
415 /* Slice used up. Queue for next turn. */
388 requeue_io(inode, wb); 416 requeue_io(inode, wb);
389 trace_writeback_single_inode_requeue(inode, wbc, 417 } else {
390 nr_to_write); 418 /*
391 return 0; 419 * Writeback blocked by something other than
420 * congestion. Delay the inode for some time to
421 * avoid spinning on the CPU (100% iowait)
422 * retrying writeback of the dirty page/inode
423 * that cannot be performed immediately.
424 */
425 redirty_tail(inode, wb);
392 } 426 }
393 427 } else if (inode->i_state & I_DIRTY) {
394 /* 428 /*
395 * It's a data-integrity sync. We must wait. 429 * Filesystems can dirty the inode during writeback operations,
430 * such as delayed allocation during submission or metadata
431 * updates after data IO completion.
396 */ 432 */
397 inode_wait_for_writeback(inode, wb); 433 redirty_tail(inode, wb);
434 } else {
435 /* The inode is clean. Remove from writeback lists. */
436 list_del_init(&inode->i_wb_list);
398 } 437 }
438}
399 439
400 BUG_ON(inode->i_state & I_SYNC); 440/*
441 * Write out an inode and its dirty pages. Do not update the writeback list
442 * linkage. That is left to the caller. The caller is also responsible for
443 * setting I_SYNC flag and calling inode_sync_complete() to clear it.
444 */
445static int
446__writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
447 struct writeback_control *wbc)
448{
449 struct address_space *mapping = inode->i_mapping;
450 long nr_to_write = wbc->nr_to_write;
451 unsigned dirty;
452 int ret;
401 453
402 /* Set I_SYNC, reset I_DIRTY_PAGES */ 454 WARN_ON(!(inode->i_state & I_SYNC));
403 inode->i_state |= I_SYNC;
404 inode->i_state &= ~I_DIRTY_PAGES;
405 spin_unlock(&inode->i_lock);
406 spin_unlock(&wb->list_lock);
407 455
408 ret = do_writepages(mapping, wbc); 456 ret = do_writepages(mapping, wbc);
409 457
@@ -424,6 +472,9 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
424 * write_inode() 472 * write_inode()
425 */ 473 */
426 spin_lock(&inode->i_lock); 474 spin_lock(&inode->i_lock);
475 /* Clear I_DIRTY_PAGES if we've written out all dirty pages */
476 if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
477 inode->i_state &= ~I_DIRTY_PAGES;
427 dirty = inode->i_state & I_DIRTY; 478 dirty = inode->i_state & I_DIRTY;
428 inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC); 479 inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
429 spin_unlock(&inode->i_lock); 480 spin_unlock(&inode->i_lock);
@@ -433,60 +484,67 @@ writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
433 if (ret == 0) 484 if (ret == 0)
434 ret = err; 485 ret = err;
435 } 486 }
487 trace_writeback_single_inode(inode, wbc, nr_to_write);
488 return ret;
489}
490
491/*
492 * Write out an inode's dirty pages. Either the caller has an active reference
493 * on the inode or the inode has I_WILL_FREE set.
494 *
495 * This function is designed to be called for writing back one inode which
496 * we go e.g. from filesystem. Flusher thread uses __writeback_single_inode()
497 * and does more profound writeback list handling in writeback_sb_inodes().
498 */
499static int
500writeback_single_inode(struct inode *inode, struct bdi_writeback *wb,
501 struct writeback_control *wbc)
502{
503 int ret = 0;
436 504
437 spin_lock(&wb->list_lock);
438 spin_lock(&inode->i_lock); 505 spin_lock(&inode->i_lock);
439 inode->i_state &= ~I_SYNC; 506 if (!atomic_read(&inode->i_count))
440 if (!(inode->i_state & I_FREEING)) { 507 WARN_ON(!(inode->i_state & (I_WILL_FREE|I_FREEING)));
508 else
509 WARN_ON(inode->i_state & I_WILL_FREE);
510
511 if (inode->i_state & I_SYNC) {
512 if (wbc->sync_mode != WB_SYNC_ALL)
513 goto out;
441 /* 514 /*
442 * Sync livelock prevention. Each inode is tagged and synced in 515 * It's a data-integrity sync. We must wait. Since callers hold
443 * one shot. If still dirty, it will be redirty_tail()'ed below. 516 * inode reference or inode has I_WILL_FREE set, it cannot go
444 * Update the dirty time to prevent enqueue and sync it again. 517 * away under us.
445 */ 518 */
446 if ((inode->i_state & I_DIRTY) && 519 __inode_wait_for_writeback(inode);
447 (wbc->sync_mode == WB_SYNC_ALL || wbc->tagged_writepages))
448 inode->dirtied_when = jiffies;
449
450 if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY)) {
451 /*
452 * We didn't write back all the pages. nfs_writepages()
453 * sometimes bales out without doing anything.
454 */
455 inode->i_state |= I_DIRTY_PAGES;
456 if (wbc->nr_to_write <= 0) {
457 /*
458 * slice used up: queue for next turn
459 */
460 requeue_io(inode, wb);
461 } else {
462 /*
463 * Writeback blocked by something other than
464 * congestion. Delay the inode for some time to
465 * avoid spinning on the CPU (100% iowait)
466 * retrying writeback of the dirty page/inode
467 * that cannot be performed immediately.
468 */
469 redirty_tail(inode, wb);
470 }
471 } else if (inode->i_state & I_DIRTY) {
472 /*
473 * Filesystems can dirty the inode during writeback
474 * operations, such as delayed allocation during
475 * submission or metadata updates after data IO
476 * completion.
477 */
478 redirty_tail(inode, wb);
479 } else {
480 /*
481 * The inode is clean. At this point we either have
482 * a reference to the inode or it's on it's way out.
483 * No need to add it back to the LRU.
484 */
485 list_del_init(&inode->i_wb_list);
486 }
487 } 520 }
521 WARN_ON(inode->i_state & I_SYNC);
522 /*
523 * Skip inode if it is clean. We don't want to mess with writeback
524 * lists in this function since flusher thread may be doing for example
525 * sync in parallel and if we move the inode, it could get skipped. So
526 * here we make sure inode is on some writeback list and leave it there
527 * unless we have completely cleaned the inode.
528 */
529 if (!(inode->i_state & I_DIRTY))
530 goto out;
531 inode->i_state |= I_SYNC;
532 spin_unlock(&inode->i_lock);
533
534 ret = __writeback_single_inode(inode, wb, wbc);
535
536 spin_lock(&wb->list_lock);
537 spin_lock(&inode->i_lock);
538 /*
539 * If inode is clean, remove it from writeback lists. Otherwise don't
540 * touch it. See comment above for explanation.
541 */
542 if (!(inode->i_state & I_DIRTY))
543 list_del_init(&inode->i_wb_list);
544 spin_unlock(&wb->list_lock);
488 inode_sync_complete(inode); 545 inode_sync_complete(inode);
489 trace_writeback_single_inode(inode, wbc, nr_to_write); 546out:
547 spin_unlock(&inode->i_lock);
490 return ret; 548 return ret;
491} 549}
492 550
@@ -580,29 +638,57 @@ static long writeback_sb_inodes(struct super_block *sb,
580 redirty_tail(inode, wb); 638 redirty_tail(inode, wb);
581 continue; 639 continue;
582 } 640 }
583 __iget(inode); 641 if ((inode->i_state & I_SYNC) && wbc.sync_mode != WB_SYNC_ALL) {
642 /*
643 * If this inode is locked for writeback and we are not
644 * doing writeback-for-data-integrity, move it to
645 * b_more_io so that writeback can proceed with the
646 * other inodes on s_io.
647 *
648 * We'll have another go at writing back this inode
649 * when we completed a full scan of b_io.
650 */
651 spin_unlock(&inode->i_lock);
652 requeue_io(inode, wb);
653 trace_writeback_sb_inodes_requeue(inode);
654 continue;
655 }
656 spin_unlock(&wb->list_lock);
657
658 /*
659 * We already requeued the inode if it had I_SYNC set and we
660 * are doing WB_SYNC_NONE writeback. So this catches only the
661 * WB_SYNC_ALL case.
662 */
663 if (inode->i_state & I_SYNC) {
664 /* Wait for I_SYNC. This function drops i_lock... */
665 inode_sleep_on_writeback(inode);
666 /* Inode may be gone, start again */
667 continue;
668 }
669 inode->i_state |= I_SYNC;
670 spin_unlock(&inode->i_lock);
671
584 write_chunk = writeback_chunk_size(wb->bdi, work); 672 write_chunk = writeback_chunk_size(wb->bdi, work);
585 wbc.nr_to_write = write_chunk; 673 wbc.nr_to_write = write_chunk;
586 wbc.pages_skipped = 0; 674 wbc.pages_skipped = 0;
587 675
588 writeback_single_inode(inode, wb, &wbc); 676 /*
677 * We use I_SYNC to pin the inode in memory. While it is set
678 * evict_inode() will wait so the inode cannot be freed.
679 */
680 __writeback_single_inode(inode, wb, &wbc);
589 681
590 work->nr_pages -= write_chunk - wbc.nr_to_write; 682 work->nr_pages -= write_chunk - wbc.nr_to_write;
591 wrote += write_chunk - wbc.nr_to_write; 683 wrote += write_chunk - wbc.nr_to_write;
684 spin_lock(&wb->list_lock);
685 spin_lock(&inode->i_lock);
592 if (!(inode->i_state & I_DIRTY)) 686 if (!(inode->i_state & I_DIRTY))
593 wrote++; 687 wrote++;
594 if (wbc.pages_skipped) { 688 requeue_inode(inode, wb, &wbc);
595 /* 689 inode_sync_complete(inode);
596 * writeback is not making progress due to locked
597 * buffers. Skip this inode for now.
598 */
599 redirty_tail(inode, wb);
600 }
601 spin_unlock(&inode->i_lock); 690 spin_unlock(&inode->i_lock);
602 spin_unlock(&wb->list_lock); 691 cond_resched_lock(&wb->list_lock);
603 iput(inode);
604 cond_resched();
605 spin_lock(&wb->list_lock);
606 /* 692 /*
607 * bail out to wb_writeback() often enough to check 693 * bail out to wb_writeback() often enough to check
608 * background threshold and other termination conditions. 694 * background threshold and other termination conditions.
@@ -796,8 +882,10 @@ static long wb_writeback(struct bdi_writeback *wb,
796 trace_writeback_wait(wb->bdi, work); 882 trace_writeback_wait(wb->bdi, work);
797 inode = wb_inode(wb->b_more_io.prev); 883 inode = wb_inode(wb->b_more_io.prev);
798 spin_lock(&inode->i_lock); 884 spin_lock(&inode->i_lock);
799 inode_wait_for_writeback(inode, wb); 885 spin_unlock(&wb->list_lock);
800 spin_unlock(&inode->i_lock); 886 /* This function drops i_lock... */
887 inode_sleep_on_writeback(inode);
888 spin_lock(&wb->list_lock);
801 } 889 }
802 } 890 }
803 spin_unlock(&wb->list_lock); 891 spin_unlock(&wb->list_lock);
@@ -1331,7 +1419,6 @@ EXPORT_SYMBOL(sync_inodes_sb);
1331int write_inode_now(struct inode *inode, int sync) 1419int write_inode_now(struct inode *inode, int sync)
1332{ 1420{
1333 struct bdi_writeback *wb = &inode_to_bdi(inode)->wb; 1421 struct bdi_writeback *wb = &inode_to_bdi(inode)->wb;
1334 int ret;
1335 struct writeback_control wbc = { 1422 struct writeback_control wbc = {
1336 .nr_to_write = LONG_MAX, 1423 .nr_to_write = LONG_MAX,
1337 .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE, 1424 .sync_mode = sync ? WB_SYNC_ALL : WB_SYNC_NONE,
@@ -1343,12 +1430,7 @@ int write_inode_now(struct inode *inode, int sync)
1343 wbc.nr_to_write = 0; 1430 wbc.nr_to_write = 0;
1344 1431
1345 might_sleep(); 1432 might_sleep();
1346 spin_lock(&wb->list_lock); 1433 return writeback_single_inode(inode, wb, &wbc);
1347 spin_lock(&inode->i_lock);
1348 ret = writeback_single_inode(inode, wb, &wbc);
1349 spin_unlock(&inode->i_lock);
1350 spin_unlock(&wb->list_lock);
1351 return ret;
1352} 1434}
1353EXPORT_SYMBOL(write_inode_now); 1435EXPORT_SYMBOL(write_inode_now);
1354 1436
@@ -1365,15 +1447,7 @@ EXPORT_SYMBOL(write_inode_now);
1365 */ 1447 */
1366int sync_inode(struct inode *inode, struct writeback_control *wbc) 1448int sync_inode(struct inode *inode, struct writeback_control *wbc)
1367{ 1449{
1368 struct bdi_writeback *wb = &inode_to_bdi(inode)->wb; 1450 return writeback_single_inode(inode, &inode_to_bdi(inode)->wb, wbc);
1369 int ret;
1370
1371 spin_lock(&wb->list_lock);
1372 spin_lock(&inode->i_lock);
1373 ret = writeback_single_inode(inode, wb, wbc);
1374 spin_unlock(&inode->i_lock);
1375 spin_unlock(&wb->list_lock);
1376 return ret;
1377} 1451}
1378EXPORT_SYMBOL(sync_inode); 1452EXPORT_SYMBOL(sync_inode);
1379 1453
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 26783eb2b1fc..56f6dcf30768 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -122,7 +122,7 @@ static void fuse_destroy_inode(struct inode *inode)
122static void fuse_evict_inode(struct inode *inode) 122static void fuse_evict_inode(struct inode *inode)
123{ 123{
124 truncate_inode_pages(&inode->i_data, 0); 124 truncate_inode_pages(&inode->i_data, 0);
125 end_writeback(inode); 125 clear_inode(inode);
126 if (inode->i_sb->s_flags & MS_ACTIVE) { 126 if (inode->i_sb->s_flags & MS_ACTIVE) {
127 struct fuse_conn *fc = get_fuse_conn(inode); 127 struct fuse_conn *fc = get_fuse_conn(inode);
128 struct fuse_inode *fi = get_fuse_inode(inode); 128 struct fuse_inode *fi = get_fuse_inode(inode);
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 6172fa77ad59..713e621c240b 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1554,7 +1554,7 @@ out_unlock:
1554out: 1554out:
1555 /* Case 3 starts here */ 1555 /* Case 3 starts here */
1556 truncate_inode_pages(&inode->i_data, 0); 1556 truncate_inode_pages(&inode->i_data, 0);
1557 end_writeback(inode); 1557 clear_inode(inode);
1558 gfs2_dir_hash_inval(ip); 1558 gfs2_dir_hash_inval(ip);
1559 ip->i_gl->gl_object = NULL; 1559 ip->i_gl->gl_object = NULL;
1560 flush_delayed_work_sync(&ip->i_gl->gl_work); 1560 flush_delayed_work_sync(&ip->i_gl->gl_work);
diff --git a/fs/hfs/inode.c b/fs/hfs/inode.c
index 737dbeb64320..761ec06354b4 100644
--- a/fs/hfs/inode.c
+++ b/fs/hfs/inode.c
@@ -532,7 +532,7 @@ out:
532void hfs_evict_inode(struct inode *inode) 532void hfs_evict_inode(struct inode *inode)
533{ 533{
534 truncate_inode_pages(&inode->i_data, 0); 534 truncate_inode_pages(&inode->i_data, 0);
535 end_writeback(inode); 535 clear_inode(inode);
536 if (HFS_IS_RSRC(inode) && HFS_I(inode)->rsrc_inode) { 536 if (HFS_IS_RSRC(inode) && HFS_I(inode)->rsrc_inode) {
537 HFS_I(HFS_I(inode)->rsrc_inode)->rsrc_inode = NULL; 537 HFS_I(HFS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
538 iput(HFS_I(inode)->rsrc_inode); 538 iput(HFS_I(inode)->rsrc_inode);
diff --git a/fs/hfsplus/super.c b/fs/hfsplus/super.c
index ceb1c281eefb..a9bca4b8768b 100644
--- a/fs/hfsplus/super.c
+++ b/fs/hfsplus/super.c
@@ -154,7 +154,7 @@ static void hfsplus_evict_inode(struct inode *inode)
154{ 154{
155 dprint(DBG_INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino); 155 dprint(DBG_INODE, "hfsplus_evict_inode: %lu\n", inode->i_ino);
156 truncate_inode_pages(&inode->i_data, 0); 156 truncate_inode_pages(&inode->i_data, 0);
157 end_writeback(inode); 157 clear_inode(inode);
158 if (HFSPLUS_IS_RSRC(inode)) { 158 if (HFSPLUS_IS_RSRC(inode)) {
159 HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL; 159 HFSPLUS_I(HFSPLUS_I(inode)->rsrc_inode)->rsrc_inode = NULL;
160 iput(HFSPLUS_I(inode)->rsrc_inode); 160 iput(HFSPLUS_I(inode)->rsrc_inode);
diff --git a/fs/hostfs/hostfs_kern.c b/fs/hostfs/hostfs_kern.c
index 07c516bfea76..2afa5bbccf9b 100644
--- a/fs/hostfs/hostfs_kern.c
+++ b/fs/hostfs/hostfs_kern.c
@@ -240,7 +240,7 @@ static struct inode *hostfs_alloc_inode(struct super_block *sb)
240static void hostfs_evict_inode(struct inode *inode) 240static void hostfs_evict_inode(struct inode *inode)
241{ 241{
242 truncate_inode_pages(&inode->i_data, 0); 242 truncate_inode_pages(&inode->i_data, 0);
243 end_writeback(inode); 243 clear_inode(inode);
244 if (HOSTFS_I(inode)->fd != -1) { 244 if (HOSTFS_I(inode)->fd != -1) {
245 close_file(&HOSTFS_I(inode)->fd); 245 close_file(&HOSTFS_I(inode)->fd);
246 HOSTFS_I(inode)->fd = -1; 246 HOSTFS_I(inode)->fd = -1;
diff --git a/fs/hpfs/inode.c b/fs/hpfs/inode.c
index 3b2cec29972b..b43066cbdc6a 100644
--- a/fs/hpfs/inode.c
+++ b/fs/hpfs/inode.c
@@ -299,7 +299,7 @@ void hpfs_write_if_changed(struct inode *inode)
299void hpfs_evict_inode(struct inode *inode) 299void hpfs_evict_inode(struct inode *inode)
300{ 300{
301 truncate_inode_pages(&inode->i_data, 0); 301 truncate_inode_pages(&inode->i_data, 0);
302 end_writeback(inode); 302 clear_inode(inode);
303 if (!inode->i_nlink) { 303 if (!inode->i_nlink) {
304 hpfs_lock(inode->i_sb); 304 hpfs_lock(inode->i_sb);
305 hpfs_remove_fnode(inode->i_sb, inode->i_ino); 305 hpfs_remove_fnode(inode->i_sb, inode->i_ino);
diff --git a/fs/hppfs/hppfs.c b/fs/hppfs/hppfs.c
index a80e45a690ac..d4f93b52cec5 100644
--- a/fs/hppfs/hppfs.c
+++ b/fs/hppfs/hppfs.c
@@ -614,7 +614,7 @@ static struct inode *hppfs_alloc_inode(struct super_block *sb)
614 614
615void hppfs_evict_inode(struct inode *ino) 615void hppfs_evict_inode(struct inode *ino)
616{ 616{
617 end_writeback(ino); 617 clear_inode(ino);
618 dput(HPPFS_I(ino)->proc_dentry); 618 dput(HPPFS_I(ino)->proc_dentry);
619 mntput(ino->i_sb->s_fs_info); 619 mntput(ino->i_sb->s_fs_info);
620} 620}
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 001ef01d2fe2..cc9281b6c628 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -393,7 +393,7 @@ static void truncate_hugepages(struct inode *inode, loff_t lstart)
393static void hugetlbfs_evict_inode(struct inode *inode) 393static void hugetlbfs_evict_inode(struct inode *inode)
394{ 394{
395 truncate_hugepages(inode, 0); 395 truncate_hugepages(inode, 0);
396 end_writeback(inode); 396 clear_inode(inode);
397} 397}
398 398
399static inline void 399static inline void
diff --git a/fs/inode.c b/fs/inode.c
index da93f7d160d4..6bc8761cc333 100644
--- a/fs/inode.c
+++ b/fs/inode.c
@@ -486,7 +486,7 @@ void __remove_inode_hash(struct inode *inode)
486} 486}
487EXPORT_SYMBOL(__remove_inode_hash); 487EXPORT_SYMBOL(__remove_inode_hash);
488 488
489void end_writeback(struct inode *inode) 489void clear_inode(struct inode *inode)
490{ 490{
491 might_sleep(); 491 might_sleep();
492 /* 492 /*
@@ -500,11 +500,10 @@ void end_writeback(struct inode *inode)
500 BUG_ON(!list_empty(&inode->i_data.private_list)); 500 BUG_ON(!list_empty(&inode->i_data.private_list));
501 BUG_ON(!(inode->i_state & I_FREEING)); 501 BUG_ON(!(inode->i_state & I_FREEING));
502 BUG_ON(inode->i_state & I_CLEAR); 502 BUG_ON(inode->i_state & I_CLEAR);
503 inode_sync_wait(inode);
504 /* don't need i_lock here, no concurrent mods to i_state */ 503 /* don't need i_lock here, no concurrent mods to i_state */
505 inode->i_state = I_FREEING | I_CLEAR; 504 inode->i_state = I_FREEING | I_CLEAR;
506} 505}
507EXPORT_SYMBOL(end_writeback); 506EXPORT_SYMBOL(clear_inode);
508 507
509/* 508/*
510 * Free the inode passed in, removing it from the lists it is still connected 509 * Free the inode passed in, removing it from the lists it is still connected
@@ -531,12 +530,20 @@ static void evict(struct inode *inode)
531 530
532 inode_sb_list_del(inode); 531 inode_sb_list_del(inode);
533 532
533 /*
534 * Wait for flusher thread to be done with the inode so that filesystem
535 * does not start destroying it while writeback is still running. Since
536 * the inode has I_FREEING set, flusher thread won't start new work on
537 * the inode. We just have to wait for running writeback to finish.
538 */
539 inode_wait_for_writeback(inode);
540
534 if (op->evict_inode) { 541 if (op->evict_inode) {
535 op->evict_inode(inode); 542 op->evict_inode(inode);
536 } else { 543 } else {
537 if (inode->i_data.nrpages) 544 if (inode->i_data.nrpages)
538 truncate_inode_pages(&inode->i_data, 0); 545 truncate_inode_pages(&inode->i_data, 0);
539 end_writeback(inode); 546 clear_inode(inode);
540 } 547 }
541 if (S_ISBLK(inode->i_mode) && inode->i_bdev) 548 if (S_ISBLK(inode->i_mode) && inode->i_bdev)
542 bd_forget(inode); 549 bd_forget(inode);
diff --git a/fs/jbd/checkpoint.c b/fs/jbd/checkpoint.c
index 05f0754f2b46..08c03044abdd 100644
--- a/fs/jbd/checkpoint.c
+++ b/fs/jbd/checkpoint.c
@@ -508,20 +508,19 @@ int cleanup_journal_tail(journal_t *journal)
508 /* 508 /*
509 * We need to make sure that any blocks that were recently written out 509 * We need to make sure that any blocks that were recently written out
510 * --- perhaps by log_do_checkpoint() --- are flushed out before we 510 * --- perhaps by log_do_checkpoint() --- are flushed out before we
511 * drop the transactions from the journal. It's unlikely this will be 511 * drop the transactions from the journal. Similarly we need to be sure
512 * necessary, especially with an appropriately sized journal, but we 512 * superblock makes it to disk before next transaction starts reusing
513 * need this to guarantee correctness. Fortunately 513 * freed space (otherwise we could replay some blocks of the new
514 * cleanup_journal_tail() doesn't get called all that often. 514 * transaction thinking they belong to the old one). So we use
515 * WRITE_FLUSH_FUA. It's unlikely this will be necessary, especially
516 * with an appropriately sized journal, but we need this to guarantee
517 * correctness. Fortunately cleanup_journal_tail() doesn't get called
518 * all that often.
515 */ 519 */
516 if (journal->j_flags & JFS_BARRIER) 520 journal_update_sb_log_tail(journal, first_tid, blocknr,
517 blkdev_issue_flush(journal->j_fs_dev, GFP_KERNEL, NULL); 521 WRITE_FLUSH_FUA);
518 522
519 spin_lock(&journal->j_state_lock); 523 spin_lock(&journal->j_state_lock);
520 if (!tid_gt(first_tid, journal->j_tail_sequence)) {
521 spin_unlock(&journal->j_state_lock);
522 /* Someone else cleaned up journal so return 0 */
523 return 0;
524 }
525 /* OK, update the superblock to recover the freed space. 524 /* OK, update the superblock to recover the freed space.
526 * Physical blocks come first: have we wrapped beyond the end of 525 * Physical blocks come first: have we wrapped beyond the end of
527 * the log? */ 526 * the log? */
@@ -539,8 +538,6 @@ int cleanup_journal_tail(journal_t *journal)
539 journal->j_tail_sequence = first_tid; 538 journal->j_tail_sequence = first_tid;
540 journal->j_tail = blocknr; 539 journal->j_tail = blocknr;
541 spin_unlock(&journal->j_state_lock); 540 spin_unlock(&journal->j_state_lock);
542 if (!(journal->j_flags & JFS_ABORT))
543 journal_update_superblock(journal, 1);
544 return 0; 541 return 0;
545} 542}
546 543
diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c
index f2b9a571f4cf..52c15c776029 100644
--- a/fs/jbd/commit.c
+++ b/fs/jbd/commit.c
@@ -298,6 +298,7 @@ void journal_commit_transaction(journal_t *journal)
298 int tag_flag; 298 int tag_flag;
299 int i; 299 int i;
300 struct blk_plug plug; 300 struct blk_plug plug;
301 int write_op = WRITE;
301 302
302 /* 303 /*
303 * First job: lock down the current transaction and wait for 304 * First job: lock down the current transaction and wait for
@@ -307,7 +308,16 @@ void journal_commit_transaction(journal_t *journal)
307 /* Do we need to erase the effects of a prior journal_flush? */ 308 /* Do we need to erase the effects of a prior journal_flush? */
308 if (journal->j_flags & JFS_FLUSHED) { 309 if (journal->j_flags & JFS_FLUSHED) {
309 jbd_debug(3, "super block updated\n"); 310 jbd_debug(3, "super block updated\n");
310 journal_update_superblock(journal, 1); 311 mutex_lock(&journal->j_checkpoint_mutex);
312 /*
313 * We hold j_checkpoint_mutex so tail cannot change under us.
314 * We don't need any special data guarantees for writing sb
315 * since journal is empty and it is ok for write to be
316 * flushed only with transaction commit.
317 */
318 journal_update_sb_log_tail(journal, journal->j_tail_sequence,
319 journal->j_tail, WRITE_SYNC);
320 mutex_unlock(&journal->j_checkpoint_mutex);
311 } else { 321 } else {
312 jbd_debug(3, "superblock not updated\n"); 322 jbd_debug(3, "superblock not updated\n");
313 } 323 }
@@ -413,13 +423,16 @@ void journal_commit_transaction(journal_t *journal)
413 423
414 jbd_debug (3, "JBD: commit phase 2\n"); 424 jbd_debug (3, "JBD: commit phase 2\n");
415 425
426 if (tid_geq(journal->j_commit_waited, commit_transaction->t_tid))
427 write_op = WRITE_SYNC;
428
416 /* 429 /*
417 * Now start flushing things to disk, in the order they appear 430 * Now start flushing things to disk, in the order they appear
418 * on the transaction lists. Data blocks go first. 431 * on the transaction lists. Data blocks go first.
419 */ 432 */
420 blk_start_plug(&plug); 433 blk_start_plug(&plug);
421 err = journal_submit_data_buffers(journal, commit_transaction, 434 err = journal_submit_data_buffers(journal, commit_transaction,
422 WRITE_SYNC); 435 write_op);
423 blk_finish_plug(&plug); 436 blk_finish_plug(&plug);
424 437
425 /* 438 /*
@@ -478,7 +491,7 @@ void journal_commit_transaction(journal_t *journal)
478 491
479 blk_start_plug(&plug); 492 blk_start_plug(&plug);
480 493
481 journal_write_revoke_records(journal, commit_transaction, WRITE_SYNC); 494 journal_write_revoke_records(journal, commit_transaction, write_op);
482 495
483 /* 496 /*
484 * If we found any dirty or locked buffers, then we should have 497 * If we found any dirty or locked buffers, then we should have
@@ -649,7 +662,7 @@ start_journal_io:
649 clear_buffer_dirty(bh); 662 clear_buffer_dirty(bh);
650 set_buffer_uptodate(bh); 663 set_buffer_uptodate(bh);
651 bh->b_end_io = journal_end_buffer_io_sync; 664 bh->b_end_io = journal_end_buffer_io_sync;
652 submit_bh(WRITE_SYNC, bh); 665 submit_bh(write_op, bh);
653 } 666 }
654 cond_resched(); 667 cond_resched();
655 668
diff --git a/fs/jbd/journal.c b/fs/jbd/journal.c
index 0971e9217808..425c2f2cf170 100644
--- a/fs/jbd/journal.c
+++ b/fs/jbd/journal.c
@@ -563,6 +563,8 @@ int log_wait_commit(journal_t *journal, tid_t tid)
563 spin_unlock(&journal->j_state_lock); 563 spin_unlock(&journal->j_state_lock);
564#endif 564#endif
565 spin_lock(&journal->j_state_lock); 565 spin_lock(&journal->j_state_lock);
566 if (!tid_geq(journal->j_commit_waited, tid))
567 journal->j_commit_waited = tid;
566 while (tid_gt(tid, journal->j_commit_sequence)) { 568 while (tid_gt(tid, journal->j_commit_sequence)) {
567 jbd_debug(1, "JBD: want %d, j_commit_sequence=%d\n", 569 jbd_debug(1, "JBD: want %d, j_commit_sequence=%d\n",
568 tid, journal->j_commit_sequence); 570 tid, journal->j_commit_sequence);
@@ -921,8 +923,33 @@ static int journal_reset(journal_t *journal)
921 923
922 journal->j_max_transaction_buffers = journal->j_maxlen / 4; 924 journal->j_max_transaction_buffers = journal->j_maxlen / 4;
923 925
924 /* Add the dynamic fields and write it to disk. */ 926 /*
925 journal_update_superblock(journal, 1); 927 * As a special case, if the on-disk copy is already marked as needing
928 * no recovery (s_start == 0), then we can safely defer the superblock
929 * update until the next commit by setting JFS_FLUSHED. This avoids
930 * attempting a write to a potential-readonly device.
931 */
932 if (sb->s_start == 0) {
933 jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
934 "(start %u, seq %d, errno %d)\n",
935 journal->j_tail, journal->j_tail_sequence,
936 journal->j_errno);
937 journal->j_flags |= JFS_FLUSHED;
938 } else {
939 /* Lock here to make assertions happy... */
940 mutex_lock(&journal->j_checkpoint_mutex);
941 /*
942 * Update log tail information. We use WRITE_FUA since new
943 * transaction will start reusing journal space and so we
944 * must make sure information about current log tail is on
945 * disk before that.
946 */
947 journal_update_sb_log_tail(journal,
948 journal->j_tail_sequence,
949 journal->j_tail,
950 WRITE_FUA);
951 mutex_unlock(&journal->j_checkpoint_mutex);
952 }
926 return journal_start_thread(journal); 953 return journal_start_thread(journal);
927} 954}
928 955
@@ -999,35 +1026,15 @@ int journal_create(journal_t *journal)
999 return journal_reset(journal); 1026 return journal_reset(journal);
1000} 1027}
1001 1028
1002/** 1029static void journal_write_superblock(journal_t *journal, int write_op)
1003 * void journal_update_superblock() - Update journal sb on disk.
1004 * @journal: The journal to update.
1005 * @wait: Set to '0' if you don't want to wait for IO completion.
1006 *
1007 * Update a journal's dynamic superblock fields and write it to disk,
1008 * optionally waiting for the IO to complete.
1009 */
1010void journal_update_superblock(journal_t *journal, int wait)
1011{ 1030{
1012 journal_superblock_t *sb = journal->j_superblock;
1013 struct buffer_head *bh = journal->j_sb_buffer; 1031 struct buffer_head *bh = journal->j_sb_buffer;
1032 int ret;
1014 1033
1015 /* 1034 trace_journal_write_superblock(journal, write_op);
1016 * As a special case, if the on-disk copy is already marked as needing 1035 if (!(journal->j_flags & JFS_BARRIER))
1017 * no recovery (s_start == 0) and there are no outstanding transactions 1036 write_op &= ~(REQ_FUA | REQ_FLUSH);
1018 * in the filesystem, then we can safely defer the superblock update 1037 lock_buffer(bh);
1019 * until the next commit by setting JFS_FLUSHED. This avoids
1020 * attempting a write to a potential-readonly device.
1021 */
1022 if (sb->s_start == 0 && journal->j_tail_sequence ==
1023 journal->j_transaction_sequence) {
1024 jbd_debug(1,"JBD: Skipping superblock update on recovered sb "
1025 "(start %u, seq %d, errno %d)\n",
1026 journal->j_tail, journal->j_tail_sequence,
1027 journal->j_errno);
1028 goto out;
1029 }
1030
1031 if (buffer_write_io_error(bh)) { 1038 if (buffer_write_io_error(bh)) {
1032 char b[BDEVNAME_SIZE]; 1039 char b[BDEVNAME_SIZE];
1033 /* 1040 /*
@@ -1045,42 +1052,100 @@ void journal_update_superblock(journal_t *journal, int wait)
1045 set_buffer_uptodate(bh); 1052 set_buffer_uptodate(bh);
1046 } 1053 }
1047 1054
1055 get_bh(bh);
1056 bh->b_end_io = end_buffer_write_sync;
1057 ret = submit_bh(write_op, bh);
1058 wait_on_buffer(bh);
1059 if (buffer_write_io_error(bh)) {
1060 clear_buffer_write_io_error(bh);
1061 set_buffer_uptodate(bh);
1062 ret = -EIO;
1063 }
1064 if (ret) {
1065 char b[BDEVNAME_SIZE];
1066 printk(KERN_ERR "JBD: Error %d detected "
1067 "when updating journal superblock for %s.\n",
1068 ret, journal_dev_name(journal, b));
1069 }
1070}
1071
1072/**
1073 * journal_update_sb_log_tail() - Update log tail in journal sb on disk.
1074 * @journal: The journal to update.
1075 * @tail_tid: TID of the new transaction at the tail of the log
1076 * @tail_block: The first block of the transaction at the tail of the log
1077 * @write_op: With which operation should we write the journal sb
1078 *
1079 * Update a journal's superblock information about log tail and write it to
1080 * disk, waiting for the IO to complete.
1081 */
1082void journal_update_sb_log_tail(journal_t *journal, tid_t tail_tid,
1083 unsigned int tail_block, int write_op)
1084{
1085 journal_superblock_t *sb = journal->j_superblock;
1086
1087 BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
1088 jbd_debug(1,"JBD: updating superblock (start %u, seq %u)\n",
1089 tail_block, tail_tid);
1090
1091 sb->s_sequence = cpu_to_be32(tail_tid);
1092 sb->s_start = cpu_to_be32(tail_block);
1093
1094 journal_write_superblock(journal, write_op);
1095
1096 /* Log is no longer empty */
1097 spin_lock(&journal->j_state_lock);
1098 WARN_ON(!sb->s_sequence);
1099 journal->j_flags &= ~JFS_FLUSHED;
1100 spin_unlock(&journal->j_state_lock);
1101}
1102
1103/**
1104 * mark_journal_empty() - Mark on disk journal as empty.
1105 * @journal: The journal to update.
1106 *
1107 * Update a journal's dynamic superblock fields to show that journal is empty.
1108 * Write updated superblock to disk waiting for IO to complete.
1109 */
1110static void mark_journal_empty(journal_t *journal)
1111{
1112 journal_superblock_t *sb = journal->j_superblock;
1113
1114 BUG_ON(!mutex_is_locked(&journal->j_checkpoint_mutex));
1048 spin_lock(&journal->j_state_lock); 1115 spin_lock(&journal->j_state_lock);
1049 jbd_debug(1,"JBD: updating superblock (start %u, seq %d, errno %d)\n", 1116 jbd_debug(1, "JBD: Marking journal as empty (seq %d)\n",
1050 journal->j_tail, journal->j_tail_sequence, journal->j_errno); 1117 journal->j_tail_sequence);
1051 1118
1052 sb->s_sequence = cpu_to_be32(journal->j_tail_sequence); 1119 sb->s_sequence = cpu_to_be32(journal->j_tail_sequence);
1053 sb->s_start = cpu_to_be32(journal->j_tail); 1120 sb->s_start = cpu_to_be32(0);
1054 sb->s_errno = cpu_to_be32(journal->j_errno);
1055 spin_unlock(&journal->j_state_lock); 1121 spin_unlock(&journal->j_state_lock);
1056 1122
1057 BUFFER_TRACE(bh, "marking dirty"); 1123 journal_write_superblock(journal, WRITE_FUA);
1058 mark_buffer_dirty(bh);
1059 if (wait) {
1060 sync_dirty_buffer(bh);
1061 if (buffer_write_io_error(bh)) {
1062 char b[BDEVNAME_SIZE];
1063 printk(KERN_ERR "JBD: I/O error detected "
1064 "when updating journal superblock for %s.\n",
1065 journal_dev_name(journal, b));
1066 clear_buffer_write_io_error(bh);
1067 set_buffer_uptodate(bh);
1068 }
1069 } else
1070 write_dirty_buffer(bh, WRITE);
1071 1124
1072 trace_jbd_update_superblock_end(journal, wait); 1125 spin_lock(&journal->j_state_lock);
1073out: 1126 /* Log is empty */
1074 /* If we have just flushed the log (by marking s_start==0), then 1127 journal->j_flags |= JFS_FLUSHED;
1075 * any future commit will have to be careful to update the 1128 spin_unlock(&journal->j_state_lock);
1076 * superblock again to re-record the true start of the log. */ 1129}
1130
1131/**
1132 * journal_update_sb_errno() - Update error in the journal.
1133 * @journal: The journal to update.
1134 *
1135 * Update a journal's errno. Write updated superblock to disk waiting for IO
1136 * to complete.
1137 */
1138static void journal_update_sb_errno(journal_t *journal)
1139{
1140 journal_superblock_t *sb = journal->j_superblock;
1077 1141
1078 spin_lock(&journal->j_state_lock); 1142 spin_lock(&journal->j_state_lock);
1079 if (sb->s_start) 1143 jbd_debug(1, "JBD: updating superblock error (errno %d)\n",
1080 journal->j_flags &= ~JFS_FLUSHED; 1144 journal->j_errno);
1081 else 1145 sb->s_errno = cpu_to_be32(journal->j_errno);
1082 journal->j_flags |= JFS_FLUSHED;
1083 spin_unlock(&journal->j_state_lock); 1146 spin_unlock(&journal->j_state_lock);
1147
1148 journal_write_superblock(journal, WRITE_SYNC);
1084} 1149}
1085 1150
1086/* 1151/*
@@ -1251,6 +1316,8 @@ int journal_destroy(journal_t *journal)
1251 1316
1252 /* Force any old transactions to disk */ 1317 /* Force any old transactions to disk */
1253 1318
1319 /* We cannot race with anybody but must keep assertions happy */
1320 mutex_lock(&journal->j_checkpoint_mutex);
1254 /* Totally anal locking here... */ 1321 /* Totally anal locking here... */
1255 spin_lock(&journal->j_list_lock); 1322 spin_lock(&journal->j_list_lock);
1256 while (journal->j_checkpoint_transactions != NULL) { 1323 while (journal->j_checkpoint_transactions != NULL) {
@@ -1266,16 +1333,14 @@ int journal_destroy(journal_t *journal)
1266 1333
1267 if (journal->j_sb_buffer) { 1334 if (journal->j_sb_buffer) {
1268 if (!is_journal_aborted(journal)) { 1335 if (!is_journal_aborted(journal)) {
1269 /* We can now mark the journal as empty. */
1270 journal->j_tail = 0;
1271 journal->j_tail_sequence = 1336 journal->j_tail_sequence =
1272 ++journal->j_transaction_sequence; 1337 ++journal->j_transaction_sequence;
1273 journal_update_superblock(journal, 1); 1338 mark_journal_empty(journal);
1274 } else { 1339 } else
1275 err = -EIO; 1340 err = -EIO;
1276 }
1277 brelse(journal->j_sb_buffer); 1341 brelse(journal->j_sb_buffer);
1278 } 1342 }
1343 mutex_unlock(&journal->j_checkpoint_mutex);
1279 1344
1280 if (journal->j_inode) 1345 if (journal->j_inode)
1281 iput(journal->j_inode); 1346 iput(journal->j_inode);
@@ -1455,7 +1520,6 @@ int journal_flush(journal_t *journal)
1455{ 1520{
1456 int err = 0; 1521 int err = 0;
1457 transaction_t *transaction = NULL; 1522 transaction_t *transaction = NULL;
1458 unsigned int old_tail;
1459 1523
1460 spin_lock(&journal->j_state_lock); 1524 spin_lock(&journal->j_state_lock);
1461 1525
@@ -1490,6 +1554,7 @@ int journal_flush(journal_t *journal)
1490 if (is_journal_aborted(journal)) 1554 if (is_journal_aborted(journal))
1491 return -EIO; 1555 return -EIO;
1492 1556
1557 mutex_lock(&journal->j_checkpoint_mutex);
1493 cleanup_journal_tail(journal); 1558 cleanup_journal_tail(journal);
1494 1559
1495 /* Finally, mark the journal as really needing no recovery. 1560 /* Finally, mark the journal as really needing no recovery.
@@ -1497,14 +1562,9 @@ int journal_flush(journal_t *journal)
1497 * the magic code for a fully-recovered superblock. Any future 1562 * the magic code for a fully-recovered superblock. Any future
1498 * commits of data to the journal will restore the current 1563 * commits of data to the journal will restore the current
1499 * s_start value. */ 1564 * s_start value. */
1565 mark_journal_empty(journal);
1566 mutex_unlock(&journal->j_checkpoint_mutex);
1500 spin_lock(&journal->j_state_lock); 1567 spin_lock(&journal->j_state_lock);
1501 old_tail = journal->j_tail;
1502 journal->j_tail = 0;
1503 spin_unlock(&journal->j_state_lock);
1504 journal_update_superblock(journal, 1);
1505 spin_lock(&journal->j_state_lock);
1506 journal->j_tail = old_tail;
1507
1508 J_ASSERT(!journal->j_running_transaction); 1568 J_ASSERT(!journal->j_running_transaction);
1509 J_ASSERT(!journal->j_committing_transaction); 1569 J_ASSERT(!journal->j_committing_transaction);
1510 J_ASSERT(!journal->j_checkpoint_transactions); 1570 J_ASSERT(!journal->j_checkpoint_transactions);
@@ -1544,8 +1604,12 @@ int journal_wipe(journal_t *journal, int write)
1544 write ? "Clearing" : "Ignoring"); 1604 write ? "Clearing" : "Ignoring");
1545 1605
1546 err = journal_skip_recovery(journal); 1606 err = journal_skip_recovery(journal);
1547 if (write) 1607 if (write) {
1548 journal_update_superblock(journal, 1); 1608 /* Lock to make assertions happy... */
1609 mutex_lock(&journal->j_checkpoint_mutex);
1610 mark_journal_empty(journal);
1611 mutex_unlock(&journal->j_checkpoint_mutex);
1612 }
1549 1613
1550 no_recovery: 1614 no_recovery:
1551 return err; 1615 return err;
@@ -1613,7 +1677,7 @@ static void __journal_abort_soft (journal_t *journal, int errno)
1613 __journal_abort_hard(journal); 1677 __journal_abort_hard(journal);
1614 1678
1615 if (errno) 1679 if (errno)
1616 journal_update_superblock(journal, 1); 1680 journal_update_sb_errno(journal);
1617} 1681}
1618 1682
1619/** 1683/**
diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c
index b2a7e5244e39..febc10db5ced 100644
--- a/fs/jbd/transaction.c
+++ b/fs/jbd/transaction.c
@@ -1433,8 +1433,6 @@ int journal_stop(handle_t *handle)
1433 } 1433 }
1434 } 1434 }
1435 1435
1436 if (handle->h_sync)
1437 transaction->t_synchronous_commit = 1;
1438 current->journal_info = NULL; 1436 current->journal_info = NULL;
1439 spin_lock(&journal->j_state_lock); 1437 spin_lock(&journal->j_state_lock);
1440 spin_lock(&transaction->t_handle_lock); 1438 spin_lock(&transaction->t_handle_lock);
diff --git a/fs/jffs2/fs.c b/fs/jffs2/fs.c
index bb6f993ebca9..3d3092eda811 100644
--- a/fs/jffs2/fs.c
+++ b/fs/jffs2/fs.c
@@ -240,7 +240,7 @@ void jffs2_evict_inode (struct inode *inode)
240 jffs2_dbg(1, "%s(): ino #%lu mode %o\n", 240 jffs2_dbg(1, "%s(): ino #%lu mode %o\n",
241 __func__, inode->i_ino, inode->i_mode); 241 __func__, inode->i_ino, inode->i_mode);
242 truncate_inode_pages(&inode->i_data, 0); 242 truncate_inode_pages(&inode->i_data, 0);
243 end_writeback(inode); 243 clear_inode(inode);
244 jffs2_do_clear_inode(c, f); 244 jffs2_do_clear_inode(c, f);
245} 245}
246 246
diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c
index 77b69b27f825..4692bf3ca8cb 100644
--- a/fs/jfs/inode.c
+++ b/fs/jfs/inode.c
@@ -169,7 +169,7 @@ void jfs_evict_inode(struct inode *inode)
169 } else { 169 } else {
170 truncate_inode_pages(&inode->i_data, 0); 170 truncate_inode_pages(&inode->i_data, 0);
171 } 171 }
172 end_writeback(inode); 172 clear_inode(inode);
173 dquot_drop(inode); 173 dquot_drop(inode);
174} 174}
175 175
diff --git a/fs/logfs/readwrite.c b/fs/logfs/readwrite.c
index e3ab5e5a904c..f1cb512c5019 100644
--- a/fs/logfs/readwrite.c
+++ b/fs/logfs/readwrite.c
@@ -2175,7 +2175,7 @@ void logfs_evict_inode(struct inode *inode)
2175 } 2175 }
2176 } 2176 }
2177 truncate_inode_pages(&inode->i_data, 0); 2177 truncate_inode_pages(&inode->i_data, 0);
2178 end_writeback(inode); 2178 clear_inode(inode);
2179 2179
2180 /* Cheaper version of write_inode. All changes are concealed in 2180 /* Cheaper version of write_inode. All changes are concealed in
2181 * aliases, which are moved back. No write to the medium happens. 2181 * aliases, which are moved back. No write to the medium happens.
diff --git a/fs/minix/inode.c b/fs/minix/inode.c
index fcb05d2c6b5f..2a503ad020d5 100644
--- a/fs/minix/inode.c
+++ b/fs/minix/inode.c
@@ -32,7 +32,7 @@ static void minix_evict_inode(struct inode *inode)
32 minix_truncate(inode); 32 minix_truncate(inode);
33 } 33 }
34 invalidate_inode_buffers(inode); 34 invalidate_inode_buffers(inode);
35 end_writeback(inode); 35 clear_inode(inode);
36 if (!inode->i_nlink) 36 if (!inode->i_nlink)
37 minix_free_inode(inode); 37 minix_free_inode(inode);
38} 38}
diff --git a/fs/namei.c b/fs/namei.c
index 93ff12b1a1de..c651f02c9fec 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -1452,7 +1452,8 @@ EXPORT_SYMBOL(full_name_hash);
1452 */ 1452 */
1453static inline unsigned long hash_name(const char *name, unsigned int *hashp) 1453static inline unsigned long hash_name(const char *name, unsigned int *hashp)
1454{ 1454{
1455 unsigned long a, mask, hash, len; 1455 unsigned long a, b, adata, bdata, mask, hash, len;
1456 const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
1456 1457
1457 hash = a = 0; 1458 hash = a = 0;
1458 len = -sizeof(unsigned long); 1459 len = -sizeof(unsigned long);
@@ -1460,17 +1461,18 @@ static inline unsigned long hash_name(const char *name, unsigned int *hashp)
1460 hash = (hash + a) * 9; 1461 hash = (hash + a) * 9;
1461 len += sizeof(unsigned long); 1462 len += sizeof(unsigned long);
1462 a = load_unaligned_zeropad(name+len); 1463 a = load_unaligned_zeropad(name+len);
1463 /* Do we have any NUL or '/' bytes in this word? */ 1464 b = a ^ REPEAT_BYTE('/');
1464 mask = has_zero(a) | has_zero(a ^ REPEAT_BYTE('/')); 1465 } while (!(has_zero(a, &adata, &constants) | has_zero(b, &bdata, &constants)));
1465 } while (!mask); 1466
1466 1467 adata = prep_zero_mask(a, adata, &constants);
1467 /* The mask *below* the first high bit set */ 1468 bdata = prep_zero_mask(b, bdata, &constants);
1468 mask = (mask - 1) & ~mask; 1469
1469 mask >>= 7; 1470 mask = create_zero_mask(adata | bdata);
1470 hash += a & mask; 1471
1472 hash += a & zero_bytemask(mask);
1471 *hashp = fold_hash(hash); 1473 *hashp = fold_hash(hash);
1472 1474
1473 return len + count_masked_bytes(mask); 1475 return len + find_zero(mask);
1474} 1476}
1475 1477
1476#else 1478#else
diff --git a/fs/ncpfs/inode.c b/fs/ncpfs/inode.c
index 87484fb8d177..333df07ae3bd 100644
--- a/fs/ncpfs/inode.c
+++ b/fs/ncpfs/inode.c
@@ -292,7 +292,7 @@ static void
292ncp_evict_inode(struct inode *inode) 292ncp_evict_inode(struct inode *inode)
293{ 293{
294 truncate_inode_pages(&inode->i_data, 0); 294 truncate_inode_pages(&inode->i_data, 0);
295 end_writeback(inode); 295 clear_inode(inode);
296 296
297 if (S_ISDIR(inode->i_mode)) { 297 if (S_ISDIR(inode->i_mode)) {
298 DDPRINTK("ncp_evict_inode: put directory %ld\n", inode->i_ino); 298 DDPRINTK("ncp_evict_inode: put directory %ld\n", inode->i_ino);
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
index 2a0e6c599147..f90f4f5cd421 100644
--- a/fs/nfs/Kconfig
+++ b/fs/nfs/Kconfig
@@ -29,9 +29,20 @@ config NFS_FS
29 29
30 If unsure, say N. 30 If unsure, say N.
31 31
32config NFS_V2
33 bool "NFS client support for NFS version 2"
34 depends on NFS_FS
35 default y
36 help
37 This option enables support for version 2 of the NFS protocol
38 (RFC 1094) in the kernel's NFS client.
39
40 If unsure, say Y.
41
32config NFS_V3 42config NFS_V3
33 bool "NFS client support for NFS version 3" 43 bool "NFS client support for NFS version 3"
34 depends on NFS_FS 44 depends on NFS_FS
45 default y
35 help 46 help
36 This option enables support for version 3 of the NFS protocol 47 This option enables support for version 3 of the NFS protocol
37 (RFC 1813) in the kernel's NFS client. 48 (RFC 1813) in the kernel's NFS client.
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index b58613d0abb3..7ddd45d9f170 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -4,11 +4,12 @@
4 4
5obj-$(CONFIG_NFS_FS) += nfs.o 5obj-$(CONFIG_NFS_FS) += nfs.o
6 6
7nfs-y := client.o dir.o file.o getroot.o inode.o super.o nfs2xdr.o \ 7nfs-y := client.o dir.o file.o getroot.o inode.o super.o \
8 direct.o pagelist.o proc.o read.o symlink.o unlink.o \ 8 direct.o pagelist.o read.o symlink.o unlink.o \
9 write.o namespace.o mount_clnt.o \ 9 write.o namespace.o mount_clnt.o \
10 dns_resolve.o cache_lib.o 10 dns_resolve.o cache_lib.o
11nfs-$(CONFIG_ROOT_NFS) += nfsroot.o 11nfs-$(CONFIG_ROOT_NFS) += nfsroot.o
12nfs-$(CONFIG_NFS_V2) += proc.o nfs2xdr.o
12nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o 13nfs-$(CONFIG_NFS_V3) += nfs3proc.o nfs3xdr.o
13nfs-$(CONFIG_NFS_V3_ACL) += nfs3acl.o 14nfs-$(CONFIG_NFS_V3_ACL) += nfs3acl.o
14nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \ 15nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index 7f6a23f0244e..7ae8a608956f 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -187,7 +187,6 @@ static void bl_end_io_read(struct bio *bio, int err)
187 struct parallel_io *par = bio->bi_private; 187 struct parallel_io *par = bio->bi_private;
188 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 188 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
189 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 189 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
190 struct nfs_read_data *rdata = (struct nfs_read_data *)par->data;
191 190
192 do { 191 do {
193 struct page *page = bvec->bv_page; 192 struct page *page = bvec->bv_page;
@@ -198,9 +197,12 @@ static void bl_end_io_read(struct bio *bio, int err)
198 SetPageUptodate(page); 197 SetPageUptodate(page);
199 } while (bvec >= bio->bi_io_vec); 198 } while (bvec >= bio->bi_io_vec);
200 if (!uptodate) { 199 if (!uptodate) {
201 if (!rdata->pnfs_error) 200 struct nfs_read_data *rdata = par->data;
202 rdata->pnfs_error = -EIO; 201 struct nfs_pgio_header *header = rdata->header;
203 pnfs_set_lo_fail(rdata->lseg); 202
203 if (!header->pnfs_error)
204 header->pnfs_error = -EIO;
205 pnfs_set_lo_fail(header->lseg);
204 } 206 }
205 bio_put(bio); 207 bio_put(bio);
206 put_parallel(par); 208 put_parallel(par);
@@ -221,7 +223,7 @@ bl_end_par_io_read(void *data, int unused)
221{ 223{
222 struct nfs_read_data *rdata = data; 224 struct nfs_read_data *rdata = data;
223 225
224 rdata->task.tk_status = rdata->pnfs_error; 226 rdata->task.tk_status = rdata->header->pnfs_error;
225 INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup); 227 INIT_WORK(&rdata->task.u.tk_work, bl_read_cleanup);
226 schedule_work(&rdata->task.u.tk_work); 228 schedule_work(&rdata->task.u.tk_work);
227} 229}
@@ -229,6 +231,7 @@ bl_end_par_io_read(void *data, int unused)
229static enum pnfs_try_status 231static enum pnfs_try_status
230bl_read_pagelist(struct nfs_read_data *rdata) 232bl_read_pagelist(struct nfs_read_data *rdata)
231{ 233{
234 struct nfs_pgio_header *header = rdata->header;
232 int i, hole; 235 int i, hole;
233 struct bio *bio = NULL; 236 struct bio *bio = NULL;
234 struct pnfs_block_extent *be = NULL, *cow_read = NULL; 237 struct pnfs_block_extent *be = NULL, *cow_read = NULL;
@@ -239,7 +242,7 @@ bl_read_pagelist(struct nfs_read_data *rdata)
239 int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT; 242 int pg_index = rdata->args.pgbase >> PAGE_CACHE_SHIFT;
240 243
241 dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__, 244 dprintk("%s enter nr_pages %u offset %lld count %u\n", __func__,
242 rdata->npages, f_offset, (unsigned int)rdata->args.count); 245 rdata->pages.npages, f_offset, (unsigned int)rdata->args.count);
243 246
244 par = alloc_parallel(rdata); 247 par = alloc_parallel(rdata);
245 if (!par) 248 if (!par)
@@ -249,17 +252,17 @@ bl_read_pagelist(struct nfs_read_data *rdata)
249 252
250 isect = (sector_t) (f_offset >> SECTOR_SHIFT); 253 isect = (sector_t) (f_offset >> SECTOR_SHIFT);
251 /* Code assumes extents are page-aligned */ 254 /* Code assumes extents are page-aligned */
252 for (i = pg_index; i < rdata->npages; i++) { 255 for (i = pg_index; i < rdata->pages.npages; i++) {
253 if (!extent_length) { 256 if (!extent_length) {
254 /* We've used up the previous extent */ 257 /* We've used up the previous extent */
255 bl_put_extent(be); 258 bl_put_extent(be);
256 bl_put_extent(cow_read); 259 bl_put_extent(cow_read);
257 bio = bl_submit_bio(READ, bio); 260 bio = bl_submit_bio(READ, bio);
258 /* Get the next one */ 261 /* Get the next one */
259 be = bl_find_get_extent(BLK_LSEG2EXT(rdata->lseg), 262 be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
260 isect, &cow_read); 263 isect, &cow_read);
261 if (!be) { 264 if (!be) {
262 rdata->pnfs_error = -EIO; 265 header->pnfs_error = -EIO;
263 goto out; 266 goto out;
264 } 267 }
265 extent_length = be->be_length - 268 extent_length = be->be_length -
@@ -282,11 +285,12 @@ bl_read_pagelist(struct nfs_read_data *rdata)
282 struct pnfs_block_extent *be_read; 285 struct pnfs_block_extent *be_read;
283 286
284 be_read = (hole && cow_read) ? cow_read : be; 287 be_read = (hole && cow_read) ? cow_read : be;
285 bio = bl_add_page_to_bio(bio, rdata->npages - i, READ, 288 bio = bl_add_page_to_bio(bio, rdata->pages.npages - i,
289 READ,
286 isect, pages[i], be_read, 290 isect, pages[i], be_read,
287 bl_end_io_read, par); 291 bl_end_io_read, par);
288 if (IS_ERR(bio)) { 292 if (IS_ERR(bio)) {
289 rdata->pnfs_error = PTR_ERR(bio); 293 header->pnfs_error = PTR_ERR(bio);
290 bio = NULL; 294 bio = NULL;
291 goto out; 295 goto out;
292 } 296 }
@@ -294,9 +298,9 @@ bl_read_pagelist(struct nfs_read_data *rdata)
294 isect += PAGE_CACHE_SECTORS; 298 isect += PAGE_CACHE_SECTORS;
295 extent_length -= PAGE_CACHE_SECTORS; 299 extent_length -= PAGE_CACHE_SECTORS;
296 } 300 }
297 if ((isect << SECTOR_SHIFT) >= rdata->inode->i_size) { 301 if ((isect << SECTOR_SHIFT) >= header->inode->i_size) {
298 rdata->res.eof = 1; 302 rdata->res.eof = 1;
299 rdata->res.count = rdata->inode->i_size - f_offset; 303 rdata->res.count = header->inode->i_size - f_offset;
300 } else { 304 } else {
301 rdata->res.count = (isect << SECTOR_SHIFT) - f_offset; 305 rdata->res.count = (isect << SECTOR_SHIFT) - f_offset;
302 } 306 }
@@ -345,7 +349,6 @@ static void bl_end_io_write_zero(struct bio *bio, int err)
345 struct parallel_io *par = bio->bi_private; 349 struct parallel_io *par = bio->bi_private;
346 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 350 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
347 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 351 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
348 struct nfs_write_data *wdata = (struct nfs_write_data *)par->data;
349 352
350 do { 353 do {
351 struct page *page = bvec->bv_page; 354 struct page *page = bvec->bv_page;
@@ -358,9 +361,12 @@ static void bl_end_io_write_zero(struct bio *bio, int err)
358 } while (bvec >= bio->bi_io_vec); 361 } while (bvec >= bio->bi_io_vec);
359 362
360 if (unlikely(!uptodate)) { 363 if (unlikely(!uptodate)) {
361 if (!wdata->pnfs_error) 364 struct nfs_write_data *data = par->data;
362 wdata->pnfs_error = -EIO; 365 struct nfs_pgio_header *header = data->header;
363 pnfs_set_lo_fail(wdata->lseg); 366
367 if (!header->pnfs_error)
368 header->pnfs_error = -EIO;
369 pnfs_set_lo_fail(header->lseg);
364 } 370 }
365 bio_put(bio); 371 bio_put(bio);
366 put_parallel(par); 372 put_parallel(par);
@@ -370,12 +376,13 @@ static void bl_end_io_write(struct bio *bio, int err)
370{ 376{
371 struct parallel_io *par = bio->bi_private; 377 struct parallel_io *par = bio->bi_private;
372 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 378 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
373 struct nfs_write_data *wdata = (struct nfs_write_data *)par->data; 379 struct nfs_write_data *data = par->data;
380 struct nfs_pgio_header *header = data->header;
374 381
375 if (!uptodate) { 382 if (!uptodate) {
376 if (!wdata->pnfs_error) 383 if (!header->pnfs_error)
377 wdata->pnfs_error = -EIO; 384 header->pnfs_error = -EIO;
378 pnfs_set_lo_fail(wdata->lseg); 385 pnfs_set_lo_fail(header->lseg);
379 } 386 }
380 bio_put(bio); 387 bio_put(bio);
381 put_parallel(par); 388 put_parallel(par);
@@ -391,9 +398,9 @@ static void bl_write_cleanup(struct work_struct *work)
391 dprintk("%s enter\n", __func__); 398 dprintk("%s enter\n", __func__);
392 task = container_of(work, struct rpc_task, u.tk_work); 399 task = container_of(work, struct rpc_task, u.tk_work);
393 wdata = container_of(task, struct nfs_write_data, task); 400 wdata = container_of(task, struct nfs_write_data, task);
394 if (likely(!wdata->pnfs_error)) { 401 if (likely(!wdata->header->pnfs_error)) {
395 /* Marks for LAYOUTCOMMIT */ 402 /* Marks for LAYOUTCOMMIT */
396 mark_extents_written(BLK_LSEG2EXT(wdata->lseg), 403 mark_extents_written(BLK_LSEG2EXT(wdata->header->lseg),
397 wdata->args.offset, wdata->args.count); 404 wdata->args.offset, wdata->args.count);
398 } 405 }
399 pnfs_ld_write_done(wdata); 406 pnfs_ld_write_done(wdata);
@@ -404,12 +411,12 @@ static void bl_end_par_io_write(void *data, int num_se)
404{ 411{
405 struct nfs_write_data *wdata = data; 412 struct nfs_write_data *wdata = data;
406 413
407 if (unlikely(wdata->pnfs_error)) { 414 if (unlikely(wdata->header->pnfs_error)) {
408 bl_free_short_extents(&BLK_LSEG2EXT(wdata->lseg)->bl_inval, 415 bl_free_short_extents(&BLK_LSEG2EXT(wdata->header->lseg)->bl_inval,
409 num_se); 416 num_se);
410 } 417 }
411 418
412 wdata->task.tk_status = wdata->pnfs_error; 419 wdata->task.tk_status = wdata->header->pnfs_error;
413 wdata->verf.committed = NFS_FILE_SYNC; 420 wdata->verf.committed = NFS_FILE_SYNC;
414 INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup); 421 INIT_WORK(&wdata->task.u.tk_work, bl_write_cleanup);
415 schedule_work(&wdata->task.u.tk_work); 422 schedule_work(&wdata->task.u.tk_work);
@@ -540,6 +547,7 @@ check_page:
540static enum pnfs_try_status 547static enum pnfs_try_status
541bl_write_pagelist(struct nfs_write_data *wdata, int sync) 548bl_write_pagelist(struct nfs_write_data *wdata, int sync)
542{ 549{
550 struct nfs_pgio_header *header = wdata->header;
543 int i, ret, npg_zero, pg_index, last = 0; 551 int i, ret, npg_zero, pg_index, last = 0;
544 struct bio *bio = NULL; 552 struct bio *bio = NULL;
545 struct pnfs_block_extent *be = NULL, *cow_read = NULL; 553 struct pnfs_block_extent *be = NULL, *cow_read = NULL;
@@ -552,7 +560,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
552 pgoff_t index; 560 pgoff_t index;
553 u64 temp; 561 u64 temp;
554 int npg_per_block = 562 int npg_per_block =
555 NFS_SERVER(wdata->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT; 563 NFS_SERVER(header->inode)->pnfs_blksize >> PAGE_CACHE_SHIFT;
556 564
557 dprintk("%s enter, %Zu@%lld\n", __func__, count, offset); 565 dprintk("%s enter, %Zu@%lld\n", __func__, count, offset);
558 /* At this point, wdata->pages is a (sequential) list of nfs_pages. 566 /* At this point, wdata->pages is a (sequential) list of nfs_pages.
@@ -566,7 +574,7 @@ bl_write_pagelist(struct nfs_write_data *wdata, int sync)
566 /* At this point, have to be more careful with error handling */ 574 /* At this point, have to be more careful with error handling */
567 575
568 isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT); 576 isect = (sector_t) ((offset & (long)PAGE_CACHE_MASK) >> SECTOR_SHIFT);
569 be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), isect, &cow_read); 577 be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg), isect, &cow_read);
570 if (!be || !is_writable(be, isect)) { 578 if (!be || !is_writable(be, isect)) {
571 dprintk("%s no matching extents!\n", __func__); 579 dprintk("%s no matching extents!\n", __func__);
572 goto out_mds; 580 goto out_mds;
@@ -597,10 +605,10 @@ fill_invalid_ext:
597 dprintk("%s zero %dth page: index %lu isect %llu\n", 605 dprintk("%s zero %dth page: index %lu isect %llu\n",
598 __func__, npg_zero, index, 606 __func__, npg_zero, index,
599 (unsigned long long)isect); 607 (unsigned long long)isect);
600 page = bl_find_get_zeroing_page(wdata->inode, index, 608 page = bl_find_get_zeroing_page(header->inode, index,
601 cow_read); 609 cow_read);
602 if (unlikely(IS_ERR(page))) { 610 if (unlikely(IS_ERR(page))) {
603 wdata->pnfs_error = PTR_ERR(page); 611 header->pnfs_error = PTR_ERR(page);
604 goto out; 612 goto out;
605 } else if (page == NULL) 613 } else if (page == NULL)
606 goto next_page; 614 goto next_page;
@@ -612,7 +620,7 @@ fill_invalid_ext:
612 __func__, ret); 620 __func__, ret);
613 end_page_writeback(page); 621 end_page_writeback(page);
614 page_cache_release(page); 622 page_cache_release(page);
615 wdata->pnfs_error = ret; 623 header->pnfs_error = ret;
616 goto out; 624 goto out;
617 } 625 }
618 if (likely(!bl_push_one_short_extent(be->be_inval))) 626 if (likely(!bl_push_one_short_extent(be->be_inval)))
@@ -620,11 +628,11 @@ fill_invalid_ext:
620 else { 628 else {
621 end_page_writeback(page); 629 end_page_writeback(page);
622 page_cache_release(page); 630 page_cache_release(page);
623 wdata->pnfs_error = -ENOMEM; 631 header->pnfs_error = -ENOMEM;
624 goto out; 632 goto out;
625 } 633 }
626 /* FIXME: This should be done in bi_end_io */ 634 /* FIXME: This should be done in bi_end_io */
627 mark_extents_written(BLK_LSEG2EXT(wdata->lseg), 635 mark_extents_written(BLK_LSEG2EXT(header->lseg),
628 page->index << PAGE_CACHE_SHIFT, 636 page->index << PAGE_CACHE_SHIFT,
629 PAGE_CACHE_SIZE); 637 PAGE_CACHE_SIZE);
630 638
@@ -632,7 +640,7 @@ fill_invalid_ext:
632 isect, page, be, 640 isect, page, be,
633 bl_end_io_write_zero, par); 641 bl_end_io_write_zero, par);
634 if (IS_ERR(bio)) { 642 if (IS_ERR(bio)) {
635 wdata->pnfs_error = PTR_ERR(bio); 643 header->pnfs_error = PTR_ERR(bio);
636 bio = NULL; 644 bio = NULL;
637 goto out; 645 goto out;
638 } 646 }
@@ -647,16 +655,16 @@ next_page:
647 655
648 /* Middle pages */ 656 /* Middle pages */
649 pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT; 657 pg_index = wdata->args.pgbase >> PAGE_CACHE_SHIFT;
650 for (i = pg_index; i < wdata->npages; i++) { 658 for (i = pg_index; i < wdata->pages.npages; i++) {
651 if (!extent_length) { 659 if (!extent_length) {
652 /* We've used up the previous extent */ 660 /* We've used up the previous extent */
653 bl_put_extent(be); 661 bl_put_extent(be);
654 bio = bl_submit_bio(WRITE, bio); 662 bio = bl_submit_bio(WRITE, bio);
655 /* Get the next one */ 663 /* Get the next one */
656 be = bl_find_get_extent(BLK_LSEG2EXT(wdata->lseg), 664 be = bl_find_get_extent(BLK_LSEG2EXT(header->lseg),
657 isect, NULL); 665 isect, NULL);
658 if (!be || !is_writable(be, isect)) { 666 if (!be || !is_writable(be, isect)) {
659 wdata->pnfs_error = -EINVAL; 667 header->pnfs_error = -EINVAL;
660 goto out; 668 goto out;
661 } 669 }
662 if (be->be_state == PNFS_BLOCK_INVALID_DATA) { 670 if (be->be_state == PNFS_BLOCK_INVALID_DATA) {
@@ -664,7 +672,7 @@ next_page:
664 be->be_inval))) 672 be->be_inval)))
665 par->bse_count++; 673 par->bse_count++;
666 else { 674 else {
667 wdata->pnfs_error = -ENOMEM; 675 header->pnfs_error = -ENOMEM;
668 goto out; 676 goto out;
669 } 677 }
670 } 678 }
@@ -677,15 +685,15 @@ next_page:
677 if (unlikely(ret)) { 685 if (unlikely(ret)) {
678 dprintk("%s bl_mark_sectors_init fail %d\n", 686 dprintk("%s bl_mark_sectors_init fail %d\n",
679 __func__, ret); 687 __func__, ret);
680 wdata->pnfs_error = ret; 688 header->pnfs_error = ret;
681 goto out; 689 goto out;
682 } 690 }
683 } 691 }
684 bio = bl_add_page_to_bio(bio, wdata->npages - i, WRITE, 692 bio = bl_add_page_to_bio(bio, wdata->pages.npages - i, WRITE,
685 isect, pages[i], be, 693 isect, pages[i], be,
686 bl_end_io_write, par); 694 bl_end_io_write, par);
687 if (IS_ERR(bio)) { 695 if (IS_ERR(bio)) {
688 wdata->pnfs_error = PTR_ERR(bio); 696 header->pnfs_error = PTR_ERR(bio);
689 bio = NULL; 697 bio = NULL;
690 goto out; 698 goto out;
691 } 699 }
diff --git a/fs/nfs/blocklayout/blocklayoutdev.c b/fs/nfs/blocklayout/blocklayoutdev.c
index a5c88a554d92..c96554245ccf 100644
--- a/fs/nfs/blocklayout/blocklayoutdev.c
+++ b/fs/nfs/blocklayout/blocklayoutdev.c
@@ -123,7 +123,7 @@ nfs4_blk_decode_device(struct nfs_server *server,
123 uint8_t *dataptr; 123 uint8_t *dataptr;
124 DECLARE_WAITQUEUE(wq, current); 124 DECLARE_WAITQUEUE(wq, current);
125 int offset, len, i, rc; 125 int offset, len, i, rc;
126 struct net *net = server->nfs_client->net; 126 struct net *net = server->nfs_client->cl_net;
127 struct nfs_net *nn = net_generic(net, nfs_net_id); 127 struct nfs_net *nn = net_generic(net, nfs_net_id);
128 struct bl_dev_msg *reply = &nn->bl_mount_reply; 128 struct bl_dev_msg *reply = &nn->bl_mount_reply;
129 129
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 60f7e4ec842c..7d108753af81 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -65,7 +65,7 @@ static DECLARE_WAIT_QUEUE_HEAD(nfs_client_active_wq);
65static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion) 65static int nfs_get_cb_ident_idr(struct nfs_client *clp, int minorversion)
66{ 66{
67 int ret = 0; 67 int ret = 0;
68 struct nfs_net *nn = net_generic(clp->net, nfs_net_id); 68 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
69 69
70 if (clp->rpc_ops->version != 4 || minorversion != 0) 70 if (clp->rpc_ops->version != 4 || minorversion != 0)
71 return ret; 71 return ret;
@@ -90,7 +90,9 @@ static bool nfs4_disable_idmapping = true;
90 * RPC cruft for NFS 90 * RPC cruft for NFS
91 */ 91 */
92static const struct rpc_version *nfs_version[5] = { 92static const struct rpc_version *nfs_version[5] = {
93#ifdef CONFIG_NFS_V2
93 [2] = &nfs_version2, 94 [2] = &nfs_version2,
95#endif
94#ifdef CONFIG_NFS_V3 96#ifdef CONFIG_NFS_V3
95 [3] = &nfs_version3, 97 [3] = &nfs_version3,
96#endif 98#endif
@@ -129,6 +131,7 @@ const struct rpc_program nfsacl_program = {
129#endif /* CONFIG_NFS_V3_ACL */ 131#endif /* CONFIG_NFS_V3_ACL */
130 132
131struct nfs_client_initdata { 133struct nfs_client_initdata {
134 unsigned long init_flags;
132 const char *hostname; 135 const char *hostname;
133 const struct sockaddr *addr; 136 const struct sockaddr *addr;
134 size_t addrlen; 137 size_t addrlen;
@@ -172,7 +175,7 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
172 clp->cl_rpcclient = ERR_PTR(-EINVAL); 175 clp->cl_rpcclient = ERR_PTR(-EINVAL);
173 176
174 clp->cl_proto = cl_init->proto; 177 clp->cl_proto = cl_init->proto;
175 clp->net = get_net(cl_init->net); 178 clp->cl_net = get_net(cl_init->net);
176 179
177#ifdef CONFIG_NFS_V4 180#ifdef CONFIG_NFS_V4
178 err = nfs_get_cb_ident_idr(clp, cl_init->minorversion); 181 err = nfs_get_cb_ident_idr(clp, cl_init->minorversion);
@@ -182,7 +185,6 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
182 spin_lock_init(&clp->cl_lock); 185 spin_lock_init(&clp->cl_lock);
183 INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state); 186 INIT_DELAYED_WORK(&clp->cl_renewd, nfs4_renew_state);
184 rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client"); 187 rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS client");
185 clp->cl_boot_time = CURRENT_TIME;
186 clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED; 188 clp->cl_state = 1 << NFS4CLNT_LEASE_EXPIRED;
187 clp->cl_minorversion = cl_init->minorversion; 189 clp->cl_minorversion = cl_init->minorversion;
188 clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion]; 190 clp->cl_mvops = nfs_v4_minor_ops[cl_init->minorversion];
@@ -207,6 +209,7 @@ static void nfs4_shutdown_session(struct nfs_client *clp)
207 if (nfs4_has_session(clp)) { 209 if (nfs4_has_session(clp)) {
208 nfs4_deviceid_purge_client(clp); 210 nfs4_deviceid_purge_client(clp);
209 nfs4_destroy_session(clp->cl_session); 211 nfs4_destroy_session(clp->cl_session);
212 nfs4_destroy_clientid(clp);
210 } 213 }
211 214
212} 215}
@@ -235,6 +238,9 @@ static void nfs4_shutdown_client(struct nfs_client *clp)
235 nfs_idmap_delete(clp); 238 nfs_idmap_delete(clp);
236 239
237 rpc_destroy_wait_queue(&clp->cl_rpcwaitq); 240 rpc_destroy_wait_queue(&clp->cl_rpcwaitq);
241 kfree(clp->cl_serverowner);
242 kfree(clp->cl_serverscope);
243 kfree(clp->cl_implid);
238} 244}
239 245
240/* idr_remove_all is not needed as all id's are removed by nfs_put_client */ 246/* idr_remove_all is not needed as all id's are removed by nfs_put_client */
@@ -248,7 +254,7 @@ void nfs_cleanup_cb_ident_idr(struct net *net)
248/* nfs_client_lock held */ 254/* nfs_client_lock held */
249static void nfs_cb_idr_remove_locked(struct nfs_client *clp) 255static void nfs_cb_idr_remove_locked(struct nfs_client *clp)
250{ 256{
251 struct nfs_net *nn = net_generic(clp->net, nfs_net_id); 257 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
252 258
253 if (clp->cl_cb_ident) 259 if (clp->cl_cb_ident)
254 idr_remove(&nn->cb_ident_idr, clp->cl_cb_ident); 260 idr_remove(&nn->cb_ident_idr, clp->cl_cb_ident);
@@ -301,10 +307,8 @@ static void nfs_free_client(struct nfs_client *clp)
301 if (clp->cl_machine_cred != NULL) 307 if (clp->cl_machine_cred != NULL)
302 put_rpccred(clp->cl_machine_cred); 308 put_rpccred(clp->cl_machine_cred);
303 309
304 put_net(clp->net); 310 put_net(clp->cl_net);
305 kfree(clp->cl_hostname); 311 kfree(clp->cl_hostname);
306 kfree(clp->server_scope);
307 kfree(clp->impl_id);
308 kfree(clp); 312 kfree(clp);
309 313
310 dprintk("<-- nfs_free_client()\n"); 314 dprintk("<-- nfs_free_client()\n");
@@ -321,7 +325,7 @@ void nfs_put_client(struct nfs_client *clp)
321 return; 325 return;
322 326
323 dprintk("--> nfs_put_client({%d})\n", atomic_read(&clp->cl_count)); 327 dprintk("--> nfs_put_client({%d})\n", atomic_read(&clp->cl_count));
324 nn = net_generic(clp->net, nfs_net_id); 328 nn = net_generic(clp->cl_net, nfs_net_id);
325 329
326 if (atomic_dec_and_lock(&clp->cl_count, &nn->nfs_client_lock)) { 330 if (atomic_dec_and_lock(&clp->cl_count, &nn->nfs_client_lock)) {
327 list_del(&clp->cl_share_link); 331 list_del(&clp->cl_share_link);
@@ -456,6 +460,8 @@ static bool nfs4_cb_match_client(const struct sockaddr *addr,
456 clp->cl_cons_state == NFS_CS_SESSION_INITING)) 460 clp->cl_cons_state == NFS_CS_SESSION_INITING))
457 return false; 461 return false;
458 462
463 smp_rmb();
464
459 /* Match the version and minorversion */ 465 /* Match the version and minorversion */
460 if (clp->rpc_ops->version != 4 || 466 if (clp->rpc_ops->version != 4 ||
461 clp->cl_minorversion != minorversion) 467 clp->cl_minorversion != minorversion)
@@ -504,6 +510,47 @@ static struct nfs_client *nfs_match_client(const struct nfs_client_initdata *dat
504 return NULL; 510 return NULL;
505} 511}
506 512
513static bool nfs_client_init_is_complete(const struct nfs_client *clp)
514{
515 return clp->cl_cons_state != NFS_CS_INITING;
516}
517
518int nfs_wait_client_init_complete(const struct nfs_client *clp)
519{
520 return wait_event_killable(nfs_client_active_wq,
521 nfs_client_init_is_complete(clp));
522}
523
524/*
525 * Found an existing client. Make sure it's ready before returning.
526 */
527static struct nfs_client *
528nfs_found_client(const struct nfs_client_initdata *cl_init,
529 struct nfs_client *clp)
530{
531 int error;
532
533 error = nfs_wait_client_init_complete(clp);
534 if (error < 0) {
535 nfs_put_client(clp);
536 return ERR_PTR(-ERESTARTSYS);
537 }
538
539 if (clp->cl_cons_state < NFS_CS_READY) {
540 error = clp->cl_cons_state;
541 nfs_put_client(clp);
542 return ERR_PTR(error);
543 }
544
545 smp_rmb();
546
547 BUG_ON(clp->cl_cons_state != NFS_CS_READY);
548
549 dprintk("<-- %s found nfs_client %p for %s\n",
550 __func__, clp, cl_init->hostname ?: "");
551 return clp;
552}
553
507/* 554/*
508 * Look up a client by IP address and protocol version 555 * Look up a client by IP address and protocol version
509 * - creates a new record if one doesn't yet exist 556 * - creates a new record if one doesn't yet exist
@@ -512,11 +559,9 @@ static struct nfs_client *
512nfs_get_client(const struct nfs_client_initdata *cl_init, 559nfs_get_client(const struct nfs_client_initdata *cl_init,
513 const struct rpc_timeout *timeparms, 560 const struct rpc_timeout *timeparms,
514 const char *ip_addr, 561 const char *ip_addr,
515 rpc_authflavor_t authflavour, 562 rpc_authflavor_t authflavour)
516 int noresvport)
517{ 563{
518 struct nfs_client *clp, *new = NULL; 564 struct nfs_client *clp, *new = NULL;
519 int error;
520 struct nfs_net *nn = net_generic(cl_init->net, nfs_net_id); 565 struct nfs_net *nn = net_generic(cl_init->net, nfs_net_id);
521 566
522 dprintk("--> nfs_get_client(%s,v%u)\n", 567 dprintk("--> nfs_get_client(%s,v%u)\n",
@@ -527,60 +572,29 @@ nfs_get_client(const struct nfs_client_initdata *cl_init,
527 spin_lock(&nn->nfs_client_lock); 572 spin_lock(&nn->nfs_client_lock);
528 573
529 clp = nfs_match_client(cl_init); 574 clp = nfs_match_client(cl_init);
530 if (clp) 575 if (clp) {
531 goto found_client; 576 spin_unlock(&nn->nfs_client_lock);
532 if (new) 577 if (new)
533 goto install_client; 578 nfs_free_client(new);
579 return nfs_found_client(cl_init, clp);
580 }
581 if (new) {
582 list_add(&new->cl_share_link, &nn->nfs_client_list);
583 spin_unlock(&nn->nfs_client_lock);
584 new->cl_flags = cl_init->init_flags;
585 return cl_init->rpc_ops->init_client(new,
586 timeparms, ip_addr,
587 authflavour);
588 }
534 589
535 spin_unlock(&nn->nfs_client_lock); 590 spin_unlock(&nn->nfs_client_lock);
536 591
537 new = nfs_alloc_client(cl_init); 592 new = nfs_alloc_client(cl_init);
538 } while (!IS_ERR(new)); 593 } while (!IS_ERR(new));
539 594
540 dprintk("--> nfs_get_client() = %ld [failed]\n", PTR_ERR(new)); 595 dprintk("<-- nfs_get_client() Failed to find %s (%ld)\n",
596 cl_init->hostname ?: "", PTR_ERR(new));
541 return new; 597 return new;
542
543 /* install a new client and return with it unready */
544install_client:
545 clp = new;
546 list_add(&clp->cl_share_link, &nn->nfs_client_list);
547 spin_unlock(&nn->nfs_client_lock);
548
549 error = cl_init->rpc_ops->init_client(clp, timeparms, ip_addr,
550 authflavour, noresvport);
551 if (error < 0) {
552 nfs_put_client(clp);
553 return ERR_PTR(error);
554 }
555 dprintk("--> nfs_get_client() = %p [new]\n", clp);
556 return clp;
557
558 /* found an existing client
559 * - make sure it's ready before returning
560 */
561found_client:
562 spin_unlock(&nn->nfs_client_lock);
563
564 if (new)
565 nfs_free_client(new);
566
567 error = wait_event_killable(nfs_client_active_wq,
568 clp->cl_cons_state < NFS_CS_INITING);
569 if (error < 0) {
570 nfs_put_client(clp);
571 return ERR_PTR(-ERESTARTSYS);
572 }
573
574 if (clp->cl_cons_state < NFS_CS_READY) {
575 error = clp->cl_cons_state;
576 nfs_put_client(clp);
577 return ERR_PTR(error);
578 }
579
580 BUG_ON(clp->cl_cons_state != NFS_CS_READY);
581
582 dprintk("--> nfs_get_client() = %p [share]\n", clp);
583 return clp;
584} 598}
585 599
586/* 600/*
@@ -588,27 +602,12 @@ found_client:
588 */ 602 */
589void nfs_mark_client_ready(struct nfs_client *clp, int state) 603void nfs_mark_client_ready(struct nfs_client *clp, int state)
590{ 604{
605 smp_wmb();
591 clp->cl_cons_state = state; 606 clp->cl_cons_state = state;
592 wake_up_all(&nfs_client_active_wq); 607 wake_up_all(&nfs_client_active_wq);
593} 608}
594 609
595/* 610/*
596 * With sessions, the client is not marked ready until after a
597 * successful EXCHANGE_ID and CREATE_SESSION.
598 *
599 * Map errors cl_cons_state errors to EPROTONOSUPPORT to indicate
600 * other versions of NFS can be tried.
601 */
602int nfs4_check_client_ready(struct nfs_client *clp)
603{
604 if (!nfs4_has_session(clp))
605 return 0;
606 if (clp->cl_cons_state < NFS_CS_READY)
607 return -EPROTONOSUPPORT;
608 return 0;
609}
610
611/*
612 * Initialise the timeout values for a connection 611 * Initialise the timeout values for a connection
613 */ 612 */
614static void nfs_init_timeout_values(struct rpc_timeout *to, int proto, 613static void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
@@ -654,12 +653,11 @@ static void nfs_init_timeout_values(struct rpc_timeout *to, int proto,
654 */ 653 */
655static int nfs_create_rpc_client(struct nfs_client *clp, 654static int nfs_create_rpc_client(struct nfs_client *clp,
656 const struct rpc_timeout *timeparms, 655 const struct rpc_timeout *timeparms,
657 rpc_authflavor_t flavor, 656 rpc_authflavor_t flavor)
658 int discrtry, int noresvport)
659{ 657{
660 struct rpc_clnt *clnt = NULL; 658 struct rpc_clnt *clnt = NULL;
661 struct rpc_create_args args = { 659 struct rpc_create_args args = {
662 .net = clp->net, 660 .net = clp->cl_net,
663 .protocol = clp->cl_proto, 661 .protocol = clp->cl_proto,
664 .address = (struct sockaddr *)&clp->cl_addr, 662 .address = (struct sockaddr *)&clp->cl_addr,
665 .addrsize = clp->cl_addrlen, 663 .addrsize = clp->cl_addrlen,
@@ -670,9 +668,9 @@ static int nfs_create_rpc_client(struct nfs_client *clp,
670 .authflavor = flavor, 668 .authflavor = flavor,
671 }; 669 };
672 670
673 if (discrtry) 671 if (test_bit(NFS_CS_DISCRTRY, &clp->cl_flags))
674 args.flags |= RPC_CLNT_CREATE_DISCRTRY; 672 args.flags |= RPC_CLNT_CREATE_DISCRTRY;
675 if (noresvport) 673 if (test_bit(NFS_CS_NORESVPORT, &clp->cl_flags))
676 args.flags |= RPC_CLNT_CREATE_NONPRIVPORT; 674 args.flags |= RPC_CLNT_CREATE_NONPRIVPORT;
677 675
678 if (!IS_ERR(clp->cl_rpcclient)) 676 if (!IS_ERR(clp->cl_rpcclient))
@@ -713,7 +711,7 @@ static int nfs_start_lockd(struct nfs_server *server)
713 .nfs_version = clp->rpc_ops->version, 711 .nfs_version = clp->rpc_ops->version,
714 .noresvport = server->flags & NFS_MOUNT_NORESVPORT ? 712 .noresvport = server->flags & NFS_MOUNT_NORESVPORT ?
715 1 : 0, 713 1 : 0,
716 .net = clp->net, 714 .net = clp->cl_net,
717 }; 715 };
718 716
719 if (nlm_init.nfs_version > 3) 717 if (nlm_init.nfs_version > 3)
@@ -805,36 +803,43 @@ static int nfs_init_server_rpcclient(struct nfs_server *server,
805 return 0; 803 return 0;
806} 804}
807 805
808/* 806/**
809 * Initialise an NFS2 or NFS3 client 807 * nfs_init_client - Initialise an NFS2 or NFS3 client
808 *
809 * @clp: nfs_client to initialise
810 * @timeparms: timeout parameters for underlying RPC transport
811 * @ip_addr: IP presentation address (not used)
812 * @authflavor: authentication flavor for underlying RPC transport
813 *
814 * Returns pointer to an NFS client, or an ERR_PTR value.
810 */ 815 */
811int nfs_init_client(struct nfs_client *clp, const struct rpc_timeout *timeparms, 816struct nfs_client *nfs_init_client(struct nfs_client *clp,
812 const char *ip_addr, rpc_authflavor_t authflavour, 817 const struct rpc_timeout *timeparms,
813 int noresvport) 818 const char *ip_addr, rpc_authflavor_t authflavour)
814{ 819{
815 int error; 820 int error;
816 821
817 if (clp->cl_cons_state == NFS_CS_READY) { 822 if (clp->cl_cons_state == NFS_CS_READY) {
818 /* the client is already initialised */ 823 /* the client is already initialised */
819 dprintk("<-- nfs_init_client() = 0 [already %p]\n", clp); 824 dprintk("<-- nfs_init_client() = 0 [already %p]\n", clp);
820 return 0; 825 return clp;
821 } 826 }
822 827
823 /* 828 /*
824 * Create a client RPC handle for doing FSSTAT with UNIX auth only 829 * Create a client RPC handle for doing FSSTAT with UNIX auth only
825 * - RFC 2623, sec 2.3.2 830 * - RFC 2623, sec 2.3.2
826 */ 831 */
827 error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX, 832 error = nfs_create_rpc_client(clp, timeparms, RPC_AUTH_UNIX);
828 0, noresvport);
829 if (error < 0) 833 if (error < 0)
830 goto error; 834 goto error;
831 nfs_mark_client_ready(clp, NFS_CS_READY); 835 nfs_mark_client_ready(clp, NFS_CS_READY);
832 return 0; 836 return clp;
833 837
834error: 838error:
835 nfs_mark_client_ready(clp, error); 839 nfs_mark_client_ready(clp, error);
840 nfs_put_client(clp);
836 dprintk("<-- nfs_init_client() = xerror %d\n", error); 841 dprintk("<-- nfs_init_client() = xerror %d\n", error);
837 return error; 842 return ERR_PTR(error);
838} 843}
839 844
840/* 845/*
@@ -847,7 +852,7 @@ static int nfs_init_server(struct nfs_server *server,
847 .hostname = data->nfs_server.hostname, 852 .hostname = data->nfs_server.hostname,
848 .addr = (const struct sockaddr *)&data->nfs_server.address, 853 .addr = (const struct sockaddr *)&data->nfs_server.address,
849 .addrlen = data->nfs_server.addrlen, 854 .addrlen = data->nfs_server.addrlen,
850 .rpc_ops = &nfs_v2_clientops, 855 .rpc_ops = NULL,
851 .proto = data->nfs_server.protocol, 856 .proto = data->nfs_server.protocol,
852 .net = data->net, 857 .net = data->net,
853 }; 858 };
@@ -857,17 +862,28 @@ static int nfs_init_server(struct nfs_server *server,
857 862
858 dprintk("--> nfs_init_server()\n"); 863 dprintk("--> nfs_init_server()\n");
859 864
865 switch (data->version) {
866#ifdef CONFIG_NFS_V2
867 case 2:
868 cl_init.rpc_ops = &nfs_v2_clientops;
869 break;
870#endif
860#ifdef CONFIG_NFS_V3 871#ifdef CONFIG_NFS_V3
861 if (data->version == 3) 872 case 3:
862 cl_init.rpc_ops = &nfs_v3_clientops; 873 cl_init.rpc_ops = &nfs_v3_clientops;
874 break;
863#endif 875#endif
876 default:
877 return -EPROTONOSUPPORT;
878 }
864 879
865 nfs_init_timeout_values(&timeparms, data->nfs_server.protocol, 880 nfs_init_timeout_values(&timeparms, data->nfs_server.protocol,
866 data->timeo, data->retrans); 881 data->timeo, data->retrans);
882 if (data->flags & NFS_MOUNT_NORESVPORT)
883 set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
867 884
868 /* Allocate or find a client reference we can use */ 885 /* Allocate or find a client reference we can use */
869 clp = nfs_get_client(&cl_init, &timeparms, NULL, RPC_AUTH_UNIX, 886 clp = nfs_get_client(&cl_init, &timeparms, NULL, RPC_AUTH_UNIX);
870 data->flags & NFS_MOUNT_NORESVPORT);
871 if (IS_ERR(clp)) { 887 if (IS_ERR(clp)) {
872 dprintk("<-- nfs_init_server() = error %ld\n", PTR_ERR(clp)); 888 dprintk("<-- nfs_init_server() = error %ld\n", PTR_ERR(clp));
873 return PTR_ERR(clp); 889 return PTR_ERR(clp);
@@ -880,7 +896,7 @@ static int nfs_init_server(struct nfs_server *server,
880 server->options = data->options; 896 server->options = data->options;
881 server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID| 897 server->caps |= NFS_CAP_HARDLINKS|NFS_CAP_SYMLINKS|NFS_CAP_FILEID|
882 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP| 898 NFS_CAP_MODE|NFS_CAP_NLINK|NFS_CAP_OWNER|NFS_CAP_OWNER_GROUP|
883 NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME; 899 NFS_CAP_ATIME|NFS_CAP_CTIME|NFS_CAP_MTIME|NFS_CAP_CHANGE_ATTR;
884 900
885 if (data->rsize) 901 if (data->rsize)
886 server->rsize = nfs_block_size(data->rsize, NULL); 902 server->rsize = nfs_block_size(data->rsize, NULL);
@@ -1048,7 +1064,7 @@ static void nfs_server_copy_userdata(struct nfs_server *target, struct nfs_serve
1048static void nfs_server_insert_lists(struct nfs_server *server) 1064static void nfs_server_insert_lists(struct nfs_server *server)
1049{ 1065{
1050 struct nfs_client *clp = server->nfs_client; 1066 struct nfs_client *clp = server->nfs_client;
1051 struct nfs_net *nn = net_generic(clp->net, nfs_net_id); 1067 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
1052 1068
1053 spin_lock(&nn->nfs_client_lock); 1069 spin_lock(&nn->nfs_client_lock);
1054 list_add_tail_rcu(&server->client_link, &clp->cl_superblocks); 1070 list_add_tail_rcu(&server->client_link, &clp->cl_superblocks);
@@ -1065,7 +1081,7 @@ static void nfs_server_remove_lists(struct nfs_server *server)
1065 1081
1066 if (clp == NULL) 1082 if (clp == NULL)
1067 return; 1083 return;
1068 nn = net_generic(clp->net, nfs_net_id); 1084 nn = net_generic(clp->cl_net, nfs_net_id);
1069 spin_lock(&nn->nfs_client_lock); 1085 spin_lock(&nn->nfs_client_lock);
1070 list_del_rcu(&server->client_link); 1086 list_del_rcu(&server->client_link);
1071 if (list_empty(&clp->cl_superblocks)) 1087 if (list_empty(&clp->cl_superblocks))
@@ -1333,21 +1349,27 @@ static int nfs4_init_client_minor_version(struct nfs_client *clp)
1333 * so that the client back channel can find the 1349 * so that the client back channel can find the
1334 * nfs_client struct 1350 * nfs_client struct
1335 */ 1351 */
1336 clp->cl_cons_state = NFS_CS_SESSION_INITING; 1352 nfs_mark_client_ready(clp, NFS_CS_SESSION_INITING);
1337 } 1353 }
1338#endif /* CONFIG_NFS_V4_1 */ 1354#endif /* CONFIG_NFS_V4_1 */
1339 1355
1340 return nfs4_init_callback(clp); 1356 return nfs4_init_callback(clp);
1341} 1357}
1342 1358
1343/* 1359/**
1344 * Initialise an NFS4 client record 1360 * nfs4_init_client - Initialise an NFS4 client record
1361 *
1362 * @clp: nfs_client to initialise
1363 * @timeparms: timeout parameters for underlying RPC transport
1364 * @ip_addr: callback IP address in presentation format
1365 * @authflavor: authentication flavor for underlying RPC transport
1366 *
1367 * Returns pointer to an NFS client, or an ERR_PTR value.
1345 */ 1368 */
1346int nfs4_init_client(struct nfs_client *clp, 1369struct nfs_client *nfs4_init_client(struct nfs_client *clp,
1347 const struct rpc_timeout *timeparms, 1370 const struct rpc_timeout *timeparms,
1348 const char *ip_addr, 1371 const char *ip_addr,
1349 rpc_authflavor_t authflavour, 1372 rpc_authflavor_t authflavour)
1350 int noresvport)
1351{ 1373{
1352 char buf[INET6_ADDRSTRLEN + 1]; 1374 char buf[INET6_ADDRSTRLEN + 1];
1353 int error; 1375 int error;
@@ -1355,14 +1377,14 @@ int nfs4_init_client(struct nfs_client *clp,
1355 if (clp->cl_cons_state == NFS_CS_READY) { 1377 if (clp->cl_cons_state == NFS_CS_READY) {
1356 /* the client is initialised already */ 1378 /* the client is initialised already */
1357 dprintk("<-- nfs4_init_client() = 0 [already %p]\n", clp); 1379 dprintk("<-- nfs4_init_client() = 0 [already %p]\n", clp);
1358 return 0; 1380 return clp;
1359 } 1381 }
1360 1382
1361 /* Check NFS protocol revision and initialize RPC op vector */ 1383 /* Check NFS protocol revision and initialize RPC op vector */
1362 clp->rpc_ops = &nfs_v4_clientops; 1384 clp->rpc_ops = &nfs_v4_clientops;
1363 1385
1364 error = nfs_create_rpc_client(clp, timeparms, authflavour, 1386 __set_bit(NFS_CS_DISCRTRY, &clp->cl_flags);
1365 1, noresvport); 1387 error = nfs_create_rpc_client(clp, timeparms, authflavour);
1366 if (error < 0) 1388 if (error < 0)
1367 goto error; 1389 goto error;
1368 1390
@@ -1395,12 +1417,13 @@ int nfs4_init_client(struct nfs_client *clp,
1395 1417
1396 if (!nfs4_has_session(clp)) 1418 if (!nfs4_has_session(clp))
1397 nfs_mark_client_ready(clp, NFS_CS_READY); 1419 nfs_mark_client_ready(clp, NFS_CS_READY);
1398 return 0; 1420 return clp;
1399 1421
1400error: 1422error:
1401 nfs_mark_client_ready(clp, error); 1423 nfs_mark_client_ready(clp, error);
1424 nfs_put_client(clp);
1402 dprintk("<-- nfs4_init_client() = xerror %d\n", error); 1425 dprintk("<-- nfs4_init_client() = xerror %d\n", error);
1403 return error; 1426 return ERR_PTR(error);
1404} 1427}
1405 1428
1406/* 1429/*
@@ -1429,9 +1452,11 @@ static int nfs4_set_client(struct nfs_server *server,
1429 1452
1430 dprintk("--> nfs4_set_client()\n"); 1453 dprintk("--> nfs4_set_client()\n");
1431 1454
1455 if (server->flags & NFS_MOUNT_NORESVPORT)
1456 set_bit(NFS_CS_NORESVPORT, &cl_init.init_flags);
1457
1432 /* Allocate or find a client reference we can use */ 1458 /* Allocate or find a client reference we can use */
1433 clp = nfs_get_client(&cl_init, timeparms, ip_addr, authflavour, 1459 clp = nfs_get_client(&cl_init, timeparms, ip_addr, authflavour);
1434 server->flags & NFS_MOUNT_NORESVPORT);
1435 if (IS_ERR(clp)) { 1460 if (IS_ERR(clp)) {
1436 error = PTR_ERR(clp); 1461 error = PTR_ERR(clp);
1437 goto error; 1462 goto error;
@@ -1465,8 +1490,8 @@ error:
1465 * the MDS. 1490 * the MDS.
1466 */ 1491 */
1467struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp, 1492struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
1468 const struct sockaddr *ds_addr, 1493 const struct sockaddr *ds_addr, int ds_addrlen,
1469 int ds_addrlen, int ds_proto) 1494 int ds_proto, unsigned int ds_timeo, unsigned int ds_retrans)
1470{ 1495{
1471 struct nfs_client_initdata cl_init = { 1496 struct nfs_client_initdata cl_init = {
1472 .addr = ds_addr, 1497 .addr = ds_addr,
@@ -1474,14 +1499,9 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
1474 .rpc_ops = &nfs_v4_clientops, 1499 .rpc_ops = &nfs_v4_clientops,
1475 .proto = ds_proto, 1500 .proto = ds_proto,
1476 .minorversion = mds_clp->cl_minorversion, 1501 .minorversion = mds_clp->cl_minorversion,
1477 .net = mds_clp->net, 1502 .net = mds_clp->cl_net,
1478 };
1479 struct rpc_timeout ds_timeout = {
1480 .to_initval = 15 * HZ,
1481 .to_maxval = 15 * HZ,
1482 .to_retries = 1,
1483 .to_exponential = 1,
1484 }; 1503 };
1504 struct rpc_timeout ds_timeout;
1485 struct nfs_client *clp; 1505 struct nfs_client *clp;
1486 1506
1487 /* 1507 /*
@@ -1489,8 +1509,9 @@ struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
1489 * cl_ipaddr so as to use the same EXCHANGE_ID co_ownerid as the MDS 1509 * cl_ipaddr so as to use the same EXCHANGE_ID co_ownerid as the MDS
1490 * (section 13.1 RFC 5661). 1510 * (section 13.1 RFC 5661).
1491 */ 1511 */
1512 nfs_init_timeout_values(&ds_timeout, ds_proto, ds_timeo, ds_retrans);
1492 clp = nfs_get_client(&cl_init, &ds_timeout, mds_clp->cl_ipaddr, 1513 clp = nfs_get_client(&cl_init, &ds_timeout, mds_clp->cl_ipaddr,
1493 mds_clp->cl_rpcclient->cl_auth->au_flavor, 0); 1514 mds_clp->cl_rpcclient->cl_auth->au_flavor);
1494 1515
1495 dprintk("<-- %s %p\n", __func__, clp); 1516 dprintk("<-- %s %p\n", __func__, clp);
1496 return clp; 1517 return clp;
@@ -1701,7 +1722,7 @@ struct nfs_server *nfs4_create_referral_server(struct nfs_clone_mount *data,
1701 rpc_protocol(parent_server->client), 1722 rpc_protocol(parent_server->client),
1702 parent_server->client->cl_timeout, 1723 parent_server->client->cl_timeout,
1703 parent_client->cl_mvops->minor_version, 1724 parent_client->cl_mvops->minor_version,
1704 parent_client->net); 1725 parent_client->cl_net);
1705 if (error < 0) 1726 if (error < 0)
1706 goto error; 1727 goto error;
1707 1728
@@ -1805,6 +1826,7 @@ void nfs_clients_init(struct net *net)
1805 idr_init(&nn->cb_ident_idr); 1826 idr_init(&nn->cb_ident_idr);
1806#endif 1827#endif
1807 spin_lock_init(&nn->nfs_client_lock); 1828 spin_lock_init(&nn->nfs_client_lock);
1829 nn->boot_time = CURRENT_TIME;
1808} 1830}
1809 1831
1810#ifdef CONFIG_PROC_FS 1832#ifdef CONFIG_PROC_FS
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 89af1d269274..bd3a9601d32d 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -316,6 +316,10 @@ out:
316 * nfs_client_return_marked_delegations - return previously marked delegations 316 * nfs_client_return_marked_delegations - return previously marked delegations
317 * @clp: nfs_client to process 317 * @clp: nfs_client to process
318 * 318 *
319 * Note that this function is designed to be called by the state
320 * manager thread. For this reason, it cannot flush the dirty data,
321 * since that could deadlock in case of a state recovery error.
322 *
319 * Returns zero on success, or a negative errno value. 323 * Returns zero on success, or a negative errno value.
320 */ 324 */
321int nfs_client_return_marked_delegations(struct nfs_client *clp) 325int nfs_client_return_marked_delegations(struct nfs_client *clp)
@@ -340,11 +344,9 @@ restart:
340 server); 344 server);
341 rcu_read_unlock(); 345 rcu_read_unlock();
342 346
343 if (delegation != NULL) { 347 if (delegation != NULL)
344 filemap_flush(inode->i_mapping);
345 err = __nfs_inode_return_delegation(inode, 348 err = __nfs_inode_return_delegation(inode,
346 delegation, 0); 349 delegation, 0);
347 }
348 iput(inode); 350 iput(inode);
349 if (!err) 351 if (!err)
350 goto restart; 352 goto restart;
@@ -380,6 +382,10 @@ void nfs_inode_return_delegation_noreclaim(struct inode *inode)
380 * nfs_inode_return_delegation - synchronously return a delegation 382 * nfs_inode_return_delegation - synchronously return a delegation
381 * @inode: inode to process 383 * @inode: inode to process
382 * 384 *
385 * This routine will always flush any dirty data to disk on the
386 * assumption that if we need to return the delegation, then
387 * we should stop caching.
388 *
383 * Returns zero on success, or a negative errno value. 389 * Returns zero on success, or a negative errno value.
384 */ 390 */
385int nfs_inode_return_delegation(struct inode *inode) 391int nfs_inode_return_delegation(struct inode *inode)
@@ -389,10 +395,10 @@ int nfs_inode_return_delegation(struct inode *inode)
389 struct nfs_delegation *delegation; 395 struct nfs_delegation *delegation;
390 int err = 0; 396 int err = 0;
391 397
398 nfs_wb_all(inode);
392 if (rcu_access_pointer(nfsi->delegation) != NULL) { 399 if (rcu_access_pointer(nfsi->delegation) != NULL) {
393 delegation = nfs_detach_delegation(nfsi, server); 400 delegation = nfs_detach_delegation(nfsi, server);
394 if (delegation != NULL) { 401 if (delegation != NULL) {
395 nfs_wb_all(inode);
396 err = __nfs_inode_return_delegation(inode, delegation, 1); 402 err = __nfs_inode_return_delegation(inode, delegation, 1);
397 } 403 }
398 } 404 }
@@ -538,6 +544,8 @@ int nfs_async_inode_return_delegation(struct inode *inode,
538 struct nfs_client *clp = server->nfs_client; 544 struct nfs_client *clp = server->nfs_client;
539 struct nfs_delegation *delegation; 545 struct nfs_delegation *delegation;
540 546
547 filemap_flush(inode->i_mapping);
548
541 rcu_read_lock(); 549 rcu_read_lock();
542 delegation = rcu_dereference(NFS_I(inode)->delegation); 550 delegation = rcu_dereference(NFS_I(inode)->delegation);
543 551
diff --git a/fs/nfs/delegation.h b/fs/nfs/delegation.h
index cd6a7a8dadae..72709c4193fa 100644
--- a/fs/nfs/delegation.h
+++ b/fs/nfs/delegation.h
@@ -66,6 +66,7 @@ static inline int nfs_have_delegation(struct inode *inode, fmode_t flags)
66 66
67static inline int nfs_inode_return_delegation(struct inode *inode) 67static inline int nfs_inode_return_delegation(struct inode *inode)
68{ 68{
69 nfs_wb_all(inode);
69 return 0; 70 return 0;
70} 71}
71#endif 72#endif
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index eedd24d0ad2e..0989a2099688 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -475,6 +475,29 @@ different:
475} 475}
476 476
477static 477static
478bool nfs_use_readdirplus(struct inode *dir, struct file *filp)
479{
480 if (!nfs_server_capable(dir, NFS_CAP_READDIRPLUS))
481 return false;
482 if (test_and_clear_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags))
483 return true;
484 if (filp->f_pos == 0)
485 return true;
486 return false;
487}
488
489/*
490 * This function is called by the lookup code to request the use of
491 * readdirplus to accelerate any future lookups in the same
492 * directory.
493 */
494static
495void nfs_advise_use_readdirplus(struct inode *dir)
496{
497 set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(dir)->flags);
498}
499
500static
478void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry) 501void nfs_prime_dcache(struct dentry *parent, struct nfs_entry *entry)
479{ 502{
480 struct qstr filename = QSTR_INIT(entry->name, entry->len); 503 struct qstr filename = QSTR_INIT(entry->name, entry->len);
@@ -871,7 +894,7 @@ static int nfs_readdir(struct file *filp, void *dirent, filldir_t filldir)
871 desc->file = filp; 894 desc->file = filp;
872 desc->dir_cookie = &dir_ctx->dir_cookie; 895 desc->dir_cookie = &dir_ctx->dir_cookie;
873 desc->decode = NFS_PROTO(inode)->decode_dirent; 896 desc->decode = NFS_PROTO(inode)->decode_dirent;
874 desc->plus = NFS_USE_READDIRPLUS(inode); 897 desc->plus = nfs_use_readdirplus(inode, filp) ? 1 : 0;
875 898
876 nfs_block_sillyrename(dentry); 899 nfs_block_sillyrename(dentry);
877 res = nfs_revalidate_mapping(inode, filp->f_mapping); 900 res = nfs_revalidate_mapping(inode, filp->f_mapping);
@@ -1111,7 +1134,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
1111 if (!inode) { 1134 if (!inode) {
1112 if (nfs_neg_need_reval(dir, dentry, nd)) 1135 if (nfs_neg_need_reval(dir, dentry, nd))
1113 goto out_bad; 1136 goto out_bad;
1114 goto out_valid; 1137 goto out_valid_noent;
1115 } 1138 }
1116 1139
1117 if (is_bad_inode(inode)) { 1140 if (is_bad_inode(inode)) {
@@ -1140,7 +1163,7 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
1140 if (fhandle == NULL || fattr == NULL) 1163 if (fhandle == NULL || fattr == NULL)
1141 goto out_error; 1164 goto out_error;
1142 1165
1143 error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr); 1166 error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
1144 if (error) 1167 if (error)
1145 goto out_bad; 1168 goto out_bad;
1146 if (nfs_compare_fh(NFS_FH(inode), fhandle)) 1169 if (nfs_compare_fh(NFS_FH(inode), fhandle))
@@ -1153,6 +1176,9 @@ static int nfs_lookup_revalidate(struct dentry *dentry, struct nameidata *nd)
1153out_set_verifier: 1176out_set_verifier:
1154 nfs_set_verifier(dentry, nfs_save_change_attribute(dir)); 1177 nfs_set_verifier(dentry, nfs_save_change_attribute(dir));
1155 out_valid: 1178 out_valid:
1179 /* Success: notify readdir to use READDIRPLUS */
1180 nfs_advise_use_readdirplus(dir);
1181 out_valid_noent:
1156 dput(parent); 1182 dput(parent);
1157 dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is valid\n", 1183 dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is valid\n",
1158 __func__, dentry->d_parent->d_name.name, 1184 __func__, dentry->d_parent->d_name.name,
@@ -1296,7 +1322,7 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
1296 parent = dentry->d_parent; 1322 parent = dentry->d_parent;
1297 /* Protect against concurrent sillydeletes */ 1323 /* Protect against concurrent sillydeletes */
1298 nfs_block_sillyrename(parent); 1324 nfs_block_sillyrename(parent);
1299 error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr); 1325 error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
1300 if (error == -ENOENT) 1326 if (error == -ENOENT)
1301 goto no_entry; 1327 goto no_entry;
1302 if (error < 0) { 1328 if (error < 0) {
@@ -1308,6 +1334,9 @@ static struct dentry *nfs_lookup(struct inode *dir, struct dentry * dentry, stru
1308 if (IS_ERR(res)) 1334 if (IS_ERR(res))
1309 goto out_unblock_sillyrename; 1335 goto out_unblock_sillyrename;
1310 1336
1337 /* Success: notify readdir to use READDIRPLUS */
1338 nfs_advise_use_readdirplus(dir);
1339
1311no_entry: 1340no_entry:
1312 res = d_materialise_unique(dentry, inode); 1341 res = d_materialise_unique(dentry, inode);
1313 if (res != NULL) { 1342 if (res != NULL) {
@@ -1643,7 +1672,7 @@ int nfs_instantiate(struct dentry *dentry, struct nfs_fh *fhandle,
1643 if (dentry->d_inode) 1672 if (dentry->d_inode)
1644 goto out; 1673 goto out;
1645 if (fhandle->size == 0) { 1674 if (fhandle->size == 0) {
1646 error = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, &dentry->d_name, fhandle, fattr); 1675 error = NFS_PROTO(dir)->lookup(dir, &dentry->d_name, fhandle, fattr);
1647 if (error) 1676 if (error)
1648 goto out_error; 1677 goto out_error;
1649 } 1678 }
diff --git a/fs/nfs/direct.c b/fs/nfs/direct.c
index 481be7f7bdd3..23d170bc44f4 100644
--- a/fs/nfs/direct.c
+++ b/fs/nfs/direct.c
@@ -56,6 +56,7 @@
56 56
57#include "internal.h" 57#include "internal.h"
58#include "iostat.h" 58#include "iostat.h"
59#include "pnfs.h"
59 60
60#define NFSDBG_FACILITY NFSDBG_VFS 61#define NFSDBG_FACILITY NFSDBG_VFS
61 62
@@ -81,16 +82,19 @@ struct nfs_direct_req {
81 struct completion completion; /* wait for i/o completion */ 82 struct completion completion; /* wait for i/o completion */
82 83
83 /* commit state */ 84 /* commit state */
84 struct list_head rewrite_list; /* saved nfs_write_data structs */ 85 struct nfs_mds_commit_info mds_cinfo; /* Storage for cinfo */
85 struct nfs_write_data * commit_data; /* special write_data for commits */ 86 struct pnfs_ds_commit_info ds_cinfo; /* Storage for cinfo */
87 struct work_struct work;
86 int flags; 88 int flags;
87#define NFS_ODIRECT_DO_COMMIT (1) /* an unstable reply was received */ 89#define NFS_ODIRECT_DO_COMMIT (1) /* an unstable reply was received */
88#define NFS_ODIRECT_RESCHED_WRITES (2) /* write verification failed */ 90#define NFS_ODIRECT_RESCHED_WRITES (2) /* write verification failed */
89 struct nfs_writeverf verf; /* unstable write verifier */ 91 struct nfs_writeverf verf; /* unstable write verifier */
90}; 92};
91 93
94static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops;
95static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops;
92static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode); 96static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode);
93static const struct rpc_call_ops nfs_write_direct_ops; 97static void nfs_direct_write_schedule_work(struct work_struct *work);
94 98
95static inline void get_dreq(struct nfs_direct_req *dreq) 99static inline void get_dreq(struct nfs_direct_req *dreq)
96{ 100{
@@ -124,22 +128,6 @@ ssize_t nfs_direct_IO(int rw, struct kiocb *iocb, const struct iovec *iov, loff_
124 return -EINVAL; 128 return -EINVAL;
125} 129}
126 130
127static void nfs_direct_dirty_pages(struct page **pages, unsigned int pgbase, size_t count)
128{
129 unsigned int npages;
130 unsigned int i;
131
132 if (count == 0)
133 return;
134 pages += (pgbase >> PAGE_SHIFT);
135 npages = (count + (pgbase & ~PAGE_MASK) + PAGE_SIZE - 1) >> PAGE_SHIFT;
136 for (i = 0; i < npages; i++) {
137 struct page *page = pages[i];
138 if (!PageCompound(page))
139 set_page_dirty(page);
140 }
141}
142
143static void nfs_direct_release_pages(struct page **pages, unsigned int npages) 131static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
144{ 132{
145 unsigned int i; 133 unsigned int i;
@@ -147,26 +135,30 @@ static void nfs_direct_release_pages(struct page **pages, unsigned int npages)
147 page_cache_release(pages[i]); 135 page_cache_release(pages[i]);
148} 136}
149 137
138void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
139 struct nfs_direct_req *dreq)
140{
141 cinfo->lock = &dreq->lock;
142 cinfo->mds = &dreq->mds_cinfo;
143 cinfo->ds = &dreq->ds_cinfo;
144 cinfo->dreq = dreq;
145 cinfo->completion_ops = &nfs_direct_commit_completion_ops;
146}
147
150static inline struct nfs_direct_req *nfs_direct_req_alloc(void) 148static inline struct nfs_direct_req *nfs_direct_req_alloc(void)
151{ 149{
152 struct nfs_direct_req *dreq; 150 struct nfs_direct_req *dreq;
153 151
154 dreq = kmem_cache_alloc(nfs_direct_cachep, GFP_KERNEL); 152 dreq = kmem_cache_zalloc(nfs_direct_cachep, GFP_KERNEL);
155 if (!dreq) 153 if (!dreq)
156 return NULL; 154 return NULL;
157 155
158 kref_init(&dreq->kref); 156 kref_init(&dreq->kref);
159 kref_get(&dreq->kref); 157 kref_get(&dreq->kref);
160 init_completion(&dreq->completion); 158 init_completion(&dreq->completion);
161 INIT_LIST_HEAD(&dreq->rewrite_list); 159 INIT_LIST_HEAD(&dreq->mds_cinfo.list);
162 dreq->iocb = NULL; 160 INIT_WORK(&dreq->work, nfs_direct_write_schedule_work);
163 dreq->ctx = NULL;
164 dreq->l_ctx = NULL;
165 spin_lock_init(&dreq->lock); 161 spin_lock_init(&dreq->lock);
166 atomic_set(&dreq->io_count, 0);
167 dreq->count = 0;
168 dreq->error = 0;
169 dreq->flags = 0;
170 162
171 return dreq; 163 return dreq;
172} 164}
@@ -226,47 +218,80 @@ static void nfs_direct_complete(struct nfs_direct_req *dreq)
226 nfs_direct_req_release(dreq); 218 nfs_direct_req_release(dreq);
227} 219}
228 220
229/* 221static void nfs_direct_readpage_release(struct nfs_page *req)
230 * We must hold a reference to all the pages in this direct read request
231 * until the RPCs complete. This could be long *after* we are woken up in
232 * nfs_direct_wait (for instance, if someone hits ^C on a slow server).
233 */
234static void nfs_direct_read_result(struct rpc_task *task, void *calldata)
235{ 222{
236 struct nfs_read_data *data = calldata; 223 dprintk("NFS: direct read done (%s/%lld %d@%lld)\n",
237 224 req->wb_context->dentry->d_inode->i_sb->s_id,
238 nfs_readpage_result(task, data); 225 (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
226 req->wb_bytes,
227 (long long)req_offset(req));
228 nfs_release_request(req);
239} 229}
240 230
241static void nfs_direct_read_release(void *calldata) 231static void nfs_direct_read_completion(struct nfs_pgio_header *hdr)
242{ 232{
233 unsigned long bytes = 0;
234 struct nfs_direct_req *dreq = hdr->dreq;
243 235
244 struct nfs_read_data *data = calldata; 236 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
245 struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req; 237 goto out_put;
246 int status = data->task.tk_status;
247 238
248 spin_lock(&dreq->lock); 239 spin_lock(&dreq->lock);
249 if (unlikely(status < 0)) { 240 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) && (hdr->good_bytes == 0))
250 dreq->error = status; 241 dreq->error = hdr->error;
251 spin_unlock(&dreq->lock); 242 else
252 } else { 243 dreq->count += hdr->good_bytes;
253 dreq->count += data->res.count; 244 spin_unlock(&dreq->lock);
254 spin_unlock(&dreq->lock);
255 nfs_direct_dirty_pages(data->pagevec,
256 data->args.pgbase,
257 data->res.count);
258 }
259 nfs_direct_release_pages(data->pagevec, data->npages);
260 245
246 while (!list_empty(&hdr->pages)) {
247 struct nfs_page *req = nfs_list_entry(hdr->pages.next);
248 struct page *page = req->wb_page;
249
250 if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
251 if (bytes > hdr->good_bytes)
252 zero_user(page, 0, PAGE_SIZE);
253 else if (hdr->good_bytes - bytes < PAGE_SIZE)
254 zero_user_segment(page,
255 hdr->good_bytes & ~PAGE_MASK,
256 PAGE_SIZE);
257 }
258 if (!PageCompound(page)) {
259 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
260 if (bytes < hdr->good_bytes)
261 set_page_dirty(page);
262 } else
263 set_page_dirty(page);
264 }
265 bytes += req->wb_bytes;
266 nfs_list_remove_request(req);
267 nfs_direct_readpage_release(req);
268 }
269out_put:
261 if (put_dreq(dreq)) 270 if (put_dreq(dreq))
262 nfs_direct_complete(dreq); 271 nfs_direct_complete(dreq);
263 nfs_readdata_free(data); 272 hdr->release(hdr);
273}
274
275static void nfs_read_sync_pgio_error(struct list_head *head)
276{
277 struct nfs_page *req;
278
279 while (!list_empty(head)) {
280 req = nfs_list_entry(head->next);
281 nfs_list_remove_request(req);
282 nfs_release_request(req);
283 }
264} 284}
265 285
266static const struct rpc_call_ops nfs_read_direct_ops = { 286static void nfs_direct_pgio_init(struct nfs_pgio_header *hdr)
267 .rpc_call_prepare = nfs_read_prepare, 287{
268 .rpc_call_done = nfs_direct_read_result, 288 get_dreq(hdr->dreq);
269 .rpc_release = nfs_direct_read_release, 289}
290
291static const struct nfs_pgio_completion_ops nfs_direct_read_completion_ops = {
292 .error_cleanup = nfs_read_sync_pgio_error,
293 .init_hdr = nfs_direct_pgio_init,
294 .completion = nfs_direct_read_completion,
270}; 295};
271 296
272/* 297/*
@@ -276,107 +301,82 @@ static const struct rpc_call_ops nfs_read_direct_ops = {
276 * handled automatically by nfs_direct_read_result(). Otherwise, if 301 * handled automatically by nfs_direct_read_result(). Otherwise, if
277 * no requests have been sent, just return an error. 302 * no requests have been sent, just return an error.
278 */ 303 */
279static ssize_t nfs_direct_read_schedule_segment(struct nfs_direct_req *dreq, 304static ssize_t nfs_direct_read_schedule_segment(struct nfs_pageio_descriptor *desc,
280 const struct iovec *iov, 305 const struct iovec *iov,
281 loff_t pos) 306 loff_t pos)
282{ 307{
308 struct nfs_direct_req *dreq = desc->pg_dreq;
283 struct nfs_open_context *ctx = dreq->ctx; 309 struct nfs_open_context *ctx = dreq->ctx;
284 struct inode *inode = ctx->dentry->d_inode; 310 struct inode *inode = ctx->dentry->d_inode;
285 unsigned long user_addr = (unsigned long)iov->iov_base; 311 unsigned long user_addr = (unsigned long)iov->iov_base;
286 size_t count = iov->iov_len; 312 size_t count = iov->iov_len;
287 size_t rsize = NFS_SERVER(inode)->rsize; 313 size_t rsize = NFS_SERVER(inode)->rsize;
288 struct rpc_task *task;
289 struct rpc_message msg = {
290 .rpc_cred = ctx->cred,
291 };
292 struct rpc_task_setup task_setup_data = {
293 .rpc_client = NFS_CLIENT(inode),
294 .rpc_message = &msg,
295 .callback_ops = &nfs_read_direct_ops,
296 .workqueue = nfsiod_workqueue,
297 .flags = RPC_TASK_ASYNC,
298 };
299 unsigned int pgbase; 314 unsigned int pgbase;
300 int result; 315 int result;
301 ssize_t started = 0; 316 ssize_t started = 0;
317 struct page **pagevec = NULL;
318 unsigned int npages;
302 319
303 do { 320 do {
304 struct nfs_read_data *data;
305 size_t bytes; 321 size_t bytes;
322 int i;
306 323
307 pgbase = user_addr & ~PAGE_MASK; 324 pgbase = user_addr & ~PAGE_MASK;
308 bytes = min(rsize,count); 325 bytes = min(max_t(size_t, rsize, PAGE_SIZE), count);
309 326
310 result = -ENOMEM; 327 result = -ENOMEM;
311 data = nfs_readdata_alloc(nfs_page_array_len(pgbase, bytes)); 328 npages = nfs_page_array_len(pgbase, bytes);
312 if (unlikely(!data)) 329 if (!pagevec)
330 pagevec = kmalloc(npages * sizeof(struct page *),
331 GFP_KERNEL);
332 if (!pagevec)
313 break; 333 break;
314
315 down_read(&current->mm->mmap_sem); 334 down_read(&current->mm->mmap_sem);
316 result = get_user_pages(current, current->mm, user_addr, 335 result = get_user_pages(current, current->mm, user_addr,
317 data->npages, 1, 0, data->pagevec, NULL); 336 npages, 1, 0, pagevec, NULL);
318 up_read(&current->mm->mmap_sem); 337 up_read(&current->mm->mmap_sem);
319 if (result < 0) { 338 if (result < 0)
320 nfs_readdata_free(data);
321 break; 339 break;
322 } 340 if ((unsigned)result < npages) {
323 if ((unsigned)result < data->npages) {
324 bytes = result * PAGE_SIZE; 341 bytes = result * PAGE_SIZE;
325 if (bytes <= pgbase) { 342 if (bytes <= pgbase) {
326 nfs_direct_release_pages(data->pagevec, result); 343 nfs_direct_release_pages(pagevec, result);
327 nfs_readdata_free(data);
328 break; 344 break;
329 } 345 }
330 bytes -= pgbase; 346 bytes -= pgbase;
331 data->npages = result; 347 npages = result;
332 } 348 }
333 349
334 get_dreq(dreq); 350 for (i = 0; i < npages; i++) {
335 351 struct nfs_page *req;
336 data->req = (struct nfs_page *) dreq; 352 unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
337 data->inode = inode; 353 /* XXX do we need to do the eof zeroing found in async_filler? */
338 data->cred = msg.rpc_cred; 354 req = nfs_create_request(dreq->ctx, dreq->inode,
339 data->args.fh = NFS_FH(inode); 355 pagevec[i],
340 data->args.context = ctx; 356 pgbase, req_len);
341 data->args.lock_context = dreq->l_ctx; 357 if (IS_ERR(req)) {
342 data->args.offset = pos; 358 result = PTR_ERR(req);
343 data->args.pgbase = pgbase; 359 break;
344 data->args.pages = data->pagevec; 360 }
345 data->args.count = bytes; 361 req->wb_index = pos >> PAGE_SHIFT;
346 data->res.fattr = &data->fattr; 362 req->wb_offset = pos & ~PAGE_MASK;
347 data->res.eof = 0; 363 if (!nfs_pageio_add_request(desc, req)) {
348 data->res.count = bytes; 364 result = desc->pg_error;
349 nfs_fattr_init(&data->fattr); 365 nfs_release_request(req);
350 msg.rpc_argp = &data->args; 366 break;
351 msg.rpc_resp = &data->res; 367 }
352 368 pgbase = 0;
353 task_setup_data.task = &data->task; 369 bytes -= req_len;
354 task_setup_data.callback_data = data; 370 started += req_len;
355 NFS_PROTO(inode)->read_setup(data, &msg); 371 user_addr += req_len;
356 372 pos += req_len;
357 task = rpc_run_task(&task_setup_data); 373 count -= req_len;
358 if (IS_ERR(task)) 374 }
359 break; 375 /* The nfs_page now hold references to these pages */
360 rpc_put_task(task); 376 nfs_direct_release_pages(pagevec, npages);
361 377 } while (count != 0 && result >= 0);
362 dprintk("NFS: %5u initiated direct read call " 378
363 "(req %s/%Ld, %zu bytes @ offset %Lu)\n", 379 kfree(pagevec);
364 data->task.tk_pid,
365 inode->i_sb->s_id,
366 (long long)NFS_FILEID(inode),
367 bytes,
368 (unsigned long long)data->args.offset);
369
370 started += bytes;
371 user_addr += bytes;
372 pos += bytes;
373 /* FIXME: Remove this unnecessary math from final patch */
374 pgbase += bytes;
375 pgbase &= ~PAGE_MASK;
376 BUG_ON(pgbase != (user_addr & ~PAGE_MASK));
377
378 count -= bytes;
379 } while (count != 0);
380 380
381 if (started) 381 if (started)
382 return started; 382 return started;
@@ -388,15 +388,19 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
388 unsigned long nr_segs, 388 unsigned long nr_segs,
389 loff_t pos) 389 loff_t pos)
390{ 390{
391 struct nfs_pageio_descriptor desc;
391 ssize_t result = -EINVAL; 392 ssize_t result = -EINVAL;
392 size_t requested_bytes = 0; 393 size_t requested_bytes = 0;
393 unsigned long seg; 394 unsigned long seg;
394 395
396 nfs_pageio_init_read(&desc, dreq->inode,
397 &nfs_direct_read_completion_ops);
395 get_dreq(dreq); 398 get_dreq(dreq);
399 desc.pg_dreq = dreq;
396 400
397 for (seg = 0; seg < nr_segs; seg++) { 401 for (seg = 0; seg < nr_segs; seg++) {
398 const struct iovec *vec = &iov[seg]; 402 const struct iovec *vec = &iov[seg];
399 result = nfs_direct_read_schedule_segment(dreq, vec, pos); 403 result = nfs_direct_read_schedule_segment(&desc, vec, pos);
400 if (result < 0) 404 if (result < 0)
401 break; 405 break;
402 requested_bytes += result; 406 requested_bytes += result;
@@ -405,6 +409,8 @@ static ssize_t nfs_direct_read_schedule_iovec(struct nfs_direct_req *dreq,
405 pos += vec->iov_len; 409 pos += vec->iov_len;
406 } 410 }
407 411
412 nfs_pageio_complete(&desc);
413
408 /* 414 /*
409 * If no bytes were started, return the error, and let the 415 * If no bytes were started, return the error, and let the
410 * generic layer handle the completion. 416 * generic layer handle the completion.
@@ -441,104 +447,64 @@ static ssize_t nfs_direct_read(struct kiocb *iocb, const struct iovec *iov,
441 result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos); 447 result = nfs_direct_read_schedule_iovec(dreq, iov, nr_segs, pos);
442 if (!result) 448 if (!result)
443 result = nfs_direct_wait(dreq); 449 result = nfs_direct_wait(dreq);
450 NFS_I(inode)->read_io += result;
444out_release: 451out_release:
445 nfs_direct_req_release(dreq); 452 nfs_direct_req_release(dreq);
446out: 453out:
447 return result; 454 return result;
448} 455}
449 456
450static void nfs_direct_free_writedata(struct nfs_direct_req *dreq)
451{
452 while (!list_empty(&dreq->rewrite_list)) {
453 struct nfs_write_data *data = list_entry(dreq->rewrite_list.next, struct nfs_write_data, pages);
454 list_del(&data->pages);
455 nfs_direct_release_pages(data->pagevec, data->npages);
456 nfs_writedata_free(data);
457 }
458}
459
460#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 457#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
461static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq) 458static void nfs_direct_write_reschedule(struct nfs_direct_req *dreq)
462{ 459{
463 struct inode *inode = dreq->inode; 460 struct nfs_pageio_descriptor desc;
464 struct list_head *p; 461 struct nfs_page *req, *tmp;
465 struct nfs_write_data *data; 462 LIST_HEAD(reqs);
466 struct rpc_task *task; 463 struct nfs_commit_info cinfo;
467 struct rpc_message msg = { 464 LIST_HEAD(failed);
468 .rpc_cred = dreq->ctx->cred, 465
469 }; 466 nfs_init_cinfo_from_dreq(&cinfo, dreq);
470 struct rpc_task_setup task_setup_data = { 467 pnfs_recover_commit_reqs(dreq->inode, &reqs, &cinfo);
471 .rpc_client = NFS_CLIENT(inode), 468 spin_lock(cinfo.lock);
472 .rpc_message = &msg, 469 nfs_scan_commit_list(&cinfo.mds->list, &reqs, &cinfo, 0);
473 .callback_ops = &nfs_write_direct_ops, 470 spin_unlock(cinfo.lock);
474 .workqueue = nfsiod_workqueue,
475 .flags = RPC_TASK_ASYNC,
476 };
477 471
478 dreq->count = 0; 472 dreq->count = 0;
479 get_dreq(dreq); 473 get_dreq(dreq);
480 474
481 list_for_each(p, &dreq->rewrite_list) { 475 nfs_pageio_init_write(&desc, dreq->inode, FLUSH_STABLE,
482 data = list_entry(p, struct nfs_write_data, pages); 476 &nfs_direct_write_completion_ops);
483 477 desc.pg_dreq = dreq;
484 get_dreq(dreq); 478
485 479 list_for_each_entry_safe(req, tmp, &reqs, wb_list) {
486 /* Use stable writes */ 480 if (!nfs_pageio_add_request(&desc, req)) {
487 data->args.stable = NFS_FILE_SYNC; 481 nfs_list_add_request(req, &failed);
488 482 spin_lock(cinfo.lock);
489 /* 483 dreq->flags = 0;
490 * Reset data->res. 484 dreq->error = -EIO;
491 */ 485 spin_unlock(cinfo.lock);
492 nfs_fattr_init(&data->fattr); 486 }
493 data->res.count = data->args.count;
494 memset(&data->verf, 0, sizeof(data->verf));
495
496 /*
497 * Reuse data->task; data->args should not have changed
498 * since the original request was sent.
499 */
500 task_setup_data.task = &data->task;
501 task_setup_data.callback_data = data;
502 msg.rpc_argp = &data->args;
503 msg.rpc_resp = &data->res;
504 NFS_PROTO(inode)->write_setup(data, &msg);
505
506 /*
507 * We're called via an RPC callback, so BKL is already held.
508 */
509 task = rpc_run_task(&task_setup_data);
510 if (!IS_ERR(task))
511 rpc_put_task(task);
512
513 dprintk("NFS: %5u rescheduled direct write call (req %s/%Ld, %u bytes @ offset %Lu)\n",
514 data->task.tk_pid,
515 inode->i_sb->s_id,
516 (long long)NFS_FILEID(inode),
517 data->args.count,
518 (unsigned long long)data->args.offset);
519 } 487 }
488 nfs_pageio_complete(&desc);
520 489
521 if (put_dreq(dreq)) 490 while (!list_empty(&failed))
522 nfs_direct_write_complete(dreq, inode); 491 nfs_unlock_and_release_request(req);
523}
524
525static void nfs_direct_commit_result(struct rpc_task *task, void *calldata)
526{
527 struct nfs_write_data *data = calldata;
528 492
529 /* Call the NFS version-specific code */ 493 if (put_dreq(dreq))
530 NFS_PROTO(data->inode)->commit_done(task, data); 494 nfs_direct_write_complete(dreq, dreq->inode);
531} 495}
532 496
533static void nfs_direct_commit_release(void *calldata) 497static void nfs_direct_commit_complete(struct nfs_commit_data *data)
534{ 498{
535 struct nfs_write_data *data = calldata; 499 struct nfs_direct_req *dreq = data->dreq;
536 struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req; 500 struct nfs_commit_info cinfo;
501 struct nfs_page *req;
537 int status = data->task.tk_status; 502 int status = data->task.tk_status;
538 503
504 nfs_init_cinfo_from_dreq(&cinfo, dreq);
539 if (status < 0) { 505 if (status < 0) {
540 dprintk("NFS: %5u commit failed with error %d.\n", 506 dprintk("NFS: %5u commit failed with error %d.\n",
541 data->task.tk_pid, status); 507 data->task.tk_pid, status);
542 dreq->flags = NFS_ODIRECT_RESCHED_WRITES; 508 dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
543 } else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) { 509 } else if (memcmp(&dreq->verf, &data->verf, sizeof(data->verf))) {
544 dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid); 510 dprintk("NFS: %5u commit verify failed\n", data->task.tk_pid);
@@ -546,62 +512,47 @@ static void nfs_direct_commit_release(void *calldata)
546 } 512 }
547 513
548 dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status); 514 dprintk("NFS: %5u commit returned %d\n", data->task.tk_pid, status);
549 nfs_direct_write_complete(dreq, data->inode); 515 while (!list_empty(&data->pages)) {
550 nfs_commit_free(data); 516 req = nfs_list_entry(data->pages.next);
517 nfs_list_remove_request(req);
518 if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES) {
519 /* Note the rewrite will go through mds */
520 kref_get(&req->wb_kref);
521 nfs_mark_request_commit(req, NULL, &cinfo);
522 }
523 nfs_unlock_and_release_request(req);
524 }
525
526 if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
527 nfs_direct_write_complete(dreq, data->inode);
528}
529
530static void nfs_direct_error_cleanup(struct nfs_inode *nfsi)
531{
532 /* There is no lock to clear */
551} 533}
552 534
553static const struct rpc_call_ops nfs_commit_direct_ops = { 535static const struct nfs_commit_completion_ops nfs_direct_commit_completion_ops = {
554 .rpc_call_prepare = nfs_write_prepare, 536 .completion = nfs_direct_commit_complete,
555 .rpc_call_done = nfs_direct_commit_result, 537 .error_cleanup = nfs_direct_error_cleanup,
556 .rpc_release = nfs_direct_commit_release,
557}; 538};
558 539
559static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq) 540static void nfs_direct_commit_schedule(struct nfs_direct_req *dreq)
560{ 541{
561 struct nfs_write_data *data = dreq->commit_data; 542 int res;
562 struct rpc_task *task; 543 struct nfs_commit_info cinfo;
563 struct rpc_message msg = { 544 LIST_HEAD(mds_list);
564 .rpc_argp = &data->args, 545
565 .rpc_resp = &data->res, 546 nfs_init_cinfo_from_dreq(&cinfo, dreq);
566 .rpc_cred = dreq->ctx->cred, 547 nfs_scan_commit(dreq->inode, &mds_list, &cinfo);
567 }; 548 res = nfs_generic_commit_list(dreq->inode, &mds_list, 0, &cinfo);
568 struct rpc_task_setup task_setup_data = { 549 if (res < 0) /* res == -ENOMEM */
569 .task = &data->task, 550 nfs_direct_write_reschedule(dreq);
570 .rpc_client = NFS_CLIENT(dreq->inode),
571 .rpc_message = &msg,
572 .callback_ops = &nfs_commit_direct_ops,
573 .callback_data = data,
574 .workqueue = nfsiod_workqueue,
575 .flags = RPC_TASK_ASYNC,
576 };
577
578 data->inode = dreq->inode;
579 data->cred = msg.rpc_cred;
580
581 data->args.fh = NFS_FH(data->inode);
582 data->args.offset = 0;
583 data->args.count = 0;
584 data->args.context = dreq->ctx;
585 data->args.lock_context = dreq->l_ctx;
586 data->res.count = 0;
587 data->res.fattr = &data->fattr;
588 data->res.verf = &data->verf;
589 nfs_fattr_init(&data->fattr);
590
591 NFS_PROTO(data->inode)->commit_setup(data, &msg);
592
593 /* Note: task.tk_ops->rpc_release will free dreq->commit_data */
594 dreq->commit_data = NULL;
595
596 dprintk("NFS: %5u initiated commit call\n", data->task.tk_pid);
597
598 task = rpc_run_task(&task_setup_data);
599 if (!IS_ERR(task))
600 rpc_put_task(task);
601} 551}
602 552
603static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode) 553static void nfs_direct_write_schedule_work(struct work_struct *work)
604{ 554{
555 struct nfs_direct_req *dreq = container_of(work, struct nfs_direct_req, work);
605 int flags = dreq->flags; 556 int flags = dreq->flags;
606 557
607 dreq->flags = 0; 558 dreq->flags = 0;
@@ -613,89 +564,32 @@ static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode
613 nfs_direct_write_reschedule(dreq); 564 nfs_direct_write_reschedule(dreq);
614 break; 565 break;
615 default: 566 default:
616 if (dreq->commit_data != NULL) 567 nfs_zap_mapping(dreq->inode, dreq->inode->i_mapping);
617 nfs_commit_free(dreq->commit_data);
618 nfs_direct_free_writedata(dreq);
619 nfs_zap_mapping(inode, inode->i_mapping);
620 nfs_direct_complete(dreq); 568 nfs_direct_complete(dreq);
621 } 569 }
622} 570}
623 571
624static void nfs_alloc_commit_data(struct nfs_direct_req *dreq) 572static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
625{ 573{
626 dreq->commit_data = nfs_commitdata_alloc(); 574 schedule_work(&dreq->work); /* Calls nfs_direct_write_schedule_work */
627 if (dreq->commit_data != NULL)
628 dreq->commit_data->req = (struct nfs_page *) dreq;
629} 575}
576
630#else 577#else
631static inline void nfs_alloc_commit_data(struct nfs_direct_req *dreq) 578static void nfs_direct_write_schedule_work(struct work_struct *work)
632{ 579{
633 dreq->commit_data = NULL;
634} 580}
635 581
636static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode) 582static void nfs_direct_write_complete(struct nfs_direct_req *dreq, struct inode *inode)
637{ 583{
638 nfs_direct_free_writedata(dreq);
639 nfs_zap_mapping(inode, inode->i_mapping); 584 nfs_zap_mapping(inode, inode->i_mapping);
640 nfs_direct_complete(dreq); 585 nfs_direct_complete(dreq);
641} 586}
642#endif 587#endif
643 588
644static void nfs_direct_write_result(struct rpc_task *task, void *calldata)
645{
646 struct nfs_write_data *data = calldata;
647
648 nfs_writeback_done(task, data);
649}
650
651/* 589/*
652 * NB: Return the value of the first error return code. Subsequent 590 * NB: Return the value of the first error return code. Subsequent
653 * errors after the first one are ignored. 591 * errors after the first one are ignored.
654 */ 592 */
655static void nfs_direct_write_release(void *calldata)
656{
657 struct nfs_write_data *data = calldata;
658 struct nfs_direct_req *dreq = (struct nfs_direct_req *) data->req;
659 int status = data->task.tk_status;
660
661 spin_lock(&dreq->lock);
662
663 if (unlikely(status < 0)) {
664 /* An error has occurred, so we should not commit */
665 dreq->flags = 0;
666 dreq->error = status;
667 }
668 if (unlikely(dreq->error != 0))
669 goto out_unlock;
670
671 dreq->count += data->res.count;
672
673 if (data->res.verf->committed != NFS_FILE_SYNC) {
674 switch (dreq->flags) {
675 case 0:
676 memcpy(&dreq->verf, &data->verf, sizeof(dreq->verf));
677 dreq->flags = NFS_ODIRECT_DO_COMMIT;
678 break;
679 case NFS_ODIRECT_DO_COMMIT:
680 if (memcmp(&dreq->verf, &data->verf, sizeof(dreq->verf))) {
681 dprintk("NFS: %5u write verify failed\n", data->task.tk_pid);
682 dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
683 }
684 }
685 }
686out_unlock:
687 spin_unlock(&dreq->lock);
688
689 if (put_dreq(dreq))
690 nfs_direct_write_complete(dreq, data->inode);
691}
692
693static const struct rpc_call_ops nfs_write_direct_ops = {
694 .rpc_call_prepare = nfs_write_prepare,
695 .rpc_call_done = nfs_direct_write_result,
696 .rpc_release = nfs_direct_write_release,
697};
698
699/* 593/*
700 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE 594 * For each wsize'd chunk of the user's buffer, dispatch an NFS WRITE
701 * operation. If nfs_writedata_alloc() or get_user_pages() fails, 595 * operation. If nfs_writedata_alloc() or get_user_pages() fails,
@@ -703,132 +597,187 @@ static const struct rpc_call_ops nfs_write_direct_ops = {
703 * handled automatically by nfs_direct_write_result(). Otherwise, if 597 * handled automatically by nfs_direct_write_result(). Otherwise, if
704 * no requests have been sent, just return an error. 598 * no requests have been sent, just return an error.
705 */ 599 */
706static ssize_t nfs_direct_write_schedule_segment(struct nfs_direct_req *dreq, 600static ssize_t nfs_direct_write_schedule_segment(struct nfs_pageio_descriptor *desc,
707 const struct iovec *iov, 601 const struct iovec *iov,
708 loff_t pos, int sync) 602 loff_t pos)
709{ 603{
604 struct nfs_direct_req *dreq = desc->pg_dreq;
710 struct nfs_open_context *ctx = dreq->ctx; 605 struct nfs_open_context *ctx = dreq->ctx;
711 struct inode *inode = ctx->dentry->d_inode; 606 struct inode *inode = ctx->dentry->d_inode;
712 unsigned long user_addr = (unsigned long)iov->iov_base; 607 unsigned long user_addr = (unsigned long)iov->iov_base;
713 size_t count = iov->iov_len; 608 size_t count = iov->iov_len;
714 struct rpc_task *task;
715 struct rpc_message msg = {
716 .rpc_cred = ctx->cred,
717 };
718 struct rpc_task_setup task_setup_data = {
719 .rpc_client = NFS_CLIENT(inode),
720 .rpc_message = &msg,
721 .callback_ops = &nfs_write_direct_ops,
722 .workqueue = nfsiod_workqueue,
723 .flags = RPC_TASK_ASYNC,
724 };
725 size_t wsize = NFS_SERVER(inode)->wsize; 609 size_t wsize = NFS_SERVER(inode)->wsize;
726 unsigned int pgbase; 610 unsigned int pgbase;
727 int result; 611 int result;
728 ssize_t started = 0; 612 ssize_t started = 0;
613 struct page **pagevec = NULL;
614 unsigned int npages;
729 615
730 do { 616 do {
731 struct nfs_write_data *data;
732 size_t bytes; 617 size_t bytes;
618 int i;
733 619
734 pgbase = user_addr & ~PAGE_MASK; 620 pgbase = user_addr & ~PAGE_MASK;
735 bytes = min(wsize,count); 621 bytes = min(max_t(size_t, wsize, PAGE_SIZE), count);
736 622
737 result = -ENOMEM; 623 result = -ENOMEM;
738 data = nfs_writedata_alloc(nfs_page_array_len(pgbase, bytes)); 624 npages = nfs_page_array_len(pgbase, bytes);
739 if (unlikely(!data)) 625 if (!pagevec)
626 pagevec = kmalloc(npages * sizeof(struct page *), GFP_KERNEL);
627 if (!pagevec)
740 break; 628 break;
741 629
742 down_read(&current->mm->mmap_sem); 630 down_read(&current->mm->mmap_sem);
743 result = get_user_pages(current, current->mm, user_addr, 631 result = get_user_pages(current, current->mm, user_addr,
744 data->npages, 0, 0, data->pagevec, NULL); 632 npages, 0, 0, pagevec, NULL);
745 up_read(&current->mm->mmap_sem); 633 up_read(&current->mm->mmap_sem);
746 if (result < 0) { 634 if (result < 0)
747 nfs_writedata_free(data);
748 break; 635 break;
749 } 636
750 if ((unsigned)result < data->npages) { 637 if ((unsigned)result < npages) {
751 bytes = result * PAGE_SIZE; 638 bytes = result * PAGE_SIZE;
752 if (bytes <= pgbase) { 639 if (bytes <= pgbase) {
753 nfs_direct_release_pages(data->pagevec, result); 640 nfs_direct_release_pages(pagevec, result);
754 nfs_writedata_free(data);
755 break; 641 break;
756 } 642 }
757 bytes -= pgbase; 643 bytes -= pgbase;
758 data->npages = result; 644 npages = result;
759 } 645 }
760 646
761 get_dreq(dreq); 647 for (i = 0; i < npages; i++) {
762 648 struct nfs_page *req;
763 list_move_tail(&data->pages, &dreq->rewrite_list); 649 unsigned int req_len = min_t(size_t, bytes, PAGE_SIZE - pgbase);
764
765 data->req = (struct nfs_page *) dreq;
766 data->inode = inode;
767 data->cred = msg.rpc_cred;
768 data->args.fh = NFS_FH(inode);
769 data->args.context = ctx;
770 data->args.lock_context = dreq->l_ctx;
771 data->args.offset = pos;
772 data->args.pgbase = pgbase;
773 data->args.pages = data->pagevec;
774 data->args.count = bytes;
775 data->args.stable = sync;
776 data->res.fattr = &data->fattr;
777 data->res.count = bytes;
778 data->res.verf = &data->verf;
779 nfs_fattr_init(&data->fattr);
780
781 task_setup_data.task = &data->task;
782 task_setup_data.callback_data = data;
783 msg.rpc_argp = &data->args;
784 msg.rpc_resp = &data->res;
785 NFS_PROTO(inode)->write_setup(data, &msg);
786
787 task = rpc_run_task(&task_setup_data);
788 if (IS_ERR(task))
789 break;
790 rpc_put_task(task);
791
792 dprintk("NFS: %5u initiated direct write call "
793 "(req %s/%Ld, %zu bytes @ offset %Lu)\n",
794 data->task.tk_pid,
795 inode->i_sb->s_id,
796 (long long)NFS_FILEID(inode),
797 bytes,
798 (unsigned long long)data->args.offset);
799 650
800 started += bytes; 651 req = nfs_create_request(dreq->ctx, dreq->inode,
801 user_addr += bytes; 652 pagevec[i],
802 pos += bytes; 653 pgbase, req_len);
803 654 if (IS_ERR(req)) {
804 /* FIXME: Remove this useless math from the final patch */ 655 result = PTR_ERR(req);
805 pgbase += bytes; 656 break;
806 pgbase &= ~PAGE_MASK; 657 }
807 BUG_ON(pgbase != (user_addr & ~PAGE_MASK)); 658 nfs_lock_request(req);
659 req->wb_index = pos >> PAGE_SHIFT;
660 req->wb_offset = pos & ~PAGE_MASK;
661 if (!nfs_pageio_add_request(desc, req)) {
662 result = desc->pg_error;
663 nfs_unlock_and_release_request(req);
664 break;
665 }
666 pgbase = 0;
667 bytes -= req_len;
668 started += req_len;
669 user_addr += req_len;
670 pos += req_len;
671 count -= req_len;
672 }
673 /* The nfs_page now hold references to these pages */
674 nfs_direct_release_pages(pagevec, npages);
675 } while (count != 0 && result >= 0);
808 676
809 count -= bytes; 677 kfree(pagevec);
810 } while (count != 0);
811 678
812 if (started) 679 if (started)
813 return started; 680 return started;
814 return result < 0 ? (ssize_t) result : -EFAULT; 681 return result < 0 ? (ssize_t) result : -EFAULT;
815} 682}
816 683
684static void nfs_direct_write_completion(struct nfs_pgio_header *hdr)
685{
686 struct nfs_direct_req *dreq = hdr->dreq;
687 struct nfs_commit_info cinfo;
688 int bit = -1;
689 struct nfs_page *req = nfs_list_entry(hdr->pages.next);
690
691 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
692 goto out_put;
693
694 nfs_init_cinfo_from_dreq(&cinfo, dreq);
695
696 spin_lock(&dreq->lock);
697
698 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
699 dreq->flags = 0;
700 dreq->error = hdr->error;
701 }
702 if (dreq->error != 0)
703 bit = NFS_IOHDR_ERROR;
704 else {
705 dreq->count += hdr->good_bytes;
706 if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
707 dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
708 bit = NFS_IOHDR_NEED_RESCHED;
709 } else if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
710 if (dreq->flags == NFS_ODIRECT_RESCHED_WRITES)
711 bit = NFS_IOHDR_NEED_RESCHED;
712 else if (dreq->flags == 0) {
713 memcpy(&dreq->verf, &req->wb_verf,
714 sizeof(dreq->verf));
715 bit = NFS_IOHDR_NEED_COMMIT;
716 dreq->flags = NFS_ODIRECT_DO_COMMIT;
717 } else if (dreq->flags == NFS_ODIRECT_DO_COMMIT) {
718 if (memcmp(&dreq->verf, &req->wb_verf, sizeof(dreq->verf))) {
719 dreq->flags = NFS_ODIRECT_RESCHED_WRITES;
720 bit = NFS_IOHDR_NEED_RESCHED;
721 } else
722 bit = NFS_IOHDR_NEED_COMMIT;
723 }
724 }
725 }
726 spin_unlock(&dreq->lock);
727
728 while (!list_empty(&hdr->pages)) {
729 req = nfs_list_entry(hdr->pages.next);
730 nfs_list_remove_request(req);
731 switch (bit) {
732 case NFS_IOHDR_NEED_RESCHED:
733 case NFS_IOHDR_NEED_COMMIT:
734 kref_get(&req->wb_kref);
735 nfs_mark_request_commit(req, hdr->lseg, &cinfo);
736 }
737 nfs_unlock_and_release_request(req);
738 }
739
740out_put:
741 if (put_dreq(dreq))
742 nfs_direct_write_complete(dreq, hdr->inode);
743 hdr->release(hdr);
744}
745
746static void nfs_write_sync_pgio_error(struct list_head *head)
747{
748 struct nfs_page *req;
749
750 while (!list_empty(head)) {
751 req = nfs_list_entry(head->next);
752 nfs_list_remove_request(req);
753 nfs_unlock_and_release_request(req);
754 }
755}
756
757static const struct nfs_pgio_completion_ops nfs_direct_write_completion_ops = {
758 .error_cleanup = nfs_write_sync_pgio_error,
759 .init_hdr = nfs_direct_pgio_init,
760 .completion = nfs_direct_write_completion,
761};
762
817static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq, 763static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
818 const struct iovec *iov, 764 const struct iovec *iov,
819 unsigned long nr_segs, 765 unsigned long nr_segs,
820 loff_t pos, int sync) 766 loff_t pos)
821{ 767{
768 struct nfs_pageio_descriptor desc;
822 ssize_t result = 0; 769 ssize_t result = 0;
823 size_t requested_bytes = 0; 770 size_t requested_bytes = 0;
824 unsigned long seg; 771 unsigned long seg;
825 772
773 nfs_pageio_init_write(&desc, dreq->inode, FLUSH_COND_STABLE,
774 &nfs_direct_write_completion_ops);
775 desc.pg_dreq = dreq;
826 get_dreq(dreq); 776 get_dreq(dreq);
827 777
828 for (seg = 0; seg < nr_segs; seg++) { 778 for (seg = 0; seg < nr_segs; seg++) {
829 const struct iovec *vec = &iov[seg]; 779 const struct iovec *vec = &iov[seg];
830 result = nfs_direct_write_schedule_segment(dreq, vec, 780 result = nfs_direct_write_schedule_segment(&desc, vec, pos);
831 pos, sync);
832 if (result < 0) 781 if (result < 0)
833 break; 782 break;
834 requested_bytes += result; 783 requested_bytes += result;
@@ -836,6 +785,8 @@ static ssize_t nfs_direct_write_schedule_iovec(struct nfs_direct_req *dreq,
836 break; 785 break;
837 pos += vec->iov_len; 786 pos += vec->iov_len;
838 } 787 }
788 nfs_pageio_complete(&desc);
789 NFS_I(dreq->inode)->write_io += desc.pg_bytes_written;
839 790
840 /* 791 /*
841 * If no bytes were started, return the error, and let the 792 * If no bytes were started, return the error, and let the
@@ -858,16 +809,10 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
858 ssize_t result = -ENOMEM; 809 ssize_t result = -ENOMEM;
859 struct inode *inode = iocb->ki_filp->f_mapping->host; 810 struct inode *inode = iocb->ki_filp->f_mapping->host;
860 struct nfs_direct_req *dreq; 811 struct nfs_direct_req *dreq;
861 size_t wsize = NFS_SERVER(inode)->wsize;
862 int sync = NFS_UNSTABLE;
863 812
864 dreq = nfs_direct_req_alloc(); 813 dreq = nfs_direct_req_alloc();
865 if (!dreq) 814 if (!dreq)
866 goto out; 815 goto out;
867 nfs_alloc_commit_data(dreq);
868
869 if (dreq->commit_data == NULL || count <= wsize)
870 sync = NFS_FILE_SYNC;
871 816
872 dreq->inode = inode; 817 dreq->inode = inode;
873 dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp)); 818 dreq->ctx = get_nfs_open_context(nfs_file_open_context(iocb->ki_filp));
@@ -877,7 +822,7 @@ static ssize_t nfs_direct_write(struct kiocb *iocb, const struct iovec *iov,
877 if (!is_sync_kiocb(iocb)) 822 if (!is_sync_kiocb(iocb))
878 dreq->iocb = iocb; 823 dreq->iocb = iocb;
879 824
880 result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos, sync); 825 result = nfs_direct_write_schedule_iovec(dreq, iov, nr_segs, pos);
881 if (!result) 826 if (!result)
882 result = nfs_direct_wait(dreq); 827 result = nfs_direct_wait(dreq);
883out_release: 828out_release:
@@ -997,10 +942,15 @@ ssize_t nfs_file_direct_write(struct kiocb *iocb, const struct iovec *iov,
997 task_io_account_write(count); 942 task_io_account_write(count);
998 943
999 retval = nfs_direct_write(iocb, iov, nr_segs, pos, count); 944 retval = nfs_direct_write(iocb, iov, nr_segs, pos, count);
945 if (retval > 0) {
946 struct inode *inode = mapping->host;
1000 947
1001 if (retval > 0)
1002 iocb->ki_pos = pos + retval; 948 iocb->ki_pos = pos + retval;
1003 949 spin_lock(&inode->i_lock);
950 if (i_size_read(inode) < iocb->ki_pos)
951 i_size_write(inode, iocb->ki_pos);
952 spin_unlock(&inode->i_lock);
953 }
1004out: 954out:
1005 return retval; 955 return retval;
1006} 956}
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index aa9b709fd328..56311ca5f9f8 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -174,6 +174,13 @@ nfs_file_flush(struct file *file, fl_owner_t id)
174 if ((file->f_mode & FMODE_WRITE) == 0) 174 if ((file->f_mode & FMODE_WRITE) == 0)
175 return 0; 175 return 0;
176 176
177 /*
178 * If we're holding a write delegation, then just start the i/o
179 * but don't wait for completion (or send a commit).
180 */
181 if (nfs_have_delegation(inode, FMODE_WRITE))
182 return filemap_fdatawrite(file->f_mapping);
183
177 /* Flush writes to the server and return any errors */ 184 /* Flush writes to the server and return any errors */
178 return vfs_fsync(file, 0); 185 return vfs_fsync(file, 0);
179} 186}
@@ -417,6 +424,7 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
417 424
418 if (status < 0) 425 if (status < 0)
419 return status; 426 return status;
427 NFS_I(mapping->host)->write_io += copied;
420 return copied; 428 return copied;
421} 429}
422 430
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
index ae65c16b3670..c817787fbdb4 100644
--- a/fs/nfs/fscache.c
+++ b/fs/nfs/fscache.c
@@ -64,23 +64,12 @@ void nfs_fscache_release_client_cookie(struct nfs_client *clp)
64 * either by the 'fsc=xxx' option to mount, or by inheriting it from the parent 64 * either by the 'fsc=xxx' option to mount, or by inheriting it from the parent
65 * superblock across an automount point of some nature. 65 * superblock across an automount point of some nature.
66 */ 66 */
67void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, 67void nfs_fscache_get_super_cookie(struct super_block *sb, const char *uniq, int ulen)
68 struct nfs_clone_mount *mntdata)
69{ 68{
70 struct nfs_fscache_key *key, *xkey; 69 struct nfs_fscache_key *key, *xkey;
71 struct nfs_server *nfss = NFS_SB(sb); 70 struct nfs_server *nfss = NFS_SB(sb);
72 struct rb_node **p, *parent; 71 struct rb_node **p, *parent;
73 int diff, ulen; 72 int diff;
74
75 if (uniq) {
76 ulen = strlen(uniq);
77 } else if (mntdata) {
78 struct nfs_server *mnt_s = NFS_SB(mntdata->sb);
79 if (mnt_s->fscache_key) {
80 uniq = mnt_s->fscache_key->key.uniquifier;
81 ulen = mnt_s->fscache_key->key.uniq_len;
82 }
83 }
84 73
85 if (!uniq) { 74 if (!uniq) {
86 uniq = ""; 75 uniq = "";
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
index b9c572d0679f..c5b11b53ff33 100644
--- a/fs/nfs/fscache.h
+++ b/fs/nfs/fscache.h
@@ -73,9 +73,7 @@ extern void nfs_fscache_unregister(void);
73extern void nfs_fscache_get_client_cookie(struct nfs_client *); 73extern void nfs_fscache_get_client_cookie(struct nfs_client *);
74extern void nfs_fscache_release_client_cookie(struct nfs_client *); 74extern void nfs_fscache_release_client_cookie(struct nfs_client *);
75 75
76extern void nfs_fscache_get_super_cookie(struct super_block *, 76extern void nfs_fscache_get_super_cookie(struct super_block *, const char *, int);
77 const char *,
78 struct nfs_clone_mount *);
79extern void nfs_fscache_release_super_cookie(struct super_block *); 77extern void nfs_fscache_release_super_cookie(struct super_block *);
80 78
81extern void nfs_fscache_init_inode_cookie(struct inode *); 79extern void nfs_fscache_init_inode_cookie(struct inode *);
@@ -172,12 +170,6 @@ static inline void nfs_fscache_unregister(void) {}
172static inline void nfs_fscache_get_client_cookie(struct nfs_client *clp) {} 170static inline void nfs_fscache_get_client_cookie(struct nfs_client *clp) {}
173static inline void nfs_fscache_release_client_cookie(struct nfs_client *clp) {} 171static inline void nfs_fscache_release_client_cookie(struct nfs_client *clp) {}
174 172
175static inline void nfs_fscache_get_super_cookie(
176 struct super_block *sb,
177 const char *uniq,
178 struct nfs_clone_mount *mntdata)
179{
180}
181static inline void nfs_fscache_release_super_cookie(struct super_block *sb) {} 173static inline void nfs_fscache_release_super_cookie(struct super_block *sb) {}
182 174
183static inline void nfs_fscache_init_inode_cookie(struct inode *inode) {} 175static inline void nfs_fscache_init_inode_cookie(struct inode *inode) {}
diff --git a/fs/nfs/getroot.c b/fs/nfs/getroot.c
index 4ca6f5c8038e..8abfb19bd3aa 100644
--- a/fs/nfs/getroot.c
+++ b/fs/nfs/getroot.c
@@ -150,7 +150,7 @@ int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh)
150 goto out; 150 goto out;
151 151
152 /* Start by getting the root filehandle from the server */ 152 /* Start by getting the root filehandle from the server */
153 ret = server->nfs_client->rpc_ops->getroot(server, mntfh, &fsinfo); 153 ret = nfs4_proc_get_rootfh(server, mntfh, &fsinfo);
154 if (ret < 0) { 154 if (ret < 0) {
155 dprintk("nfs4_get_rootfh: getroot error = %d\n", -ret); 155 dprintk("nfs4_get_rootfh: getroot error = %d\n", -ret);
156 goto out; 156 goto out;
@@ -178,87 +178,4 @@ out:
178 return ret; 178 return ret;
179} 179}
180 180
181/*
182 * get an NFS4 root dentry from the root filehandle
183 */
184struct dentry *nfs4_get_root(struct super_block *sb, struct nfs_fh *mntfh,
185 const char *devname)
186{
187 struct nfs_server *server = NFS_SB(sb);
188 struct nfs_fattr *fattr = NULL;
189 struct dentry *ret;
190 struct inode *inode;
191 void *name = kstrdup(devname, GFP_KERNEL);
192 int error;
193
194 dprintk("--> nfs4_get_root()\n");
195
196 if (!name)
197 return ERR_PTR(-ENOMEM);
198
199 /* get the info about the server and filesystem */
200 error = nfs4_server_capabilities(server, mntfh);
201 if (error < 0) {
202 dprintk("nfs_get_root: getcaps error = %d\n",
203 -error);
204 kfree(name);
205 return ERR_PTR(error);
206 }
207
208 fattr = nfs_alloc_fattr();
209 if (fattr == NULL) {
210 kfree(name);
211 return ERR_PTR(-ENOMEM);
212 }
213
214 /* get the actual root for this mount */
215 error = server->nfs_client->rpc_ops->getattr(server, mntfh, fattr);
216 if (error < 0) {
217 dprintk("nfs_get_root: getattr error = %d\n", -error);
218 ret = ERR_PTR(error);
219 goto out;
220 }
221
222 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
223 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
224 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
225
226 inode = nfs_fhget(sb, mntfh, fattr);
227 if (IS_ERR(inode)) {
228 dprintk("nfs_get_root: get root inode failed\n");
229 ret = ERR_CAST(inode);
230 goto out;
231 }
232
233 error = nfs_superblock_set_dummy_root(sb, inode);
234 if (error != 0) {
235 ret = ERR_PTR(error);
236 goto out;
237 }
238
239 /* root dentries normally start off anonymous and get spliced in later
240 * if the dentry tree reaches them; however if the dentry already
241 * exists, we'll pick it up at this point and use it as the root
242 */
243 ret = d_obtain_alias(inode);
244 if (IS_ERR(ret)) {
245 dprintk("nfs_get_root: get root dentry failed\n");
246 goto out;
247 }
248
249 security_d_instantiate(ret, inode);
250 spin_lock(&ret->d_lock);
251 if (IS_ROOT(ret) && !(ret->d_flags & DCACHE_NFSFS_RENAMED)) {
252 ret->d_fsdata = name;
253 name = NULL;
254 }
255 spin_unlock(&ret->d_lock);
256out:
257 if (name)
258 kfree(name);
259 nfs_free_fattr(fattr);
260 dprintk("<-- nfs4_get_root()\n");
261 return ret;
262}
263
264#endif /* CONFIG_NFS_V4 */ 181#endif /* CONFIG_NFS_V4 */
diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
index ba3019f5934c..b5b86a05059c 100644
--- a/fs/nfs/idmap.c
+++ b/fs/nfs/idmap.c
@@ -415,7 +415,7 @@ static int __nfs_idmap_register(struct dentry *dir,
415static void nfs_idmap_unregister(struct nfs_client *clp, 415static void nfs_idmap_unregister(struct nfs_client *clp,
416 struct rpc_pipe *pipe) 416 struct rpc_pipe *pipe)
417{ 417{
418 struct net *net = clp->net; 418 struct net *net = clp->cl_net;
419 struct super_block *pipefs_sb; 419 struct super_block *pipefs_sb;
420 420
421 pipefs_sb = rpc_get_sb_net(net); 421 pipefs_sb = rpc_get_sb_net(net);
@@ -429,7 +429,7 @@ static int nfs_idmap_register(struct nfs_client *clp,
429 struct idmap *idmap, 429 struct idmap *idmap,
430 struct rpc_pipe *pipe) 430 struct rpc_pipe *pipe)
431{ 431{
432 struct net *net = clp->net; 432 struct net *net = clp->cl_net;
433 struct super_block *pipefs_sb; 433 struct super_block *pipefs_sb;
434 int err = 0; 434 int err = 0;
435 435
@@ -530,9 +530,25 @@ static struct nfs_client *nfs_get_client_for_event(struct net *net, int event)
530 struct nfs_net *nn = net_generic(net, nfs_net_id); 530 struct nfs_net *nn = net_generic(net, nfs_net_id);
531 struct dentry *cl_dentry; 531 struct dentry *cl_dentry;
532 struct nfs_client *clp; 532 struct nfs_client *clp;
533 int err;
533 534
535restart:
534 spin_lock(&nn->nfs_client_lock); 536 spin_lock(&nn->nfs_client_lock);
535 list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) { 537 list_for_each_entry(clp, &nn->nfs_client_list, cl_share_link) {
538 /* Wait for initialisation to finish */
539 if (clp->cl_cons_state == NFS_CS_INITING) {
540 atomic_inc(&clp->cl_count);
541 spin_unlock(&nn->nfs_client_lock);
542 err = nfs_wait_client_init_complete(clp);
543 nfs_put_client(clp);
544 if (err)
545 return NULL;
546 goto restart;
547 }
548 /* Skip nfs_clients that failed to initialise */
549 if (clp->cl_cons_state < 0)
550 continue;
551 smp_rmb();
536 if (clp->rpc_ops != &nfs_v4_clientops) 552 if (clp->rpc_ops != &nfs_v4_clientops)
537 continue; 553 continue;
538 cl_dentry = clp->cl_idmap->idmap_pipe->dentry; 554 cl_dentry = clp->cl_idmap->idmap_pipe->dentry;
@@ -640,20 +656,16 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
640 struct idmap_msg *im; 656 struct idmap_msg *im;
641 struct idmap *idmap = (struct idmap *)aux; 657 struct idmap *idmap = (struct idmap *)aux;
642 struct key *key = cons->key; 658 struct key *key = cons->key;
643 int ret; 659 int ret = -ENOMEM;
644 660
645 /* msg and im are freed in idmap_pipe_destroy_msg */ 661 /* msg and im are freed in idmap_pipe_destroy_msg */
646 msg = kmalloc(sizeof(*msg), GFP_KERNEL); 662 msg = kmalloc(sizeof(*msg), GFP_KERNEL);
647 if (IS_ERR(msg)) { 663 if (!msg)
648 ret = PTR_ERR(msg);
649 goto out0; 664 goto out0;
650 }
651 665
652 im = kmalloc(sizeof(*im), GFP_KERNEL); 666 im = kmalloc(sizeof(*im), GFP_KERNEL);
653 if (IS_ERR(im)) { 667 if (!im)
654 ret = PTR_ERR(im);
655 goto out1; 668 goto out1;
656 }
657 669
658 ret = nfs_idmap_prepare_message(key->description, im, msg); 670 ret = nfs_idmap_prepare_message(key->description, im, msg);
659 if (ret < 0) 671 if (ret < 0)
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index e8bbfa5b3500..2f6f78c4b42d 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -121,7 +121,7 @@ static void nfs_clear_inode(struct inode *inode)
121void nfs_evict_inode(struct inode *inode) 121void nfs_evict_inode(struct inode *inode)
122{ 122{
123 truncate_inode_pages(&inode->i_data, 0); 123 truncate_inode_pages(&inode->i_data, 0);
124 end_writeback(inode); 124 clear_inode(inode);
125 nfs_clear_inode(inode); 125 nfs_clear_inode(inode);
126} 126}
127 127
@@ -285,9 +285,7 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
285 inode->i_mode = fattr->mode; 285 inode->i_mode = fattr->mode;
286 if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0 286 if ((fattr->valid & NFS_ATTR_FATTR_MODE) == 0
287 && nfs_server_capable(inode, NFS_CAP_MODE)) 287 && nfs_server_capable(inode, NFS_CAP_MODE))
288 nfsi->cache_validity |= NFS_INO_INVALID_ATTR 288 nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
289 | NFS_INO_INVALID_ACCESS
290 | NFS_INO_INVALID_ACL;
291 /* Why so? Because we want revalidate for devices/FIFOs, and 289 /* Why so? Because we want revalidate for devices/FIFOs, and
292 * that's precisely what we have in nfs_file_inode_operations. 290 * that's precisely what we have in nfs_file_inode_operations.
293 */ 291 */
@@ -300,8 +298,6 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
300 inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops; 298 inode->i_op = NFS_SB(sb)->nfs_client->rpc_ops->dir_inode_ops;
301 inode->i_fop = &nfs_dir_operations; 299 inode->i_fop = &nfs_dir_operations;
302 inode->i_data.a_ops = &nfs_dir_aops; 300 inode->i_data.a_ops = &nfs_dir_aops;
303 if (nfs_server_capable(inode, NFS_CAP_READDIRPLUS))
304 set_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
305 /* Deal with crossing mountpoints */ 301 /* Deal with crossing mountpoints */
306 if (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT || 302 if (fattr->valid & NFS_ATTR_FATTR_MOUNTPOINT ||
307 fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) { 303 fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL) {
@@ -327,6 +323,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
327 inode->i_gid = -2; 323 inode->i_gid = -2;
328 inode->i_blocks = 0; 324 inode->i_blocks = 0;
329 memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf)); 325 memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
326 nfsi->write_io = 0;
327 nfsi->read_io = 0;
330 328
331 nfsi->read_cache_jiffies = fattr->time_start; 329 nfsi->read_cache_jiffies = fattr->time_start;
332 nfsi->attr_gencount = fattr->gencount; 330 nfsi->attr_gencount = fattr->gencount;
@@ -337,24 +335,19 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
337 if (fattr->valid & NFS_ATTR_FATTR_MTIME) 335 if (fattr->valid & NFS_ATTR_FATTR_MTIME)
338 inode->i_mtime = fattr->mtime; 336 inode->i_mtime = fattr->mtime;
339 else if (nfs_server_capable(inode, NFS_CAP_MTIME)) 337 else if (nfs_server_capable(inode, NFS_CAP_MTIME))
340 nfsi->cache_validity |= NFS_INO_INVALID_ATTR 338 nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
341 | NFS_INO_INVALID_DATA;
342 if (fattr->valid & NFS_ATTR_FATTR_CTIME) 339 if (fattr->valid & NFS_ATTR_FATTR_CTIME)
343 inode->i_ctime = fattr->ctime; 340 inode->i_ctime = fattr->ctime;
344 else if (nfs_server_capable(inode, NFS_CAP_CTIME)) 341 else if (nfs_server_capable(inode, NFS_CAP_CTIME))
345 nfsi->cache_validity |= NFS_INO_INVALID_ATTR 342 nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
346 | NFS_INO_INVALID_ACCESS
347 | NFS_INO_INVALID_ACL;
348 if (fattr->valid & NFS_ATTR_FATTR_CHANGE) 343 if (fattr->valid & NFS_ATTR_FATTR_CHANGE)
349 inode->i_version = fattr->change_attr; 344 inode->i_version = fattr->change_attr;
350 else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR)) 345 else if (nfs_server_capable(inode, NFS_CAP_CHANGE_ATTR))
351 nfsi->cache_validity |= NFS_INO_INVALID_ATTR 346 nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
352 | NFS_INO_INVALID_DATA;
353 if (fattr->valid & NFS_ATTR_FATTR_SIZE) 347 if (fattr->valid & NFS_ATTR_FATTR_SIZE)
354 inode->i_size = nfs_size_to_loff_t(fattr->size); 348 inode->i_size = nfs_size_to_loff_t(fattr->size);
355 else 349 else
356 nfsi->cache_validity |= NFS_INO_INVALID_ATTR 350 nfsi->cache_validity |= NFS_INO_INVALID_ATTR
357 | NFS_INO_INVALID_DATA
358 | NFS_INO_REVAL_PAGECACHE; 351 | NFS_INO_REVAL_PAGECACHE;
359 if (fattr->valid & NFS_ATTR_FATTR_NLINK) 352 if (fattr->valid & NFS_ATTR_FATTR_NLINK)
360 set_nlink(inode, fattr->nlink); 353 set_nlink(inode, fattr->nlink);
@@ -363,15 +356,11 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
363 if (fattr->valid & NFS_ATTR_FATTR_OWNER) 356 if (fattr->valid & NFS_ATTR_FATTR_OWNER)
364 inode->i_uid = fattr->uid; 357 inode->i_uid = fattr->uid;
365 else if (nfs_server_capable(inode, NFS_CAP_OWNER)) 358 else if (nfs_server_capable(inode, NFS_CAP_OWNER))
366 nfsi->cache_validity |= NFS_INO_INVALID_ATTR 359 nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
367 | NFS_INO_INVALID_ACCESS
368 | NFS_INO_INVALID_ACL;
369 if (fattr->valid & NFS_ATTR_FATTR_GROUP) 360 if (fattr->valid & NFS_ATTR_FATTR_GROUP)
370 inode->i_gid = fattr->gid; 361 inode->i_gid = fattr->gid;
371 else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP)) 362 else if (nfs_server_capable(inode, NFS_CAP_OWNER_GROUP))
372 nfsi->cache_validity |= NFS_INO_INVALID_ATTR 363 nfsi->cache_validity |= NFS_INO_INVALID_ATTR;
373 | NFS_INO_INVALID_ACCESS
374 | NFS_INO_INVALID_ACL;
375 if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED) 364 if (fattr->valid & NFS_ATTR_FATTR_BLOCKS_USED)
376 inode->i_blocks = fattr->du.nfs2.blocks; 365 inode->i_blocks = fattr->du.nfs2.blocks;
377 if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) { 366 if (fattr->valid & NFS_ATTR_FATTR_SPACE_USED) {
@@ -654,6 +643,7 @@ struct nfs_open_context *alloc_nfs_open_context(struct dentry *dentry, fmode_t f
654 nfs_init_lock_context(&ctx->lock_context); 643 nfs_init_lock_context(&ctx->lock_context);
655 ctx->lock_context.open_context = ctx; 644 ctx->lock_context.open_context = ctx;
656 INIT_LIST_HEAD(&ctx->list); 645 INIT_LIST_HEAD(&ctx->list);
646 ctx->mdsthreshold = NULL;
657 return ctx; 647 return ctx;
658} 648}
659 649
@@ -682,6 +672,7 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int is_sync)
682 put_rpccred(ctx->cred); 672 put_rpccred(ctx->cred);
683 dput(ctx->dentry); 673 dput(ctx->dentry);
684 nfs_sb_deactive(sb); 674 nfs_sb_deactive(sb);
675 kfree(ctx->mdsthreshold);
685 kfree(ctx); 676 kfree(ctx);
686} 677}
687 678
@@ -870,6 +861,15 @@ static int nfs_invalidate_mapping(struct inode *inode, struct address_space *map
870 return 0; 861 return 0;
871} 862}
872 863
864static bool nfs_mapping_need_revalidate_inode(struct inode *inode)
865{
866 if (nfs_have_delegated_attributes(inode))
867 return false;
868 return (NFS_I(inode)->cache_validity & NFS_INO_REVAL_PAGECACHE)
869 || nfs_attribute_timeout(inode)
870 || NFS_STALE(inode);
871}
872
873/** 873/**
874 * nfs_revalidate_mapping - Revalidate the pagecache 874 * nfs_revalidate_mapping - Revalidate the pagecache
875 * @inode - pointer to host inode 875 * @inode - pointer to host inode
@@ -880,9 +880,7 @@ int nfs_revalidate_mapping(struct inode *inode, struct address_space *mapping)
880 struct nfs_inode *nfsi = NFS_I(inode); 880 struct nfs_inode *nfsi = NFS_I(inode);
881 int ret = 0; 881 int ret = 0;
882 882
883 if ((nfsi->cache_validity & NFS_INO_REVAL_PAGECACHE) 883 if (nfs_mapping_need_revalidate_inode(inode)) {
884 || nfs_attribute_cache_expired(inode)
885 || NFS_STALE(inode)) {
886 ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode); 884 ret = __nfs_revalidate_inode(NFS_SERVER(inode), inode);
887 if (ret < 0) 885 if (ret < 0)
888 goto out; 886 goto out;
@@ -948,6 +946,8 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
948 unsigned long invalid = 0; 946 unsigned long invalid = 0;
949 947
950 948
949 if (nfs_have_delegated_attributes(inode))
950 return 0;
951 /* Has the inode gone and changed behind our back? */ 951 /* Has the inode gone and changed behind our back? */
952 if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid) 952 if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid)
953 return -EIO; 953 return -EIO;
@@ -960,7 +960,7 @@ static int nfs_check_inode_attributes(struct inode *inode, struct nfs_fattr *fat
960 960
961 /* Verify a few of the more important attributes */ 961 /* Verify a few of the more important attributes */
962 if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec_equal(&inode->i_mtime, &fattr->mtime)) 962 if ((fattr->valid & NFS_ATTR_FATTR_MTIME) && !timespec_equal(&inode->i_mtime, &fattr->mtime))
963 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE; 963 invalid |= NFS_INO_INVALID_ATTR;
964 964
965 if (fattr->valid & NFS_ATTR_FATTR_SIZE) { 965 if (fattr->valid & NFS_ATTR_FATTR_SIZE) {
966 cur_size = i_size_read(inode); 966 cur_size = i_size_read(inode);
@@ -1279,14 +1279,26 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1279 nfs_display_fhandle_hash(NFS_FH(inode)), 1279 nfs_display_fhandle_hash(NFS_FH(inode)),
1280 atomic_read(&inode->i_count), fattr->valid); 1280 atomic_read(&inode->i_count), fattr->valid);
1281 1281
1282 if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid) 1282 if ((fattr->valid & NFS_ATTR_FATTR_FILEID) && nfsi->fileid != fattr->fileid) {
1283 goto out_fileid; 1283 printk(KERN_ERR "NFS: server %s error: fileid changed\n"
1284 "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
1285 NFS_SERVER(inode)->nfs_client->cl_hostname,
1286 inode->i_sb->s_id, (long long)nfsi->fileid,
1287 (long long)fattr->fileid);
1288 goto out_err;
1289 }
1284 1290
1285 /* 1291 /*
1286 * Make sure the inode's type hasn't changed. 1292 * Make sure the inode's type hasn't changed.
1287 */ 1293 */
1288 if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) 1294 if ((fattr->valid & NFS_ATTR_FATTR_TYPE) && (inode->i_mode & S_IFMT) != (fattr->mode & S_IFMT)) {
1289 goto out_changed; 1295 /*
1296 * Big trouble! The inode has become a different object.
1297 */
1298 printk(KERN_DEBUG "NFS: %s: inode %ld mode changed, %07o to %07o\n",
1299 __func__, inode->i_ino, inode->i_mode, fattr->mode);
1300 goto out_err;
1301 }
1290 1302
1291 server = NFS_SERVER(inode); 1303 server = NFS_SERVER(inode);
1292 /* Update the fsid? */ 1304 /* Update the fsid? */
@@ -1314,7 +1326,11 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1314 if (inode->i_version != fattr->change_attr) { 1326 if (inode->i_version != fattr->change_attr) {
1315 dprintk("NFS: change_attr change on server for file %s/%ld\n", 1327 dprintk("NFS: change_attr change on server for file %s/%ld\n",
1316 inode->i_sb->s_id, inode->i_ino); 1328 inode->i_sb->s_id, inode->i_ino);
1317 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL; 1329 invalid |= NFS_INO_INVALID_ATTR
1330 | NFS_INO_INVALID_DATA
1331 | NFS_INO_INVALID_ACCESS
1332 | NFS_INO_INVALID_ACL
1333 | NFS_INO_REVAL_PAGECACHE;
1318 if (S_ISDIR(inode->i_mode)) 1334 if (S_ISDIR(inode->i_mode))
1319 nfs_force_lookup_revalidate(inode); 1335 nfs_force_lookup_revalidate(inode);
1320 inode->i_version = fattr->change_attr; 1336 inode->i_version = fattr->change_attr;
@@ -1323,38 +1339,15 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1323 invalid |= save_cache_validity; 1339 invalid |= save_cache_validity;
1324 1340
1325 if (fattr->valid & NFS_ATTR_FATTR_MTIME) { 1341 if (fattr->valid & NFS_ATTR_FATTR_MTIME) {
1326 /* NFSv2/v3: Check if the mtime agrees */ 1342 memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
1327 if (!timespec_equal(&inode->i_mtime, &fattr->mtime)) {
1328 dprintk("NFS: mtime change on server for file %s/%ld\n",
1329 inode->i_sb->s_id, inode->i_ino);
1330 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
1331 if (S_ISDIR(inode->i_mode))
1332 nfs_force_lookup_revalidate(inode);
1333 memcpy(&inode->i_mtime, &fattr->mtime, sizeof(inode->i_mtime));
1334 }
1335 } else if (server->caps & NFS_CAP_MTIME) 1343 } else if (server->caps & NFS_CAP_MTIME)
1336 invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR 1344 invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
1337 | NFS_INO_INVALID_DATA
1338 | NFS_INO_REVAL_PAGECACHE
1339 | NFS_INO_REVAL_FORCED); 1345 | NFS_INO_REVAL_FORCED);
1340 1346
1341 if (fattr->valid & NFS_ATTR_FATTR_CTIME) { 1347 if (fattr->valid & NFS_ATTR_FATTR_CTIME) {
1342 /* If ctime has changed we should definitely clear access+acl caches */ 1348 memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
1343 if (!timespec_equal(&inode->i_ctime, &fattr->ctime)) {
1344 invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
1345 /* and probably clear data for a directory too as utimes can cause
1346 * havoc with our cache.
1347 */
1348 if (S_ISDIR(inode->i_mode)) {
1349 invalid |= NFS_INO_INVALID_DATA;
1350 nfs_force_lookup_revalidate(inode);
1351 }
1352 memcpy(&inode->i_ctime, &fattr->ctime, sizeof(inode->i_ctime));
1353 }
1354 } else if (server->caps & NFS_CAP_CTIME) 1349 } else if (server->caps & NFS_CAP_CTIME)
1355 invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR 1350 invalid |= save_cache_validity & (NFS_INO_INVALID_ATTR
1356 | NFS_INO_INVALID_ACCESS
1357 | NFS_INO_INVALID_ACL
1358 | NFS_INO_REVAL_FORCED); 1351 | NFS_INO_REVAL_FORCED);
1359 1352
1360 /* Check if our cached file size is stale */ 1353 /* Check if our cached file size is stale */
@@ -1466,12 +1459,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1466 nfsi->cache_validity |= invalid; 1459 nfsi->cache_validity |= invalid;
1467 1460
1468 return 0; 1461 return 0;
1469 out_changed:
1470 /*
1471 * Big trouble! The inode has become a different object.
1472 */
1473 printk(KERN_DEBUG "NFS: %s: inode %ld mode changed, %07o to %07o\n",
1474 __func__, inode->i_ino, inode->i_mode, fattr->mode);
1475 out_err: 1462 out_err:
1476 /* 1463 /*
1477 * No need to worry about unhashing the dentry, as the 1464 * No need to worry about unhashing the dentry, as the
@@ -1480,13 +1467,6 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1480 */ 1467 */
1481 nfs_invalidate_inode(inode); 1468 nfs_invalidate_inode(inode);
1482 return -ESTALE; 1469 return -ESTALE;
1483
1484 out_fileid:
1485 printk(KERN_ERR "NFS: server %s error: fileid changed\n"
1486 "fsid %s: expected fileid 0x%Lx, got 0x%Lx\n",
1487 NFS_SERVER(inode)->nfs_client->cl_hostname, inode->i_sb->s_id,
1488 (long long)nfsi->fileid, (long long)fattr->fileid);
1489 goto out_err;
1490} 1470}
1491 1471
1492 1472
@@ -1500,7 +1480,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
1500void nfs4_evict_inode(struct inode *inode) 1480void nfs4_evict_inode(struct inode *inode)
1501{ 1481{
1502 truncate_inode_pages(&inode->i_data, 0); 1482 truncate_inode_pages(&inode->i_data, 0);
1503 end_writeback(inode); 1483 clear_inode(inode);
1504 pnfs_return_layout(inode); 1484 pnfs_return_layout(inode);
1505 pnfs_destroy_layout(NFS_I(inode)); 1485 pnfs_destroy_layout(NFS_I(inode));
1506 /* If we are holding a delegation, return it! */ 1486 /* If we are holding a delegation, return it! */
@@ -1547,7 +1527,7 @@ static inline void nfs4_init_once(struct nfs_inode *nfsi)
1547 nfsi->delegation_state = 0; 1527 nfsi->delegation_state = 0;
1548 init_rwsem(&nfsi->rwsem); 1528 init_rwsem(&nfsi->rwsem);
1549 nfsi->layout = NULL; 1529 nfsi->layout = NULL;
1550 atomic_set(&nfsi->commits_outstanding, 0); 1530 atomic_set(&nfsi->commit_info.rpcs_out, 0);
1551#endif 1531#endif
1552} 1532}
1553 1533
@@ -1559,9 +1539,9 @@ static void init_once(void *foo)
1559 INIT_LIST_HEAD(&nfsi->open_files); 1539 INIT_LIST_HEAD(&nfsi->open_files);
1560 INIT_LIST_HEAD(&nfsi->access_cache_entry_lru); 1540 INIT_LIST_HEAD(&nfsi->access_cache_entry_lru);
1561 INIT_LIST_HEAD(&nfsi->access_cache_inode_lru); 1541 INIT_LIST_HEAD(&nfsi->access_cache_inode_lru);
1562 INIT_LIST_HEAD(&nfsi->commit_list); 1542 INIT_LIST_HEAD(&nfsi->commit_info.list);
1563 nfsi->npages = 0; 1543 nfsi->npages = 0;
1564 nfsi->ncommit = 0; 1544 nfsi->commit_info.ncommit = 0;
1565 atomic_set(&nfsi->silly_count, 1); 1545 atomic_set(&nfsi->silly_count, 1);
1566 INIT_HLIST_HEAD(&nfsi->silly_list); 1546 INIT_HLIST_HEAD(&nfsi->silly_list);
1567 init_waitqueue_head(&nfsi->waitqueue); 1547 init_waitqueue_head(&nfsi->waitqueue);
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index b777bdaba4c5..1848a7275592 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -103,6 +103,7 @@ struct nfs_parsed_mount_data {
103 unsigned int version; 103 unsigned int version;
104 unsigned int minorversion; 104 unsigned int minorversion;
105 char *fscache_uniq; 105 char *fscache_uniq;
106 bool need_mount;
106 107
107 struct { 108 struct {
108 struct sockaddr_storage address; 109 struct sockaddr_storage address;
@@ -167,11 +168,13 @@ extern struct nfs_server *nfs_clone_server(struct nfs_server *,
167 struct nfs_fh *, 168 struct nfs_fh *,
168 struct nfs_fattr *, 169 struct nfs_fattr *,
169 rpc_authflavor_t); 170 rpc_authflavor_t);
171extern int nfs_wait_client_init_complete(const struct nfs_client *clp);
170extern void nfs_mark_client_ready(struct nfs_client *clp, int state); 172extern void nfs_mark_client_ready(struct nfs_client *clp, int state);
171extern int nfs4_check_client_ready(struct nfs_client *clp);
172extern struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp, 173extern struct nfs_client *nfs4_set_ds_client(struct nfs_client* mds_clp,
173 const struct sockaddr *ds_addr, 174 const struct sockaddr *ds_addr,
174 int ds_addrlen, int ds_proto); 175 int ds_addrlen, int ds_proto,
176 unsigned int ds_timeo,
177 unsigned int ds_retrans);
175#ifdef CONFIG_PROC_FS 178#ifdef CONFIG_PROC_FS
176extern int __init nfs_fs_proc_init(void); 179extern int __init nfs_fs_proc_init(void);
177extern void nfs_fs_proc_exit(void); 180extern void nfs_fs_proc_exit(void);
@@ -185,21 +188,11 @@ static inline void nfs_fs_proc_exit(void)
185} 188}
186#endif 189#endif
187 190
188/* nfs4namespace.c */
189#ifdef CONFIG_NFS_V4
190extern struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry);
191#else
192static inline
193struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
194{
195 return ERR_PTR(-ENOENT);
196}
197#endif
198
199/* callback_xdr.c */ 191/* callback_xdr.c */
200extern struct svc_version nfs4_callback_version1; 192extern struct svc_version nfs4_callback_version1;
201extern struct svc_version nfs4_callback_version4; 193extern struct svc_version nfs4_callback_version4;
202 194
195struct nfs_pageio_descriptor;
203/* pagelist.c */ 196/* pagelist.c */
204extern int __init nfs_init_nfspagecache(void); 197extern int __init nfs_init_nfspagecache(void);
205extern void nfs_destroy_nfspagecache(void); 198extern void nfs_destroy_nfspagecache(void);
@@ -210,9 +203,13 @@ extern void nfs_destroy_writepagecache(void);
210 203
211extern int __init nfs_init_directcache(void); 204extern int __init nfs_init_directcache(void);
212extern void nfs_destroy_directcache(void); 205extern void nfs_destroy_directcache(void);
206extern bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount);
207extern void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
208 struct nfs_pgio_header *hdr,
209 void (*release)(struct nfs_pgio_header *hdr));
210void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos);
213 211
214/* nfs2xdr.c */ 212/* nfs2xdr.c */
215extern int nfs_stat_to_errno(enum nfs_stat);
216extern struct rpc_procinfo nfs_procedures[]; 213extern struct rpc_procinfo nfs_procedures[];
217extern int nfs2_decode_dirent(struct xdr_stream *, 214extern int nfs2_decode_dirent(struct xdr_stream *,
218 struct nfs_entry *, int); 215 struct nfs_entry *, int);
@@ -237,14 +234,13 @@ extern const u32 nfs41_maxwrite_overhead;
237extern struct rpc_procinfo nfs4_procedures[]; 234extern struct rpc_procinfo nfs4_procedures[];
238#endif 235#endif
239 236
240extern int nfs4_init_ds_session(struct nfs_client *clp); 237extern int nfs4_init_ds_session(struct nfs_client *, unsigned long);
241 238
242/* proc.c */ 239/* proc.c */
243void nfs_close_context(struct nfs_open_context *ctx, int is_sync); 240void nfs_close_context(struct nfs_open_context *ctx, int is_sync);
244extern int nfs_init_client(struct nfs_client *clp, 241extern struct nfs_client *nfs_init_client(struct nfs_client *clp,
245 const struct rpc_timeout *timeparms, 242 const struct rpc_timeout *timeparms,
246 const char *ip_addr, rpc_authflavor_t authflavour, 243 const char *ip_addr, rpc_authflavor_t authflavour);
247 int noresvport);
248 244
249/* dir.c */ 245/* dir.c */
250extern int nfs_access_cache_shrinker(struct shrinker *shrink, 246extern int nfs_access_cache_shrinker(struct shrinker *shrink,
@@ -280,9 +276,10 @@ extern void nfs_sb_deactive(struct super_block *sb);
280extern char *nfs_path(char **p, struct dentry *dentry, 276extern char *nfs_path(char **p, struct dentry *dentry,
281 char *buffer, ssize_t buflen); 277 char *buffer, ssize_t buflen);
282extern struct vfsmount *nfs_d_automount(struct path *path); 278extern struct vfsmount *nfs_d_automount(struct path *path);
283#ifdef CONFIG_NFS_V4 279struct vfsmount *nfs_submount(struct nfs_server *, struct dentry *,
284rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *); 280 struct nfs_fh *, struct nfs_fattr *);
285#endif 281struct vfsmount *nfs_do_submount(struct dentry *, struct nfs_fh *,
282 struct nfs_fattr *, rpc_authflavor_t);
286 283
287/* getroot.c */ 284/* getroot.c */
288extern struct dentry *nfs_get_root(struct super_block *, struct nfs_fh *, 285extern struct dentry *nfs_get_root(struct super_block *, struct nfs_fh *,
@@ -294,46 +291,73 @@ extern struct dentry *nfs4_get_root(struct super_block *, struct nfs_fh *,
294extern int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh); 291extern int nfs4_get_rootfh(struct nfs_server *server, struct nfs_fh *mntfh);
295#endif 292#endif
296 293
297struct nfs_pageio_descriptor; 294struct nfs_pgio_completion_ops;
298/* read.c */ 295/* read.c */
299extern int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt, 296extern struct nfs_read_header *nfs_readhdr_alloc(void);
300 const struct rpc_call_ops *call_ops); 297extern void nfs_readhdr_free(struct nfs_pgio_header *hdr);
298extern void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
299 struct inode *inode,
300 const struct nfs_pgio_completion_ops *compl_ops);
301extern int nfs_initiate_read(struct rpc_clnt *clnt,
302 struct nfs_read_data *data,
303 const struct rpc_call_ops *call_ops, int flags);
301extern void nfs_read_prepare(struct rpc_task *task, void *calldata); 304extern void nfs_read_prepare(struct rpc_task *task, void *calldata);
302extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc, 305extern int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
303 struct list_head *head); 306 struct nfs_pgio_header *hdr);
304
305extern void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio, 307extern void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
306 struct inode *inode); 308 struct inode *inode,
309 const struct nfs_pgio_completion_ops *compl_ops);
307extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio); 310extern void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio);
308extern void nfs_readdata_release(struct nfs_read_data *rdata); 311extern void nfs_readdata_release(struct nfs_read_data *rdata);
309 312
310/* write.c */ 313/* write.c */
314extern void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
315 struct inode *inode, int ioflags,
316 const struct nfs_pgio_completion_ops *compl_ops);
317extern struct nfs_write_header *nfs_writehdr_alloc(void);
318extern void nfs_writehdr_free(struct nfs_pgio_header *hdr);
311extern int nfs_generic_flush(struct nfs_pageio_descriptor *desc, 319extern int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
312 struct list_head *head); 320 struct nfs_pgio_header *hdr);
313extern void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio, 321extern void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
314 struct inode *inode, int ioflags); 322 struct inode *inode, int ioflags,
323 const struct nfs_pgio_completion_ops *compl_ops);
315extern void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio); 324extern void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio);
316extern void nfs_writedata_release(struct nfs_write_data *wdata); 325extern void nfs_writedata_release(struct nfs_write_data *wdata);
317extern void nfs_commit_free(struct nfs_write_data *p); 326extern void nfs_commit_free(struct nfs_commit_data *p);
318extern int nfs_initiate_write(struct nfs_write_data *data, 327extern int nfs_initiate_write(struct rpc_clnt *clnt,
319 struct rpc_clnt *clnt, 328 struct nfs_write_data *data,
320 const struct rpc_call_ops *call_ops, 329 const struct rpc_call_ops *call_ops,
321 int how); 330 int how, int flags);
322extern void nfs_write_prepare(struct rpc_task *task, void *calldata); 331extern void nfs_write_prepare(struct rpc_task *task, void *calldata);
323extern int nfs_initiate_commit(struct nfs_write_data *data, 332extern void nfs_commit_prepare(struct rpc_task *task, void *calldata);
324 struct rpc_clnt *clnt, 333extern int nfs_initiate_commit(struct rpc_clnt *clnt,
334 struct nfs_commit_data *data,
325 const struct rpc_call_ops *call_ops, 335 const struct rpc_call_ops *call_ops,
326 int how); 336 int how, int flags);
327extern void nfs_init_commit(struct nfs_write_data *data, 337extern void nfs_init_commit(struct nfs_commit_data *data,
328 struct list_head *head, 338 struct list_head *head,
329 struct pnfs_layout_segment *lseg); 339 struct pnfs_layout_segment *lseg,
340 struct nfs_commit_info *cinfo);
341int nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
342 struct nfs_commit_info *cinfo, int max);
343int nfs_scan_commit(struct inode *inode, struct list_head *dst,
344 struct nfs_commit_info *cinfo);
345void nfs_mark_request_commit(struct nfs_page *req,
346 struct pnfs_layout_segment *lseg,
347 struct nfs_commit_info *cinfo);
348int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
349 int how, struct nfs_commit_info *cinfo);
330void nfs_retry_commit(struct list_head *page_list, 350void nfs_retry_commit(struct list_head *page_list,
331 struct pnfs_layout_segment *lseg); 351 struct pnfs_layout_segment *lseg,
332void nfs_commit_clear_lock(struct nfs_inode *nfsi); 352 struct nfs_commit_info *cinfo);
333void nfs_commitdata_release(void *data); 353void nfs_commitdata_release(struct nfs_commit_data *data);
334void nfs_commit_release_pages(struct nfs_write_data *data); 354void nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
335void nfs_request_add_commit_list(struct nfs_page *req, struct list_head *head); 355 struct nfs_commit_info *cinfo);
336void nfs_request_remove_commit_list(struct nfs_page *req); 356void nfs_request_remove_commit_list(struct nfs_page *req,
357 struct nfs_commit_info *cinfo);
358void nfs_init_cinfo(struct nfs_commit_info *cinfo,
359 struct inode *inode,
360 struct nfs_direct_req *dreq);
337 361
338#ifdef CONFIG_MIGRATION 362#ifdef CONFIG_MIGRATION
339extern int nfs_migrate_page(struct address_space *, 363extern int nfs_migrate_page(struct address_space *,
@@ -342,15 +366,16 @@ extern int nfs_migrate_page(struct address_space *,
342#define nfs_migrate_page NULL 366#define nfs_migrate_page NULL
343#endif 367#endif
344 368
369/* direct.c */
370void nfs_init_cinfo_from_dreq(struct nfs_commit_info *cinfo,
371 struct nfs_direct_req *dreq);
372
345/* nfs4proc.c */ 373/* nfs4proc.c */
346extern void __nfs4_read_done_cb(struct nfs_read_data *); 374extern void __nfs4_read_done_cb(struct nfs_read_data *);
347extern void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data); 375extern struct nfs_client *nfs4_init_client(struct nfs_client *clp,
348extern int nfs4_init_client(struct nfs_client *clp,
349 const struct rpc_timeout *timeparms, 376 const struct rpc_timeout *timeparms,
350 const char *ip_addr, 377 const char *ip_addr,
351 rpc_authflavor_t authflavour, 378 rpc_authflavor_t authflavour);
352 int noresvport);
353extern void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data);
354extern int _nfs4_call_sync(struct rpc_clnt *clnt, 379extern int _nfs4_call_sync(struct rpc_clnt *clnt,
355 struct nfs_server *server, 380 struct nfs_server *server,
356 struct rpc_message *msg, 381 struct rpc_message *msg,
@@ -466,3 +491,15 @@ unsigned int nfs_page_array_len(unsigned int base, size_t len)
466 PAGE_SIZE - 1) >> PAGE_SHIFT; 491 PAGE_SIZE - 1) >> PAGE_SHIFT;
467} 492}
468 493
494/*
495 * Convert a struct timespec into a 64-bit change attribute
496 *
497 * This does approximately the same thing as timespec_to_ns(),
498 * but for calculation efficiency, we multiply the seconds by
499 * 1024*1024*1024.
500 */
501static inline
502u64 nfs_timespec_to_change_attr(const struct timespec *ts)
503{
504 return ((u64)ts->tv_sec << 30) + ts->tv_nsec;
505}
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index d51868e5683c..08b9c93675da 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -26,11 +26,6 @@ static LIST_HEAD(nfs_automount_list);
26static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts); 26static DECLARE_DELAYED_WORK(nfs_automount_task, nfs_expire_automounts);
27int nfs_mountpoint_expiry_timeout = 500 * HZ; 27int nfs_mountpoint_expiry_timeout = 500 * HZ;
28 28
29static struct vfsmount *nfs_do_submount(struct dentry *dentry,
30 struct nfs_fh *fh,
31 struct nfs_fattr *fattr,
32 rpc_authflavor_t authflavor);
33
34/* 29/*
35 * nfs_path - reconstruct the path given an arbitrary dentry 30 * nfs_path - reconstruct the path given an arbitrary dentry
36 * @base - used to return pointer to the end of devname part of path 31 * @base - used to return pointer to the end of devname part of path
@@ -118,64 +113,6 @@ Elong:
118 return ERR_PTR(-ENAMETOOLONG); 113 return ERR_PTR(-ENAMETOOLONG);
119} 114}
120 115
121#ifdef CONFIG_NFS_V4
122rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors)
123{
124 struct gss_api_mech *mech;
125 struct xdr_netobj oid;
126 int i;
127 rpc_authflavor_t pseudoflavor = RPC_AUTH_UNIX;
128
129 for (i = 0; i < flavors->num_flavors; i++) {
130 struct nfs4_secinfo_flavor *flavor;
131 flavor = &flavors->flavors[i];
132
133 if (flavor->flavor == RPC_AUTH_NULL || flavor->flavor == RPC_AUTH_UNIX) {
134 pseudoflavor = flavor->flavor;
135 break;
136 } else if (flavor->flavor == RPC_AUTH_GSS) {
137 oid.len = flavor->gss.sec_oid4.len;
138 oid.data = flavor->gss.sec_oid4.data;
139 mech = gss_mech_get_by_OID(&oid);
140 if (!mech)
141 continue;
142 pseudoflavor = gss_svc_to_pseudoflavor(mech, flavor->gss.service);
143 gss_mech_put(mech);
144 break;
145 }
146 }
147
148 return pseudoflavor;
149}
150
151static struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir,
152 struct qstr *name,
153 struct nfs_fh *fh,
154 struct nfs_fattr *fattr)
155{
156 int err;
157
158 if (NFS_PROTO(dir)->version == 4)
159 return nfs4_proc_lookup_mountpoint(dir, name, fh, fattr);
160
161 err = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, name, fh, fattr);
162 if (err)
163 return ERR_PTR(err);
164 return rpc_clone_client(NFS_SERVER(dir)->client);
165}
166#else /* CONFIG_NFS_V4 */
167static inline struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir,
168 struct qstr *name,
169 struct nfs_fh *fh,
170 struct nfs_fattr *fattr)
171{
172 int err = NFS_PROTO(dir)->lookup(NFS_SERVER(dir)->client, dir, name, fh, fattr);
173 if (err)
174 return ERR_PTR(err);
175 return rpc_clone_client(NFS_SERVER(dir)->client);
176}
177#endif /* CONFIG_NFS_V4 */
178
179/* 116/*
180 * nfs_d_automount - Handle crossing a mountpoint on the server 117 * nfs_d_automount - Handle crossing a mountpoint on the server
181 * @path - The mountpoint 118 * @path - The mountpoint
@@ -191,10 +128,9 @@ static inline struct rpc_clnt *nfs_lookup_mountpoint(struct inode *dir,
191struct vfsmount *nfs_d_automount(struct path *path) 128struct vfsmount *nfs_d_automount(struct path *path)
192{ 129{
193 struct vfsmount *mnt; 130 struct vfsmount *mnt;
194 struct dentry *parent; 131 struct nfs_server *server = NFS_SERVER(path->dentry->d_inode);
195 struct nfs_fh *fh = NULL; 132 struct nfs_fh *fh = NULL;
196 struct nfs_fattr *fattr = NULL; 133 struct nfs_fattr *fattr = NULL;
197 struct rpc_clnt *client;
198 134
199 dprintk("--> nfs_d_automount()\n"); 135 dprintk("--> nfs_d_automount()\n");
200 136
@@ -210,21 +146,7 @@ struct vfsmount *nfs_d_automount(struct path *path)
210 146
211 dprintk("%s: enter\n", __func__); 147 dprintk("%s: enter\n", __func__);
212 148
213 /* Look it up again to get its attributes */ 149 mnt = server->nfs_client->rpc_ops->submount(server, path->dentry, fh, fattr);
214 parent = dget_parent(path->dentry);
215 client = nfs_lookup_mountpoint(parent->d_inode, &path->dentry->d_name, fh, fattr);
216 dput(parent);
217 if (IS_ERR(client)) {
218 mnt = ERR_CAST(client);
219 goto out;
220 }
221
222 if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
223 mnt = nfs_do_refmount(client, path->dentry);
224 else
225 mnt = nfs_do_submount(path->dentry, fh, fattr, client->cl_auth->au_flavor);
226 rpc_shutdown_client(client);
227
228 if (IS_ERR(mnt)) 150 if (IS_ERR(mnt))
229 goto out; 151 goto out;
230 152
@@ -297,10 +219,8 @@ static struct vfsmount *nfs_do_clone_mount(struct nfs_server *server,
297 * @authflavor - security flavor to use when performing the mount 219 * @authflavor - security flavor to use when performing the mount
298 * 220 *
299 */ 221 */
300static struct vfsmount *nfs_do_submount(struct dentry *dentry, 222struct vfsmount *nfs_do_submount(struct dentry *dentry, struct nfs_fh *fh,
301 struct nfs_fh *fh, 223 struct nfs_fattr *fattr, rpc_authflavor_t authflavor)
302 struct nfs_fattr *fattr,
303 rpc_authflavor_t authflavor)
304{ 224{
305 struct nfs_clone_mount mountdata = { 225 struct nfs_clone_mount mountdata = {
306 .sb = dentry->d_sb, 226 .sb = dentry->d_sb,
@@ -333,3 +253,18 @@ out:
333 dprintk("<-- nfs_do_submount() = %p\n", mnt); 253 dprintk("<-- nfs_do_submount() = %p\n", mnt);
334 return mnt; 254 return mnt;
335} 255}
256
257struct vfsmount *nfs_submount(struct nfs_server *server, struct dentry *dentry,
258 struct nfs_fh *fh, struct nfs_fattr *fattr)
259{
260 int err;
261 struct dentry *parent = dget_parent(dentry);
262
263 /* Look it up again to get its attributes */
264 err = server->nfs_client->rpc_ops->lookup(parent->d_inode, &dentry->d_name, fh, fattr);
265 dput(parent);
266 if (err != 0)
267 return ERR_PTR(err);
268
269 return nfs_do_submount(dentry, fh, fattr, server->client->cl_auth->au_flavor);
270}
diff --git a/fs/nfs/netns.h b/fs/nfs/netns.h
index aa14ec303e94..8a6394edb8b0 100644
--- a/fs/nfs/netns.h
+++ b/fs/nfs/netns.h
@@ -1,3 +1,7 @@
1/*
2 * NFS-private data for each "struct net". Accessed with net_generic().
3 */
4
1#ifndef __NFS_NETNS_H__ 5#ifndef __NFS_NETNS_H__
2#define __NFS_NETNS_H__ 6#define __NFS_NETNS_H__
3 7
@@ -20,6 +24,7 @@ struct nfs_net {
20 struct idr cb_ident_idr; /* Protected by nfs_client_lock */ 24 struct idr cb_ident_idr; /* Protected by nfs_client_lock */
21#endif 25#endif
22 spinlock_t nfs_client_lock; 26 spinlock_t nfs_client_lock;
27 struct timespec boot_time;
23}; 28};
24 29
25extern int nfs_net_id; 30extern int nfs_net_id;
diff --git a/fs/nfs/nfs2xdr.c b/fs/nfs/nfs2xdr.c
index 1f56000fabbd..baf759bccd05 100644
--- a/fs/nfs/nfs2xdr.c
+++ b/fs/nfs/nfs2xdr.c
@@ -61,6 +61,7 @@
61#define NFS_readdirres_sz (1) 61#define NFS_readdirres_sz (1)
62#define NFS_statfsres_sz (1+NFS_info_sz) 62#define NFS_statfsres_sz (1+NFS_info_sz)
63 63
64static int nfs_stat_to_errno(enum nfs_stat);
64 65
65/* 66/*
66 * While encoding arguments, set up the reply buffer in advance to 67 * While encoding arguments, set up the reply buffer in advance to
@@ -313,6 +314,8 @@ static int decode_fattr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
313 p = xdr_decode_time(p, &fattr->atime); 314 p = xdr_decode_time(p, &fattr->atime);
314 p = xdr_decode_time(p, &fattr->mtime); 315 p = xdr_decode_time(p, &fattr->mtime);
315 xdr_decode_time(p, &fattr->ctime); 316 xdr_decode_time(p, &fattr->ctime);
317 fattr->change_attr = nfs_timespec_to_change_attr(&fattr->ctime);
318
316 return 0; 319 return 0;
317out_overflow: 320out_overflow:
318 print_overflow_msg(__func__, xdr); 321 print_overflow_msg(__func__, xdr);
@@ -1109,7 +1112,7 @@ static const struct {
1109 * Returns a local errno value, or -EIO if the NFS status code is 1112 * Returns a local errno value, or -EIO if the NFS status code is
1110 * not recognized. This function is used jointly by NFSv2 and NFSv3. 1113 * not recognized. This function is used jointly by NFSv2 and NFSv3.
1111 */ 1114 */
1112int nfs_stat_to_errno(enum nfs_stat status) 1115static int nfs_stat_to_errno(enum nfs_stat status)
1113{ 1116{
1114 int i; 1117 int i;
1115 1118
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 75c68299358e..2292a0fd2bff 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -142,7 +142,7 @@ nfs3_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
142} 142}
143 143
144static int 144static int
145nfs3_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name, 145nfs3_proc_lookup(struct inode *dir, struct qstr *name,
146 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 146 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
147{ 147{
148 struct nfs3_diropargs arg = { 148 struct nfs3_diropargs arg = {
@@ -810,11 +810,13 @@ nfs3_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
810 810
811static int nfs3_read_done(struct rpc_task *task, struct nfs_read_data *data) 811static int nfs3_read_done(struct rpc_task *task, struct nfs_read_data *data)
812{ 812{
813 if (nfs3_async_handle_jukebox(task, data->inode)) 813 struct inode *inode = data->header->inode;
814
815 if (nfs3_async_handle_jukebox(task, inode))
814 return -EAGAIN; 816 return -EAGAIN;
815 817
816 nfs_invalidate_atime(data->inode); 818 nfs_invalidate_atime(inode);
817 nfs_refresh_inode(data->inode, &data->fattr); 819 nfs_refresh_inode(inode, &data->fattr);
818 return 0; 820 return 0;
819} 821}
820 822
@@ -830,10 +832,12 @@ static void nfs3_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_da
830 832
831static int nfs3_write_done(struct rpc_task *task, struct nfs_write_data *data) 833static int nfs3_write_done(struct rpc_task *task, struct nfs_write_data *data)
832{ 834{
833 if (nfs3_async_handle_jukebox(task, data->inode)) 835 struct inode *inode = data->header->inode;
836
837 if (nfs3_async_handle_jukebox(task, inode))
834 return -EAGAIN; 838 return -EAGAIN;
835 if (task->tk_status >= 0) 839 if (task->tk_status >= 0)
836 nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr); 840 nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
837 return 0; 841 return 0;
838} 842}
839 843
@@ -847,7 +851,12 @@ static void nfs3_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_
847 rpc_call_start(task); 851 rpc_call_start(task);
848} 852}
849 853
850static int nfs3_commit_done(struct rpc_task *task, struct nfs_write_data *data) 854static void nfs3_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
855{
856 rpc_call_start(task);
857}
858
859static int nfs3_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
851{ 860{
852 if (nfs3_async_handle_jukebox(task, data->inode)) 861 if (nfs3_async_handle_jukebox(task, data->inode))
853 return -EAGAIN; 862 return -EAGAIN;
@@ -855,7 +864,7 @@ static int nfs3_commit_done(struct rpc_task *task, struct nfs_write_data *data)
855 return 0; 864 return 0;
856} 865}
857 866
858static void nfs3_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg) 867static void nfs3_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
859{ 868{
860 msg->rpc_proc = &nfs3_procedures[NFS3PROC_COMMIT]; 869 msg->rpc_proc = &nfs3_procedures[NFS3PROC_COMMIT];
861} 870}
@@ -875,6 +884,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
875 .file_inode_ops = &nfs3_file_inode_operations, 884 .file_inode_ops = &nfs3_file_inode_operations,
876 .file_ops = &nfs_file_operations, 885 .file_ops = &nfs_file_operations,
877 .getroot = nfs3_proc_get_root, 886 .getroot = nfs3_proc_get_root,
887 .submount = nfs_submount,
878 .getattr = nfs3_proc_getattr, 888 .getattr = nfs3_proc_getattr,
879 .setattr = nfs3_proc_setattr, 889 .setattr = nfs3_proc_setattr,
880 .lookup = nfs3_proc_lookup, 890 .lookup = nfs3_proc_lookup,
@@ -906,6 +916,7 @@ const struct nfs_rpc_ops nfs_v3_clientops = {
906 .write_rpc_prepare = nfs3_proc_write_rpc_prepare, 916 .write_rpc_prepare = nfs3_proc_write_rpc_prepare,
907 .write_done = nfs3_write_done, 917 .write_done = nfs3_write_done,
908 .commit_setup = nfs3_proc_commit_setup, 918 .commit_setup = nfs3_proc_commit_setup,
919 .commit_rpc_prepare = nfs3_proc_commit_rpc_prepare,
909 .commit_done = nfs3_commit_done, 920 .commit_done = nfs3_commit_done,
910 .lock = nfs3_proc_lock, 921 .lock = nfs3_proc_lock,
911 .clear_acl_cache = nfs3_forget_cached_acls, 922 .clear_acl_cache = nfs3_forget_cached_acls,
diff --git a/fs/nfs/nfs3xdr.c b/fs/nfs/nfs3xdr.c
index a77cc9a3ce55..902de489ec9b 100644
--- a/fs/nfs/nfs3xdr.c
+++ b/fs/nfs/nfs3xdr.c
@@ -86,6 +86,8 @@
86 XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE)) 86 XDR_QUADLEN(NFS_ACL_INLINE_BUFSIZE))
87#define ACL3_setaclres_sz (1+NFS3_post_op_attr_sz) 87#define ACL3_setaclres_sz (1+NFS3_post_op_attr_sz)
88 88
89static int nfs3_stat_to_errno(enum nfs_stat);
90
89/* 91/*
90 * Map file type to S_IFMT bits 92 * Map file type to S_IFMT bits
91 */ 93 */
@@ -675,6 +677,7 @@ static int decode_fattr3(struct xdr_stream *xdr, struct nfs_fattr *fattr)
675 p = xdr_decode_nfstime3(p, &fattr->atime); 677 p = xdr_decode_nfstime3(p, &fattr->atime);
676 p = xdr_decode_nfstime3(p, &fattr->mtime); 678 p = xdr_decode_nfstime3(p, &fattr->mtime);
677 xdr_decode_nfstime3(p, &fattr->ctime); 679 xdr_decode_nfstime3(p, &fattr->ctime);
680 fattr->change_attr = nfs_timespec_to_change_attr(&fattr->ctime);
678 681
679 fattr->valid |= NFS_ATTR_FATTR_V3; 682 fattr->valid |= NFS_ATTR_FATTR_V3;
680 return 0; 683 return 0;
@@ -725,12 +728,14 @@ static int decode_wcc_attr(struct xdr_stream *xdr, struct nfs_fattr *fattr)
725 goto out_overflow; 728 goto out_overflow;
726 729
727 fattr->valid |= NFS_ATTR_FATTR_PRESIZE 730 fattr->valid |= NFS_ATTR_FATTR_PRESIZE
731 | NFS_ATTR_FATTR_PRECHANGE
728 | NFS_ATTR_FATTR_PREMTIME 732 | NFS_ATTR_FATTR_PREMTIME
729 | NFS_ATTR_FATTR_PRECTIME; 733 | NFS_ATTR_FATTR_PRECTIME;
730 734
731 p = xdr_decode_size3(p, &fattr->pre_size); 735 p = xdr_decode_size3(p, &fattr->pre_size);
732 p = xdr_decode_nfstime3(p, &fattr->pre_mtime); 736 p = xdr_decode_nfstime3(p, &fattr->pre_mtime);
733 xdr_decode_nfstime3(p, &fattr->pre_ctime); 737 xdr_decode_nfstime3(p, &fattr->pre_ctime);
738 fattr->pre_change_attr = nfs_timespec_to_change_attr(&fattr->pre_ctime);
734 739
735 return 0; 740 return 0;
736out_overflow: 741out_overflow:
@@ -1287,7 +1292,7 @@ static void nfs3_xdr_enc_readdirplus3args(struct rpc_rqst *req,
1287 * }; 1292 * };
1288 */ 1293 */
1289static void encode_commit3args(struct xdr_stream *xdr, 1294static void encode_commit3args(struct xdr_stream *xdr,
1290 const struct nfs_writeargs *args) 1295 const struct nfs_commitargs *args)
1291{ 1296{
1292 __be32 *p; 1297 __be32 *p;
1293 1298
@@ -1300,7 +1305,7 @@ static void encode_commit3args(struct xdr_stream *xdr,
1300 1305
1301static void nfs3_xdr_enc_commit3args(struct rpc_rqst *req, 1306static void nfs3_xdr_enc_commit3args(struct rpc_rqst *req,
1302 struct xdr_stream *xdr, 1307 struct xdr_stream *xdr,
1303 const struct nfs_writeargs *args) 1308 const struct nfs_commitargs *args)
1304{ 1309{
1305 encode_commit3args(xdr, args); 1310 encode_commit3args(xdr, args);
1306} 1311}
@@ -1385,7 +1390,7 @@ static int nfs3_xdr_dec_getattr3res(struct rpc_rqst *req,
1385out: 1390out:
1386 return error; 1391 return error;
1387out_default: 1392out_default:
1388 return nfs_stat_to_errno(status); 1393 return nfs3_stat_to_errno(status);
1389} 1394}
1390 1395
1391/* 1396/*
@@ -1424,7 +1429,7 @@ static int nfs3_xdr_dec_setattr3res(struct rpc_rqst *req,
1424out: 1429out:
1425 return error; 1430 return error;
1426out_status: 1431out_status:
1427 return nfs_stat_to_errno(status); 1432 return nfs3_stat_to_errno(status);
1428} 1433}
1429 1434
1430/* 1435/*
@@ -1472,7 +1477,7 @@ out_default:
1472 error = decode_post_op_attr(xdr, result->dir_attr); 1477 error = decode_post_op_attr(xdr, result->dir_attr);
1473 if (unlikely(error)) 1478 if (unlikely(error))
1474 goto out; 1479 goto out;
1475 return nfs_stat_to_errno(status); 1480 return nfs3_stat_to_errno(status);
1476} 1481}
1477 1482
1478/* 1483/*
@@ -1513,7 +1518,7 @@ static int nfs3_xdr_dec_access3res(struct rpc_rqst *req,
1513out: 1518out:
1514 return error; 1519 return error;
1515out_default: 1520out_default:
1516 return nfs_stat_to_errno(status); 1521 return nfs3_stat_to_errno(status);
1517} 1522}
1518 1523
1519/* 1524/*
@@ -1554,7 +1559,7 @@ static int nfs3_xdr_dec_readlink3res(struct rpc_rqst *req,
1554out: 1559out:
1555 return error; 1560 return error;
1556out_default: 1561out_default:
1557 return nfs_stat_to_errno(status); 1562 return nfs3_stat_to_errno(status);
1558} 1563}
1559 1564
1560/* 1565/*
@@ -1636,7 +1641,7 @@ static int nfs3_xdr_dec_read3res(struct rpc_rqst *req, struct xdr_stream *xdr,
1636out: 1641out:
1637 return error; 1642 return error;
1638out_status: 1643out_status:
1639 return nfs_stat_to_errno(status); 1644 return nfs3_stat_to_errno(status);
1640} 1645}
1641 1646
1642/* 1647/*
@@ -1706,7 +1711,7 @@ static int nfs3_xdr_dec_write3res(struct rpc_rqst *req, struct xdr_stream *xdr,
1706out: 1711out:
1707 return error; 1712 return error;
1708out_status: 1713out_status:
1709 return nfs_stat_to_errno(status); 1714 return nfs3_stat_to_errno(status);
1710} 1715}
1711 1716
1712/* 1717/*
@@ -1770,7 +1775,7 @@ out_default:
1770 error = decode_wcc_data(xdr, result->dir_attr); 1775 error = decode_wcc_data(xdr, result->dir_attr);
1771 if (unlikely(error)) 1776 if (unlikely(error))
1772 goto out; 1777 goto out;
1773 return nfs_stat_to_errno(status); 1778 return nfs3_stat_to_errno(status);
1774} 1779}
1775 1780
1776/* 1781/*
@@ -1809,7 +1814,7 @@ static int nfs3_xdr_dec_remove3res(struct rpc_rqst *req,
1809out: 1814out:
1810 return error; 1815 return error;
1811out_status: 1816out_status:
1812 return nfs_stat_to_errno(status); 1817 return nfs3_stat_to_errno(status);
1813} 1818}
1814 1819
1815/* 1820/*
@@ -1853,7 +1858,7 @@ static int nfs3_xdr_dec_rename3res(struct rpc_rqst *req,
1853out: 1858out:
1854 return error; 1859 return error;
1855out_status: 1860out_status:
1856 return nfs_stat_to_errno(status); 1861 return nfs3_stat_to_errno(status);
1857} 1862}
1858 1863
1859/* 1864/*
@@ -1896,7 +1901,7 @@ static int nfs3_xdr_dec_link3res(struct rpc_rqst *req, struct xdr_stream *xdr,
1896out: 1901out:
1897 return error; 1902 return error;
1898out_status: 1903out_status:
1899 return nfs_stat_to_errno(status); 1904 return nfs3_stat_to_errno(status);
1900} 1905}
1901 1906
1902/** 1907/**
@@ -2088,7 +2093,7 @@ out_default:
2088 error = decode_post_op_attr(xdr, result->dir_attr); 2093 error = decode_post_op_attr(xdr, result->dir_attr);
2089 if (unlikely(error)) 2094 if (unlikely(error))
2090 goto out; 2095 goto out;
2091 return nfs_stat_to_errno(status); 2096 return nfs3_stat_to_errno(status);
2092} 2097}
2093 2098
2094/* 2099/*
@@ -2156,7 +2161,7 @@ static int nfs3_xdr_dec_fsstat3res(struct rpc_rqst *req,
2156out: 2161out:
2157 return error; 2162 return error;
2158out_status: 2163out_status:
2159 return nfs_stat_to_errno(status); 2164 return nfs3_stat_to_errno(status);
2160} 2165}
2161 2166
2162/* 2167/*
@@ -2232,7 +2237,7 @@ static int nfs3_xdr_dec_fsinfo3res(struct rpc_rqst *req,
2232out: 2237out:
2233 return error; 2238 return error;
2234out_status: 2239out_status:
2235 return nfs_stat_to_errno(status); 2240 return nfs3_stat_to_errno(status);
2236} 2241}
2237 2242
2238/* 2243/*
@@ -2295,7 +2300,7 @@ static int nfs3_xdr_dec_pathconf3res(struct rpc_rqst *req,
2295out: 2300out:
2296 return error; 2301 return error;
2297out_status: 2302out_status:
2298 return nfs_stat_to_errno(status); 2303 return nfs3_stat_to_errno(status);
2299} 2304}
2300 2305
2301/* 2306/*
@@ -2319,7 +2324,7 @@ out_status:
2319 */ 2324 */
2320static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req, 2325static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
2321 struct xdr_stream *xdr, 2326 struct xdr_stream *xdr,
2322 struct nfs_writeres *result) 2327 struct nfs_commitres *result)
2323{ 2328{
2324 enum nfs_stat status; 2329 enum nfs_stat status;
2325 int error; 2330 int error;
@@ -2336,7 +2341,7 @@ static int nfs3_xdr_dec_commit3res(struct rpc_rqst *req,
2336out: 2341out:
2337 return error; 2342 return error;
2338out_status: 2343out_status:
2339 return nfs_stat_to_errno(status); 2344 return nfs3_stat_to_errno(status);
2340} 2345}
2341 2346
2342#ifdef CONFIG_NFS_V3_ACL 2347#ifdef CONFIG_NFS_V3_ACL
@@ -2401,7 +2406,7 @@ static int nfs3_xdr_dec_getacl3res(struct rpc_rqst *req,
2401out: 2406out:
2402 return error; 2407 return error;
2403out_default: 2408out_default:
2404 return nfs_stat_to_errno(status); 2409 return nfs3_stat_to_errno(status);
2405} 2410}
2406 2411
2407static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req, 2412static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
@@ -2420,11 +2425,76 @@ static int nfs3_xdr_dec_setacl3res(struct rpc_rqst *req,
2420out: 2425out:
2421 return error; 2426 return error;
2422out_default: 2427out_default:
2423 return nfs_stat_to_errno(status); 2428 return nfs3_stat_to_errno(status);
2424} 2429}
2425 2430
2426#endif /* CONFIG_NFS_V3_ACL */ 2431#endif /* CONFIG_NFS_V3_ACL */
2427 2432
2433
2434/*
2435 * We need to translate between nfs status return values and
2436 * the local errno values which may not be the same.
2437 */
2438static const struct {
2439 int stat;
2440 int errno;
2441} nfs_errtbl[] = {
2442 { NFS_OK, 0 },
2443 { NFSERR_PERM, -EPERM },
2444 { NFSERR_NOENT, -ENOENT },
2445 { NFSERR_IO, -errno_NFSERR_IO},
2446 { NFSERR_NXIO, -ENXIO },
2447/* { NFSERR_EAGAIN, -EAGAIN }, */
2448 { NFSERR_ACCES, -EACCES },
2449 { NFSERR_EXIST, -EEXIST },
2450 { NFSERR_XDEV, -EXDEV },
2451 { NFSERR_NODEV, -ENODEV },
2452 { NFSERR_NOTDIR, -ENOTDIR },
2453 { NFSERR_ISDIR, -EISDIR },
2454 { NFSERR_INVAL, -EINVAL },
2455 { NFSERR_FBIG, -EFBIG },
2456 { NFSERR_NOSPC, -ENOSPC },
2457 { NFSERR_ROFS, -EROFS },
2458 { NFSERR_MLINK, -EMLINK },
2459 { NFSERR_NAMETOOLONG, -ENAMETOOLONG },
2460 { NFSERR_NOTEMPTY, -ENOTEMPTY },
2461 { NFSERR_DQUOT, -EDQUOT },
2462 { NFSERR_STALE, -ESTALE },
2463 { NFSERR_REMOTE, -EREMOTE },
2464#ifdef EWFLUSH
2465 { NFSERR_WFLUSH, -EWFLUSH },
2466#endif
2467 { NFSERR_BADHANDLE, -EBADHANDLE },
2468 { NFSERR_NOT_SYNC, -ENOTSYNC },
2469 { NFSERR_BAD_COOKIE, -EBADCOOKIE },
2470 { NFSERR_NOTSUPP, -ENOTSUPP },
2471 { NFSERR_TOOSMALL, -ETOOSMALL },
2472 { NFSERR_SERVERFAULT, -EREMOTEIO },
2473 { NFSERR_BADTYPE, -EBADTYPE },
2474 { NFSERR_JUKEBOX, -EJUKEBOX },
2475 { -1, -EIO }
2476};
2477
2478/**
2479 * nfs3_stat_to_errno - convert an NFS status code to a local errno
2480 * @status: NFS status code to convert
2481 *
2482 * Returns a local errno value, or -EIO if the NFS status code is
2483 * not recognized. This function is used jointly by NFSv2 and NFSv3.
2484 */
2485static int nfs3_stat_to_errno(enum nfs_stat status)
2486{
2487 int i;
2488
2489 for (i = 0; nfs_errtbl[i].stat != -1; i++) {
2490 if (nfs_errtbl[i].stat == (int)status)
2491 return nfs_errtbl[i].errno;
2492 }
2493 dprintk("NFS: Unrecognized nfs status value: %u\n", status);
2494 return nfs_errtbl[i].errno;
2495}
2496
2497
2428#define PROC(proc, argtype, restype, timer) \ 2498#define PROC(proc, argtype, restype, timer) \
2429[NFS3PROC_##proc] = { \ 2499[NFS3PROC_##proc] = { \
2430 .p_proc = NFS3PROC_##proc, \ 2500 .p_proc = NFS3PROC_##proc, \
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index 8d75021020b3..c6827f93ab57 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -24,6 +24,8 @@ enum nfs4_client_state {
24 NFS4CLNT_RECALL_SLOT, 24 NFS4CLNT_RECALL_SLOT,
25 NFS4CLNT_LEASE_CONFIRM, 25 NFS4CLNT_LEASE_CONFIRM,
26 NFS4CLNT_SERVER_SCOPE_MISMATCH, 26 NFS4CLNT_SERVER_SCOPE_MISMATCH,
27 NFS4CLNT_PURGE_STATE,
28 NFS4CLNT_BIND_CONN_TO_SESSION,
27}; 29};
28 30
29enum nfs4_session_state { 31enum nfs4_session_state {
@@ -52,11 +54,6 @@ struct nfs4_minor_version_ops {
52 const struct nfs4_state_maintenance_ops *state_renewal_ops; 54 const struct nfs4_state_maintenance_ops *state_renewal_ops;
53}; 55};
54 56
55struct nfs_unique_id {
56 struct rb_node rb_node;
57 __u64 id;
58};
59
60#define NFS_SEQID_CONFIRMED 1 57#define NFS_SEQID_CONFIRMED 1
61struct nfs_seqid_counter { 58struct nfs_seqid_counter {
62 ktime_t create_time; 59 ktime_t create_time;
@@ -206,12 +203,18 @@ extern const struct dentry_operations nfs4_dentry_operations;
206extern const struct inode_operations nfs4_dir_inode_operations; 203extern const struct inode_operations nfs4_dir_inode_operations;
207 204
208/* nfs4namespace.c */ 205/* nfs4namespace.c */
206rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *);
209struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *); 207struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *, struct inode *, struct qstr *);
208struct vfsmount *nfs4_submount(struct nfs_server *, struct dentry *,
209 struct nfs_fh *, struct nfs_fattr *);
210 210
211/* nfs4proc.c */ 211/* nfs4proc.c */
212extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *); 212extern int nfs4_proc_setclientid(struct nfs_client *, u32, unsigned short, struct rpc_cred *, struct nfs4_setclientid_res *);
213extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *); 213extern int nfs4_proc_setclientid_confirm(struct nfs_client *, struct nfs4_setclientid_res *arg, struct rpc_cred *);
214extern int nfs4_proc_get_rootfh(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
215extern int nfs4_proc_bind_conn_to_session(struct nfs_client *, struct rpc_cred *cred);
214extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred); 216extern int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred);
217extern int nfs4_destroy_clientid(struct nfs_client *clp);
215extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *); 218extern int nfs4_init_clientid(struct nfs_client *, struct rpc_cred *);
216extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *); 219extern int nfs41_init_clientid(struct nfs_client *, struct rpc_cred *);
217extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc); 220extern int nfs4_do_close(struct nfs4_state *state, gfp_t gfp_mask, int wait, bool roc);
@@ -239,8 +242,8 @@ extern int nfs41_setup_sequence(struct nfs4_session *session,
239 struct rpc_task *task); 242 struct rpc_task *task);
240extern void nfs4_destroy_session(struct nfs4_session *session); 243extern void nfs4_destroy_session(struct nfs4_session *session);
241extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp); 244extern struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp);
242extern int nfs4_proc_create_session(struct nfs_client *); 245extern int nfs4_proc_create_session(struct nfs_client *, struct rpc_cred *);
243extern int nfs4_proc_destroy_session(struct nfs4_session *); 246extern int nfs4_proc_destroy_session(struct nfs4_session *, struct rpc_cred *);
244extern int nfs4_init_session(struct nfs_server *server); 247extern int nfs4_init_session(struct nfs_server *server);
245extern int nfs4_proc_get_lease_time(struct nfs_client *clp, 248extern int nfs4_proc_get_lease_time(struct nfs_client *clp,
246 struct nfs_fsinfo *fsinfo); 249 struct nfs_fsinfo *fsinfo);
@@ -310,9 +313,9 @@ struct rpc_cred *nfs4_get_renew_cred_locked(struct nfs_client *clp);
310#if defined(CONFIG_NFS_V4_1) 313#if defined(CONFIG_NFS_V4_1)
311struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp); 314struct rpc_cred *nfs4_get_machine_cred_locked(struct nfs_client *clp);
312struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp); 315struct rpc_cred *nfs4_get_exchange_id_cred(struct nfs_client *clp);
313extern void nfs4_schedule_session_recovery(struct nfs4_session *); 316extern void nfs4_schedule_session_recovery(struct nfs4_session *, int);
314#else 317#else
315static inline void nfs4_schedule_session_recovery(struct nfs4_session *session) 318static inline void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
316{ 319{
317} 320}
318#endif /* CONFIG_NFS_V4_1 */ 321#endif /* CONFIG_NFS_V4_1 */
@@ -334,7 +337,7 @@ extern void nfs4_schedule_stateid_recovery(const struct nfs_server *, struct nfs
334extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags); 337extern void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags);
335extern void nfs41_handle_recall_slot(struct nfs_client *clp); 338extern void nfs41_handle_recall_slot(struct nfs_client *clp);
336extern void nfs41_handle_server_scope(struct nfs_client *, 339extern void nfs41_handle_server_scope(struct nfs_client *,
337 struct server_scope **); 340 struct nfs41_server_scope **);
338extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp); 341extern void nfs4_put_lock_state(struct nfs4_lock_state *lsp);
339extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl); 342extern int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl);
340extern void nfs4_select_rw_stateid(nfs4_stateid *, struct nfs4_state *, 343extern void nfs4_select_rw_stateid(nfs4_stateid *, struct nfs4_state *,
diff --git a/fs/nfs/nfs4filelayout.c b/fs/nfs/nfs4filelayout.c
index 5acfd9ea8a31..e1340293872c 100644
--- a/fs/nfs/nfs4filelayout.c
+++ b/fs/nfs/nfs4filelayout.c
@@ -82,29 +82,76 @@ filelayout_get_dserver_offset(struct pnfs_layout_segment *lseg, loff_t offset)
82 BUG(); 82 BUG();
83} 83}
84 84
85static void filelayout_reset_write(struct nfs_write_data *data)
86{
87 struct nfs_pgio_header *hdr = data->header;
88 struct rpc_task *task = &data->task;
89
90 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
91 dprintk("%s Reset task %5u for i/o through MDS "
92 "(req %s/%lld, %u bytes @ offset %llu)\n", __func__,
93 data->task.tk_pid,
94 hdr->inode->i_sb->s_id,
95 (long long)NFS_FILEID(hdr->inode),
96 data->args.count,
97 (unsigned long long)data->args.offset);
98
99 task->tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
100 &hdr->pages,
101 hdr->completion_ops);
102 }
103}
104
105static void filelayout_reset_read(struct nfs_read_data *data)
106{
107 struct nfs_pgio_header *hdr = data->header;
108 struct rpc_task *task = &data->task;
109
110 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
111 dprintk("%s Reset task %5u for i/o through MDS "
112 "(req %s/%lld, %u bytes @ offset %llu)\n", __func__,
113 data->task.tk_pid,
114 hdr->inode->i_sb->s_id,
115 (long long)NFS_FILEID(hdr->inode),
116 data->args.count,
117 (unsigned long long)data->args.offset);
118
119 task->tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
120 &hdr->pages,
121 hdr->completion_ops);
122 }
123}
124
85static int filelayout_async_handle_error(struct rpc_task *task, 125static int filelayout_async_handle_error(struct rpc_task *task,
86 struct nfs4_state *state, 126 struct nfs4_state *state,
87 struct nfs_client *clp, 127 struct nfs_client *clp,
88 int *reset) 128 struct pnfs_layout_segment *lseg)
89{ 129{
90 struct nfs_server *mds_server = NFS_SERVER(state->inode); 130 struct inode *inode = lseg->pls_layout->plh_inode;
131 struct nfs_server *mds_server = NFS_SERVER(inode);
132 struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
91 struct nfs_client *mds_client = mds_server->nfs_client; 133 struct nfs_client *mds_client = mds_server->nfs_client;
134 struct nfs4_slot_table *tbl = &clp->cl_session->fc_slot_table;
92 135
93 if (task->tk_status >= 0) 136 if (task->tk_status >= 0)
94 return 0; 137 return 0;
95 *reset = 0;
96 138
97 switch (task->tk_status) { 139 switch (task->tk_status) {
98 /* MDS state errors */ 140 /* MDS state errors */
99 case -NFS4ERR_DELEG_REVOKED: 141 case -NFS4ERR_DELEG_REVOKED:
100 case -NFS4ERR_ADMIN_REVOKED: 142 case -NFS4ERR_ADMIN_REVOKED:
101 case -NFS4ERR_BAD_STATEID: 143 case -NFS4ERR_BAD_STATEID:
144 if (state == NULL)
145 break;
102 nfs_remove_bad_delegation(state->inode); 146 nfs_remove_bad_delegation(state->inode);
103 case -NFS4ERR_OPENMODE: 147 case -NFS4ERR_OPENMODE:
148 if (state == NULL)
149 break;
104 nfs4_schedule_stateid_recovery(mds_server, state); 150 nfs4_schedule_stateid_recovery(mds_server, state);
105 goto wait_on_recovery; 151 goto wait_on_recovery;
106 case -NFS4ERR_EXPIRED: 152 case -NFS4ERR_EXPIRED:
107 nfs4_schedule_stateid_recovery(mds_server, state); 153 if (state != NULL)
154 nfs4_schedule_stateid_recovery(mds_server, state);
108 nfs4_schedule_lease_recovery(mds_client); 155 nfs4_schedule_lease_recovery(mds_client);
109 goto wait_on_recovery; 156 goto wait_on_recovery;
110 /* DS session errors */ 157 /* DS session errors */
@@ -118,7 +165,7 @@ static int filelayout_async_handle_error(struct rpc_task *task,
118 dprintk("%s ERROR %d, Reset session. Exchangeid " 165 dprintk("%s ERROR %d, Reset session. Exchangeid "
119 "flags 0x%x\n", __func__, task->tk_status, 166 "flags 0x%x\n", __func__, task->tk_status,
120 clp->cl_exchange_flags); 167 clp->cl_exchange_flags);
121 nfs4_schedule_session_recovery(clp->cl_session); 168 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
122 break; 169 break;
123 case -NFS4ERR_DELAY: 170 case -NFS4ERR_DELAY:
124 case -NFS4ERR_GRACE: 171 case -NFS4ERR_GRACE:
@@ -127,11 +174,48 @@ static int filelayout_async_handle_error(struct rpc_task *task,
127 break; 174 break;
128 case -NFS4ERR_RETRY_UNCACHED_REP: 175 case -NFS4ERR_RETRY_UNCACHED_REP:
129 break; 176 break;
177 /* Invalidate Layout errors */
178 case -NFS4ERR_PNFS_NO_LAYOUT:
179 case -ESTALE: /* mapped NFS4ERR_STALE */
180 case -EBADHANDLE: /* mapped NFS4ERR_BADHANDLE */
181 case -EISDIR: /* mapped NFS4ERR_ISDIR */
182 case -NFS4ERR_FHEXPIRED:
183 case -NFS4ERR_WRONG_TYPE:
184 dprintk("%s Invalid layout error %d\n", __func__,
185 task->tk_status);
186 /*
187 * Destroy layout so new i/o will get a new layout.
188 * Layout will not be destroyed until all current lseg
189 * references are put. Mark layout as invalid to resend failed
190 * i/o and all i/o waiting on the slot table to the MDS until
191 * layout is destroyed and a new valid layout is obtained.
192 */
193 set_bit(NFS_LAYOUT_INVALID,
194 &NFS_I(inode)->layout->plh_flags);
195 pnfs_destroy_layout(NFS_I(inode));
196 rpc_wake_up(&tbl->slot_tbl_waitq);
197 goto reset;
198 /* RPC connection errors */
199 case -ECONNREFUSED:
200 case -EHOSTDOWN:
201 case -EHOSTUNREACH:
202 case -ENETUNREACH:
203 case -EIO:
204 case -ETIMEDOUT:
205 case -EPIPE:
206 dprintk("%s DS connection error %d\n", __func__,
207 task->tk_status);
208 if (!filelayout_test_devid_invalid(devid))
209 _pnfs_return_layout(inode);
210 filelayout_mark_devid_invalid(devid);
211 rpc_wake_up(&tbl->slot_tbl_waitq);
212 nfs4_ds_disconnect(clp);
213 /* fall through */
130 default: 214 default:
131 dprintk("%s DS error. Retry through MDS %d\n", __func__, 215reset:
216 dprintk("%s Retry through MDS. Error %d\n", __func__,
132 task->tk_status); 217 task->tk_status);
133 *reset = 1; 218 return -NFS4ERR_RESET_TO_MDS;
134 break;
135 } 219 }
136out: 220out:
137 task->tk_status = 0; 221 task->tk_status = 0;
@@ -148,18 +232,17 @@ wait_on_recovery:
148static int filelayout_read_done_cb(struct rpc_task *task, 232static int filelayout_read_done_cb(struct rpc_task *task,
149 struct nfs_read_data *data) 233 struct nfs_read_data *data)
150{ 234{
151 int reset = 0; 235 struct nfs_pgio_header *hdr = data->header;
236 int err;
152 237
153 dprintk("%s DS read\n", __func__); 238 err = filelayout_async_handle_error(task, data->args.context->state,
239 data->ds_clp, hdr->lseg);
154 240
155 if (filelayout_async_handle_error(task, data->args.context->state, 241 switch (err) {
156 data->ds_clp, &reset) == -EAGAIN) { 242 case -NFS4ERR_RESET_TO_MDS:
157 dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n", 243 filelayout_reset_read(data);
158 __func__, data->ds_clp, data->ds_clp->cl_session); 244 return task->tk_status;
159 if (reset) { 245 case -EAGAIN:
160 pnfs_set_lo_fail(data->lseg);
161 nfs4_reset_read(task, data);
162 }
163 rpc_restart_call_prepare(task); 246 rpc_restart_call_prepare(task);
164 return -EAGAIN; 247 return -EAGAIN;
165 } 248 }
@@ -175,13 +258,15 @@ static int filelayout_read_done_cb(struct rpc_task *task,
175static void 258static void
176filelayout_set_layoutcommit(struct nfs_write_data *wdata) 259filelayout_set_layoutcommit(struct nfs_write_data *wdata)
177{ 260{
178 if (FILELAYOUT_LSEG(wdata->lseg)->commit_through_mds || 261 struct nfs_pgio_header *hdr = wdata->header;
262
263 if (FILELAYOUT_LSEG(hdr->lseg)->commit_through_mds ||
179 wdata->res.verf->committed == NFS_FILE_SYNC) 264 wdata->res.verf->committed == NFS_FILE_SYNC)
180 return; 265 return;
181 266
182 pnfs_set_layoutcommit(wdata); 267 pnfs_set_layoutcommit(wdata);
183 dprintk("%s ionde %lu pls_end_pos %lu\n", __func__, wdata->inode->i_ino, 268 dprintk("%s ionde %lu pls_end_pos %lu\n", __func__, hdr->inode->i_ino,
184 (unsigned long) NFS_I(wdata->inode)->layout->plh_lwb); 269 (unsigned long) NFS_I(hdr->inode)->layout->plh_lwb);
185} 270}
186 271
187/* 272/*
@@ -191,8 +276,14 @@ filelayout_set_layoutcommit(struct nfs_write_data *wdata)
191 */ 276 */
192static void filelayout_read_prepare(struct rpc_task *task, void *data) 277static void filelayout_read_prepare(struct rpc_task *task, void *data)
193{ 278{
194 struct nfs_read_data *rdata = (struct nfs_read_data *)data; 279 struct nfs_read_data *rdata = data;
195 280
281 if (filelayout_reset_to_mds(rdata->header->lseg)) {
282 dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
283 filelayout_reset_read(rdata);
284 rpc_exit(task, 0);
285 return;
286 }
196 rdata->read_done_cb = filelayout_read_done_cb; 287 rdata->read_done_cb = filelayout_read_done_cb;
197 288
198 if (nfs41_setup_sequence(rdata->ds_clp->cl_session, 289 if (nfs41_setup_sequence(rdata->ds_clp->cl_session,
@@ -205,42 +296,47 @@ static void filelayout_read_prepare(struct rpc_task *task, void *data)
205 296
206static void filelayout_read_call_done(struct rpc_task *task, void *data) 297static void filelayout_read_call_done(struct rpc_task *task, void *data)
207{ 298{
208 struct nfs_read_data *rdata = (struct nfs_read_data *)data; 299 struct nfs_read_data *rdata = data;
209 300
210 dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status); 301 dprintk("--> %s task->tk_status %d\n", __func__, task->tk_status);
211 302
303 if (test_bit(NFS_IOHDR_REDO, &rdata->header->flags) &&
304 task->tk_status == 0)
305 return;
306
212 /* Note this may cause RPC to be resent */ 307 /* Note this may cause RPC to be resent */
213 rdata->mds_ops->rpc_call_done(task, data); 308 rdata->header->mds_ops->rpc_call_done(task, data);
214} 309}
215 310
216static void filelayout_read_count_stats(struct rpc_task *task, void *data) 311static void filelayout_read_count_stats(struct rpc_task *task, void *data)
217{ 312{
218 struct nfs_read_data *rdata = (struct nfs_read_data *)data; 313 struct nfs_read_data *rdata = data;
219 314
220 rpc_count_iostats(task, NFS_SERVER(rdata->inode)->client->cl_metrics); 315 rpc_count_iostats(task, NFS_SERVER(rdata->header->inode)->client->cl_metrics);
221} 316}
222 317
223static void filelayout_read_release(void *data) 318static void filelayout_read_release(void *data)
224{ 319{
225 struct nfs_read_data *rdata = (struct nfs_read_data *)data; 320 struct nfs_read_data *rdata = data;
226 321
227 put_lseg(rdata->lseg); 322 nfs_put_client(rdata->ds_clp);
228 rdata->mds_ops->rpc_release(data); 323 rdata->header->mds_ops->rpc_release(data);
229} 324}
230 325
231static int filelayout_write_done_cb(struct rpc_task *task, 326static int filelayout_write_done_cb(struct rpc_task *task,
232 struct nfs_write_data *data) 327 struct nfs_write_data *data)
233{ 328{
234 int reset = 0; 329 struct nfs_pgio_header *hdr = data->header;
235 330 int err;
236 if (filelayout_async_handle_error(task, data->args.context->state, 331
237 data->ds_clp, &reset) == -EAGAIN) { 332 err = filelayout_async_handle_error(task, data->args.context->state,
238 dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n", 333 data->ds_clp, hdr->lseg);
239 __func__, data->ds_clp, data->ds_clp->cl_session); 334
240 if (reset) { 335 switch (err) {
241 pnfs_set_lo_fail(data->lseg); 336 case -NFS4ERR_RESET_TO_MDS:
242 nfs4_reset_write(task, data); 337 filelayout_reset_write(data);
243 } 338 return task->tk_status;
339 case -EAGAIN:
244 rpc_restart_call_prepare(task); 340 rpc_restart_call_prepare(task);
245 return -EAGAIN; 341 return -EAGAIN;
246 } 342 }
@@ -250,7 +346,7 @@ static int filelayout_write_done_cb(struct rpc_task *task,
250} 346}
251 347
252/* Fake up some data that will cause nfs_commit_release to retry the writes. */ 348/* Fake up some data that will cause nfs_commit_release to retry the writes. */
253static void prepare_to_resend_writes(struct nfs_write_data *data) 349static void prepare_to_resend_writes(struct nfs_commit_data *data)
254{ 350{
255 struct nfs_page *first = nfs_list_entry(data->pages.next); 351 struct nfs_page *first = nfs_list_entry(data->pages.next);
256 352
@@ -261,19 +357,19 @@ static void prepare_to_resend_writes(struct nfs_write_data *data)
261} 357}
262 358
263static int filelayout_commit_done_cb(struct rpc_task *task, 359static int filelayout_commit_done_cb(struct rpc_task *task,
264 struct nfs_write_data *data) 360 struct nfs_commit_data *data)
265{ 361{
266 int reset = 0; 362 int err;
267 363
268 if (filelayout_async_handle_error(task, data->args.context->state, 364 err = filelayout_async_handle_error(task, NULL, data->ds_clp,
269 data->ds_clp, &reset) == -EAGAIN) { 365 data->lseg);
270 dprintk("%s calling restart ds_clp %p ds_clp->cl_session %p\n", 366
271 __func__, data->ds_clp, data->ds_clp->cl_session); 367 switch (err) {
272 if (reset) { 368 case -NFS4ERR_RESET_TO_MDS:
273 prepare_to_resend_writes(data); 369 prepare_to_resend_writes(data);
274 pnfs_set_lo_fail(data->lseg); 370 return -EAGAIN;
275 } else 371 case -EAGAIN:
276 rpc_restart_call_prepare(task); 372 rpc_restart_call_prepare(task);
277 return -EAGAIN; 373 return -EAGAIN;
278 } 374 }
279 375
@@ -282,8 +378,14 @@ static int filelayout_commit_done_cb(struct rpc_task *task,
282 378
283static void filelayout_write_prepare(struct rpc_task *task, void *data) 379static void filelayout_write_prepare(struct rpc_task *task, void *data)
284{ 380{
285 struct nfs_write_data *wdata = (struct nfs_write_data *)data; 381 struct nfs_write_data *wdata = data;
286 382
383 if (filelayout_reset_to_mds(wdata->header->lseg)) {
384 dprintk("%s task %u reset io to MDS\n", __func__, task->tk_pid);
385 filelayout_reset_write(wdata);
386 rpc_exit(task, 0);
387 return;
388 }
287 if (nfs41_setup_sequence(wdata->ds_clp->cl_session, 389 if (nfs41_setup_sequence(wdata->ds_clp->cl_session,
288 &wdata->args.seq_args, &wdata->res.seq_res, 390 &wdata->args.seq_args, &wdata->res.seq_res,
289 task)) 391 task))
@@ -294,36 +396,66 @@ static void filelayout_write_prepare(struct rpc_task *task, void *data)
294 396
295static void filelayout_write_call_done(struct rpc_task *task, void *data) 397static void filelayout_write_call_done(struct rpc_task *task, void *data)
296{ 398{
297 struct nfs_write_data *wdata = (struct nfs_write_data *)data; 399 struct nfs_write_data *wdata = data;
400
401 if (test_bit(NFS_IOHDR_REDO, &wdata->header->flags) &&
402 task->tk_status == 0)
403 return;
298 404
299 /* Note this may cause RPC to be resent */ 405 /* Note this may cause RPC to be resent */
300 wdata->mds_ops->rpc_call_done(task, data); 406 wdata->header->mds_ops->rpc_call_done(task, data);
301} 407}
302 408
303static void filelayout_write_count_stats(struct rpc_task *task, void *data) 409static void filelayout_write_count_stats(struct rpc_task *task, void *data)
304{ 410{
305 struct nfs_write_data *wdata = (struct nfs_write_data *)data; 411 struct nfs_write_data *wdata = data;
306 412
307 rpc_count_iostats(task, NFS_SERVER(wdata->inode)->client->cl_metrics); 413 rpc_count_iostats(task, NFS_SERVER(wdata->header->inode)->client->cl_metrics);
308} 414}
309 415
310static void filelayout_write_release(void *data) 416static void filelayout_write_release(void *data)
311{ 417{
312 struct nfs_write_data *wdata = (struct nfs_write_data *)data; 418 struct nfs_write_data *wdata = data;
419
420 nfs_put_client(wdata->ds_clp);
421 wdata->header->mds_ops->rpc_release(data);
422}
423
424static void filelayout_commit_prepare(struct rpc_task *task, void *data)
425{
426 struct nfs_commit_data *wdata = data;
313 427
314 put_lseg(wdata->lseg); 428 if (nfs41_setup_sequence(wdata->ds_clp->cl_session,
315 wdata->mds_ops->rpc_release(data); 429 &wdata->args.seq_args, &wdata->res.seq_res,
430 task))
431 return;
432
433 rpc_call_start(task);
434}
435
436static void filelayout_write_commit_done(struct rpc_task *task, void *data)
437{
438 struct nfs_commit_data *wdata = data;
439
440 /* Note this may cause RPC to be resent */
441 wdata->mds_ops->rpc_call_done(task, data);
442}
443
444static void filelayout_commit_count_stats(struct rpc_task *task, void *data)
445{
446 struct nfs_commit_data *cdata = data;
447
448 rpc_count_iostats(task, NFS_SERVER(cdata->inode)->client->cl_metrics);
316} 449}
317 450
318static void filelayout_commit_release(void *data) 451static void filelayout_commit_release(void *calldata)
319{ 452{
320 struct nfs_write_data *wdata = (struct nfs_write_data *)data; 453 struct nfs_commit_data *data = calldata;
321 454
322 nfs_commit_release_pages(wdata); 455 data->completion_ops->completion(data);
323 if (atomic_dec_and_test(&NFS_I(wdata->inode)->commits_outstanding)) 456 put_lseg(data->lseg);
324 nfs_commit_clear_lock(NFS_I(wdata->inode)); 457 nfs_put_client(data->ds_clp);
325 put_lseg(wdata->lseg); 458 nfs_commitdata_release(data);
326 nfs_commitdata_release(wdata);
327} 459}
328 460
329static const struct rpc_call_ops filelayout_read_call_ops = { 461static const struct rpc_call_ops filelayout_read_call_ops = {
@@ -341,16 +473,17 @@ static const struct rpc_call_ops filelayout_write_call_ops = {
341}; 473};
342 474
343static const struct rpc_call_ops filelayout_commit_call_ops = { 475static const struct rpc_call_ops filelayout_commit_call_ops = {
344 .rpc_call_prepare = filelayout_write_prepare, 476 .rpc_call_prepare = filelayout_commit_prepare,
345 .rpc_call_done = filelayout_write_call_done, 477 .rpc_call_done = filelayout_write_commit_done,
346 .rpc_count_stats = filelayout_write_count_stats, 478 .rpc_count_stats = filelayout_commit_count_stats,
347 .rpc_release = filelayout_commit_release, 479 .rpc_release = filelayout_commit_release,
348}; 480};
349 481
350static enum pnfs_try_status 482static enum pnfs_try_status
351filelayout_read_pagelist(struct nfs_read_data *data) 483filelayout_read_pagelist(struct nfs_read_data *data)
352{ 484{
353 struct pnfs_layout_segment *lseg = data->lseg; 485 struct nfs_pgio_header *hdr = data->header;
486 struct pnfs_layout_segment *lseg = hdr->lseg;
354 struct nfs4_pnfs_ds *ds; 487 struct nfs4_pnfs_ds *ds;
355 loff_t offset = data->args.offset; 488 loff_t offset = data->args.offset;
356 u32 j, idx; 489 u32 j, idx;
@@ -358,25 +491,20 @@ filelayout_read_pagelist(struct nfs_read_data *data)
358 int status; 491 int status;
359 492
360 dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n", 493 dprintk("--> %s ino %lu pgbase %u req %Zu@%llu\n",
361 __func__, data->inode->i_ino, 494 __func__, hdr->inode->i_ino,
362 data->args.pgbase, (size_t)data->args.count, offset); 495 data->args.pgbase, (size_t)data->args.count, offset);
363 496
364 if (test_bit(NFS_DEVICEID_INVALID, &FILELAYOUT_DEVID_NODE(lseg)->flags))
365 return PNFS_NOT_ATTEMPTED;
366
367 /* Retrieve the correct rpc_client for the byte range */ 497 /* Retrieve the correct rpc_client for the byte range */
368 j = nfs4_fl_calc_j_index(lseg, offset); 498 j = nfs4_fl_calc_j_index(lseg, offset);
369 idx = nfs4_fl_calc_ds_index(lseg, j); 499 idx = nfs4_fl_calc_ds_index(lseg, j);
370 ds = nfs4_fl_prepare_ds(lseg, idx); 500 ds = nfs4_fl_prepare_ds(lseg, idx);
371 if (!ds) { 501 if (!ds)
372 /* Either layout fh index faulty, or ds connect failed */
373 set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
374 set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
375 return PNFS_NOT_ATTEMPTED; 502 return PNFS_NOT_ATTEMPTED;
376 } 503 dprintk("%s USE DS: %s cl_count %d\n", __func__,
377 dprintk("%s USE DS: %s\n", __func__, ds->ds_remotestr); 504 ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count));
378 505
379 /* No multipath support. Use first DS */ 506 /* No multipath support. Use first DS */
507 atomic_inc(&ds->ds_clp->cl_count);
380 data->ds_clp = ds->ds_clp; 508 data->ds_clp = ds->ds_clp;
381 fh = nfs4_fl_select_ds_fh(lseg, j); 509 fh = nfs4_fl_select_ds_fh(lseg, j);
382 if (fh) 510 if (fh)
@@ -386,8 +514,8 @@ filelayout_read_pagelist(struct nfs_read_data *data)
386 data->mds_offset = offset; 514 data->mds_offset = offset;
387 515
388 /* Perform an asynchronous read to ds */ 516 /* Perform an asynchronous read to ds */
389 status = nfs_initiate_read(data, ds->ds_clp->cl_rpcclient, 517 status = nfs_initiate_read(ds->ds_clp->cl_rpcclient, data,
390 &filelayout_read_call_ops); 518 &filelayout_read_call_ops, RPC_TASK_SOFTCONN);
391 BUG_ON(status != 0); 519 BUG_ON(status != 0);
392 return PNFS_ATTEMPTED; 520 return PNFS_ATTEMPTED;
393} 521}
@@ -396,32 +524,26 @@ filelayout_read_pagelist(struct nfs_read_data *data)
396static enum pnfs_try_status 524static enum pnfs_try_status
397filelayout_write_pagelist(struct nfs_write_data *data, int sync) 525filelayout_write_pagelist(struct nfs_write_data *data, int sync)
398{ 526{
399 struct pnfs_layout_segment *lseg = data->lseg; 527 struct nfs_pgio_header *hdr = data->header;
528 struct pnfs_layout_segment *lseg = hdr->lseg;
400 struct nfs4_pnfs_ds *ds; 529 struct nfs4_pnfs_ds *ds;
401 loff_t offset = data->args.offset; 530 loff_t offset = data->args.offset;
402 u32 j, idx; 531 u32 j, idx;
403 struct nfs_fh *fh; 532 struct nfs_fh *fh;
404 int status; 533 int status;
405 534
406 if (test_bit(NFS_DEVICEID_INVALID, &FILELAYOUT_DEVID_NODE(lseg)->flags))
407 return PNFS_NOT_ATTEMPTED;
408
409 /* Retrieve the correct rpc_client for the byte range */ 535 /* Retrieve the correct rpc_client for the byte range */
410 j = nfs4_fl_calc_j_index(lseg, offset); 536 j = nfs4_fl_calc_j_index(lseg, offset);
411 idx = nfs4_fl_calc_ds_index(lseg, j); 537 idx = nfs4_fl_calc_ds_index(lseg, j);
412 ds = nfs4_fl_prepare_ds(lseg, idx); 538 ds = nfs4_fl_prepare_ds(lseg, idx);
413 if (!ds) { 539 if (!ds)
414 printk(KERN_ERR "NFS: %s: prepare_ds failed, use MDS\n",
415 __func__);
416 set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
417 set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
418 return PNFS_NOT_ATTEMPTED; 540 return PNFS_NOT_ATTEMPTED;
419 } 541 dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s cl_count %d\n",
420 dprintk("%s ino %lu sync %d req %Zu@%llu DS: %s\n", __func__, 542 __func__, hdr->inode->i_ino, sync, (size_t) data->args.count,
421 data->inode->i_ino, sync, (size_t) data->args.count, offset, 543 offset, ds->ds_remotestr, atomic_read(&ds->ds_clp->cl_count));
422 ds->ds_remotestr);
423 544
424 data->write_done_cb = filelayout_write_done_cb; 545 data->write_done_cb = filelayout_write_done_cb;
546 atomic_inc(&ds->ds_clp->cl_count);
425 data->ds_clp = ds->ds_clp; 547 data->ds_clp = ds->ds_clp;
426 fh = nfs4_fl_select_ds_fh(lseg, j); 548 fh = nfs4_fl_select_ds_fh(lseg, j);
427 if (fh) 549 if (fh)
@@ -433,8 +555,9 @@ filelayout_write_pagelist(struct nfs_write_data *data, int sync)
433 data->args.offset = filelayout_get_dserver_offset(lseg, offset); 555 data->args.offset = filelayout_get_dserver_offset(lseg, offset);
434 556
435 /* Perform an asynchronous write */ 557 /* Perform an asynchronous write */
436 status = nfs_initiate_write(data, ds->ds_clp->cl_rpcclient, 558 status = nfs_initiate_write(ds->ds_clp->cl_rpcclient, data,
437 &filelayout_write_call_ops, sync); 559 &filelayout_write_call_ops, sync,
560 RPC_TASK_SOFTCONN);
438 BUG_ON(status != 0); 561 BUG_ON(status != 0);
439 return PNFS_ATTEMPTED; 562 return PNFS_ATTEMPTED;
440} 563}
@@ -650,10 +773,65 @@ filelayout_free_lseg(struct pnfs_layout_segment *lseg)
650 773
651 dprintk("--> %s\n", __func__); 774 dprintk("--> %s\n", __func__);
652 nfs4_fl_put_deviceid(fl->dsaddr); 775 nfs4_fl_put_deviceid(fl->dsaddr);
653 kfree(fl->commit_buckets); 776 /* This assumes a single RW lseg */
777 if (lseg->pls_range.iomode == IOMODE_RW) {
778 struct nfs4_filelayout *flo;
779
780 flo = FILELAYOUT_FROM_HDR(lseg->pls_layout);
781 flo->commit_info.nbuckets = 0;
782 kfree(flo->commit_info.buckets);
783 flo->commit_info.buckets = NULL;
784 }
654 _filelayout_free_lseg(fl); 785 _filelayout_free_lseg(fl);
655} 786}
656 787
788static int
789filelayout_alloc_commit_info(struct pnfs_layout_segment *lseg,
790 struct nfs_commit_info *cinfo,
791 gfp_t gfp_flags)
792{
793 struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
794 struct pnfs_commit_bucket *buckets;
795 int size;
796
797 if (fl->commit_through_mds)
798 return 0;
799 if (cinfo->ds->nbuckets != 0) {
800 /* This assumes there is only one IOMODE_RW lseg. What
801 * we really want to do is have a layout_hdr level
802 * dictionary of <multipath_list4, fh> keys, each
803 * associated with a struct list_head, populated by calls
804 * to filelayout_write_pagelist().
805 * */
806 return 0;
807 }
808
809 size = (fl->stripe_type == STRIPE_SPARSE) ?
810 fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
811
812 buckets = kcalloc(size, sizeof(struct pnfs_commit_bucket),
813 gfp_flags);
814 if (!buckets)
815 return -ENOMEM;
816 else {
817 int i;
818
819 spin_lock(cinfo->lock);
820 if (cinfo->ds->nbuckets != 0)
821 kfree(buckets);
822 else {
823 cinfo->ds->buckets = buckets;
824 cinfo->ds->nbuckets = size;
825 for (i = 0; i < size; i++) {
826 INIT_LIST_HEAD(&buckets[i].written);
827 INIT_LIST_HEAD(&buckets[i].committing);
828 }
829 }
830 spin_unlock(cinfo->lock);
831 return 0;
832 }
833}
834
657static struct pnfs_layout_segment * 835static struct pnfs_layout_segment *
658filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid, 836filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
659 struct nfs4_layoutget_res *lgr, 837 struct nfs4_layoutget_res *lgr,
@@ -673,29 +851,6 @@ filelayout_alloc_lseg(struct pnfs_layout_hdr *layoutid,
673 _filelayout_free_lseg(fl); 851 _filelayout_free_lseg(fl);
674 return NULL; 852 return NULL;
675 } 853 }
676
677 /* This assumes there is only one IOMODE_RW lseg. What
678 * we really want to do is have a layout_hdr level
679 * dictionary of <multipath_list4, fh> keys, each
680 * associated with a struct list_head, populated by calls
681 * to filelayout_write_pagelist().
682 * */
683 if ((!fl->commit_through_mds) && (lgr->range.iomode == IOMODE_RW)) {
684 int i;
685 int size = (fl->stripe_type == STRIPE_SPARSE) ?
686 fl->dsaddr->ds_num : fl->dsaddr->stripe_count;
687
688 fl->commit_buckets = kcalloc(size, sizeof(struct nfs4_fl_commit_bucket), gfp_flags);
689 if (!fl->commit_buckets) {
690 filelayout_free_lseg(&fl->generic_hdr);
691 return NULL;
692 }
693 fl->number_of_buckets = size;
694 for (i = 0; i < size; i++) {
695 INIT_LIST_HEAD(&fl->commit_buckets[i].written);
696 INIT_LIST_HEAD(&fl->commit_buckets[i].committing);
697 }
698 }
699 return &fl->generic_hdr; 854 return &fl->generic_hdr;
700} 855}
701 856
@@ -716,8 +871,8 @@ filelayout_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
716 !nfs_generic_pg_test(pgio, prev, req)) 871 !nfs_generic_pg_test(pgio, prev, req))
717 return false; 872 return false;
718 873
719 p_stripe = (u64)prev->wb_index << PAGE_CACHE_SHIFT; 874 p_stripe = (u64)req_offset(prev);
720 r_stripe = (u64)req->wb_index << PAGE_CACHE_SHIFT; 875 r_stripe = (u64)req_offset(req);
721 stripe_unit = FILELAYOUT_LSEG(pgio->pg_lseg)->stripe_unit; 876 stripe_unit = FILELAYOUT_LSEG(pgio->pg_lseg)->stripe_unit;
722 877
723 do_div(p_stripe, stripe_unit); 878 do_div(p_stripe, stripe_unit);
@@ -732,6 +887,16 @@ filelayout_pg_init_read(struct nfs_pageio_descriptor *pgio,
732{ 887{
733 BUG_ON(pgio->pg_lseg != NULL); 888 BUG_ON(pgio->pg_lseg != NULL);
734 889
890 if (req->wb_offset != req->wb_pgbase) {
891 /*
892 * Handling unaligned pages is difficult, because have to
893 * somehow split a req in two in certain cases in the
894 * pg.test code. Avoid this by just not using pnfs
895 * in this case.
896 */
897 nfs_pageio_reset_read_mds(pgio);
898 return;
899 }
735 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 900 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
736 req->wb_context, 901 req->wb_context,
737 0, 902 0,
@@ -747,8 +912,13 @@ static void
747filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio, 912filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
748 struct nfs_page *req) 913 struct nfs_page *req)
749{ 914{
915 struct nfs_commit_info cinfo;
916 int status;
917
750 BUG_ON(pgio->pg_lseg != NULL); 918 BUG_ON(pgio->pg_lseg != NULL);
751 919
920 if (req->wb_offset != req->wb_pgbase)
921 goto out_mds;
752 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 922 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
753 req->wb_context, 923 req->wb_context,
754 0, 924 0,
@@ -757,7 +927,17 @@ filelayout_pg_init_write(struct nfs_pageio_descriptor *pgio,
757 GFP_NOFS); 927 GFP_NOFS);
758 /* If no lseg, fall back to write through mds */ 928 /* If no lseg, fall back to write through mds */
759 if (pgio->pg_lseg == NULL) 929 if (pgio->pg_lseg == NULL)
760 nfs_pageio_reset_write_mds(pgio); 930 goto out_mds;
931 nfs_init_cinfo(&cinfo, pgio->pg_inode, pgio->pg_dreq);
932 status = filelayout_alloc_commit_info(pgio->pg_lseg, &cinfo, GFP_NOFS);
933 if (status < 0) {
934 put_lseg(pgio->pg_lseg);
935 pgio->pg_lseg = NULL;
936 goto out_mds;
937 }
938 return;
939out_mds:
940 nfs_pageio_reset_write_mds(pgio);
761} 941}
762 942
763static const struct nfs_pageio_ops filelayout_pg_read_ops = { 943static const struct nfs_pageio_ops filelayout_pg_read_ops = {
@@ -784,43 +964,42 @@ static u32 select_bucket_index(struct nfs4_filelayout_segment *fl, u32 j)
784 * If this will make the bucket empty, it will need to put the lseg reference. 964 * If this will make the bucket empty, it will need to put the lseg reference.
785 */ 965 */
786static void 966static void
787filelayout_clear_request_commit(struct nfs_page *req) 967filelayout_clear_request_commit(struct nfs_page *req,
968 struct nfs_commit_info *cinfo)
788{ 969{
789 struct pnfs_layout_segment *freeme = NULL; 970 struct pnfs_layout_segment *freeme = NULL;
790 struct inode *inode = req->wb_context->dentry->d_inode;
791 971
792 spin_lock(&inode->i_lock); 972 spin_lock(cinfo->lock);
793 if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags)) 973 if (!test_and_clear_bit(PG_COMMIT_TO_DS, &req->wb_flags))
794 goto out; 974 goto out;
975 cinfo->ds->nwritten--;
795 if (list_is_singular(&req->wb_list)) { 976 if (list_is_singular(&req->wb_list)) {
796 struct pnfs_layout_segment *lseg; 977 struct pnfs_commit_bucket *bucket;
797 978
798 /* From here we can find the bucket, but for the moment, 979 bucket = list_first_entry(&req->wb_list,
799 * since there is only one relevant lseg... 980 struct pnfs_commit_bucket,
800 */ 981 written);
801 list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) { 982 freeme = bucket->wlseg;
802 if (lseg->pls_range.iomode == IOMODE_RW) { 983 bucket->wlseg = NULL;
803 freeme = lseg;
804 break;
805 }
806 }
807 } 984 }
808out: 985out:
809 nfs_request_remove_commit_list(req); 986 nfs_request_remove_commit_list(req, cinfo);
810 spin_unlock(&inode->i_lock); 987 spin_unlock(cinfo->lock);
811 put_lseg(freeme); 988 put_lseg(freeme);
812} 989}
813 990
814static struct list_head * 991static struct list_head *
815filelayout_choose_commit_list(struct nfs_page *req, 992filelayout_choose_commit_list(struct nfs_page *req,
816 struct pnfs_layout_segment *lseg) 993 struct pnfs_layout_segment *lseg,
994 struct nfs_commit_info *cinfo)
817{ 995{
818 struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg); 996 struct nfs4_filelayout_segment *fl = FILELAYOUT_LSEG(lseg);
819 u32 i, j; 997 u32 i, j;
820 struct list_head *list; 998 struct list_head *list;
999 struct pnfs_commit_bucket *buckets;
821 1000
822 if (fl->commit_through_mds) 1001 if (fl->commit_through_mds)
823 return &NFS_I(req->wb_context->dentry->d_inode)->commit_list; 1002 return &cinfo->mds->list;
824 1003
825 /* Note that we are calling nfs4_fl_calc_j_index on each page 1004 /* Note that we are calling nfs4_fl_calc_j_index on each page
826 * that ends up being committed to a data server. An attractive 1005 * that ends up being committed to a data server. An attractive
@@ -828,31 +1007,33 @@ filelayout_choose_commit_list(struct nfs_page *req,
828 * to store the value calculated in filelayout_write_pagelist 1007 * to store the value calculated in filelayout_write_pagelist
829 * and just use that here. 1008 * and just use that here.
830 */ 1009 */
831 j = nfs4_fl_calc_j_index(lseg, 1010 j = nfs4_fl_calc_j_index(lseg, req_offset(req));
832 (loff_t)req->wb_index << PAGE_CACHE_SHIFT);
833 i = select_bucket_index(fl, j); 1011 i = select_bucket_index(fl, j);
834 list = &fl->commit_buckets[i].written; 1012 buckets = cinfo->ds->buckets;
1013 list = &buckets[i].written;
835 if (list_empty(list)) { 1014 if (list_empty(list)) {
836 /* Non-empty buckets hold a reference on the lseg. That ref 1015 /* Non-empty buckets hold a reference on the lseg. That ref
837 * is normally transferred to the COMMIT call and released 1016 * is normally transferred to the COMMIT call and released
838 * there. It could also be released if the last req is pulled 1017 * there. It could also be released if the last req is pulled
839 * off due to a rewrite, in which case it will be done in 1018 * off due to a rewrite, in which case it will be done in
840 * filelayout_remove_commit_req 1019 * filelayout_clear_request_commit
841 */ 1020 */
842 get_lseg(lseg); 1021 buckets[i].wlseg = get_lseg(lseg);
843 } 1022 }
844 set_bit(PG_COMMIT_TO_DS, &req->wb_flags); 1023 set_bit(PG_COMMIT_TO_DS, &req->wb_flags);
1024 cinfo->ds->nwritten++;
845 return list; 1025 return list;
846} 1026}
847 1027
848static void 1028static void
849filelayout_mark_request_commit(struct nfs_page *req, 1029filelayout_mark_request_commit(struct nfs_page *req,
850 struct pnfs_layout_segment *lseg) 1030 struct pnfs_layout_segment *lseg,
1031 struct nfs_commit_info *cinfo)
851{ 1032{
852 struct list_head *list; 1033 struct list_head *list;
853 1034
854 list = filelayout_choose_commit_list(req, lseg); 1035 list = filelayout_choose_commit_list(req, lseg, cinfo);
855 nfs_request_add_commit_list(req, list); 1036 nfs_request_add_commit_list(req, list, cinfo);
856} 1037}
857 1038
858static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i) 1039static u32 calc_ds_index_from_commit(struct pnfs_layout_segment *lseg, u32 i)
@@ -880,7 +1061,7 @@ select_ds_fh_from_commit(struct pnfs_layout_segment *lseg, u32 i)
880 return flseg->fh_array[i]; 1061 return flseg->fh_array[i];
881} 1062}
882 1063
883static int filelayout_initiate_commit(struct nfs_write_data *data, int how) 1064static int filelayout_initiate_commit(struct nfs_commit_data *data, int how)
884{ 1065{
885 struct pnfs_layout_segment *lseg = data->lseg; 1066 struct pnfs_layout_segment *lseg = data->lseg;
886 struct nfs4_pnfs_ds *ds; 1067 struct nfs4_pnfs_ds *ds;
@@ -890,135 +1071,138 @@ static int filelayout_initiate_commit(struct nfs_write_data *data, int how)
890 idx = calc_ds_index_from_commit(lseg, data->ds_commit_index); 1071 idx = calc_ds_index_from_commit(lseg, data->ds_commit_index);
891 ds = nfs4_fl_prepare_ds(lseg, idx); 1072 ds = nfs4_fl_prepare_ds(lseg, idx);
892 if (!ds) { 1073 if (!ds) {
893 printk(KERN_ERR "NFS: %s: prepare_ds failed, use MDS\n",
894 __func__);
895 set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
896 set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
897 prepare_to_resend_writes(data); 1074 prepare_to_resend_writes(data);
898 filelayout_commit_release(data); 1075 filelayout_commit_release(data);
899 return -EAGAIN; 1076 return -EAGAIN;
900 } 1077 }
901 dprintk("%s ino %lu, how %d\n", __func__, data->inode->i_ino, how); 1078 dprintk("%s ino %lu, how %d cl_count %d\n", __func__,
902 data->write_done_cb = filelayout_commit_done_cb; 1079 data->inode->i_ino, how, atomic_read(&ds->ds_clp->cl_count));
1080 data->commit_done_cb = filelayout_commit_done_cb;
1081 atomic_inc(&ds->ds_clp->cl_count);
903 data->ds_clp = ds->ds_clp; 1082 data->ds_clp = ds->ds_clp;
904 fh = select_ds_fh_from_commit(lseg, data->ds_commit_index); 1083 fh = select_ds_fh_from_commit(lseg, data->ds_commit_index);
905 if (fh) 1084 if (fh)
906 data->args.fh = fh; 1085 data->args.fh = fh;
907 return nfs_initiate_commit(data, ds->ds_clp->cl_rpcclient, 1086 return nfs_initiate_commit(ds->ds_clp->cl_rpcclient, data,
908 &filelayout_commit_call_ops, how); 1087 &filelayout_commit_call_ops, how,
909} 1088 RPC_TASK_SOFTCONN);
910
911/*
912 * This is only useful while we are using whole file layouts.
913 */
914static struct pnfs_layout_segment *
915find_only_write_lseg_locked(struct inode *inode)
916{
917 struct pnfs_layout_segment *lseg;
918
919 list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list)
920 if (lseg->pls_range.iomode == IOMODE_RW)
921 return lseg;
922 return NULL;
923}
924
925static struct pnfs_layout_segment *find_only_write_lseg(struct inode *inode)
926{
927 struct pnfs_layout_segment *rv;
928
929 spin_lock(&inode->i_lock);
930 rv = find_only_write_lseg_locked(inode);
931 if (rv)
932 get_lseg(rv);
933 spin_unlock(&inode->i_lock);
934 return rv;
935} 1089}
936 1090
937static int 1091static int
938filelayout_scan_ds_commit_list(struct nfs4_fl_commit_bucket *bucket, int max, 1092transfer_commit_list(struct list_head *src, struct list_head *dst,
939 spinlock_t *lock) 1093 struct nfs_commit_info *cinfo, int max)
940{ 1094{
941 struct list_head *src = &bucket->written;
942 struct list_head *dst = &bucket->committing;
943 struct nfs_page *req, *tmp; 1095 struct nfs_page *req, *tmp;
944 int ret = 0; 1096 int ret = 0;
945 1097
946 list_for_each_entry_safe(req, tmp, src, wb_list) { 1098 list_for_each_entry_safe(req, tmp, src, wb_list) {
947 if (!nfs_lock_request(req)) 1099 if (!nfs_lock_request(req))
948 continue; 1100 continue;
949 if (cond_resched_lock(lock)) 1101 kref_get(&req->wb_kref);
1102 if (cond_resched_lock(cinfo->lock))
950 list_safe_reset_next(req, tmp, wb_list); 1103 list_safe_reset_next(req, tmp, wb_list);
951 nfs_request_remove_commit_list(req); 1104 nfs_request_remove_commit_list(req, cinfo);
952 clear_bit(PG_COMMIT_TO_DS, &req->wb_flags); 1105 clear_bit(PG_COMMIT_TO_DS, &req->wb_flags);
953 nfs_list_add_request(req, dst); 1106 nfs_list_add_request(req, dst);
954 ret++; 1107 ret++;
955 if (ret == max) 1108 if ((ret == max) && !cinfo->dreq)
956 break; 1109 break;
957 } 1110 }
958 return ret; 1111 return ret;
959} 1112}
960 1113
1114static int
1115filelayout_scan_ds_commit_list(struct pnfs_commit_bucket *bucket,
1116 struct nfs_commit_info *cinfo,
1117 int max)
1118{
1119 struct list_head *src = &bucket->written;
1120 struct list_head *dst = &bucket->committing;
1121 int ret;
1122
1123 ret = transfer_commit_list(src, dst, cinfo, max);
1124 if (ret) {
1125 cinfo->ds->nwritten -= ret;
1126 cinfo->ds->ncommitting += ret;
1127 bucket->clseg = bucket->wlseg;
1128 if (list_empty(src))
1129 bucket->wlseg = NULL;
1130 else
1131 get_lseg(bucket->clseg);
1132 }
1133 return ret;
1134}
1135
961/* Move reqs from written to committing lists, returning count of number moved. 1136/* Move reqs from written to committing lists, returning count of number moved.
962 * Note called with i_lock held. 1137 * Note called with cinfo->lock held.
963 */ 1138 */
964static int filelayout_scan_commit_lists(struct inode *inode, int max, 1139static int filelayout_scan_commit_lists(struct nfs_commit_info *cinfo,
965 spinlock_t *lock) 1140 int max)
966{ 1141{
967 struct pnfs_layout_segment *lseg;
968 struct nfs4_filelayout_segment *fl;
969 int i, rv = 0, cnt; 1142 int i, rv = 0, cnt;
970 1143
971 lseg = find_only_write_lseg_locked(inode); 1144 for (i = 0; i < cinfo->ds->nbuckets && max != 0; i++) {
972 if (!lseg) 1145 cnt = filelayout_scan_ds_commit_list(&cinfo->ds->buckets[i],
973 goto out_done; 1146 cinfo, max);
974 fl = FILELAYOUT_LSEG(lseg);
975 if (fl->commit_through_mds)
976 goto out_done;
977 for (i = 0; i < fl->number_of_buckets && max != 0; i++) {
978 cnt = filelayout_scan_ds_commit_list(&fl->commit_buckets[i],
979 max, lock);
980 max -= cnt; 1147 max -= cnt;
981 rv += cnt; 1148 rv += cnt;
982 } 1149 }
983out_done:
984 return rv; 1150 return rv;
985} 1151}
986 1152
1153/* Pull everything off the committing lists and dump into @dst */
1154static void filelayout_recover_commit_reqs(struct list_head *dst,
1155 struct nfs_commit_info *cinfo)
1156{
1157 struct pnfs_commit_bucket *b;
1158 int i;
1159
1160 /* NOTE cinfo->lock is NOT held, relying on fact that this is
1161 * only called on single thread per dreq.
1162 * Can't take the lock because need to do put_lseg
1163 */
1164 for (i = 0, b = cinfo->ds->buckets; i < cinfo->ds->nbuckets; i++, b++) {
1165 if (transfer_commit_list(&b->written, dst, cinfo, 0)) {
1166 BUG_ON(!list_empty(&b->written));
1167 put_lseg(b->wlseg);
1168 b->wlseg = NULL;
1169 }
1170 }
1171 cinfo->ds->nwritten = 0;
1172}
1173
987static unsigned int 1174static unsigned int
988alloc_ds_commits(struct inode *inode, struct list_head *list) 1175alloc_ds_commits(struct nfs_commit_info *cinfo, struct list_head *list)
989{ 1176{
990 struct pnfs_layout_segment *lseg; 1177 struct pnfs_ds_commit_info *fl_cinfo;
991 struct nfs4_filelayout_segment *fl; 1178 struct pnfs_commit_bucket *bucket;
992 struct nfs_write_data *data; 1179 struct nfs_commit_data *data;
993 int i, j; 1180 int i, j;
994 unsigned int nreq = 0; 1181 unsigned int nreq = 0;
995 1182
996 /* Won't need this when non-whole file layout segments are supported 1183 fl_cinfo = cinfo->ds;
997 * instead we will use a pnfs_layout_hdr structure */ 1184 bucket = fl_cinfo->buckets;
998 lseg = find_only_write_lseg(inode); 1185 for (i = 0; i < fl_cinfo->nbuckets; i++, bucket++) {
999 if (!lseg) 1186 if (list_empty(&bucket->committing))
1000 return 0;
1001 fl = FILELAYOUT_LSEG(lseg);
1002 for (i = 0; i < fl->number_of_buckets; i++) {
1003 if (list_empty(&fl->commit_buckets[i].committing))
1004 continue; 1187 continue;
1005 data = nfs_commitdata_alloc(); 1188 data = nfs_commitdata_alloc();
1006 if (!data) 1189 if (!data)
1007 break; 1190 break;
1008 data->ds_commit_index = i; 1191 data->ds_commit_index = i;
1009 data->lseg = lseg; 1192 data->lseg = bucket->clseg;
1193 bucket->clseg = NULL;
1010 list_add(&data->pages, list); 1194 list_add(&data->pages, list);
1011 nreq++; 1195 nreq++;
1012 } 1196 }
1013 1197
1014 /* Clean up on error */ 1198 /* Clean up on error */
1015 for (j = i; j < fl->number_of_buckets; j++) { 1199 for (j = i; j < fl_cinfo->nbuckets; j++, bucket++) {
1016 if (list_empty(&fl->commit_buckets[i].committing)) 1200 if (list_empty(&bucket->committing))
1017 continue; 1201 continue;
1018 nfs_retry_commit(&fl->commit_buckets[i].committing, lseg); 1202 nfs_retry_commit(&bucket->committing, bucket->clseg, cinfo);
1019 put_lseg(lseg); /* associated with emptying bucket */ 1203 put_lseg(bucket->clseg);
1204 bucket->clseg = NULL;
1020 } 1205 }
1021 put_lseg(lseg);
1022 /* Caller will clean up entries put on list */ 1206 /* Caller will clean up entries put on list */
1023 return nreq; 1207 return nreq;
1024} 1208}
@@ -1026,9 +1210,9 @@ alloc_ds_commits(struct inode *inode, struct list_head *list)
1026/* This follows nfs_commit_list pretty closely */ 1210/* This follows nfs_commit_list pretty closely */
1027static int 1211static int
1028filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages, 1212filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
1029 int how) 1213 int how, struct nfs_commit_info *cinfo)
1030{ 1214{
1031 struct nfs_write_data *data, *tmp; 1215 struct nfs_commit_data *data, *tmp;
1032 LIST_HEAD(list); 1216 LIST_HEAD(list);
1033 unsigned int nreq = 0; 1217 unsigned int nreq = 0;
1034 1218
@@ -1039,30 +1223,34 @@ filelayout_commit_pagelist(struct inode *inode, struct list_head *mds_pages,
1039 list_add(&data->pages, &list); 1223 list_add(&data->pages, &list);
1040 nreq++; 1224 nreq++;
1041 } else 1225 } else
1042 nfs_retry_commit(mds_pages, NULL); 1226 nfs_retry_commit(mds_pages, NULL, cinfo);
1043 } 1227 }
1044 1228
1045 nreq += alloc_ds_commits(inode, &list); 1229 nreq += alloc_ds_commits(cinfo, &list);
1046 1230
1047 if (nreq == 0) { 1231 if (nreq == 0) {
1048 nfs_commit_clear_lock(NFS_I(inode)); 1232 cinfo->completion_ops->error_cleanup(NFS_I(inode));
1049 goto out; 1233 goto out;
1050 } 1234 }
1051 1235
1052 atomic_add(nreq, &NFS_I(inode)->commits_outstanding); 1236 atomic_add(nreq, &cinfo->mds->rpcs_out);
1053 1237
1054 list_for_each_entry_safe(data, tmp, &list, pages) { 1238 list_for_each_entry_safe(data, tmp, &list, pages) {
1055 list_del_init(&data->pages); 1239 list_del_init(&data->pages);
1056 if (!data->lseg) { 1240 if (!data->lseg) {
1057 nfs_init_commit(data, mds_pages, NULL); 1241 nfs_init_commit(data, mds_pages, NULL, cinfo);
1058 nfs_initiate_commit(data, NFS_CLIENT(inode), 1242 nfs_initiate_commit(NFS_CLIENT(inode), data,
1059 data->mds_ops, how); 1243 data->mds_ops, how, 0);
1060 } else { 1244 } else {
1061 nfs_init_commit(data, &FILELAYOUT_LSEG(data->lseg)->commit_buckets[data->ds_commit_index].committing, data->lseg); 1245 struct pnfs_commit_bucket *buckets;
1246
1247 buckets = cinfo->ds->buckets;
1248 nfs_init_commit(data, &buckets[data->ds_commit_index].committing, data->lseg, cinfo);
1062 filelayout_initiate_commit(data, how); 1249 filelayout_initiate_commit(data, how);
1063 } 1250 }
1064 } 1251 }
1065out: 1252out:
1253 cinfo->ds->ncommitting = 0;
1066 return PNFS_ATTEMPTED; 1254 return PNFS_ATTEMPTED;
1067} 1255}
1068 1256
@@ -1072,17 +1260,47 @@ filelayout_free_deveiceid_node(struct nfs4_deviceid_node *d)
1072 nfs4_fl_free_deviceid(container_of(d, struct nfs4_file_layout_dsaddr, id_node)); 1260 nfs4_fl_free_deviceid(container_of(d, struct nfs4_file_layout_dsaddr, id_node));
1073} 1261}
1074 1262
1263static struct pnfs_layout_hdr *
1264filelayout_alloc_layout_hdr(struct inode *inode, gfp_t gfp_flags)
1265{
1266 struct nfs4_filelayout *flo;
1267
1268 flo = kzalloc(sizeof(*flo), gfp_flags);
1269 return &flo->generic_hdr;
1270}
1271
1272static void
1273filelayout_free_layout_hdr(struct pnfs_layout_hdr *lo)
1274{
1275 kfree(FILELAYOUT_FROM_HDR(lo));
1276}
1277
1278static struct pnfs_ds_commit_info *
1279filelayout_get_ds_info(struct inode *inode)
1280{
1281 struct pnfs_layout_hdr *layout = NFS_I(inode)->layout;
1282
1283 if (layout == NULL)
1284 return NULL;
1285 else
1286 return &FILELAYOUT_FROM_HDR(layout)->commit_info;
1287}
1288
1075static struct pnfs_layoutdriver_type filelayout_type = { 1289static struct pnfs_layoutdriver_type filelayout_type = {
1076 .id = LAYOUT_NFSV4_1_FILES, 1290 .id = LAYOUT_NFSV4_1_FILES,
1077 .name = "LAYOUT_NFSV4_1_FILES", 1291 .name = "LAYOUT_NFSV4_1_FILES",
1078 .owner = THIS_MODULE, 1292 .owner = THIS_MODULE,
1293 .alloc_layout_hdr = filelayout_alloc_layout_hdr,
1294 .free_layout_hdr = filelayout_free_layout_hdr,
1079 .alloc_lseg = filelayout_alloc_lseg, 1295 .alloc_lseg = filelayout_alloc_lseg,
1080 .free_lseg = filelayout_free_lseg, 1296 .free_lseg = filelayout_free_lseg,
1081 .pg_read_ops = &filelayout_pg_read_ops, 1297 .pg_read_ops = &filelayout_pg_read_ops,
1082 .pg_write_ops = &filelayout_pg_write_ops, 1298 .pg_write_ops = &filelayout_pg_write_ops,
1299 .get_ds_info = &filelayout_get_ds_info,
1083 .mark_request_commit = filelayout_mark_request_commit, 1300 .mark_request_commit = filelayout_mark_request_commit,
1084 .clear_request_commit = filelayout_clear_request_commit, 1301 .clear_request_commit = filelayout_clear_request_commit,
1085 .scan_commit_lists = filelayout_scan_commit_lists, 1302 .scan_commit_lists = filelayout_scan_commit_lists,
1303 .recover_commit_reqs = filelayout_recover_commit_reqs,
1086 .commit_pagelist = filelayout_commit_pagelist, 1304 .commit_pagelist = filelayout_commit_pagelist,
1087 .read_pagelist = filelayout_read_pagelist, 1305 .read_pagelist = filelayout_read_pagelist,
1088 .write_pagelist = filelayout_write_pagelist, 1306 .write_pagelist = filelayout_write_pagelist,
diff --git a/fs/nfs/nfs4filelayout.h b/fs/nfs/nfs4filelayout.h
index 21190bb1f5e3..43fe802dd678 100644
--- a/fs/nfs/nfs4filelayout.h
+++ b/fs/nfs/nfs4filelayout.h
@@ -33,6 +33,13 @@
33#include "pnfs.h" 33#include "pnfs.h"
34 34
35/* 35/*
36 * Default data server connection timeout and retrans vaules.
37 * Set by module paramters dataserver_timeo and dataserver_retrans.
38 */
39#define NFS4_DEF_DS_TIMEO 60
40#define NFS4_DEF_DS_RETRANS 5
41
42/*
36 * Field testing shows we need to support up to 4096 stripe indices. 43 * Field testing shows we need to support up to 4096 stripe indices.
37 * We store each index as a u8 (u32 on the wire) to keep the memory footprint 44 * We store each index as a u8 (u32 on the wire) to keep the memory footprint
38 * reasonable. This in turn means we support a maximum of 256 45 * reasonable. This in turn means we support a maximum of 256
@@ -41,6 +48,9 @@
41#define NFS4_PNFS_MAX_STRIPE_CNT 4096 48#define NFS4_PNFS_MAX_STRIPE_CNT 4096
42#define NFS4_PNFS_MAX_MULTI_CNT 256 /* 256 fit into a u8 stripe_index */ 49#define NFS4_PNFS_MAX_MULTI_CNT 256 /* 256 fit into a u8 stripe_index */
43 50
51/* error codes for internal use */
52#define NFS4ERR_RESET_TO_MDS 12001
53
44enum stripetype4 { 54enum stripetype4 {
45 STRIPE_SPARSE = 1, 55 STRIPE_SPARSE = 1,
46 STRIPE_DENSE = 2 56 STRIPE_DENSE = 2
@@ -62,23 +72,14 @@ struct nfs4_pnfs_ds {
62 atomic_t ds_count; 72 atomic_t ds_count;
63}; 73};
64 74
65/* nfs4_file_layout_dsaddr flags */
66#define NFS4_DEVICE_ID_NEG_ENTRY 0x00000001
67
68struct nfs4_file_layout_dsaddr { 75struct nfs4_file_layout_dsaddr {
69 struct nfs4_deviceid_node id_node; 76 struct nfs4_deviceid_node id_node;
70 unsigned long flags;
71 u32 stripe_count; 77 u32 stripe_count;
72 u8 *stripe_indices; 78 u8 *stripe_indices;
73 u32 ds_num; 79 u32 ds_num;
74 struct nfs4_pnfs_ds *ds_list[1]; 80 struct nfs4_pnfs_ds *ds_list[1];
75}; 81};
76 82
77struct nfs4_fl_commit_bucket {
78 struct list_head written;
79 struct list_head committing;
80};
81
82struct nfs4_filelayout_segment { 83struct nfs4_filelayout_segment {
83 struct pnfs_layout_segment generic_hdr; 84 struct pnfs_layout_segment generic_hdr;
84 u32 stripe_type; 85 u32 stripe_type;
@@ -89,10 +90,19 @@ struct nfs4_filelayout_segment {
89 struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */ 90 struct nfs4_file_layout_dsaddr *dsaddr; /* Point to GETDEVINFO data */
90 unsigned int num_fh; 91 unsigned int num_fh;
91 struct nfs_fh **fh_array; 92 struct nfs_fh **fh_array;
92 struct nfs4_fl_commit_bucket *commit_buckets; /* Sort commits to ds */
93 int number_of_buckets;
94}; 93};
95 94
95struct nfs4_filelayout {
96 struct pnfs_layout_hdr generic_hdr;
97 struct pnfs_ds_commit_info commit_info;
98};
99
100static inline struct nfs4_filelayout *
101FILELAYOUT_FROM_HDR(struct pnfs_layout_hdr *lo)
102{
103 return container_of(lo, struct nfs4_filelayout, generic_hdr);
104}
105
96static inline struct nfs4_filelayout_segment * 106static inline struct nfs4_filelayout_segment *
97FILELAYOUT_LSEG(struct pnfs_layout_segment *lseg) 107FILELAYOUT_LSEG(struct pnfs_layout_segment *lseg)
98{ 108{
@@ -107,6 +117,36 @@ FILELAYOUT_DEVID_NODE(struct pnfs_layout_segment *lseg)
107 return &FILELAYOUT_LSEG(lseg)->dsaddr->id_node; 117 return &FILELAYOUT_LSEG(lseg)->dsaddr->id_node;
108} 118}
109 119
120static inline void
121filelayout_mark_devid_invalid(struct nfs4_deviceid_node *node)
122{
123 u32 *p = (u32 *)&node->deviceid;
124
125 printk(KERN_WARNING "NFS: Deviceid [%x%x%x%x] marked out of use.\n",
126 p[0], p[1], p[2], p[3]);
127
128 set_bit(NFS_DEVICEID_INVALID, &node->flags);
129}
130
131static inline bool
132filelayout_test_layout_invalid(struct pnfs_layout_hdr *lo)
133{
134 return test_bit(NFS_LAYOUT_INVALID, &lo->plh_flags);
135}
136
137static inline bool
138filelayout_test_devid_invalid(struct nfs4_deviceid_node *node)
139{
140 return test_bit(NFS_DEVICEID_INVALID, &node->flags);
141}
142
143static inline bool
144filelayout_reset_to_mds(struct pnfs_layout_segment *lseg)
145{
146 return filelayout_test_devid_invalid(FILELAYOUT_DEVID_NODE(lseg)) ||
147 filelayout_test_layout_invalid(lseg->pls_layout);
148}
149
110extern struct nfs_fh * 150extern struct nfs_fh *
111nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j); 151nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j);
112 152
@@ -119,5 +159,6 @@ extern void nfs4_fl_put_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
119extern void nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr); 159extern void nfs4_fl_free_deviceid(struct nfs4_file_layout_dsaddr *dsaddr);
120struct nfs4_file_layout_dsaddr * 160struct nfs4_file_layout_dsaddr *
121get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags); 161get_device_info(struct inode *inode, struct nfs4_deviceid *dev_id, gfp_t gfp_flags);
162void nfs4_ds_disconnect(struct nfs_client *clp);
122 163
123#endif /* FS_NFS_NFS4FILELAYOUT_H */ 164#endif /* FS_NFS_NFS4FILELAYOUT_H */
diff --git a/fs/nfs/nfs4filelayoutdev.c b/fs/nfs/nfs4filelayoutdev.c
index c9cff9adb2d3..a1fab8da7f03 100644
--- a/fs/nfs/nfs4filelayoutdev.c
+++ b/fs/nfs/nfs4filelayoutdev.c
@@ -30,12 +30,16 @@
30 30
31#include <linux/nfs_fs.h> 31#include <linux/nfs_fs.h>
32#include <linux/vmalloc.h> 32#include <linux/vmalloc.h>
33#include <linux/module.h>
33 34
34#include "internal.h" 35#include "internal.h"
35#include "nfs4filelayout.h" 36#include "nfs4filelayout.h"
36 37
37#define NFSDBG_FACILITY NFSDBG_PNFS_LD 38#define NFSDBG_FACILITY NFSDBG_PNFS_LD
38 39
40static unsigned int dataserver_timeo = NFS4_DEF_DS_TIMEO;
41static unsigned int dataserver_retrans = NFS4_DEF_DS_RETRANS;
42
39/* 43/*
40 * Data server cache 44 * Data server cache
41 * 45 *
@@ -145,6 +149,28 @@ _data_server_lookup_locked(const struct list_head *dsaddrs)
145} 149}
146 150
147/* 151/*
152 * Lookup DS by nfs_client pointer. Zero data server client pointer
153 */
154void nfs4_ds_disconnect(struct nfs_client *clp)
155{
156 struct nfs4_pnfs_ds *ds;
157 struct nfs_client *found = NULL;
158
159 dprintk("%s clp %p\n", __func__, clp);
160 spin_lock(&nfs4_ds_cache_lock);
161 list_for_each_entry(ds, &nfs4_data_server_cache, ds_node)
162 if (ds->ds_clp && ds->ds_clp == clp) {
163 found = ds->ds_clp;
164 ds->ds_clp = NULL;
165 }
166 spin_unlock(&nfs4_ds_cache_lock);
167 if (found) {
168 set_bit(NFS_CS_STOP_RENEW, &clp->cl_res_state);
169 nfs_put_client(clp);
170 }
171}
172
173/*
148 * Create an rpc connection to the nfs4_pnfs_ds data server 174 * Create an rpc connection to the nfs4_pnfs_ds data server
149 * Currently only supports IPv4 and IPv6 addresses 175 * Currently only supports IPv4 and IPv6 addresses
150 */ 176 */
@@ -165,8 +191,9 @@ nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
165 __func__, ds->ds_remotestr, da->da_remotestr); 191 __func__, ds->ds_remotestr, da->da_remotestr);
166 192
167 clp = nfs4_set_ds_client(mds_srv->nfs_client, 193 clp = nfs4_set_ds_client(mds_srv->nfs_client,
168 (struct sockaddr *)&da->da_addr, 194 (struct sockaddr *)&da->da_addr,
169 da->da_addrlen, IPPROTO_TCP); 195 da->da_addrlen, IPPROTO_TCP,
196 dataserver_timeo, dataserver_retrans);
170 if (!IS_ERR(clp)) 197 if (!IS_ERR(clp))
171 break; 198 break;
172 } 199 }
@@ -176,28 +203,7 @@ nfs4_ds_connect(struct nfs_server *mds_srv, struct nfs4_pnfs_ds *ds)
176 goto out; 203 goto out;
177 } 204 }
178 205
179 if ((clp->cl_exchange_flags & EXCHGID4_FLAG_MASK_PNFS) != 0) { 206 status = nfs4_init_ds_session(clp, mds_srv->nfs_client->cl_lease_time);
180 if (!is_ds_client(clp)) {
181 status = -ENODEV;
182 goto out_put;
183 }
184 ds->ds_clp = clp;
185 dprintk("%s [existing] server=%s\n", __func__,
186 ds->ds_remotestr);
187 goto out;
188 }
189
190 /*
191 * Do not set NFS_CS_CHECK_LEASE_TIME instead set the DS lease to
192 * be equal to the MDS lease. Renewal is scheduled in create_session.
193 */
194 spin_lock(&mds_srv->nfs_client->cl_lock);
195 clp->cl_lease_time = mds_srv->nfs_client->cl_lease_time;
196 spin_unlock(&mds_srv->nfs_client->cl_lock);
197 clp->cl_last_renewal = jiffies;
198
199 /* New nfs_client */
200 status = nfs4_init_ds_session(clp);
201 if (status) 207 if (status)
202 goto out_put; 208 goto out_put;
203 209
@@ -602,7 +608,7 @@ decode_device(struct inode *ino, struct pnfs_device *pdev, gfp_t gfp_flags)
602 608
603 mp_count = be32_to_cpup(p); /* multipath count */ 609 mp_count = be32_to_cpup(p); /* multipath count */
604 for (j = 0; j < mp_count; j++) { 610 for (j = 0; j < mp_count; j++) {
605 da = decode_ds_addr(NFS_SERVER(ino)->nfs_client->net, 611 da = decode_ds_addr(NFS_SERVER(ino)->nfs_client->cl_net,
606 &stream, gfp_flags); 612 &stream, gfp_flags);
607 if (da) 613 if (da)
608 list_add_tail(&da->da_node, &dsaddrs); 614 list_add_tail(&da->da_node, &dsaddrs);
@@ -791,48 +797,42 @@ nfs4_fl_select_ds_fh(struct pnfs_layout_segment *lseg, u32 j)
791 return flseg->fh_array[i]; 797 return flseg->fh_array[i];
792} 798}
793 799
794static void
795filelayout_mark_devid_negative(struct nfs4_file_layout_dsaddr *dsaddr,
796 int err, const char *ds_remotestr)
797{
798 u32 *p = (u32 *)&dsaddr->id_node.deviceid;
799
800 printk(KERN_ERR "NFS: data server %s connection error %d."
801 " Deviceid [%x%x%x%x] marked out of use.\n",
802 ds_remotestr, err, p[0], p[1], p[2], p[3]);
803
804 spin_lock(&nfs4_ds_cache_lock);
805 dsaddr->flags |= NFS4_DEVICE_ID_NEG_ENTRY;
806 spin_unlock(&nfs4_ds_cache_lock);
807}
808
809struct nfs4_pnfs_ds * 800struct nfs4_pnfs_ds *
810nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx) 801nfs4_fl_prepare_ds(struct pnfs_layout_segment *lseg, u32 ds_idx)
811{ 802{
812 struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr; 803 struct nfs4_file_layout_dsaddr *dsaddr = FILELAYOUT_LSEG(lseg)->dsaddr;
813 struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx]; 804 struct nfs4_pnfs_ds *ds = dsaddr->ds_list[ds_idx];
805 struct nfs4_deviceid_node *devid = FILELAYOUT_DEVID_NODE(lseg);
806
807 if (filelayout_test_devid_invalid(devid))
808 return NULL;
814 809
815 if (ds == NULL) { 810 if (ds == NULL) {
816 printk(KERN_ERR "NFS: %s: No data server for offset index %d\n", 811 printk(KERN_ERR "NFS: %s: No data server for offset index %d\n",
817 __func__, ds_idx); 812 __func__, ds_idx);
818 return NULL; 813 goto mark_dev_invalid;
819 } 814 }
820 815
821 if (!ds->ds_clp) { 816 if (!ds->ds_clp) {
822 struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode); 817 struct nfs_server *s = NFS_SERVER(lseg->pls_layout->plh_inode);
823 int err; 818 int err;
824 819
825 if (dsaddr->flags & NFS4_DEVICE_ID_NEG_ENTRY) {
826 /* Already tried to connect, don't try again */
827 dprintk("%s Deviceid marked out of use\n", __func__);
828 return NULL;
829 }
830 err = nfs4_ds_connect(s, ds); 820 err = nfs4_ds_connect(s, ds);
831 if (err) { 821 if (err)
832 filelayout_mark_devid_negative(dsaddr, err, 822 goto mark_dev_invalid;
833 ds->ds_remotestr);
834 return NULL;
835 }
836 } 823 }
837 return ds; 824 return ds;
825
826mark_dev_invalid:
827 filelayout_mark_devid_invalid(devid);
828 return NULL;
838} 829}
830
831module_param(dataserver_retrans, uint, 0644);
832MODULE_PARM_DESC(dataserver_retrans, "The number of times the NFSv4.1 client "
833 "retries a request before it attempts further "
834 " recovery action.");
835module_param(dataserver_timeo, uint, 0644);
836MODULE_PARM_DESC(dataserver_timeo, "The time (in tenths of a second) the "
837 "NFSv4.1 client waits for a response from a "
838 " data server before it retries an NFS request.");
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index a7f3dedc4ec7..017b4b01a69c 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -132,6 +132,35 @@ static size_t nfs_parse_server_name(char *string, size_t len,
132 return ret; 132 return ret;
133} 133}
134 134
135rpc_authflavor_t nfs_find_best_sec(struct nfs4_secinfo_flavors *flavors)
136{
137 struct gss_api_mech *mech;
138 struct xdr_netobj oid;
139 int i;
140 rpc_authflavor_t pseudoflavor = RPC_AUTH_UNIX;
141
142 for (i = 0; i < flavors->num_flavors; i++) {
143 struct nfs4_secinfo_flavor *flavor;
144 flavor = &flavors->flavors[i];
145
146 if (flavor->flavor == RPC_AUTH_NULL || flavor->flavor == RPC_AUTH_UNIX) {
147 pseudoflavor = flavor->flavor;
148 break;
149 } else if (flavor->flavor == RPC_AUTH_GSS) {
150 oid.len = flavor->gss.sec_oid4.len;
151 oid.data = flavor->gss.sec_oid4.data;
152 mech = gss_mech_get_by_OID(&oid);
153 if (!mech)
154 continue;
155 pseudoflavor = gss_svc_to_pseudoflavor(mech, flavor->gss.service);
156 gss_mech_put(mech);
157 break;
158 }
159 }
160
161 return pseudoflavor;
162}
163
135static rpc_authflavor_t nfs4_negotiate_security(struct inode *inode, struct qstr *name) 164static rpc_authflavor_t nfs4_negotiate_security(struct inode *inode, struct qstr *name)
136{ 165{
137 struct page *page; 166 struct page *page;
@@ -168,7 +197,7 @@ struct rpc_clnt *nfs4_create_sec_client(struct rpc_clnt *clnt, struct inode *ino
168 rpc_authflavor_t flavor; 197 rpc_authflavor_t flavor;
169 198
170 flavor = nfs4_negotiate_security(inode, name); 199 flavor = nfs4_negotiate_security(inode, name);
171 if (flavor < 0) 200 if ((int)flavor < 0)
172 return ERR_PTR(flavor); 201 return ERR_PTR(flavor);
173 202
174 clone = rpc_clone_client(clnt); 203 clone = rpc_clone_client(clnt);
@@ -300,7 +329,7 @@ out:
300 * @dentry - dentry of referral 329 * @dentry - dentry of referral
301 * 330 *
302 */ 331 */
303struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry) 332static struct vfsmount *nfs_do_refmount(struct rpc_clnt *client, struct dentry *dentry)
304{ 333{
305 struct vfsmount *mnt = ERR_PTR(-ENOMEM); 334 struct vfsmount *mnt = ERR_PTR(-ENOMEM);
306 struct dentry *parent; 335 struct dentry *parent;
@@ -341,3 +370,25 @@ out:
341 dprintk("%s: done\n", __func__); 370 dprintk("%s: done\n", __func__);
342 return mnt; 371 return mnt;
343} 372}
373
374struct vfsmount *nfs4_submount(struct nfs_server *server, struct dentry *dentry,
375 struct nfs_fh *fh, struct nfs_fattr *fattr)
376{
377 struct dentry *parent = dget_parent(dentry);
378 struct rpc_clnt *client;
379 struct vfsmount *mnt;
380
381 /* Look it up again to get its attributes and sec flavor */
382 client = nfs4_proc_lookup_mountpoint(parent->d_inode, &dentry->d_name, fh, fattr);
383 dput(parent);
384 if (IS_ERR(client))
385 return ERR_CAST(client);
386
387 if (fattr->valid & NFS_ATTR_FATTR_V4_REFERRAL)
388 mnt = nfs_do_refmount(client, dentry);
389 else
390 mnt = nfs_do_submount(dentry, fh, fattr, client->cl_auth->au_flavor);
391
392 rpc_shutdown_client(client);
393 return mnt;
394}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index ab985f6f0da8..d48dbefa0e71 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -64,6 +64,7 @@
64#include "iostat.h" 64#include "iostat.h"
65#include "callback.h" 65#include "callback.h"
66#include "pnfs.h" 66#include "pnfs.h"
67#include "netns.h"
67 68
68#define NFSDBG_FACILITY NFSDBG_PROC 69#define NFSDBG_FACILITY NFSDBG_PROC
69 70
@@ -80,6 +81,7 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data);
80static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *); 81static int nfs4_do_fsinfo(struct nfs_server *, struct nfs_fh *, struct nfs_fsinfo *);
81static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *); 82static int nfs4_async_handle_error(struct rpc_task *, const struct nfs_server *, struct nfs4_state *);
82static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr); 83static void nfs_fixup_referral_attributes(struct nfs_fattr *fattr);
84static int nfs4_proc_getattr(struct nfs_server *, struct nfs_fh *, struct nfs_fattr *);
83static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr); 85static int _nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr);
84static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred, 86static int nfs4_do_setattr(struct inode *inode, struct rpc_cred *cred,
85 struct nfs_fattr *fattr, struct iattr *sattr, 87 struct nfs_fattr *fattr, struct iattr *sattr,
@@ -101,6 +103,8 @@ static int nfs4_map_errors(int err)
101 case -NFS4ERR_BADOWNER: 103 case -NFS4ERR_BADOWNER:
102 case -NFS4ERR_BADNAME: 104 case -NFS4ERR_BADNAME:
103 return -EINVAL; 105 return -EINVAL;
106 case -NFS4ERR_SHARE_DENIED:
107 return -EACCES;
104 default: 108 default:
105 dprintk("%s could not handle NFSv4 error %d\n", 109 dprintk("%s could not handle NFSv4 error %d\n",
106 __func__, -err); 110 __func__, -err);
@@ -304,7 +308,7 @@ static int nfs4_handle_exception(struct nfs_server *server, int errorcode, struc
304 case -NFS4ERR_SEQ_MISORDERED: 308 case -NFS4ERR_SEQ_MISORDERED:
305 dprintk("%s ERROR: %d Reset session\n", __func__, 309 dprintk("%s ERROR: %d Reset session\n", __func__,
306 errorcode); 310 errorcode);
307 nfs4_schedule_session_recovery(clp->cl_session); 311 nfs4_schedule_session_recovery(clp->cl_session, errorcode);
308 exception->retry = 1; 312 exception->retry = 1;
309 break; 313 break;
310#endif /* defined(CONFIG_NFS_V4_1) */ 314#endif /* defined(CONFIG_NFS_V4_1) */
@@ -772,7 +776,7 @@ static void update_changeattr(struct inode *dir, struct nfs4_change_info *cinfo)
772 struct nfs_inode *nfsi = NFS_I(dir); 776 struct nfs_inode *nfsi = NFS_I(dir);
773 777
774 spin_lock(&dir->i_lock); 778 spin_lock(&dir->i_lock);
775 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA; 779 nfsi->cache_validity |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA;
776 if (!cinfo->atomic || cinfo->before != dir->i_version) 780 if (!cinfo->atomic || cinfo->before != dir->i_version)
777 nfs_force_lookup_revalidate(dir); 781 nfs_force_lookup_revalidate(dir);
778 dir->i_version = cinfo->after; 782 dir->i_version = cinfo->after;
@@ -788,7 +792,6 @@ struct nfs4_opendata {
788 struct nfs4_string owner_name; 792 struct nfs4_string owner_name;
789 struct nfs4_string group_name; 793 struct nfs4_string group_name;
790 struct nfs_fattr f_attr; 794 struct nfs_fattr f_attr;
791 struct nfs_fattr dir_attr;
792 struct dentry *dir; 795 struct dentry *dir;
793 struct dentry *dentry; 796 struct dentry *dentry;
794 struct nfs4_state_owner *owner; 797 struct nfs4_state_owner *owner;
@@ -804,12 +807,10 @@ struct nfs4_opendata {
804static void nfs4_init_opendata_res(struct nfs4_opendata *p) 807static void nfs4_init_opendata_res(struct nfs4_opendata *p)
805{ 808{
806 p->o_res.f_attr = &p->f_attr; 809 p->o_res.f_attr = &p->f_attr;
807 p->o_res.dir_attr = &p->dir_attr;
808 p->o_res.seqid = p->o_arg.seqid; 810 p->o_res.seqid = p->o_arg.seqid;
809 p->c_res.seqid = p->c_arg.seqid; 811 p->c_res.seqid = p->c_arg.seqid;
810 p->o_res.server = p->o_arg.server; 812 p->o_res.server = p->o_arg.server;
811 nfs_fattr_init(&p->f_attr); 813 nfs_fattr_init(&p->f_attr);
812 nfs_fattr_init(&p->dir_attr);
813 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name); 814 nfs_fattr_init_names(&p->f_attr, &p->owner_name, &p->group_name);
814} 815}
815 816
@@ -843,7 +844,6 @@ static struct nfs4_opendata *nfs4_opendata_alloc(struct dentry *dentry,
843 p->o_arg.name = &dentry->d_name; 844 p->o_arg.name = &dentry->d_name;
844 p->o_arg.server = server; 845 p->o_arg.server = server;
845 p->o_arg.bitmask = server->attr_bitmask; 846 p->o_arg.bitmask = server->attr_bitmask;
846 p->o_arg.dir_bitmask = server->cache_consistency_bitmask;
847 p->o_arg.claim = NFS4_OPEN_CLAIM_NULL; 847 p->o_arg.claim = NFS4_OPEN_CLAIM_NULL;
848 if (attrs != NULL && attrs->ia_valid != 0) { 848 if (attrs != NULL && attrs->ia_valid != 0) {
849 __be32 verf[2]; 849 __be32 verf[2];
@@ -1332,7 +1332,7 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state
1332 case -NFS4ERR_BAD_HIGH_SLOT: 1332 case -NFS4ERR_BAD_HIGH_SLOT:
1333 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 1333 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1334 case -NFS4ERR_DEADSESSION: 1334 case -NFS4ERR_DEADSESSION:
1335 nfs4_schedule_session_recovery(server->nfs_client->cl_session); 1335 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
1336 goto out; 1336 goto out;
1337 case -NFS4ERR_STALE_CLIENTID: 1337 case -NFS4ERR_STALE_CLIENTID:
1338 case -NFS4ERR_STALE_STATEID: 1338 case -NFS4ERR_STALE_STATEID:
@@ -1611,8 +1611,6 @@ static int _nfs4_recover_proc_open(struct nfs4_opendata *data)
1611 1611
1612 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr); 1612 nfs_fattr_map_and_free_names(NFS_SERVER(dir), &data->f_attr);
1613 1613
1614 nfs_refresh_inode(dir, o_res->dir_attr);
1615
1616 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 1614 if (o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
1617 status = _nfs4_proc_open_confirm(data); 1615 status = _nfs4_proc_open_confirm(data);
1618 if (status != 0) 1616 if (status != 0)
@@ -1645,11 +1643,8 @@ static int _nfs4_proc_open(struct nfs4_opendata *data)
1645 1643
1646 nfs_fattr_map_and_free_names(server, &data->f_attr); 1644 nfs_fattr_map_and_free_names(server, &data->f_attr);
1647 1645
1648 if (o_arg->open_flags & O_CREAT) { 1646 if (o_arg->open_flags & O_CREAT)
1649 update_changeattr(dir, &o_res->cinfo); 1647 update_changeattr(dir, &o_res->cinfo);
1650 nfs_post_op_update_inode(dir, o_res->dir_attr);
1651 } else
1652 nfs_refresh_inode(dir, o_res->dir_attr);
1653 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0) 1648 if ((o_res->rflags & NFS4_OPEN_RESULT_LOCKTYPE_POSIX) == 0)
1654 server->caps &= ~NFS_CAP_POSIX_LOCK; 1649 server->caps &= ~NFS_CAP_POSIX_LOCK;
1655 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) { 1650 if(o_res->rflags & NFS4_OPEN_RESULT_CONFIRM) {
@@ -1789,7 +1784,14 @@ static inline void nfs4_exclusive_attrset(struct nfs4_opendata *opendata, struct
1789/* 1784/*
1790 * Returns a referenced nfs4_state 1785 * Returns a referenced nfs4_state
1791 */ 1786 */
1792static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred, struct nfs4_state **res) 1787static int _nfs4_do_open(struct inode *dir,
1788 struct dentry *dentry,
1789 fmode_t fmode,
1790 int flags,
1791 struct iattr *sattr,
1792 struct rpc_cred *cred,
1793 struct nfs4_state **res,
1794 struct nfs4_threshold **ctx_th)
1793{ 1795{
1794 struct nfs4_state_owner *sp; 1796 struct nfs4_state_owner *sp;
1795 struct nfs4_state *state = NULL; 1797 struct nfs4_state *state = NULL;
@@ -1814,6 +1816,11 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode
1814 if (opendata == NULL) 1816 if (opendata == NULL)
1815 goto err_put_state_owner; 1817 goto err_put_state_owner;
1816 1818
1819 if (ctx_th && server->attr_bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD) {
1820 opendata->f_attr.mdsthreshold = pnfs_mdsthreshold_alloc();
1821 if (!opendata->f_attr.mdsthreshold)
1822 goto err_opendata_put;
1823 }
1817 if (dentry->d_inode != NULL) 1824 if (dentry->d_inode != NULL)
1818 opendata->state = nfs4_get_open_state(dentry->d_inode, sp); 1825 opendata->state = nfs4_get_open_state(dentry->d_inode, sp);
1819 1826
@@ -1839,11 +1846,19 @@ static int _nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode
1839 nfs_setattr_update_inode(state->inode, sattr); 1846 nfs_setattr_update_inode(state->inode, sattr);
1840 nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr); 1847 nfs_post_op_update_inode(state->inode, opendata->o_res.f_attr);
1841 } 1848 }
1849
1850 if (pnfs_use_threshold(ctx_th, opendata->f_attr.mdsthreshold, server))
1851 *ctx_th = opendata->f_attr.mdsthreshold;
1852 else
1853 kfree(opendata->f_attr.mdsthreshold);
1854 opendata->f_attr.mdsthreshold = NULL;
1855
1842 nfs4_opendata_put(opendata); 1856 nfs4_opendata_put(opendata);
1843 nfs4_put_state_owner(sp); 1857 nfs4_put_state_owner(sp);
1844 *res = state; 1858 *res = state;
1845 return 0; 1859 return 0;
1846err_opendata_put: 1860err_opendata_put:
1861 kfree(opendata->f_attr.mdsthreshold);
1847 nfs4_opendata_put(opendata); 1862 nfs4_opendata_put(opendata);
1848err_put_state_owner: 1863err_put_state_owner:
1849 nfs4_put_state_owner(sp); 1864 nfs4_put_state_owner(sp);
@@ -1853,14 +1868,21 @@ out_err:
1853} 1868}
1854 1869
1855 1870
1856static struct nfs4_state *nfs4_do_open(struct inode *dir, struct dentry *dentry, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred) 1871static struct nfs4_state *nfs4_do_open(struct inode *dir,
1872 struct dentry *dentry,
1873 fmode_t fmode,
1874 int flags,
1875 struct iattr *sattr,
1876 struct rpc_cred *cred,
1877 struct nfs4_threshold **ctx_th)
1857{ 1878{
1858 struct nfs4_exception exception = { }; 1879 struct nfs4_exception exception = { };
1859 struct nfs4_state *res; 1880 struct nfs4_state *res;
1860 int status; 1881 int status;
1861 1882
1862 do { 1883 do {
1863 status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred, &res); 1884 status = _nfs4_do_open(dir, dentry, fmode, flags, sattr, cred,
1885 &res, ctx_th);
1864 if (status == 0) 1886 if (status == 0)
1865 break; 1887 break;
1866 /* NOTE: BAD_SEQID means the server and client disagree about the 1888 /* NOTE: BAD_SEQID means the server and client disagree about the
@@ -2184,7 +2206,8 @@ nfs4_atomic_open(struct inode *dir, struct nfs_open_context *ctx, int open_flags
2184 struct nfs4_state *state; 2206 struct nfs4_state *state;
2185 2207
2186 /* Protect against concurrent sillydeletes */ 2208 /* Protect against concurrent sillydeletes */
2187 state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr, ctx->cred); 2209 state = nfs4_do_open(dir, ctx->dentry, ctx->mode, open_flags, attr,
2210 ctx->cred, &ctx->mdsthreshold);
2188 if (IS_ERR(state)) 2211 if (IS_ERR(state))
2189 return ERR_CAST(state); 2212 return ERR_CAST(state);
2190 ctx->state = state; 2213 ctx->state = state;
@@ -2354,8 +2377,8 @@ static int nfs4_find_root_sec(struct nfs_server *server, struct nfs_fh *fhandle,
2354/* 2377/*
2355 * get the file handle for the "/" directory on the server 2378 * get the file handle for the "/" directory on the server
2356 */ 2379 */
2357static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle, 2380int nfs4_proc_get_rootfh(struct nfs_server *server, struct nfs_fh *fhandle,
2358 struct nfs_fsinfo *info) 2381 struct nfs_fsinfo *info)
2359{ 2382{
2360 int minor_version = server->nfs_client->cl_minorversion; 2383 int minor_version = server->nfs_client->cl_minorversion;
2361 int status = nfs4_lookup_root(server, fhandle, info); 2384 int status = nfs4_lookup_root(server, fhandle, info);
@@ -2372,6 +2395,31 @@ static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
2372 return nfs4_map_errors(status); 2395 return nfs4_map_errors(status);
2373} 2396}
2374 2397
2398static int nfs4_proc_get_root(struct nfs_server *server, struct nfs_fh *mntfh,
2399 struct nfs_fsinfo *info)
2400{
2401 int error;
2402 struct nfs_fattr *fattr = info->fattr;
2403
2404 error = nfs4_server_capabilities(server, mntfh);
2405 if (error < 0) {
2406 dprintk("nfs4_get_root: getcaps error = %d\n", -error);
2407 return error;
2408 }
2409
2410 error = nfs4_proc_getattr(server, mntfh, fattr);
2411 if (error < 0) {
2412 dprintk("nfs4_get_root: getattr error = %d\n", -error);
2413 return error;
2414 }
2415
2416 if (fattr->valid & NFS_ATTR_FATTR_FSID &&
2417 !nfs_fsid_equal(&server->fsid, &fattr->fsid))
2418 memcpy(&server->fsid, &fattr->fsid, sizeof(server->fsid));
2419
2420 return error;
2421}
2422
2375/* 2423/*
2376 * Get locations and (maybe) other attributes of a referral. 2424 * Get locations and (maybe) other attributes of a referral.
2377 * Note that we'll actually follow the referral later when 2425 * Note that we'll actually follow the referral later when
@@ -2578,7 +2626,7 @@ out:
2578 return err; 2626 return err;
2579} 2627}
2580 2628
2581static int nfs4_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name, 2629static int nfs4_proc_lookup(struct inode *dir, struct qstr *name,
2582 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 2630 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
2583{ 2631{
2584 int status; 2632 int status;
@@ -2761,7 +2809,7 @@ nfs4_proc_create(struct inode *dir, struct dentry *dentry, struct iattr *sattr,
2761 fmode = ctx->mode; 2809 fmode = ctx->mode;
2762 } 2810 }
2763 sattr->ia_mode &= ~current_umask(); 2811 sattr->ia_mode &= ~current_umask();
2764 state = nfs4_do_open(dir, de, fmode, flags, sattr, cred); 2812 state = nfs4_do_open(dir, de, fmode, flags, sattr, cred, NULL);
2765 d_drop(dentry); 2813 d_drop(dentry);
2766 if (IS_ERR(state)) { 2814 if (IS_ERR(state)) {
2767 status = PTR_ERR(state); 2815 status = PTR_ERR(state);
@@ -2783,7 +2831,6 @@ static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
2783 struct nfs_removeargs args = { 2831 struct nfs_removeargs args = {
2784 .fh = NFS_FH(dir), 2832 .fh = NFS_FH(dir),
2785 .name = *name, 2833 .name = *name,
2786 .bitmask = server->attr_bitmask,
2787 }; 2834 };
2788 struct nfs_removeres res = { 2835 struct nfs_removeres res = {
2789 .server = server, 2836 .server = server,
@@ -2793,19 +2840,11 @@ static int _nfs4_proc_remove(struct inode *dir, struct qstr *name)
2793 .rpc_argp = &args, 2840 .rpc_argp = &args,
2794 .rpc_resp = &res, 2841 .rpc_resp = &res,
2795 }; 2842 };
2796 int status = -ENOMEM; 2843 int status;
2797
2798 res.dir_attr = nfs_alloc_fattr();
2799 if (res.dir_attr == NULL)
2800 goto out;
2801 2844
2802 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1); 2845 status = nfs4_call_sync(server->client, server, &msg, &args.seq_args, &res.seq_res, 1);
2803 if (status == 0) { 2846 if (status == 0)
2804 update_changeattr(dir, &res.cinfo); 2847 update_changeattr(dir, &res.cinfo);
2805 nfs_post_op_update_inode(dir, res.dir_attr);
2806 }
2807 nfs_free_fattr(res.dir_attr);
2808out:
2809 return status; 2848 return status;
2810} 2849}
2811 2850
@@ -2827,7 +2866,6 @@ static void nfs4_proc_unlink_setup(struct rpc_message *msg, struct inode *dir)
2827 struct nfs_removeargs *args = msg->rpc_argp; 2866 struct nfs_removeargs *args = msg->rpc_argp;
2828 struct nfs_removeres *res = msg->rpc_resp; 2867 struct nfs_removeres *res = msg->rpc_resp;
2829 2868
2830 args->bitmask = server->cache_consistency_bitmask;
2831 res->server = server; 2869 res->server = server;
2832 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE]; 2870 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_REMOVE];
2833 nfs41_init_sequence(&args->seq_args, &res->seq_res, 1); 2871 nfs41_init_sequence(&args->seq_args, &res->seq_res, 1);
@@ -2852,7 +2890,6 @@ static int nfs4_proc_unlink_done(struct rpc_task *task, struct inode *dir)
2852 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN) 2890 if (nfs4_async_handle_error(task, res->server, NULL) == -EAGAIN)
2853 return 0; 2891 return 0;
2854 update_changeattr(dir, &res->cinfo); 2892 update_changeattr(dir, &res->cinfo);
2855 nfs_post_op_update_inode(dir, res->dir_attr);
2856 return 1; 2893 return 1;
2857} 2894}
2858 2895
@@ -2863,7 +2900,6 @@ static void nfs4_proc_rename_setup(struct rpc_message *msg, struct inode *dir)
2863 struct nfs_renameres *res = msg->rpc_resp; 2900 struct nfs_renameres *res = msg->rpc_resp;
2864 2901
2865 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME]; 2902 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_RENAME];
2866 arg->bitmask = server->attr_bitmask;
2867 res->server = server; 2903 res->server = server;
2868 nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1); 2904 nfs41_init_sequence(&arg->seq_args, &res->seq_res, 1);
2869} 2905}
@@ -2889,9 +2925,7 @@ static int nfs4_proc_rename_done(struct rpc_task *task, struct inode *old_dir,
2889 return 0; 2925 return 0;
2890 2926
2891 update_changeattr(old_dir, &res->old_cinfo); 2927 update_changeattr(old_dir, &res->old_cinfo);
2892 nfs_post_op_update_inode(old_dir, res->old_fattr);
2893 update_changeattr(new_dir, &res->new_cinfo); 2928 update_changeattr(new_dir, &res->new_cinfo);
2894 nfs_post_op_update_inode(new_dir, res->new_fattr);
2895 return 1; 2929 return 1;
2896} 2930}
2897 2931
@@ -2904,7 +2938,6 @@ static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
2904 .new_dir = NFS_FH(new_dir), 2938 .new_dir = NFS_FH(new_dir),
2905 .old_name = old_name, 2939 .old_name = old_name,
2906 .new_name = new_name, 2940 .new_name = new_name,
2907 .bitmask = server->attr_bitmask,
2908 }; 2941 };
2909 struct nfs_renameres res = { 2942 struct nfs_renameres res = {
2910 .server = server, 2943 .server = server,
@@ -2916,21 +2949,11 @@ static int _nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name,
2916 }; 2949 };
2917 int status = -ENOMEM; 2950 int status = -ENOMEM;
2918 2951
2919 res.old_fattr = nfs_alloc_fattr();
2920 res.new_fattr = nfs_alloc_fattr();
2921 if (res.old_fattr == NULL || res.new_fattr == NULL)
2922 goto out;
2923
2924 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 2952 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2925 if (!status) { 2953 if (!status) {
2926 update_changeattr(old_dir, &res.old_cinfo); 2954 update_changeattr(old_dir, &res.old_cinfo);
2927 nfs_post_op_update_inode(old_dir, res.old_fattr);
2928 update_changeattr(new_dir, &res.new_cinfo); 2955 update_changeattr(new_dir, &res.new_cinfo);
2929 nfs_post_op_update_inode(new_dir, res.new_fattr);
2930 } 2956 }
2931out:
2932 nfs_free_fattr(res.new_fattr);
2933 nfs_free_fattr(res.old_fattr);
2934 return status; 2957 return status;
2935} 2958}
2936 2959
@@ -2968,18 +2991,15 @@ static int _nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *
2968 int status = -ENOMEM; 2991 int status = -ENOMEM;
2969 2992
2970 res.fattr = nfs_alloc_fattr(); 2993 res.fattr = nfs_alloc_fattr();
2971 res.dir_attr = nfs_alloc_fattr(); 2994 if (res.fattr == NULL)
2972 if (res.fattr == NULL || res.dir_attr == NULL)
2973 goto out; 2995 goto out;
2974 2996
2975 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1); 2997 status = nfs4_call_sync(server->client, server, &msg, &arg.seq_args, &res.seq_res, 1);
2976 if (!status) { 2998 if (!status) {
2977 update_changeattr(dir, &res.cinfo); 2999 update_changeattr(dir, &res.cinfo);
2978 nfs_post_op_update_inode(dir, res.dir_attr);
2979 nfs_post_op_update_inode(inode, res.fattr); 3000 nfs_post_op_update_inode(inode, res.fattr);
2980 } 3001 }
2981out: 3002out:
2982 nfs_free_fattr(res.dir_attr);
2983 nfs_free_fattr(res.fattr); 3003 nfs_free_fattr(res.fattr);
2984 return status; 3004 return status;
2985} 3005}
@@ -3002,7 +3022,6 @@ struct nfs4_createdata {
3002 struct nfs4_create_res res; 3022 struct nfs4_create_res res;
3003 struct nfs_fh fh; 3023 struct nfs_fh fh;
3004 struct nfs_fattr fattr; 3024 struct nfs_fattr fattr;
3005 struct nfs_fattr dir_fattr;
3006}; 3025};
3007 3026
3008static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir, 3027static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
@@ -3026,9 +3045,7 @@ static struct nfs4_createdata *nfs4_alloc_createdata(struct inode *dir,
3026 data->res.server = server; 3045 data->res.server = server;
3027 data->res.fh = &data->fh; 3046 data->res.fh = &data->fh;
3028 data->res.fattr = &data->fattr; 3047 data->res.fattr = &data->fattr;
3029 data->res.dir_fattr = &data->dir_fattr;
3030 nfs_fattr_init(data->res.fattr); 3048 nfs_fattr_init(data->res.fattr);
3031 nfs_fattr_init(data->res.dir_fattr);
3032 } 3049 }
3033 return data; 3050 return data;
3034} 3051}
@@ -3039,7 +3056,6 @@ static int nfs4_do_create(struct inode *dir, struct dentry *dentry, struct nfs4_
3039 &data->arg.seq_args, &data->res.seq_res, 1); 3056 &data->arg.seq_args, &data->res.seq_res, 1);
3040 if (status == 0) { 3057 if (status == 0) {
3041 update_changeattr(dir, &data->res.dir_cinfo); 3058 update_changeattr(dir, &data->res.dir_cinfo);
3042 nfs_post_op_update_inode(dir, data->res.dir_fattr);
3043 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr); 3059 status = nfs_instantiate(dentry, data->res.fh, data->res.fattr);
3044 } 3060 }
3045 return status; 3061 return status;
@@ -3335,12 +3351,12 @@ static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
3335 3351
3336void __nfs4_read_done_cb(struct nfs_read_data *data) 3352void __nfs4_read_done_cb(struct nfs_read_data *data)
3337{ 3353{
3338 nfs_invalidate_atime(data->inode); 3354 nfs_invalidate_atime(data->header->inode);
3339} 3355}
3340 3356
3341static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data) 3357static int nfs4_read_done_cb(struct rpc_task *task, struct nfs_read_data *data)
3342{ 3358{
3343 struct nfs_server *server = NFS_SERVER(data->inode); 3359 struct nfs_server *server = NFS_SERVER(data->header->inode);
3344 3360
3345 if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) { 3361 if (nfs4_async_handle_error(task, server, data->args.context->state) == -EAGAIN) {
3346 rpc_restart_call_prepare(task); 3362 rpc_restart_call_prepare(task);
@@ -3375,7 +3391,7 @@ static void nfs4_proc_read_setup(struct nfs_read_data *data, struct rpc_message
3375 3391
3376static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data) 3392static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_data *data)
3377{ 3393{
3378 if (nfs4_setup_sequence(NFS_SERVER(data->inode), 3394 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
3379 &data->args.seq_args, 3395 &data->args.seq_args,
3380 &data->res.seq_res, 3396 &data->res.seq_res,
3381 task)) 3397 task))
@@ -3383,25 +3399,9 @@ static void nfs4_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_da
3383 rpc_call_start(task); 3399 rpc_call_start(task);
3384} 3400}
3385 3401
3386/* Reset the the nfs_read_data to send the read to the MDS. */
3387void nfs4_reset_read(struct rpc_task *task, struct nfs_read_data *data)
3388{
3389 dprintk("%s Reset task for i/o through\n", __func__);
3390 put_lseg(data->lseg);
3391 data->lseg = NULL;
3392 /* offsets will differ in the dense stripe case */
3393 data->args.offset = data->mds_offset;
3394 data->ds_clp = NULL;
3395 data->args.fh = NFS_FH(data->inode);
3396 data->read_done_cb = nfs4_read_done_cb;
3397 task->tk_ops = data->mds_ops;
3398 rpc_task_reset_client(task, NFS_CLIENT(data->inode));
3399}
3400EXPORT_SYMBOL_GPL(nfs4_reset_read);
3401
3402static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data) 3402static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data)
3403{ 3403{
3404 struct inode *inode = data->inode; 3404 struct inode *inode = data->header->inode;
3405 3405
3406 if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) { 3406 if (nfs4_async_handle_error(task, NFS_SERVER(inode), data->args.context->state) == -EAGAIN) {
3407 rpc_restart_call_prepare(task); 3407 rpc_restart_call_prepare(task);
@@ -3409,7 +3409,7 @@ static int nfs4_write_done_cb(struct rpc_task *task, struct nfs_write_data *data
3409 } 3409 }
3410 if (task->tk_status >= 0) { 3410 if (task->tk_status >= 0) {
3411 renew_lease(NFS_SERVER(inode), data->timestamp); 3411 renew_lease(NFS_SERVER(inode), data->timestamp);
3412 nfs_post_op_update_inode_force_wcc(inode, data->res.fattr); 3412 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
3413 } 3413 }
3414 return 0; 3414 return 0;
3415} 3415}
@@ -3422,32 +3422,30 @@ static int nfs4_write_done(struct rpc_task *task, struct nfs_write_data *data)
3422 nfs4_write_done_cb(task, data); 3422 nfs4_write_done_cb(task, data);
3423} 3423}
3424 3424
3425/* Reset the the nfs_write_data to send the write to the MDS. */ 3425static
3426void nfs4_reset_write(struct rpc_task *task, struct nfs_write_data *data) 3426bool nfs4_write_need_cache_consistency_data(const struct nfs_write_data *data)
3427{ 3427{
3428 dprintk("%s Reset task for i/o through\n", __func__); 3428 const struct nfs_pgio_header *hdr = data->header;
3429 put_lseg(data->lseg); 3429
3430 data->lseg = NULL; 3430 /* Don't request attributes for pNFS or O_DIRECT writes */
3431 data->ds_clp = NULL; 3431 if (data->ds_clp != NULL || hdr->dreq != NULL)
3432 data->write_done_cb = nfs4_write_done_cb; 3432 return false;
3433 data->args.fh = NFS_FH(data->inode); 3433 /* Otherwise, request attributes if and only if we don't hold
3434 data->args.bitmask = data->res.server->cache_consistency_bitmask; 3434 * a delegation
3435 data->args.offset = data->mds_offset; 3435 */
3436 data->res.fattr = &data->fattr; 3436 return nfs_have_delegation(hdr->inode, FMODE_READ) == 0;
3437 task->tk_ops = data->mds_ops;
3438 rpc_task_reset_client(task, NFS_CLIENT(data->inode));
3439} 3437}
3440EXPORT_SYMBOL_GPL(nfs4_reset_write);
3441 3438
3442static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg) 3439static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_message *msg)
3443{ 3440{
3444 struct nfs_server *server = NFS_SERVER(data->inode); 3441 struct nfs_server *server = NFS_SERVER(data->header->inode);
3445 3442
3446 if (data->lseg) { 3443 if (!nfs4_write_need_cache_consistency_data(data)) {
3447 data->args.bitmask = NULL; 3444 data->args.bitmask = NULL;
3448 data->res.fattr = NULL; 3445 data->res.fattr = NULL;
3449 } else 3446 } else
3450 data->args.bitmask = server->cache_consistency_bitmask; 3447 data->args.bitmask = server->cache_consistency_bitmask;
3448
3451 if (!data->write_done_cb) 3449 if (!data->write_done_cb)
3452 data->write_done_cb = nfs4_write_done_cb; 3450 data->write_done_cb = nfs4_write_done_cb;
3453 data->res.server = server; 3451 data->res.server = server;
@@ -3459,6 +3457,16 @@ static void nfs4_proc_write_setup(struct nfs_write_data *data, struct rpc_messag
3459 3457
3460static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data) 3458static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_data *data)
3461{ 3459{
3460 if (nfs4_setup_sequence(NFS_SERVER(data->header->inode),
3461 &data->args.seq_args,
3462 &data->res.seq_res,
3463 task))
3464 return;
3465 rpc_call_start(task);
3466}
3467
3468static void nfs4_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
3469{
3462 if (nfs4_setup_sequence(NFS_SERVER(data->inode), 3470 if (nfs4_setup_sequence(NFS_SERVER(data->inode),
3463 &data->args.seq_args, 3471 &data->args.seq_args,
3464 &data->res.seq_res, 3472 &data->res.seq_res,
@@ -3467,7 +3475,7 @@ static void nfs4_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_
3467 rpc_call_start(task); 3475 rpc_call_start(task);
3468} 3476}
3469 3477
3470static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *data) 3478static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_commit_data *data)
3471{ 3479{
3472 struct inode *inode = data->inode; 3480 struct inode *inode = data->inode;
3473 3481
@@ -3475,28 +3483,22 @@ static int nfs4_commit_done_cb(struct rpc_task *task, struct nfs_write_data *dat
3475 rpc_restart_call_prepare(task); 3483 rpc_restart_call_prepare(task);
3476 return -EAGAIN; 3484 return -EAGAIN;
3477 } 3485 }
3478 nfs_refresh_inode(inode, data->res.fattr);
3479 return 0; 3486 return 0;
3480} 3487}
3481 3488
3482static int nfs4_commit_done(struct rpc_task *task, struct nfs_write_data *data) 3489static int nfs4_commit_done(struct rpc_task *task, struct nfs_commit_data *data)
3483{ 3490{
3484 if (!nfs4_sequence_done(task, &data->res.seq_res)) 3491 if (!nfs4_sequence_done(task, &data->res.seq_res))
3485 return -EAGAIN; 3492 return -EAGAIN;
3486 return data->write_done_cb(task, data); 3493 return data->commit_done_cb(task, data);
3487} 3494}
3488 3495
3489static void nfs4_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg) 3496static void nfs4_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
3490{ 3497{
3491 struct nfs_server *server = NFS_SERVER(data->inode); 3498 struct nfs_server *server = NFS_SERVER(data->inode);
3492 3499
3493 if (data->lseg) { 3500 if (data->commit_done_cb == NULL)
3494 data->args.bitmask = NULL; 3501 data->commit_done_cb = nfs4_commit_done_cb;
3495 data->res.fattr = NULL;
3496 } else
3497 data->args.bitmask = server->cache_consistency_bitmask;
3498 if (!data->write_done_cb)
3499 data->write_done_cb = nfs4_commit_done_cb;
3500 data->res.server = server; 3502 data->res.server = server;
3501 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT]; 3503 msg->rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_COMMIT];
3502 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 3504 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
@@ -3905,7 +3907,7 @@ nfs4_async_handle_error(struct rpc_task *task, const struct nfs_server *server,
3905 case -NFS4ERR_SEQ_MISORDERED: 3907 case -NFS4ERR_SEQ_MISORDERED:
3906 dprintk("%s ERROR %d, Reset session\n", __func__, 3908 dprintk("%s ERROR %d, Reset session\n", __func__,
3907 task->tk_status); 3909 task->tk_status);
3908 nfs4_schedule_session_recovery(clp->cl_session); 3910 nfs4_schedule_session_recovery(clp->cl_session, task->tk_status);
3909 task->tk_status = 0; 3911 task->tk_status = 0;
3910 return -EAGAIN; 3912 return -EAGAIN;
3911#endif /* CONFIG_NFS_V4_1 */ 3913#endif /* CONFIG_NFS_V4_1 */
@@ -3931,13 +3933,21 @@ wait_on_recovery:
3931 return -EAGAIN; 3933 return -EAGAIN;
3932} 3934}
3933 3935
3934static void nfs4_construct_boot_verifier(struct nfs_client *clp, 3936static void nfs4_init_boot_verifier(const struct nfs_client *clp,
3935 nfs4_verifier *bootverf) 3937 nfs4_verifier *bootverf)
3936{ 3938{
3937 __be32 verf[2]; 3939 __be32 verf[2];
3938 3940
3939 verf[0] = htonl((u32)clp->cl_boot_time.tv_sec); 3941 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
3940 verf[1] = htonl((u32)clp->cl_boot_time.tv_nsec); 3942 /* An impossible timestamp guarantees this value
3943 * will never match a generated boot time. */
3944 verf[0] = 0;
3945 verf[1] = (__be32)(NSEC_PER_SEC + 1);
3946 } else {
3947 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
3948 verf[0] = (__be32)nn->boot_time.tv_sec;
3949 verf[1] = (__be32)nn->boot_time.tv_nsec;
3950 }
3941 memcpy(bootverf->data, verf, sizeof(bootverf->data)); 3951 memcpy(bootverf->data, verf, sizeof(bootverf->data));
3942} 3952}
3943 3953
@@ -3960,7 +3970,7 @@ int nfs4_proc_setclientid(struct nfs_client *clp, u32 program,
3960 int loop = 0; 3970 int loop = 0;
3961 int status; 3971 int status;
3962 3972
3963 nfs4_construct_boot_verifier(clp, &sc_verifier); 3973 nfs4_init_boot_verifier(clp, &sc_verifier);
3964 3974
3965 for(;;) { 3975 for(;;) {
3966 rcu_read_lock(); 3976 rcu_read_lock();
@@ -4104,7 +4114,7 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
4104 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1); 4114 nfs41_init_sequence(&data->args.seq_args, &data->res.seq_res, 1);
4105 data->args.fhandle = &data->fh; 4115 data->args.fhandle = &data->fh;
4106 data->args.stateid = &data->stateid; 4116 data->args.stateid = &data->stateid;
4107 data->args.bitmask = server->attr_bitmask; 4117 data->args.bitmask = server->cache_consistency_bitmask;
4108 nfs_copy_fh(&data->fh, NFS_FH(inode)); 4118 nfs_copy_fh(&data->fh, NFS_FH(inode));
4109 nfs4_stateid_copy(&data->stateid, stateid); 4119 nfs4_stateid_copy(&data->stateid, stateid);
4110 data->res.fattr = &data->fattr; 4120 data->res.fattr = &data->fattr;
@@ -4125,9 +4135,10 @@ static int _nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, co
4125 if (status != 0) 4135 if (status != 0)
4126 goto out; 4136 goto out;
4127 status = data->rpc_status; 4137 status = data->rpc_status;
4128 if (status != 0) 4138 if (status == 0)
4129 goto out; 4139 nfs_post_op_update_inode_force_wcc(inode, &data->fattr);
4130 nfs_refresh_inode(inode, &data->fattr); 4140 else
4141 nfs_refresh_inode(inode, &data->fattr);
4131out: 4142out:
4132 rpc_put_task(task); 4143 rpc_put_task(task);
4133 return status; 4144 return status;
@@ -4837,7 +4848,7 @@ int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl)
4837 case -NFS4ERR_BAD_HIGH_SLOT: 4848 case -NFS4ERR_BAD_HIGH_SLOT:
4838 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION: 4849 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
4839 case -NFS4ERR_DEADSESSION: 4850 case -NFS4ERR_DEADSESSION:
4840 nfs4_schedule_session_recovery(server->nfs_client->cl_session); 4851 nfs4_schedule_session_recovery(server->nfs_client->cl_session, err);
4841 goto out; 4852 goto out;
4842 case -ERESTARTSYS: 4853 case -ERESTARTSYS:
4843 /* 4854 /*
@@ -5079,7 +5090,8 @@ out_inval:
5079} 5090}
5080 5091
5081static bool 5092static bool
5082nfs41_same_server_scope(struct server_scope *a, struct server_scope *b) 5093nfs41_same_server_scope(struct nfs41_server_scope *a,
5094 struct nfs41_server_scope *b)
5083{ 5095{
5084 if (a->server_scope_sz == b->server_scope_sz && 5096 if (a->server_scope_sz == b->server_scope_sz &&
5085 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0) 5097 memcmp(a->server_scope, b->server_scope, a->server_scope_sz) == 0)
@@ -5089,6 +5101,61 @@ nfs41_same_server_scope(struct server_scope *a, struct server_scope *b)
5089} 5101}
5090 5102
5091/* 5103/*
5104 * nfs4_proc_bind_conn_to_session()
5105 *
5106 * The 4.1 client currently uses the same TCP connection for the
5107 * fore and backchannel.
5108 */
5109int nfs4_proc_bind_conn_to_session(struct nfs_client *clp, struct rpc_cred *cred)
5110{
5111 int status;
5112 struct nfs41_bind_conn_to_session_res res;
5113 struct rpc_message msg = {
5114 .rpc_proc =
5115 &nfs4_procedures[NFSPROC4_CLNT_BIND_CONN_TO_SESSION],
5116 .rpc_argp = clp,
5117 .rpc_resp = &res,
5118 .rpc_cred = cred,
5119 };
5120
5121 dprintk("--> %s\n", __func__);
5122 BUG_ON(clp == NULL);
5123
5124 res.session = kzalloc(sizeof(struct nfs4_session), GFP_NOFS);
5125 if (unlikely(res.session == NULL)) {
5126 status = -ENOMEM;
5127 goto out;
5128 }
5129
5130 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5131 if (status == 0) {
5132 if (memcmp(res.session->sess_id.data,
5133 clp->cl_session->sess_id.data, NFS4_MAX_SESSIONID_LEN)) {
5134 dprintk("NFS: %s: Session ID mismatch\n", __func__);
5135 status = -EIO;
5136 goto out_session;
5137 }
5138 if (res.dir != NFS4_CDFS4_BOTH) {
5139 dprintk("NFS: %s: Unexpected direction from server\n",
5140 __func__);
5141 status = -EIO;
5142 goto out_session;
5143 }
5144 if (res.use_conn_in_rdma_mode) {
5145 dprintk("NFS: %s: Server returned RDMA mode = true\n",
5146 __func__);
5147 status = -EIO;
5148 goto out_session;
5149 }
5150 }
5151out_session:
5152 kfree(res.session);
5153out:
5154 dprintk("<-- %s status= %d\n", __func__, status);
5155 return status;
5156}
5157
5158/*
5092 * nfs4_proc_exchange_id() 5159 * nfs4_proc_exchange_id()
5093 * 5160 *
5094 * Since the clientid has expired, all compounds using sessions 5161 * Since the clientid has expired, all compounds using sessions
@@ -5105,7 +5172,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
5105 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER, 5172 .flags = EXCHGID4_FLAG_SUPP_MOVED_REFER,
5106 }; 5173 };
5107 struct nfs41_exchange_id_res res = { 5174 struct nfs41_exchange_id_res res = {
5108 .client = clp, 5175 0
5109 }; 5176 };
5110 int status; 5177 int status;
5111 struct rpc_message msg = { 5178 struct rpc_message msg = {
@@ -5118,7 +5185,7 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
5118 dprintk("--> %s\n", __func__); 5185 dprintk("--> %s\n", __func__);
5119 BUG_ON(clp == NULL); 5186 BUG_ON(clp == NULL);
5120 5187
5121 nfs4_construct_boot_verifier(clp, &verifier); 5188 nfs4_init_boot_verifier(clp, &verifier);
5122 5189
5123 args.id_len = scnprintf(args.id, sizeof(args.id), 5190 args.id_len = scnprintf(args.id, sizeof(args.id),
5124 "%s/%s/%u", 5191 "%s/%s/%u",
@@ -5126,59 +5193,135 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred)
5126 clp->cl_rpcclient->cl_nodename, 5193 clp->cl_rpcclient->cl_nodename,
5127 clp->cl_rpcclient->cl_auth->au_flavor); 5194 clp->cl_rpcclient->cl_auth->au_flavor);
5128 5195
5129 res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL); 5196 res.server_owner = kzalloc(sizeof(struct nfs41_server_owner),
5130 if (unlikely(!res.server_scope)) { 5197 GFP_NOFS);
5198 if (unlikely(res.server_owner == NULL)) {
5131 status = -ENOMEM; 5199 status = -ENOMEM;
5132 goto out; 5200 goto out;
5133 } 5201 }
5134 5202
5135 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_KERNEL); 5203 res.server_scope = kzalloc(sizeof(struct nfs41_server_scope),
5136 if (unlikely(!res.impl_id)) { 5204 GFP_NOFS);
5205 if (unlikely(res.server_scope == NULL)) {
5206 status = -ENOMEM;
5207 goto out_server_owner;
5208 }
5209
5210 res.impl_id = kzalloc(sizeof(struct nfs41_impl_id), GFP_NOFS);
5211 if (unlikely(res.impl_id == NULL)) {
5137 status = -ENOMEM; 5212 status = -ENOMEM;
5138 goto out_server_scope; 5213 goto out_server_scope;
5139 } 5214 }
5140 5215
5141 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5216 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5142 if (!status) 5217 if (status == 0)
5143 status = nfs4_check_cl_exchange_flags(clp->cl_exchange_flags); 5218 status = nfs4_check_cl_exchange_flags(res.flags);
5219
5220 if (status == 0) {
5221 clp->cl_clientid = res.clientid;
5222 clp->cl_exchange_flags = (res.flags & ~EXCHGID4_FLAG_CONFIRMED_R);
5223 if (!(res.flags & EXCHGID4_FLAG_CONFIRMED_R))
5224 clp->cl_seqid = res.seqid;
5225
5226 kfree(clp->cl_serverowner);
5227 clp->cl_serverowner = res.server_owner;
5228 res.server_owner = NULL;
5144 5229
5145 if (!status) {
5146 /* use the most recent implementation id */ 5230 /* use the most recent implementation id */
5147 kfree(clp->impl_id); 5231 kfree(clp->cl_implid);
5148 clp->impl_id = res.impl_id; 5232 clp->cl_implid = res.impl_id;
5149 } else
5150 kfree(res.impl_id);
5151 5233
5152 if (!status) { 5234 if (clp->cl_serverscope != NULL &&
5153 if (clp->server_scope && 5235 !nfs41_same_server_scope(clp->cl_serverscope,
5154 !nfs41_same_server_scope(clp->server_scope,
5155 res.server_scope)) { 5236 res.server_scope)) {
5156 dprintk("%s: server_scope mismatch detected\n", 5237 dprintk("%s: server_scope mismatch detected\n",
5157 __func__); 5238 __func__);
5158 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state); 5239 set_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, &clp->cl_state);
5159 kfree(clp->server_scope); 5240 kfree(clp->cl_serverscope);
5160 clp->server_scope = NULL; 5241 clp->cl_serverscope = NULL;
5161 } 5242 }
5162 5243
5163 if (!clp->server_scope) { 5244 if (clp->cl_serverscope == NULL) {
5164 clp->server_scope = res.server_scope; 5245 clp->cl_serverscope = res.server_scope;
5165 goto out; 5246 goto out;
5166 } 5247 }
5167 } 5248 } else
5249 kfree(res.impl_id);
5168 5250
5251out_server_owner:
5252 kfree(res.server_owner);
5169out_server_scope: 5253out_server_scope:
5170 kfree(res.server_scope); 5254 kfree(res.server_scope);
5171out: 5255out:
5172 if (clp->impl_id) 5256 if (clp->cl_implid != NULL)
5173 dprintk("%s: Server Implementation ID: " 5257 dprintk("%s: Server Implementation ID: "
5174 "domain: %s, name: %s, date: %llu,%u\n", 5258 "domain: %s, name: %s, date: %llu,%u\n",
5175 __func__, clp->impl_id->domain, clp->impl_id->name, 5259 __func__, clp->cl_implid->domain, clp->cl_implid->name,
5176 clp->impl_id->date.seconds, 5260 clp->cl_implid->date.seconds,
5177 clp->impl_id->date.nseconds); 5261 clp->cl_implid->date.nseconds);
5178 dprintk("<-- %s status= %d\n", __func__, status); 5262 dprintk("<-- %s status= %d\n", __func__, status);
5179 return status; 5263 return status;
5180} 5264}
5181 5265
5266static int _nfs4_proc_destroy_clientid(struct nfs_client *clp,
5267 struct rpc_cred *cred)
5268{
5269 struct rpc_message msg = {
5270 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_CLIENTID],
5271 .rpc_argp = clp,
5272 .rpc_cred = cred,
5273 };
5274 int status;
5275
5276 status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5277 if (status)
5278 pr_warn("NFS: Got error %d from the server %s on "
5279 "DESTROY_CLIENTID.", status, clp->cl_hostname);
5280 return status;
5281}
5282
5283static int nfs4_proc_destroy_clientid(struct nfs_client *clp,
5284 struct rpc_cred *cred)
5285{
5286 unsigned int loop;
5287 int ret;
5288
5289 for (loop = NFS4_MAX_LOOP_ON_RECOVER; loop != 0; loop--) {
5290 ret = _nfs4_proc_destroy_clientid(clp, cred);
5291 switch (ret) {
5292 case -NFS4ERR_DELAY:
5293 case -NFS4ERR_CLIENTID_BUSY:
5294 ssleep(1);
5295 break;
5296 default:
5297 return ret;
5298 }
5299 }
5300 return 0;
5301}
5302
5303int nfs4_destroy_clientid(struct nfs_client *clp)
5304{
5305 struct rpc_cred *cred;
5306 int ret = 0;
5307
5308 if (clp->cl_mvops->minor_version < 1)
5309 goto out;
5310 if (clp->cl_exchange_flags == 0)
5311 goto out;
5312 cred = nfs4_get_exchange_id_cred(clp);
5313 ret = nfs4_proc_destroy_clientid(clp, cred);
5314 if (cred)
5315 put_rpccred(cred);
5316 switch (ret) {
5317 case 0:
5318 case -NFS4ERR_STALE_CLIENTID:
5319 clp->cl_exchange_flags = 0;
5320 }
5321out:
5322 return ret;
5323}
5324
5182struct nfs4_get_lease_time_data { 5325struct nfs4_get_lease_time_data {
5183 struct nfs4_get_lease_time_args *args; 5326 struct nfs4_get_lease_time_args *args;
5184 struct nfs4_get_lease_time_res *res; 5327 struct nfs4_get_lease_time_res *res;
@@ -5399,8 +5542,12 @@ struct nfs4_session *nfs4_alloc_session(struct nfs_client *clp)
5399void nfs4_destroy_session(struct nfs4_session *session) 5542void nfs4_destroy_session(struct nfs4_session *session)
5400{ 5543{
5401 struct rpc_xprt *xprt; 5544 struct rpc_xprt *xprt;
5545 struct rpc_cred *cred;
5402 5546
5403 nfs4_proc_destroy_session(session); 5547 cred = nfs4_get_exchange_id_cred(session->clp);
5548 nfs4_proc_destroy_session(session, cred);
5549 if (cred)
5550 put_rpccred(cred);
5404 5551
5405 rcu_read_lock(); 5552 rcu_read_lock();
5406 xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt); 5553 xprt = rcu_dereference(session->clp->cl_rpcclient->cl_xprt);
@@ -5510,7 +5657,8 @@ static int nfs4_verify_channel_attrs(struct nfs41_create_session_args *args,
5510 return nfs4_verify_back_channel_attrs(args, session); 5657 return nfs4_verify_back_channel_attrs(args, session);
5511} 5658}
5512 5659
5513static int _nfs4_proc_create_session(struct nfs_client *clp) 5660static int _nfs4_proc_create_session(struct nfs_client *clp,
5661 struct rpc_cred *cred)
5514{ 5662{
5515 struct nfs4_session *session = clp->cl_session; 5663 struct nfs4_session *session = clp->cl_session;
5516 struct nfs41_create_session_args args = { 5664 struct nfs41_create_session_args args = {
@@ -5524,6 +5672,7 @@ static int _nfs4_proc_create_session(struct nfs_client *clp)
5524 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION], 5672 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_CREATE_SESSION],
5525 .rpc_argp = &args, 5673 .rpc_argp = &args,
5526 .rpc_resp = &res, 5674 .rpc_resp = &res,
5675 .rpc_cred = cred,
5527 }; 5676 };
5528 int status; 5677 int status;
5529 5678
@@ -5548,7 +5697,7 @@ static int _nfs4_proc_create_session(struct nfs_client *clp)
5548 * It is the responsibility of the caller to verify the session is 5697 * It is the responsibility of the caller to verify the session is
5549 * expired before calling this routine. 5698 * expired before calling this routine.
5550 */ 5699 */
5551int nfs4_proc_create_session(struct nfs_client *clp) 5700int nfs4_proc_create_session(struct nfs_client *clp, struct rpc_cred *cred)
5552{ 5701{
5553 int status; 5702 int status;
5554 unsigned *ptr; 5703 unsigned *ptr;
@@ -5556,7 +5705,7 @@ int nfs4_proc_create_session(struct nfs_client *clp)
5556 5705
5557 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session); 5706 dprintk("--> %s clp=%p session=%p\n", __func__, clp, session);
5558 5707
5559 status = _nfs4_proc_create_session(clp); 5708 status = _nfs4_proc_create_session(clp, cred);
5560 if (status) 5709 if (status)
5561 goto out; 5710 goto out;
5562 5711
@@ -5578,10 +5727,15 @@ out:
5578 * Issue the over-the-wire RPC DESTROY_SESSION. 5727 * Issue the over-the-wire RPC DESTROY_SESSION.
5579 * The caller must serialize access to this routine. 5728 * The caller must serialize access to this routine.
5580 */ 5729 */
5581int nfs4_proc_destroy_session(struct nfs4_session *session) 5730int nfs4_proc_destroy_session(struct nfs4_session *session,
5731 struct rpc_cred *cred)
5582{ 5732{
5733 struct rpc_message msg = {
5734 .rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION],
5735 .rpc_argp = session,
5736 .rpc_cred = cred,
5737 };
5583 int status = 0; 5738 int status = 0;
5584 struct rpc_message msg;
5585 5739
5586 dprintk("--> nfs4_proc_destroy_session\n"); 5740 dprintk("--> nfs4_proc_destroy_session\n");
5587 5741
@@ -5589,10 +5743,6 @@ int nfs4_proc_destroy_session(struct nfs4_session *session)
5589 if (session->clp->cl_cons_state != NFS_CS_READY) 5743 if (session->clp->cl_cons_state != NFS_CS_READY)
5590 return status; 5744 return status;
5591 5745
5592 msg.rpc_proc = &nfs4_procedures[NFSPROC4_CLNT_DESTROY_SESSION];
5593 msg.rpc_argp = session;
5594 msg.rpc_resp = NULL;
5595 msg.rpc_cred = NULL;
5596 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); 5746 status = rpc_call_sync(session->clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT);
5597 5747
5598 if (status) 5748 if (status)
@@ -5604,53 +5754,79 @@ int nfs4_proc_destroy_session(struct nfs4_session *session)
5604 return status; 5754 return status;
5605} 5755}
5606 5756
5757/*
5758 * With sessions, the client is not marked ready until after a
5759 * successful EXCHANGE_ID and CREATE_SESSION.
5760 *
5761 * Map errors cl_cons_state errors to EPROTONOSUPPORT to indicate
5762 * other versions of NFS can be tried.
5763 */
5764static int nfs41_check_session_ready(struct nfs_client *clp)
5765{
5766 int ret;
5767
5768 if (clp->cl_cons_state == NFS_CS_SESSION_INITING) {
5769 ret = nfs4_client_recover_expired_lease(clp);
5770 if (ret)
5771 return ret;
5772 }
5773 if (clp->cl_cons_state < NFS_CS_READY)
5774 return -EPROTONOSUPPORT;
5775 smp_rmb();
5776 return 0;
5777}
5778
5607int nfs4_init_session(struct nfs_server *server) 5779int nfs4_init_session(struct nfs_server *server)
5608{ 5780{
5609 struct nfs_client *clp = server->nfs_client; 5781 struct nfs_client *clp = server->nfs_client;
5610 struct nfs4_session *session; 5782 struct nfs4_session *session;
5611 unsigned int rsize, wsize; 5783 unsigned int rsize, wsize;
5612 int ret;
5613 5784
5614 if (!nfs4_has_session(clp)) 5785 if (!nfs4_has_session(clp))
5615 return 0; 5786 return 0;
5616 5787
5617 session = clp->cl_session; 5788 session = clp->cl_session;
5618 if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) 5789 spin_lock(&clp->cl_lock);
5619 return 0; 5790 if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
5620 5791
5621 rsize = server->rsize; 5792 rsize = server->rsize;
5622 if (rsize == 0) 5793 if (rsize == 0)
5623 rsize = NFS_MAX_FILE_IO_SIZE; 5794 rsize = NFS_MAX_FILE_IO_SIZE;
5624 wsize = server->wsize; 5795 wsize = server->wsize;
5625 if (wsize == 0) 5796 if (wsize == 0)
5626 wsize = NFS_MAX_FILE_IO_SIZE; 5797 wsize = NFS_MAX_FILE_IO_SIZE;
5627 5798
5628 session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead; 5799 session->fc_attrs.max_rqst_sz = wsize + nfs41_maxwrite_overhead;
5629 session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead; 5800 session->fc_attrs.max_resp_sz = rsize + nfs41_maxread_overhead;
5801 }
5802 spin_unlock(&clp->cl_lock);
5630 5803
5631 ret = nfs4_recover_expired_lease(server); 5804 return nfs41_check_session_ready(clp);
5632 if (!ret)
5633 ret = nfs4_check_client_ready(clp);
5634 return ret;
5635} 5805}
5636 5806
5637int nfs4_init_ds_session(struct nfs_client *clp) 5807int nfs4_init_ds_session(struct nfs_client *clp, unsigned long lease_time)
5638{ 5808{
5639 struct nfs4_session *session = clp->cl_session; 5809 struct nfs4_session *session = clp->cl_session;
5640 int ret; 5810 int ret;
5641 5811
5642 if (!test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) 5812 spin_lock(&clp->cl_lock);
5643 return 0; 5813 if (test_and_clear_bit(NFS4_SESSION_INITING, &session->session_state)) {
5644 5814 /*
5645 ret = nfs4_client_recover_expired_lease(clp); 5815 * Do not set NFS_CS_CHECK_LEASE_TIME instead set the
5646 if (!ret) 5816 * DS lease to be equal to the MDS lease.
5647 /* Test for the DS role */ 5817 */
5648 if (!is_ds_client(clp)) 5818 clp->cl_lease_time = lease_time;
5649 ret = -ENODEV; 5819 clp->cl_last_renewal = jiffies;
5650 if (!ret) 5820 }
5651 ret = nfs4_check_client_ready(clp); 5821 spin_unlock(&clp->cl_lock);
5652 return ret;
5653 5822
5823 ret = nfs41_check_session_ready(clp);
5824 if (ret)
5825 return ret;
5826 /* Test for the DS role */
5827 if (!is_ds_client(clp))
5828 return -ENODEV;
5829 return 0;
5654} 5830}
5655EXPORT_SYMBOL_GPL(nfs4_init_ds_session); 5831EXPORT_SYMBOL_GPL(nfs4_init_ds_session);
5656 5832
@@ -6557,6 +6733,7 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
6557 .file_inode_ops = &nfs4_file_inode_operations, 6733 .file_inode_ops = &nfs4_file_inode_operations,
6558 .file_ops = &nfs4_file_operations, 6734 .file_ops = &nfs4_file_operations,
6559 .getroot = nfs4_proc_get_root, 6735 .getroot = nfs4_proc_get_root,
6736 .submount = nfs4_submount,
6560 .getattr = nfs4_proc_getattr, 6737 .getattr = nfs4_proc_getattr,
6561 .setattr = nfs4_proc_setattr, 6738 .setattr = nfs4_proc_setattr,
6562 .lookup = nfs4_proc_lookup, 6739 .lookup = nfs4_proc_lookup,
@@ -6589,13 +6766,13 @@ const struct nfs_rpc_ops nfs_v4_clientops = {
6589 .write_rpc_prepare = nfs4_proc_write_rpc_prepare, 6766 .write_rpc_prepare = nfs4_proc_write_rpc_prepare,
6590 .write_done = nfs4_write_done, 6767 .write_done = nfs4_write_done,
6591 .commit_setup = nfs4_proc_commit_setup, 6768 .commit_setup = nfs4_proc_commit_setup,
6769 .commit_rpc_prepare = nfs4_proc_commit_rpc_prepare,
6592 .commit_done = nfs4_commit_done, 6770 .commit_done = nfs4_commit_done,
6593 .lock = nfs4_proc_lock, 6771 .lock = nfs4_proc_lock,
6594 .clear_acl_cache = nfs4_zap_acl_attr, 6772 .clear_acl_cache = nfs4_zap_acl_attr,
6595 .close_context = nfs4_close_context, 6773 .close_context = nfs4_close_context,
6596 .open_context = nfs4_atomic_open, 6774 .open_context = nfs4_atomic_open,
6597 .init_client = nfs4_init_client, 6775 .init_client = nfs4_init_client,
6598 .secinfo = nfs4_proc_secinfo,
6599}; 6776};
6600 6777
6601static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = { 6778static const struct xattr_handler nfs4_xattr_nfs4_acl_handler = {
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index dc484c0eae7f..6930bec91bca 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -49,7 +49,7 @@
49#include "nfs4_fs.h" 49#include "nfs4_fs.h"
50#include "delegation.h" 50#include "delegation.h"
51 51
52#define NFSDBG_FACILITY NFSDBG_PROC 52#define NFSDBG_FACILITY NFSDBG_STATE
53 53
54void 54void
55nfs4_renew_state(struct work_struct *work) 55nfs4_renew_state(struct work_struct *work)
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 7f0fcfc1fe9d..c679b9ecef63 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -57,6 +57,8 @@
57#include "internal.h" 57#include "internal.h"
58#include "pnfs.h" 58#include "pnfs.h"
59 59
60#define NFSDBG_FACILITY NFSDBG_STATE
61
60#define OPENOWNER_POOL_SIZE 8 62#define OPENOWNER_POOL_SIZE 8
61 63
62const nfs4_stateid zero_stateid; 64const nfs4_stateid zero_stateid;
@@ -254,7 +256,7 @@ int nfs41_init_clientid(struct nfs_client *clp, struct rpc_cred *cred)
254 goto out; 256 goto out;
255 set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 257 set_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
256do_confirm: 258do_confirm:
257 status = nfs4_proc_create_session(clp); 259 status = nfs4_proc_create_session(clp, cred);
258 if (status != 0) 260 if (status != 0)
259 goto out; 261 goto out;
260 clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 262 clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
@@ -1106,6 +1108,8 @@ void nfs4_schedule_lease_recovery(struct nfs_client *clp)
1106 return; 1108 return;
1107 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) 1109 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
1108 set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state); 1110 set_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
1111 dprintk("%s: scheduling lease recovery for server %s\n", __func__,
1112 clp->cl_hostname);
1109 nfs4_schedule_state_manager(clp); 1113 nfs4_schedule_state_manager(clp);
1110} 1114}
1111EXPORT_SYMBOL_GPL(nfs4_schedule_lease_recovery); 1115EXPORT_SYMBOL_GPL(nfs4_schedule_lease_recovery);
@@ -1122,6 +1126,8 @@ static void nfs40_handle_cb_pathdown(struct nfs_client *clp)
1122{ 1126{
1123 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); 1127 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1124 nfs_expire_all_delegations(clp); 1128 nfs_expire_all_delegations(clp);
1129 dprintk("%s: handling CB_PATHDOWN recovery for server %s\n", __func__,
1130 clp->cl_hostname);
1125} 1131}
1126 1132
1127void nfs4_schedule_path_down_recovery(struct nfs_client *clp) 1133void nfs4_schedule_path_down_recovery(struct nfs_client *clp)
@@ -1158,6 +1164,8 @@ void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4
1158 struct nfs_client *clp = server->nfs_client; 1164 struct nfs_client *clp = server->nfs_client;
1159 1165
1160 nfs4_state_mark_reclaim_nograce(clp, state); 1166 nfs4_state_mark_reclaim_nograce(clp, state);
1167 dprintk("%s: scheduling stateid recovery for server %s\n", __func__,
1168 clp->cl_hostname);
1161 nfs4_schedule_state_manager(clp); 1169 nfs4_schedule_state_manager(clp);
1162} 1170}
1163EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery); 1171EXPORT_SYMBOL_GPL(nfs4_schedule_stateid_recovery);
@@ -1491,19 +1499,25 @@ static int nfs4_recovery_handle_error(struct nfs_client *clp, int error)
1491 case -NFS4ERR_BADSLOT: 1499 case -NFS4ERR_BADSLOT:
1492 case -NFS4ERR_BAD_HIGH_SLOT: 1500 case -NFS4ERR_BAD_HIGH_SLOT:
1493 case -NFS4ERR_DEADSESSION: 1501 case -NFS4ERR_DEADSESSION:
1494 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1495 case -NFS4ERR_SEQ_FALSE_RETRY: 1502 case -NFS4ERR_SEQ_FALSE_RETRY:
1496 case -NFS4ERR_SEQ_MISORDERED: 1503 case -NFS4ERR_SEQ_MISORDERED:
1497 set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); 1504 set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
1498 /* Zero session reset errors */ 1505 /* Zero session reset errors */
1499 break; 1506 break;
1507 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1508 set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
1509 break;
1500 case -EKEYEXPIRED: 1510 case -EKEYEXPIRED:
1501 /* Nothing we can do */ 1511 /* Nothing we can do */
1502 nfs4_warn_keyexpired(clp->cl_hostname); 1512 nfs4_warn_keyexpired(clp->cl_hostname);
1503 break; 1513 break;
1504 default: 1514 default:
1515 dprintk("%s: failed to handle error %d for server %s\n",
1516 __func__, error, clp->cl_hostname);
1505 return error; 1517 return error;
1506 } 1518 }
1519 dprintk("%s: handled error %d for server %s\n", __func__, error,
1520 clp->cl_hostname);
1507 return 0; 1521 return 0;
1508} 1522}
1509 1523
@@ -1572,34 +1586,82 @@ out:
1572 return nfs4_recovery_handle_error(clp, status); 1586 return nfs4_recovery_handle_error(clp, status);
1573} 1587}
1574 1588
1589/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
1590 * on EXCHANGE_ID for v4.1
1591 */
1592static int nfs4_handle_reclaim_lease_error(struct nfs_client *clp, int status)
1593{
1594 switch (status) {
1595 case -NFS4ERR_SEQ_MISORDERED:
1596 if (test_and_set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state))
1597 return -ESERVERFAULT;
1598 /* Lease confirmation error: retry after purging the lease */
1599 ssleep(1);
1600 case -NFS4ERR_CLID_INUSE:
1601 case -NFS4ERR_STALE_CLIENTID:
1602 clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
1603 break;
1604 case -EACCES:
1605 if (clp->cl_machine_cred == NULL)
1606 return -EACCES;
1607 /* Handle case where the user hasn't set up machine creds */
1608 nfs4_clear_machine_cred(clp);
1609 case -NFS4ERR_DELAY:
1610 case -ETIMEDOUT:
1611 case -EAGAIN:
1612 ssleep(1);
1613 break;
1614
1615 case -NFS4ERR_MINOR_VERS_MISMATCH:
1616 if (clp->cl_cons_state == NFS_CS_SESSION_INITING)
1617 nfs_mark_client_ready(clp, -EPROTONOSUPPORT);
1618 dprintk("%s: exit with error %d for server %s\n",
1619 __func__, -EPROTONOSUPPORT, clp->cl_hostname);
1620 return -EPROTONOSUPPORT;
1621 case -EKEYEXPIRED:
1622 nfs4_warn_keyexpired(clp->cl_hostname);
1623 case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
1624 * in nfs4_exchange_id */
1625 default:
1626 dprintk("%s: exit with error %d for server %s\n", __func__,
1627 status, clp->cl_hostname);
1628 return status;
1629 }
1630 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1631 dprintk("%s: handled error %d for server %s\n", __func__, status,
1632 clp->cl_hostname);
1633 return 0;
1634}
1635
1575static int nfs4_reclaim_lease(struct nfs_client *clp) 1636static int nfs4_reclaim_lease(struct nfs_client *clp)
1576{ 1637{
1577 struct rpc_cred *cred; 1638 struct rpc_cred *cred;
1578 const struct nfs4_state_recovery_ops *ops = 1639 const struct nfs4_state_recovery_ops *ops =
1579 clp->cl_mvops->reboot_recovery_ops; 1640 clp->cl_mvops->reboot_recovery_ops;
1580 int status = -ENOENT; 1641 int status;
1581 1642
1582 cred = ops->get_clid_cred(clp); 1643 cred = ops->get_clid_cred(clp);
1583 if (cred != NULL) { 1644 if (cred == NULL)
1584 status = ops->establish_clid(clp, cred); 1645 return -ENOENT;
1585 put_rpccred(cred); 1646 status = ops->establish_clid(clp, cred);
1586 /* Handle case where the user hasn't set up machine creds */ 1647 put_rpccred(cred);
1587 if (status == -EACCES && cred == clp->cl_machine_cred) { 1648 if (status != 0)
1588 nfs4_clear_machine_cred(clp); 1649 return nfs4_handle_reclaim_lease_error(clp, status);
1589 status = -EAGAIN; 1650 return 0;
1590 }
1591 if (status == -NFS4ERR_MINOR_VERS_MISMATCH)
1592 status = -EPROTONOSUPPORT;
1593 }
1594 return status;
1595} 1651}
1596 1652
1597#ifdef CONFIG_NFS_V4_1 1653#ifdef CONFIG_NFS_V4_1
1598void nfs4_schedule_session_recovery(struct nfs4_session *session) 1654void nfs4_schedule_session_recovery(struct nfs4_session *session, int err)
1599{ 1655{
1600 struct nfs_client *clp = session->clp; 1656 struct nfs_client *clp = session->clp;
1601 1657
1602 set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); 1658 switch (err) {
1659 default:
1660 set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
1661 break;
1662 case -NFS4ERR_CONN_NOT_BOUND_TO_SESSION:
1663 set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
1664 }
1603 nfs4_schedule_lease_recovery(clp); 1665 nfs4_schedule_lease_recovery(clp);
1604} 1666}
1605EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery); 1667EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
@@ -1607,14 +1669,19 @@ EXPORT_SYMBOL_GPL(nfs4_schedule_session_recovery);
1607void nfs41_handle_recall_slot(struct nfs_client *clp) 1669void nfs41_handle_recall_slot(struct nfs_client *clp)
1608{ 1670{
1609 set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); 1671 set_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
1672 dprintk("%s: scheduling slot recall for server %s\n", __func__,
1673 clp->cl_hostname);
1610 nfs4_schedule_state_manager(clp); 1674 nfs4_schedule_state_manager(clp);
1611} 1675}
1612 1676
1613static void nfs4_reset_all_state(struct nfs_client *clp) 1677static void nfs4_reset_all_state(struct nfs_client *clp)
1614{ 1678{
1615 if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { 1679 if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
1616 clp->cl_boot_time = CURRENT_TIME; 1680 set_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
1681 clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state);
1617 nfs4_state_start_reclaim_nograce(clp); 1682 nfs4_state_start_reclaim_nograce(clp);
1683 dprintk("%s: scheduling reset of all state for server %s!\n",
1684 __func__, clp->cl_hostname);
1618 nfs4_schedule_state_manager(clp); 1685 nfs4_schedule_state_manager(clp);
1619 } 1686 }
1620} 1687}
@@ -1623,33 +1690,50 @@ static void nfs41_handle_server_reboot(struct nfs_client *clp)
1623{ 1690{
1624 if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) { 1691 if (test_and_set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state) == 0) {
1625 nfs4_state_start_reclaim_reboot(clp); 1692 nfs4_state_start_reclaim_reboot(clp);
1693 dprintk("%s: server %s rebooted!\n", __func__,
1694 clp->cl_hostname);
1626 nfs4_schedule_state_manager(clp); 1695 nfs4_schedule_state_manager(clp);
1627 } 1696 }
1628} 1697}
1629 1698
1630static void nfs41_handle_state_revoked(struct nfs_client *clp) 1699static void nfs41_handle_state_revoked(struct nfs_client *clp)
1631{ 1700{
1632 /* Temporary */
1633 nfs4_reset_all_state(clp); 1701 nfs4_reset_all_state(clp);
1702 dprintk("%s: state revoked on server %s\n", __func__, clp->cl_hostname);
1634} 1703}
1635 1704
1636static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp) 1705static void nfs41_handle_recallable_state_revoked(struct nfs_client *clp)
1637{ 1706{
1638 /* This will need to handle layouts too */ 1707 /* This will need to handle layouts too */
1639 nfs_expire_all_delegations(clp); 1708 nfs_expire_all_delegations(clp);
1709 dprintk("%s: Recallable state revoked on server %s!\n", __func__,
1710 clp->cl_hostname);
1640} 1711}
1641 1712
1642static void nfs41_handle_cb_path_down(struct nfs_client *clp) 1713static void nfs41_handle_backchannel_fault(struct nfs_client *clp)
1643{ 1714{
1644 nfs_expire_all_delegations(clp); 1715 nfs_expire_all_delegations(clp);
1645 if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0) 1716 if (test_and_set_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state) == 0)
1646 nfs4_schedule_state_manager(clp); 1717 nfs4_schedule_state_manager(clp);
1718 dprintk("%s: server %s declared a backchannel fault\n", __func__,
1719 clp->cl_hostname);
1720}
1721
1722static void nfs41_handle_cb_path_down(struct nfs_client *clp)
1723{
1724 if (test_and_set_bit(NFS4CLNT_BIND_CONN_TO_SESSION,
1725 &clp->cl_state) == 0)
1726 nfs4_schedule_state_manager(clp);
1647} 1727}
1648 1728
1649void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags) 1729void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
1650{ 1730{
1651 if (!flags) 1731 if (!flags)
1652 return; 1732 return;
1733
1734 dprintk("%s: \"%s\" (client ID %llx) flags=0x%08x\n",
1735 __func__, clp->cl_hostname, clp->cl_clientid, flags);
1736
1653 if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED) 1737 if (flags & SEQ4_STATUS_RESTART_RECLAIM_NEEDED)
1654 nfs41_handle_server_reboot(clp); 1738 nfs41_handle_server_reboot(clp);
1655 if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED | 1739 if (flags & (SEQ4_STATUS_EXPIRED_ALL_STATE_REVOKED |
@@ -1659,18 +1743,21 @@ void nfs41_handle_sequence_flag_errors(struct nfs_client *clp, u32 flags)
1659 nfs41_handle_state_revoked(clp); 1743 nfs41_handle_state_revoked(clp);
1660 if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED) 1744 if (flags & SEQ4_STATUS_RECALLABLE_STATE_REVOKED)
1661 nfs41_handle_recallable_state_revoked(clp); 1745 nfs41_handle_recallable_state_revoked(clp);
1662 if (flags & (SEQ4_STATUS_CB_PATH_DOWN | 1746 if (flags & SEQ4_STATUS_BACKCHANNEL_FAULT)
1663 SEQ4_STATUS_BACKCHANNEL_FAULT | 1747 nfs41_handle_backchannel_fault(clp);
1664 SEQ4_STATUS_CB_PATH_DOWN_SESSION)) 1748 else if (flags & (SEQ4_STATUS_CB_PATH_DOWN |
1749 SEQ4_STATUS_CB_PATH_DOWN_SESSION))
1665 nfs41_handle_cb_path_down(clp); 1750 nfs41_handle_cb_path_down(clp);
1666} 1751}
1667 1752
1668static int nfs4_reset_session(struct nfs_client *clp) 1753static int nfs4_reset_session(struct nfs_client *clp)
1669{ 1754{
1755 struct rpc_cred *cred;
1670 int status; 1756 int status;
1671 1757
1672 nfs4_begin_drain_session(clp); 1758 nfs4_begin_drain_session(clp);
1673 status = nfs4_proc_destroy_session(clp->cl_session); 1759 cred = nfs4_get_exchange_id_cred(clp);
1760 status = nfs4_proc_destroy_session(clp->cl_session, cred);
1674 if (status && status != -NFS4ERR_BADSESSION && 1761 if (status && status != -NFS4ERR_BADSESSION &&
1675 status != -NFS4ERR_DEADSESSION) { 1762 status != -NFS4ERR_DEADSESSION) {
1676 status = nfs4_recovery_handle_error(clp, status); 1763 status = nfs4_recovery_handle_error(clp, status);
@@ -1678,19 +1765,26 @@ static int nfs4_reset_session(struct nfs_client *clp)
1678 } 1765 }
1679 1766
1680 memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN); 1767 memset(clp->cl_session->sess_id.data, 0, NFS4_MAX_SESSIONID_LEN);
1681 status = nfs4_proc_create_session(clp); 1768 status = nfs4_proc_create_session(clp, cred);
1682 if (status) { 1769 if (status) {
1683 status = nfs4_recovery_handle_error(clp, status); 1770 dprintk("%s: session reset failed with status %d for server %s!\n",
1771 __func__, status, clp->cl_hostname);
1772 status = nfs4_handle_reclaim_lease_error(clp, status);
1684 goto out; 1773 goto out;
1685 } 1774 }
1686 clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state); 1775 clear_bit(NFS4CLNT_SESSION_RESET, &clp->cl_state);
1687 /* create_session negotiated new slot table */ 1776 /* create_session negotiated new slot table */
1688 clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state); 1777 clear_bit(NFS4CLNT_RECALL_SLOT, &clp->cl_state);
1778 clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
1779 dprintk("%s: session reset was successful for server %s!\n",
1780 __func__, clp->cl_hostname);
1689 1781
1690 /* Let the state manager reestablish state */ 1782 /* Let the state manager reestablish state */
1691 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) 1783 if (!test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
1692 nfs41_setup_state_renewal(clp); 1784 nfs41_setup_state_renewal(clp);
1693out: 1785out:
1786 if (cred)
1787 put_rpccred(cred);
1694 return status; 1788 return status;
1695} 1789}
1696 1790
@@ -1722,37 +1816,41 @@ static int nfs4_recall_slot(struct nfs_client *clp)
1722 return 0; 1816 return 0;
1723} 1817}
1724 1818
1725#else /* CONFIG_NFS_V4_1 */ 1819static int nfs4_bind_conn_to_session(struct nfs_client *clp)
1726static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
1727static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; }
1728static int nfs4_recall_slot(struct nfs_client *clp) { return 0; }
1729#endif /* CONFIG_NFS_V4_1 */
1730
1731/* Set NFS4CLNT_LEASE_EXPIRED for all v4.0 errors and for recoverable errors
1732 * on EXCHANGE_ID for v4.1
1733 */
1734static void nfs4_set_lease_expired(struct nfs_client *clp, int status)
1735{ 1820{
1736 switch (status) { 1821 struct rpc_cred *cred;
1737 case -NFS4ERR_CLID_INUSE: 1822 int ret;
1738 case -NFS4ERR_STALE_CLIENTID: 1823
1739 clear_bit(NFS4CLNT_LEASE_CONFIRM, &clp->cl_state); 1824 nfs4_begin_drain_session(clp);
1825 cred = nfs4_get_exchange_id_cred(clp);
1826 ret = nfs4_proc_bind_conn_to_session(clp, cred);
1827 if (cred)
1828 put_rpccred(cred);
1829 clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
1830 switch (ret) {
1831 case 0:
1832 dprintk("%s: bind_conn_to_session was successful for server %s!\n",
1833 __func__, clp->cl_hostname);
1740 break; 1834 break;
1741 case -NFS4ERR_DELAY: 1835 case -NFS4ERR_DELAY:
1742 case -ETIMEDOUT:
1743 case -EAGAIN:
1744 ssleep(1); 1836 ssleep(1);
1837 set_bit(NFS4CLNT_BIND_CONN_TO_SESSION, &clp->cl_state);
1745 break; 1838 break;
1746
1747 case -EKEYEXPIRED:
1748 nfs4_warn_keyexpired(clp->cl_hostname);
1749 case -NFS4ERR_NOT_SAME: /* FixMe: implement recovery
1750 * in nfs4_exchange_id */
1751 default: 1839 default:
1752 return; 1840 return nfs4_recovery_handle_error(clp, ret);
1753 } 1841 }
1754 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state); 1842 return 0;
1755} 1843}
1844#else /* CONFIG_NFS_V4_1 */
1845static int nfs4_reset_session(struct nfs_client *clp) { return 0; }
1846static int nfs4_end_drain_session(struct nfs_client *clp) { return 0; }
1847static int nfs4_recall_slot(struct nfs_client *clp) { return 0; }
1848
1849static int nfs4_bind_conn_to_session(struct nfs_client *clp)
1850{
1851 return 0;
1852}
1853#endif /* CONFIG_NFS_V4_1 */
1756 1854
1757static void nfs4_state_manager(struct nfs_client *clp) 1855static void nfs4_state_manager(struct nfs_client *clp)
1758{ 1856{
@@ -1760,19 +1858,21 @@ static void nfs4_state_manager(struct nfs_client *clp)
1760 1858
1761 /* Ensure exclusive access to NFSv4 state */ 1859 /* Ensure exclusive access to NFSv4 state */
1762 do { 1860 do {
1861 if (test_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state)) {
1862 status = nfs4_reclaim_lease(clp);
1863 if (status < 0)
1864 goto out_error;
1865 clear_bit(NFS4CLNT_PURGE_STATE, &clp->cl_state);
1866 set_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state);
1867 }
1868
1763 if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) { 1869 if (test_and_clear_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state)) {
1764 /* We're going to have to re-establish a clientid */ 1870 /* We're going to have to re-establish a clientid */
1765 status = nfs4_reclaim_lease(clp); 1871 status = nfs4_reclaim_lease(clp);
1766 if (status) { 1872 if (status < 0)
1767 nfs4_set_lease_expired(clp, status);
1768 if (test_bit(NFS4CLNT_LEASE_EXPIRED,
1769 &clp->cl_state))
1770 continue;
1771 if (clp->cl_cons_state ==
1772 NFS_CS_SESSION_INITING)
1773 nfs_mark_client_ready(clp, status);
1774 goto out_error; 1873 goto out_error;
1775 } 1874 if (test_bit(NFS4CLNT_LEASE_EXPIRED, &clp->cl_state))
1875 continue;
1776 clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state); 1876 clear_bit(NFS4CLNT_CHECK_LEASE, &clp->cl_state);
1777 1877
1778 if (test_and_clear_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH, 1878 if (test_and_clear_bit(NFS4CLNT_SERVER_SCOPE_MISMATCH,
@@ -1803,6 +1903,15 @@ static void nfs4_state_manager(struct nfs_client *clp)
1803 goto out_error; 1903 goto out_error;
1804 } 1904 }
1805 1905
1906 /* Send BIND_CONN_TO_SESSION */
1907 if (test_and_clear_bit(NFS4CLNT_BIND_CONN_TO_SESSION,
1908 &clp->cl_state) && nfs4_has_session(clp)) {
1909 status = nfs4_bind_conn_to_session(clp);
1910 if (status < 0)
1911 goto out_error;
1912 continue;
1913 }
1914
1806 /* First recover reboot state... */ 1915 /* First recover reboot state... */
1807 if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) { 1916 if (test_bit(NFS4CLNT_RECLAIM_REBOOT, &clp->cl_state)) {
1808 status = nfs4_do_reclaim(clp, 1917 status = nfs4_do_reclaim(clp,
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index c54aae364bee..ee4a74db95d0 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -53,9 +53,11 @@
53#include <linux/nfs4.h> 53#include <linux/nfs4.h>
54#include <linux/nfs_fs.h> 54#include <linux/nfs_fs.h>
55#include <linux/nfs_idmap.h> 55#include <linux/nfs_idmap.h>
56
56#include "nfs4_fs.h" 57#include "nfs4_fs.h"
57#include "internal.h" 58#include "internal.h"
58#include "pnfs.h" 59#include "pnfs.h"
60#include "netns.h"
59 61
60#define NFSDBG_FACILITY NFSDBG_XDR 62#define NFSDBG_FACILITY NFSDBG_XDR
61 63
@@ -99,9 +101,12 @@ static int nfs4_stat_to_errno(int);
99#define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2)) 101#define nfs4_path_maxsz (1 + ((3 + NFS4_MAXPATHLEN) >> 2))
100#define nfs4_owner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ)) 102#define nfs4_owner_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
101#define nfs4_group_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ)) 103#define nfs4_group_maxsz (1 + XDR_QUADLEN(IDMAP_NAMESZ))
104/* We support only one layout type per file system */
105#define decode_mdsthreshold_maxsz (1 + 1 + nfs4_fattr_bitmap_maxsz + 1 + 8)
102/* This is based on getfattr, which uses the most attributes: */ 106/* This is based on getfattr, which uses the most attributes: */
103#define nfs4_fattr_value_maxsz (1 + (1 + 2 + 2 + 4 + 2 + 1 + 1 + 2 + 2 + \ 107#define nfs4_fattr_value_maxsz (1 + (1 + 2 + 2 + 4 + 2 + 1 + 1 + 2 + 2 + \
104 3 + 3 + 3 + nfs4_owner_maxsz + nfs4_group_maxsz)) 108 3 + 3 + 3 + nfs4_owner_maxsz + \
109 nfs4_group_maxsz + decode_mdsthreshold_maxsz))
105#define nfs4_fattr_maxsz (nfs4_fattr_bitmap_maxsz + \ 110#define nfs4_fattr_maxsz (nfs4_fattr_bitmap_maxsz + \
106 nfs4_fattr_value_maxsz) 111 nfs4_fattr_value_maxsz)
107#define decode_getattr_maxsz (op_decode_hdr_maxsz + nfs4_fattr_maxsz) 112#define decode_getattr_maxsz (op_decode_hdr_maxsz + nfs4_fattr_maxsz)
@@ -321,8 +326,20 @@ static int nfs4_stat_to_errno(int);
321 1 /* csr_flags */ + \ 326 1 /* csr_flags */ + \
322 decode_channel_attrs_maxsz + \ 327 decode_channel_attrs_maxsz + \
323 decode_channel_attrs_maxsz) 328 decode_channel_attrs_maxsz)
329#define encode_bind_conn_to_session_maxsz (op_encode_hdr_maxsz + \
330 /* bctsa_sessid */ \
331 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
332 1 /* bctsa_dir */ + \
333 1 /* bctsa_use_conn_in_rdma_mode */)
334#define decode_bind_conn_to_session_maxsz (op_decode_hdr_maxsz + \
335 /* bctsr_sessid */ \
336 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + \
337 1 /* bctsr_dir */ + \
338 1 /* bctsr_use_conn_in_rdma_mode */)
324#define encode_destroy_session_maxsz (op_encode_hdr_maxsz + 4) 339#define encode_destroy_session_maxsz (op_encode_hdr_maxsz + 4)
325#define decode_destroy_session_maxsz (op_decode_hdr_maxsz) 340#define decode_destroy_session_maxsz (op_decode_hdr_maxsz)
341#define encode_destroy_clientid_maxsz (op_encode_hdr_maxsz + 2)
342#define decode_destroy_clientid_maxsz (op_decode_hdr_maxsz)
326#define encode_sequence_maxsz (op_encode_hdr_maxsz + \ 343#define encode_sequence_maxsz (op_encode_hdr_maxsz + \
327 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 4) 344 XDR_QUADLEN(NFS4_MAX_SESSIONID_LEN) + 4)
328#define decode_sequence_maxsz (op_decode_hdr_maxsz + \ 345#define decode_sequence_maxsz (op_decode_hdr_maxsz + \
@@ -421,30 +438,22 @@ static int nfs4_stat_to_errno(int);
421#define NFS4_enc_commit_sz (compound_encode_hdr_maxsz + \ 438#define NFS4_enc_commit_sz (compound_encode_hdr_maxsz + \
422 encode_sequence_maxsz + \ 439 encode_sequence_maxsz + \
423 encode_putfh_maxsz + \ 440 encode_putfh_maxsz + \
424 encode_commit_maxsz + \ 441 encode_commit_maxsz)
425 encode_getattr_maxsz)
426#define NFS4_dec_commit_sz (compound_decode_hdr_maxsz + \ 442#define NFS4_dec_commit_sz (compound_decode_hdr_maxsz + \
427 decode_sequence_maxsz + \ 443 decode_sequence_maxsz + \
428 decode_putfh_maxsz + \ 444 decode_putfh_maxsz + \
429 decode_commit_maxsz + \ 445 decode_commit_maxsz)
430 decode_getattr_maxsz)
431#define NFS4_enc_open_sz (compound_encode_hdr_maxsz + \ 446#define NFS4_enc_open_sz (compound_encode_hdr_maxsz + \
432 encode_sequence_maxsz + \ 447 encode_sequence_maxsz + \
433 encode_putfh_maxsz + \ 448 encode_putfh_maxsz + \
434 encode_savefh_maxsz + \
435 encode_open_maxsz + \ 449 encode_open_maxsz + \
436 encode_getfh_maxsz + \ 450 encode_getfh_maxsz + \
437 encode_getattr_maxsz + \
438 encode_restorefh_maxsz + \
439 encode_getattr_maxsz) 451 encode_getattr_maxsz)
440#define NFS4_dec_open_sz (compound_decode_hdr_maxsz + \ 452#define NFS4_dec_open_sz (compound_decode_hdr_maxsz + \
441 decode_sequence_maxsz + \ 453 decode_sequence_maxsz + \
442 decode_putfh_maxsz + \ 454 decode_putfh_maxsz + \
443 decode_savefh_maxsz + \
444 decode_open_maxsz + \ 455 decode_open_maxsz + \
445 decode_getfh_maxsz + \ 456 decode_getfh_maxsz + \
446 decode_getattr_maxsz + \
447 decode_restorefh_maxsz + \
448 decode_getattr_maxsz) 457 decode_getattr_maxsz)
449#define NFS4_enc_open_confirm_sz \ 458#define NFS4_enc_open_confirm_sz \
450 (compound_encode_hdr_maxsz + \ 459 (compound_encode_hdr_maxsz + \
@@ -595,47 +604,37 @@ static int nfs4_stat_to_errno(int);
595#define NFS4_enc_remove_sz (compound_encode_hdr_maxsz + \ 604#define NFS4_enc_remove_sz (compound_encode_hdr_maxsz + \
596 encode_sequence_maxsz + \ 605 encode_sequence_maxsz + \
597 encode_putfh_maxsz + \ 606 encode_putfh_maxsz + \
598 encode_remove_maxsz + \ 607 encode_remove_maxsz)
599 encode_getattr_maxsz)
600#define NFS4_dec_remove_sz (compound_decode_hdr_maxsz + \ 608#define NFS4_dec_remove_sz (compound_decode_hdr_maxsz + \
601 decode_sequence_maxsz + \ 609 decode_sequence_maxsz + \
602 decode_putfh_maxsz + \ 610 decode_putfh_maxsz + \
603 decode_remove_maxsz + \ 611 decode_remove_maxsz)
604 decode_getattr_maxsz)
605#define NFS4_enc_rename_sz (compound_encode_hdr_maxsz + \ 612#define NFS4_enc_rename_sz (compound_encode_hdr_maxsz + \
606 encode_sequence_maxsz + \ 613 encode_sequence_maxsz + \
607 encode_putfh_maxsz + \ 614 encode_putfh_maxsz + \
608 encode_savefh_maxsz + \ 615 encode_savefh_maxsz + \
609 encode_putfh_maxsz + \ 616 encode_putfh_maxsz + \
610 encode_rename_maxsz + \ 617 encode_rename_maxsz)
611 encode_getattr_maxsz + \
612 encode_restorefh_maxsz + \
613 encode_getattr_maxsz)
614#define NFS4_dec_rename_sz (compound_decode_hdr_maxsz + \ 618#define NFS4_dec_rename_sz (compound_decode_hdr_maxsz + \
615 decode_sequence_maxsz + \ 619 decode_sequence_maxsz + \
616 decode_putfh_maxsz + \ 620 decode_putfh_maxsz + \
617 decode_savefh_maxsz + \ 621 decode_savefh_maxsz + \
618 decode_putfh_maxsz + \ 622 decode_putfh_maxsz + \
619 decode_rename_maxsz + \ 623 decode_rename_maxsz)
620 decode_getattr_maxsz + \
621 decode_restorefh_maxsz + \
622 decode_getattr_maxsz)
623#define NFS4_enc_link_sz (compound_encode_hdr_maxsz + \ 624#define NFS4_enc_link_sz (compound_encode_hdr_maxsz + \
624 encode_sequence_maxsz + \ 625 encode_sequence_maxsz + \
625 encode_putfh_maxsz + \ 626 encode_putfh_maxsz + \
626 encode_savefh_maxsz + \ 627 encode_savefh_maxsz + \
627 encode_putfh_maxsz + \ 628 encode_putfh_maxsz + \
628 encode_link_maxsz + \ 629 encode_link_maxsz + \
629 decode_getattr_maxsz + \
630 encode_restorefh_maxsz + \ 630 encode_restorefh_maxsz + \
631 decode_getattr_maxsz) 631 encode_getattr_maxsz)
632#define NFS4_dec_link_sz (compound_decode_hdr_maxsz + \ 632#define NFS4_dec_link_sz (compound_decode_hdr_maxsz + \
633 decode_sequence_maxsz + \ 633 decode_sequence_maxsz + \
634 decode_putfh_maxsz + \ 634 decode_putfh_maxsz + \
635 decode_savefh_maxsz + \ 635 decode_savefh_maxsz + \
636 decode_putfh_maxsz + \ 636 decode_putfh_maxsz + \
637 decode_link_maxsz + \ 637 decode_link_maxsz + \
638 decode_getattr_maxsz + \
639 decode_restorefh_maxsz + \ 638 decode_restorefh_maxsz + \
640 decode_getattr_maxsz) 639 decode_getattr_maxsz)
641#define NFS4_enc_symlink_sz (compound_encode_hdr_maxsz + \ 640#define NFS4_enc_symlink_sz (compound_encode_hdr_maxsz + \
@@ -653,20 +652,14 @@ static int nfs4_stat_to_errno(int);
653#define NFS4_enc_create_sz (compound_encode_hdr_maxsz + \ 652#define NFS4_enc_create_sz (compound_encode_hdr_maxsz + \
654 encode_sequence_maxsz + \ 653 encode_sequence_maxsz + \
655 encode_putfh_maxsz + \ 654 encode_putfh_maxsz + \
656 encode_savefh_maxsz + \
657 encode_create_maxsz + \ 655 encode_create_maxsz + \
658 encode_getfh_maxsz + \ 656 encode_getfh_maxsz + \
659 encode_getattr_maxsz + \
660 encode_restorefh_maxsz + \
661 encode_getattr_maxsz) 657 encode_getattr_maxsz)
662#define NFS4_dec_create_sz (compound_decode_hdr_maxsz + \ 658#define NFS4_dec_create_sz (compound_decode_hdr_maxsz + \
663 decode_sequence_maxsz + \ 659 decode_sequence_maxsz + \
664 decode_putfh_maxsz + \ 660 decode_putfh_maxsz + \
665 decode_savefh_maxsz + \
666 decode_create_maxsz + \ 661 decode_create_maxsz + \
667 decode_getfh_maxsz + \ 662 decode_getfh_maxsz + \
668 decode_getattr_maxsz + \
669 decode_restorefh_maxsz + \
670 decode_getattr_maxsz) 663 decode_getattr_maxsz)
671#define NFS4_enc_pathconf_sz (compound_encode_hdr_maxsz + \ 664#define NFS4_enc_pathconf_sz (compound_encode_hdr_maxsz + \
672 encode_sequence_maxsz + \ 665 encode_sequence_maxsz + \
@@ -738,6 +731,12 @@ static int nfs4_stat_to_errno(int);
738 decode_putfh_maxsz + \ 731 decode_putfh_maxsz + \
739 decode_secinfo_maxsz) 732 decode_secinfo_maxsz)
740#if defined(CONFIG_NFS_V4_1) 733#if defined(CONFIG_NFS_V4_1)
734#define NFS4_enc_bind_conn_to_session_sz \
735 (compound_encode_hdr_maxsz + \
736 encode_bind_conn_to_session_maxsz)
737#define NFS4_dec_bind_conn_to_session_sz \
738 (compound_decode_hdr_maxsz + \
739 decode_bind_conn_to_session_maxsz)
741#define NFS4_enc_exchange_id_sz \ 740#define NFS4_enc_exchange_id_sz \
742 (compound_encode_hdr_maxsz + \ 741 (compound_encode_hdr_maxsz + \
743 encode_exchange_id_maxsz) 742 encode_exchange_id_maxsz)
@@ -754,6 +753,10 @@ static int nfs4_stat_to_errno(int);
754 encode_destroy_session_maxsz) 753 encode_destroy_session_maxsz)
755#define NFS4_dec_destroy_session_sz (compound_decode_hdr_maxsz + \ 754#define NFS4_dec_destroy_session_sz (compound_decode_hdr_maxsz + \
756 decode_destroy_session_maxsz) 755 decode_destroy_session_maxsz)
756#define NFS4_enc_destroy_clientid_sz (compound_encode_hdr_maxsz + \
757 encode_destroy_clientid_maxsz)
758#define NFS4_dec_destroy_clientid_sz (compound_decode_hdr_maxsz + \
759 decode_destroy_clientid_maxsz)
757#define NFS4_enc_sequence_sz \ 760#define NFS4_enc_sequence_sz \
758 (compound_decode_hdr_maxsz + \ 761 (compound_decode_hdr_maxsz + \
759 encode_sequence_maxsz) 762 encode_sequence_maxsz)
@@ -1103,7 +1106,7 @@ static void encode_close(struct xdr_stream *xdr, const struct nfs_closeargs *arg
1103 encode_nfs4_stateid(xdr, arg->stateid); 1106 encode_nfs4_stateid(xdr, arg->stateid);
1104} 1107}
1105 1108
1106static void encode_commit(struct xdr_stream *xdr, const struct nfs_writeargs *args, struct compound_hdr *hdr) 1109static void encode_commit(struct xdr_stream *xdr, const struct nfs_commitargs *args, struct compound_hdr *hdr)
1107{ 1110{
1108 __be32 *p; 1111 __be32 *p;
1109 1112
@@ -1194,6 +1197,16 @@ static void encode_getfattr(struct xdr_stream *xdr, const u32* bitmask, struct c
1194 bitmask[1] & nfs4_fattr_bitmap[1], hdr); 1197 bitmask[1] & nfs4_fattr_bitmap[1], hdr);
1195} 1198}
1196 1199
1200static void encode_getfattr_open(struct xdr_stream *xdr, const u32 *bitmask,
1201 struct compound_hdr *hdr)
1202{
1203 encode_getattr_three(xdr,
1204 bitmask[0] & nfs4_fattr_bitmap[0],
1205 bitmask[1] & nfs4_fattr_bitmap[1],
1206 bitmask[2] & FATTR4_WORD2_MDSTHRESHOLD,
1207 hdr);
1208}
1209
1197static void encode_fsinfo(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr) 1210static void encode_fsinfo(struct xdr_stream *xdr, const u32* bitmask, struct compound_hdr *hdr)
1198{ 1211{
1199 encode_getattr_three(xdr, 1212 encode_getattr_three(xdr,
@@ -1678,6 +1691,20 @@ static void encode_secinfo(struct xdr_stream *xdr, const struct qstr *name, stru
1678 1691
1679#if defined(CONFIG_NFS_V4_1) 1692#if defined(CONFIG_NFS_V4_1)
1680/* NFSv4.1 operations */ 1693/* NFSv4.1 operations */
1694static void encode_bind_conn_to_session(struct xdr_stream *xdr,
1695 struct nfs4_session *session,
1696 struct compound_hdr *hdr)
1697{
1698 __be32 *p;
1699
1700 encode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION,
1701 decode_bind_conn_to_session_maxsz, hdr);
1702 encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
1703 p = xdr_reserve_space(xdr, 8);
1704 *p++ = cpu_to_be32(NFS4_CDFC4_BACK_OR_BOTH);
1705 *p = 0; /* use_conn_in_rdma_mode = False */
1706}
1707
1681static void encode_exchange_id(struct xdr_stream *xdr, 1708static void encode_exchange_id(struct xdr_stream *xdr,
1682 struct nfs41_exchange_id_args *args, 1709 struct nfs41_exchange_id_args *args,
1683 struct compound_hdr *hdr) 1710 struct compound_hdr *hdr)
@@ -1726,6 +1753,7 @@ static void encode_create_session(struct xdr_stream *xdr,
1726 char machine_name[NFS4_MAX_MACHINE_NAME_LEN]; 1753 char machine_name[NFS4_MAX_MACHINE_NAME_LEN];
1727 uint32_t len; 1754 uint32_t len;
1728 struct nfs_client *clp = args->client; 1755 struct nfs_client *clp = args->client;
1756 struct nfs_net *nn = net_generic(clp->cl_net, nfs_net_id);
1729 u32 max_resp_sz_cached; 1757 u32 max_resp_sz_cached;
1730 1758
1731 /* 1759 /*
@@ -1767,7 +1795,7 @@ static void encode_create_session(struct xdr_stream *xdr,
1767 *p++ = cpu_to_be32(RPC_AUTH_UNIX); /* auth_sys */ 1795 *p++ = cpu_to_be32(RPC_AUTH_UNIX); /* auth_sys */
1768 1796
1769 /* authsys_parms rfc1831 */ 1797 /* authsys_parms rfc1831 */
1770 *p++ = cpu_to_be32((u32)clp->cl_boot_time.tv_nsec); /* stamp */ 1798 *p++ = (__be32)nn->boot_time.tv_nsec; /* stamp */
1771 p = xdr_encode_opaque(p, machine_name, len); 1799 p = xdr_encode_opaque(p, machine_name, len);
1772 *p++ = cpu_to_be32(0); /* UID */ 1800 *p++ = cpu_to_be32(0); /* UID */
1773 *p++ = cpu_to_be32(0); /* GID */ 1801 *p++ = cpu_to_be32(0); /* GID */
@@ -1782,6 +1810,14 @@ static void encode_destroy_session(struct xdr_stream *xdr,
1782 encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN); 1810 encode_opaque_fixed(xdr, session->sess_id.data, NFS4_MAX_SESSIONID_LEN);
1783} 1811}
1784 1812
1813static void encode_destroy_clientid(struct xdr_stream *xdr,
1814 uint64_t clientid,
1815 struct compound_hdr *hdr)
1816{
1817 encode_op_hdr(xdr, OP_DESTROY_CLIENTID, decode_destroy_clientid_maxsz, hdr);
1818 encode_uint64(xdr, clientid);
1819}
1820
1785static void encode_reclaim_complete(struct xdr_stream *xdr, 1821static void encode_reclaim_complete(struct xdr_stream *xdr,
1786 struct nfs41_reclaim_complete_args *args, 1822 struct nfs41_reclaim_complete_args *args,
1787 struct compound_hdr *hdr) 1823 struct compound_hdr *hdr)
@@ -2064,7 +2100,6 @@ static void nfs4_xdr_enc_remove(struct rpc_rqst *req, struct xdr_stream *xdr,
2064 encode_sequence(xdr, &args->seq_args, &hdr); 2100 encode_sequence(xdr, &args->seq_args, &hdr);
2065 encode_putfh(xdr, args->fh, &hdr); 2101 encode_putfh(xdr, args->fh, &hdr);
2066 encode_remove(xdr, &args->name, &hdr); 2102 encode_remove(xdr, &args->name, &hdr);
2067 encode_getfattr(xdr, args->bitmask, &hdr);
2068 encode_nops(&hdr); 2103 encode_nops(&hdr);
2069} 2104}
2070 2105
@@ -2084,9 +2119,6 @@ static void nfs4_xdr_enc_rename(struct rpc_rqst *req, struct xdr_stream *xdr,
2084 encode_savefh(xdr, &hdr); 2119 encode_savefh(xdr, &hdr);
2085 encode_putfh(xdr, args->new_dir, &hdr); 2120 encode_putfh(xdr, args->new_dir, &hdr);
2086 encode_rename(xdr, args->old_name, args->new_name, &hdr); 2121 encode_rename(xdr, args->old_name, args->new_name, &hdr);
2087 encode_getfattr(xdr, args->bitmask, &hdr);
2088 encode_restorefh(xdr, &hdr);
2089 encode_getfattr(xdr, args->bitmask, &hdr);
2090 encode_nops(&hdr); 2122 encode_nops(&hdr);
2091} 2123}
2092 2124
@@ -2106,7 +2138,6 @@ static void nfs4_xdr_enc_link(struct rpc_rqst *req, struct xdr_stream *xdr,
2106 encode_savefh(xdr, &hdr); 2138 encode_savefh(xdr, &hdr);
2107 encode_putfh(xdr, args->dir_fh, &hdr); 2139 encode_putfh(xdr, args->dir_fh, &hdr);
2108 encode_link(xdr, args->name, &hdr); 2140 encode_link(xdr, args->name, &hdr);
2109 encode_getfattr(xdr, args->bitmask, &hdr);
2110 encode_restorefh(xdr, &hdr); 2141 encode_restorefh(xdr, &hdr);
2111 encode_getfattr(xdr, args->bitmask, &hdr); 2142 encode_getfattr(xdr, args->bitmask, &hdr);
2112 encode_nops(&hdr); 2143 encode_nops(&hdr);
@@ -2125,12 +2156,9 @@ static void nfs4_xdr_enc_create(struct rpc_rqst *req, struct xdr_stream *xdr,
2125 encode_compound_hdr(xdr, req, &hdr); 2156 encode_compound_hdr(xdr, req, &hdr);
2126 encode_sequence(xdr, &args->seq_args, &hdr); 2157 encode_sequence(xdr, &args->seq_args, &hdr);
2127 encode_putfh(xdr, args->dir_fh, &hdr); 2158 encode_putfh(xdr, args->dir_fh, &hdr);
2128 encode_savefh(xdr, &hdr);
2129 encode_create(xdr, args, &hdr); 2159 encode_create(xdr, args, &hdr);
2130 encode_getfh(xdr, &hdr); 2160 encode_getfh(xdr, &hdr);
2131 encode_getfattr(xdr, args->bitmask, &hdr); 2161 encode_getfattr(xdr, args->bitmask, &hdr);
2132 encode_restorefh(xdr, &hdr);
2133 encode_getfattr(xdr, args->bitmask, &hdr);
2134 encode_nops(&hdr); 2162 encode_nops(&hdr);
2135} 2163}
2136 2164
@@ -2191,12 +2219,9 @@ static void nfs4_xdr_enc_open(struct rpc_rqst *req, struct xdr_stream *xdr,
2191 encode_compound_hdr(xdr, req, &hdr); 2219 encode_compound_hdr(xdr, req, &hdr);
2192 encode_sequence(xdr, &args->seq_args, &hdr); 2220 encode_sequence(xdr, &args->seq_args, &hdr);
2193 encode_putfh(xdr, args->fh, &hdr); 2221 encode_putfh(xdr, args->fh, &hdr);
2194 encode_savefh(xdr, &hdr);
2195 encode_open(xdr, args, &hdr); 2222 encode_open(xdr, args, &hdr);
2196 encode_getfh(xdr, &hdr); 2223 encode_getfh(xdr, &hdr);
2197 encode_getfattr(xdr, args->bitmask, &hdr); 2224 encode_getfattr_open(xdr, args->bitmask, &hdr);
2198 encode_restorefh(xdr, &hdr);
2199 encode_getfattr(xdr, args->dir_bitmask, &hdr);
2200 encode_nops(&hdr); 2225 encode_nops(&hdr);
2201} 2226}
2202 2227
@@ -2448,7 +2473,7 @@ static void nfs4_xdr_enc_write(struct rpc_rqst *req, struct xdr_stream *xdr,
2448 * a COMMIT request 2473 * a COMMIT request
2449 */ 2474 */
2450static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr, 2475static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
2451 struct nfs_writeargs *args) 2476 struct nfs_commitargs *args)
2452{ 2477{
2453 struct compound_hdr hdr = { 2478 struct compound_hdr hdr = {
2454 .minorversion = nfs4_xdr_minorversion(&args->seq_args), 2479 .minorversion = nfs4_xdr_minorversion(&args->seq_args),
@@ -2458,8 +2483,6 @@ static void nfs4_xdr_enc_commit(struct rpc_rqst *req, struct xdr_stream *xdr,
2458 encode_sequence(xdr, &args->seq_args, &hdr); 2483 encode_sequence(xdr, &args->seq_args, &hdr);
2459 encode_putfh(xdr, args->fh, &hdr); 2484 encode_putfh(xdr, args->fh, &hdr);
2460 encode_commit(xdr, args, &hdr); 2485 encode_commit(xdr, args, &hdr);
2461 if (args->bitmask)
2462 encode_getfattr(xdr, args->bitmask, &hdr);
2463 encode_nops(&hdr); 2486 encode_nops(&hdr);
2464} 2487}
2465 2488
@@ -2602,8 +2625,8 @@ static void nfs4_xdr_enc_delegreturn(struct rpc_rqst *req,
2602 encode_compound_hdr(xdr, req, &hdr); 2625 encode_compound_hdr(xdr, req, &hdr);
2603 encode_sequence(xdr, &args->seq_args, &hdr); 2626 encode_sequence(xdr, &args->seq_args, &hdr);
2604 encode_putfh(xdr, args->fhandle, &hdr); 2627 encode_putfh(xdr, args->fhandle, &hdr);
2605 encode_delegreturn(xdr, args->stateid, &hdr);
2606 encode_getfattr(xdr, args->bitmask, &hdr); 2628 encode_getfattr(xdr, args->bitmask, &hdr);
2629 encode_delegreturn(xdr, args->stateid, &hdr);
2607 encode_nops(&hdr); 2630 encode_nops(&hdr);
2608} 2631}
2609 2632
@@ -2651,6 +2674,22 @@ static void nfs4_xdr_enc_secinfo(struct rpc_rqst *req,
2651 2674
2652#if defined(CONFIG_NFS_V4_1) 2675#if defined(CONFIG_NFS_V4_1)
2653/* 2676/*
2677 * BIND_CONN_TO_SESSION request
2678 */
2679static void nfs4_xdr_enc_bind_conn_to_session(struct rpc_rqst *req,
2680 struct xdr_stream *xdr,
2681 struct nfs_client *clp)
2682{
2683 struct compound_hdr hdr = {
2684 .minorversion = clp->cl_mvops->minor_version,
2685 };
2686
2687 encode_compound_hdr(xdr, req, &hdr);
2688 encode_bind_conn_to_session(xdr, clp->cl_session, &hdr);
2689 encode_nops(&hdr);
2690}
2691
2692/*
2654 * EXCHANGE_ID request 2693 * EXCHANGE_ID request
2655 */ 2694 */
2656static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req, 2695static void nfs4_xdr_enc_exchange_id(struct rpc_rqst *req,
@@ -2699,6 +2738,22 @@ static void nfs4_xdr_enc_destroy_session(struct rpc_rqst *req,
2699} 2738}
2700 2739
2701/* 2740/*
2741 * a DESTROY_CLIENTID request
2742 */
2743static void nfs4_xdr_enc_destroy_clientid(struct rpc_rqst *req,
2744 struct xdr_stream *xdr,
2745 struct nfs_client *clp)
2746{
2747 struct compound_hdr hdr = {
2748 .minorversion = clp->cl_mvops->minor_version,
2749 };
2750
2751 encode_compound_hdr(xdr, req, &hdr);
2752 encode_destroy_clientid(xdr, clp->cl_clientid, &hdr);
2753 encode_nops(&hdr);
2754}
2755
2756/*
2702 * a SEQUENCE request 2757 * a SEQUENCE request
2703 */ 2758 */
2704static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr, 2759static void nfs4_xdr_enc_sequence(struct rpc_rqst *req, struct xdr_stream *xdr,
@@ -4102,7 +4157,7 @@ static int decode_verifier(struct xdr_stream *xdr, void *verifier)
4102 return decode_opaque_fixed(xdr, verifier, NFS4_VERIFIER_SIZE); 4157 return decode_opaque_fixed(xdr, verifier, NFS4_VERIFIER_SIZE);
4103} 4158}
4104 4159
4105static int decode_commit(struct xdr_stream *xdr, struct nfs_writeres *res) 4160static int decode_commit(struct xdr_stream *xdr, struct nfs_commitres *res)
4106{ 4161{
4107 int status; 4162 int status;
4108 4163
@@ -4220,6 +4275,110 @@ xdr_error:
4220 return status; 4275 return status;
4221} 4276}
4222 4277
4278static int decode_threshold_hint(struct xdr_stream *xdr,
4279 uint32_t *bitmap,
4280 uint64_t *res,
4281 uint32_t hint_bit)
4282{
4283 __be32 *p;
4284
4285 *res = 0;
4286 if (likely(bitmap[0] & hint_bit)) {
4287 p = xdr_inline_decode(xdr, 8);
4288 if (unlikely(!p))
4289 goto out_overflow;
4290 xdr_decode_hyper(p, res);
4291 }
4292 return 0;
4293out_overflow:
4294 print_overflow_msg(__func__, xdr);
4295 return -EIO;
4296}
4297
4298static int decode_first_threshold_item4(struct xdr_stream *xdr,
4299 struct nfs4_threshold *res)
4300{
4301 __be32 *p, *savep;
4302 uint32_t bitmap[3] = {0,}, attrlen;
4303 int status;
4304
4305 /* layout type */
4306 p = xdr_inline_decode(xdr, 4);
4307 if (unlikely(!p)) {
4308 print_overflow_msg(__func__, xdr);
4309 return -EIO;
4310 }
4311 res->l_type = be32_to_cpup(p);
4312
4313 /* thi_hintset bitmap */
4314 status = decode_attr_bitmap(xdr, bitmap);
4315 if (status < 0)
4316 goto xdr_error;
4317
4318 /* thi_hintlist length */
4319 status = decode_attr_length(xdr, &attrlen, &savep);
4320 if (status < 0)
4321 goto xdr_error;
4322 /* thi_hintlist */
4323 status = decode_threshold_hint(xdr, bitmap, &res->rd_sz, THRESHOLD_RD);
4324 if (status < 0)
4325 goto xdr_error;
4326 status = decode_threshold_hint(xdr, bitmap, &res->wr_sz, THRESHOLD_WR);
4327 if (status < 0)
4328 goto xdr_error;
4329 status = decode_threshold_hint(xdr, bitmap, &res->rd_io_sz,
4330 THRESHOLD_RD_IO);
4331 if (status < 0)
4332 goto xdr_error;
4333 status = decode_threshold_hint(xdr, bitmap, &res->wr_io_sz,
4334 THRESHOLD_WR_IO);
4335 if (status < 0)
4336 goto xdr_error;
4337
4338 status = verify_attr_len(xdr, savep, attrlen);
4339 res->bm = bitmap[0];
4340
4341 dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
4342 __func__, res->bm, res->rd_sz, res->wr_sz, res->rd_io_sz,
4343 res->wr_io_sz);
4344xdr_error:
4345 dprintk("%s ret=%d!\n", __func__, status);
4346 return status;
4347}
4348
4349/*
4350 * Thresholds on pNFS direct I/O vrs MDS I/O
4351 */
4352static int decode_attr_mdsthreshold(struct xdr_stream *xdr,
4353 uint32_t *bitmap,
4354 struct nfs4_threshold *res)
4355{
4356 __be32 *p;
4357 int status = 0;
4358 uint32_t num;
4359
4360 if (unlikely(bitmap[2] & (FATTR4_WORD2_MDSTHRESHOLD - 1U)))
4361 return -EIO;
4362 if (likely(bitmap[2] & FATTR4_WORD2_MDSTHRESHOLD)) {
4363 p = xdr_inline_decode(xdr, 4);
4364 if (unlikely(!p))
4365 goto out_overflow;
4366 num = be32_to_cpup(p);
4367 if (num == 0)
4368 return 0;
4369 if (num > 1)
4370 printk(KERN_INFO "%s: Warning: Multiple pNFS layout "
4371 "drivers per filesystem not supported\n",
4372 __func__);
4373
4374 status = decode_first_threshold_item4(xdr, res);
4375 }
4376 return status;
4377out_overflow:
4378 print_overflow_msg(__func__, xdr);
4379 return -EIO;
4380}
4381
4223static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap, 4382static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
4224 struct nfs_fattr *fattr, struct nfs_fh *fh, 4383 struct nfs_fattr *fattr, struct nfs_fh *fh,
4225 struct nfs4_fs_locations *fs_loc, 4384 struct nfs4_fs_locations *fs_loc,
@@ -4326,6 +4485,10 @@ static int decode_getfattr_attrs(struct xdr_stream *xdr, uint32_t *bitmap,
4326 goto xdr_error; 4485 goto xdr_error;
4327 fattr->valid |= status; 4486 fattr->valid |= status;
4328 4487
4488 status = decode_attr_mdsthreshold(xdr, bitmap, fattr->mdsthreshold);
4489 if (status < 0)
4490 goto xdr_error;
4491
4329xdr_error: 4492xdr_error:
4330 dprintk("%s: xdr returned %d\n", __func__, -status); 4493 dprintk("%s: xdr returned %d\n", __func__, -status);
4331 return status; 4494 return status;
@@ -5156,7 +5319,6 @@ static int decode_exchange_id(struct xdr_stream *xdr,
5156 uint32_t dummy; 5319 uint32_t dummy;
5157 char *dummy_str; 5320 char *dummy_str;
5158 int status; 5321 int status;
5159 struct nfs_client *clp = res->client;
5160 uint32_t impl_id_count; 5322 uint32_t impl_id_count;
5161 5323
5162 status = decode_op_hdr(xdr, OP_EXCHANGE_ID); 5324 status = decode_op_hdr(xdr, OP_EXCHANGE_ID);
@@ -5166,36 +5328,39 @@ static int decode_exchange_id(struct xdr_stream *xdr,
5166 p = xdr_inline_decode(xdr, 8); 5328 p = xdr_inline_decode(xdr, 8);
5167 if (unlikely(!p)) 5329 if (unlikely(!p))
5168 goto out_overflow; 5330 goto out_overflow;
5169 xdr_decode_hyper(p, &clp->cl_clientid); 5331 xdr_decode_hyper(p, &res->clientid);
5170 p = xdr_inline_decode(xdr, 12); 5332 p = xdr_inline_decode(xdr, 12);
5171 if (unlikely(!p)) 5333 if (unlikely(!p))
5172 goto out_overflow; 5334 goto out_overflow;
5173 clp->cl_seqid = be32_to_cpup(p++); 5335 res->seqid = be32_to_cpup(p++);
5174 clp->cl_exchange_flags = be32_to_cpup(p++); 5336 res->flags = be32_to_cpup(p++);
5175 5337
5176 /* We ask for SP4_NONE */ 5338 /* We ask for SP4_NONE */
5177 dummy = be32_to_cpup(p); 5339 dummy = be32_to_cpup(p);
5178 if (dummy != SP4_NONE) 5340 if (dummy != SP4_NONE)
5179 return -EIO; 5341 return -EIO;
5180 5342
5181 /* Throw away minor_id */ 5343 /* server_owner4.so_minor_id */
5182 p = xdr_inline_decode(xdr, 8); 5344 p = xdr_inline_decode(xdr, 8);
5183 if (unlikely(!p)) 5345 if (unlikely(!p))
5184 goto out_overflow; 5346 goto out_overflow;
5347 p = xdr_decode_hyper(p, &res->server_owner->minor_id);
5185 5348
5186 /* Throw away Major id */ 5349 /* server_owner4.so_major_id */
5187 status = decode_opaque_inline(xdr, &dummy, &dummy_str); 5350 status = decode_opaque_inline(xdr, &dummy, &dummy_str);
5188 if (unlikely(status)) 5351 if (unlikely(status))
5189 return status; 5352 return status;
5353 if (unlikely(dummy > NFS4_OPAQUE_LIMIT))
5354 return -EIO;
5355 memcpy(res->server_owner->major_id, dummy_str, dummy);
5356 res->server_owner->major_id_sz = dummy;
5190 5357
5191 /* Save server_scope */ 5358 /* server_scope4 */
5192 status = decode_opaque_inline(xdr, &dummy, &dummy_str); 5359 status = decode_opaque_inline(xdr, &dummy, &dummy_str);
5193 if (unlikely(status)) 5360 if (unlikely(status))
5194 return status; 5361 return status;
5195
5196 if (unlikely(dummy > NFS4_OPAQUE_LIMIT)) 5362 if (unlikely(dummy > NFS4_OPAQUE_LIMIT))
5197 return -EIO; 5363 return -EIO;
5198
5199 memcpy(res->server_scope->server_scope, dummy_str, dummy); 5364 memcpy(res->server_scope->server_scope, dummy_str, dummy);
5200 res->server_scope->server_scope_sz = dummy; 5365 res->server_scope->server_scope_sz = dummy;
5201 5366
@@ -5276,6 +5441,37 @@ static int decode_sessionid(struct xdr_stream *xdr, struct nfs4_sessionid *sid)
5276 return decode_opaque_fixed(xdr, sid->data, NFS4_MAX_SESSIONID_LEN); 5441 return decode_opaque_fixed(xdr, sid->data, NFS4_MAX_SESSIONID_LEN);
5277} 5442}
5278 5443
5444static int decode_bind_conn_to_session(struct xdr_stream *xdr,
5445 struct nfs41_bind_conn_to_session_res *res)
5446{
5447 __be32 *p;
5448 int status;
5449
5450 status = decode_op_hdr(xdr, OP_BIND_CONN_TO_SESSION);
5451 if (!status)
5452 status = decode_sessionid(xdr, &res->session->sess_id);
5453 if (unlikely(status))
5454 return status;
5455
5456 /* dir flags, rdma mode bool */
5457 p = xdr_inline_decode(xdr, 8);
5458 if (unlikely(!p))
5459 goto out_overflow;
5460
5461 res->dir = be32_to_cpup(p++);
5462 if (res->dir == 0 || res->dir > NFS4_CDFS4_BOTH)
5463 return -EIO;
5464 if (be32_to_cpup(p) == 0)
5465 res->use_conn_in_rdma_mode = false;
5466 else
5467 res->use_conn_in_rdma_mode = true;
5468
5469 return 0;
5470out_overflow:
5471 print_overflow_msg(__func__, xdr);
5472 return -EIO;
5473}
5474
5279static int decode_create_session(struct xdr_stream *xdr, 5475static int decode_create_session(struct xdr_stream *xdr,
5280 struct nfs41_create_session_res *res) 5476 struct nfs41_create_session_res *res)
5281{ 5477{
@@ -5312,6 +5508,11 @@ static int decode_destroy_session(struct xdr_stream *xdr, void *dummy)
5312 return decode_op_hdr(xdr, OP_DESTROY_SESSION); 5508 return decode_op_hdr(xdr, OP_DESTROY_SESSION);
5313} 5509}
5314 5510
5511static int decode_destroy_clientid(struct xdr_stream *xdr, void *dummy)
5512{
5513 return decode_op_hdr(xdr, OP_DESTROY_CLIENTID);
5514}
5515
5315static int decode_reclaim_complete(struct xdr_stream *xdr, void *dummy) 5516static int decode_reclaim_complete(struct xdr_stream *xdr, void *dummy)
5316{ 5517{
5317 return decode_op_hdr(xdr, OP_RECLAIM_COMPLETE); 5518 return decode_op_hdr(xdr, OP_RECLAIM_COMPLETE);
@@ -5800,9 +6001,6 @@ static int nfs4_xdr_dec_remove(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
5800 if (status) 6001 if (status)
5801 goto out; 6002 goto out;
5802 status = decode_remove(xdr, &res->cinfo); 6003 status = decode_remove(xdr, &res->cinfo);
5803 if (status)
5804 goto out;
5805 decode_getfattr(xdr, res->dir_attr, res->server);
5806out: 6004out:
5807 return status; 6005 return status;
5808} 6006}
@@ -5832,15 +6030,6 @@ static int nfs4_xdr_dec_rename(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
5832 if (status) 6030 if (status)
5833 goto out; 6031 goto out;
5834 status = decode_rename(xdr, &res->old_cinfo, &res->new_cinfo); 6032 status = decode_rename(xdr, &res->old_cinfo, &res->new_cinfo);
5835 if (status)
5836 goto out;
5837 /* Current FH is target directory */
5838 if (decode_getfattr(xdr, res->new_fattr, res->server))
5839 goto out;
5840 status = decode_restorefh(xdr);
5841 if (status)
5842 goto out;
5843 decode_getfattr(xdr, res->old_fattr, res->server);
5844out: 6033out:
5845 return status; 6034 return status;
5846} 6035}
@@ -5876,8 +6065,6 @@ static int nfs4_xdr_dec_link(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
5876 * Note order: OP_LINK leaves the directory as the current 6065 * Note order: OP_LINK leaves the directory as the current
5877 * filehandle. 6066 * filehandle.
5878 */ 6067 */
5879 if (decode_getfattr(xdr, res->dir_attr, res->server))
5880 goto out;
5881 status = decode_restorefh(xdr); 6068 status = decode_restorefh(xdr);
5882 if (status) 6069 if (status)
5883 goto out; 6070 goto out;
@@ -5904,21 +6091,13 @@ static int nfs4_xdr_dec_create(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
5904 status = decode_putfh(xdr); 6091 status = decode_putfh(xdr);
5905 if (status) 6092 if (status)
5906 goto out; 6093 goto out;
5907 status = decode_savefh(xdr);
5908 if (status)
5909 goto out;
5910 status = decode_create(xdr, &res->dir_cinfo); 6094 status = decode_create(xdr, &res->dir_cinfo);
5911 if (status) 6095 if (status)
5912 goto out; 6096 goto out;
5913 status = decode_getfh(xdr, res->fh); 6097 status = decode_getfh(xdr, res->fh);
5914 if (status) 6098 if (status)
5915 goto out; 6099 goto out;
5916 if (decode_getfattr(xdr, res->fattr, res->server)) 6100 decode_getfattr(xdr, res->fattr, res->server);
5917 goto out;
5918 status = decode_restorefh(xdr);
5919 if (status)
5920 goto out;
5921 decode_getfattr(xdr, res->dir_fattr, res->server);
5922out: 6101out:
5923 return status; 6102 return status;
5924} 6103}
@@ -6075,19 +6254,12 @@ static int nfs4_xdr_dec_open(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
6075 status = decode_putfh(xdr); 6254 status = decode_putfh(xdr);
6076 if (status) 6255 if (status)
6077 goto out; 6256 goto out;
6078 status = decode_savefh(xdr);
6079 if (status)
6080 goto out;
6081 status = decode_open(xdr, res); 6257 status = decode_open(xdr, res);
6082 if (status) 6258 if (status)
6083 goto out; 6259 goto out;
6084 if (decode_getfh(xdr, &res->fh) != 0) 6260 if (decode_getfh(xdr, &res->fh) != 0)
6085 goto out; 6261 goto out;
6086 if (decode_getfattr(xdr, res->f_attr, res->server) != 0) 6262 decode_getfattr(xdr, res->f_attr, res->server);
6087 goto out;
6088 if (decode_restorefh(xdr) != 0)
6089 goto out;
6090 decode_getfattr(xdr, res->dir_attr, res->server);
6091out: 6263out:
6092 return status; 6264 return status;
6093} 6265}
@@ -6353,7 +6525,7 @@ out:
6353 * Decode COMMIT response 6525 * Decode COMMIT response
6354 */ 6526 */
6355static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr, 6527static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
6356 struct nfs_writeres *res) 6528 struct nfs_commitres *res)
6357{ 6529{
6358 struct compound_hdr hdr; 6530 struct compound_hdr hdr;
6359 int status; 6531 int status;
@@ -6368,10 +6540,6 @@ static int nfs4_xdr_dec_commit(struct rpc_rqst *rqstp, struct xdr_stream *xdr,
6368 if (status) 6540 if (status)
6369 goto out; 6541 goto out;
6370 status = decode_commit(xdr, res); 6542 status = decode_commit(xdr, res);
6371 if (status)
6372 goto out;
6373 if (res->fattr)
6374 decode_getfattr(xdr, res->fattr, res->server);
6375out: 6543out:
6376 return status; 6544 return status;
6377} 6545}
@@ -6527,10 +6695,10 @@ static int nfs4_xdr_dec_delegreturn(struct rpc_rqst *rqstp,
6527 status = decode_putfh(xdr); 6695 status = decode_putfh(xdr);
6528 if (status != 0) 6696 if (status != 0)
6529 goto out; 6697 goto out;
6530 status = decode_delegreturn(xdr); 6698 status = decode_getfattr(xdr, res->fattr, res->server);
6531 if (status != 0) 6699 if (status != 0)
6532 goto out; 6700 goto out;
6533 decode_getfattr(xdr, res->fattr, res->server); 6701 status = decode_delegreturn(xdr);
6534out: 6702out:
6535 return status; 6703 return status;
6536} 6704}
@@ -6591,6 +6759,22 @@ out:
6591 6759
6592#if defined(CONFIG_NFS_V4_1) 6760#if defined(CONFIG_NFS_V4_1)
6593/* 6761/*
6762 * Decode BIND_CONN_TO_SESSION response
6763 */
6764static int nfs4_xdr_dec_bind_conn_to_session(struct rpc_rqst *rqstp,
6765 struct xdr_stream *xdr,
6766 void *res)
6767{
6768 struct compound_hdr hdr;
6769 int status;
6770
6771 status = decode_compound_hdr(xdr, &hdr);
6772 if (!status)
6773 status = decode_bind_conn_to_session(xdr, res);
6774 return status;
6775}
6776
6777/*
6594 * Decode EXCHANGE_ID response 6778 * Decode EXCHANGE_ID response
6595 */ 6779 */
6596static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp, 6780static int nfs4_xdr_dec_exchange_id(struct rpc_rqst *rqstp,
@@ -6639,6 +6823,22 @@ static int nfs4_xdr_dec_destroy_session(struct rpc_rqst *rqstp,
6639} 6823}
6640 6824
6641/* 6825/*
6826 * Decode DESTROY_CLIENTID response
6827 */
6828static int nfs4_xdr_dec_destroy_clientid(struct rpc_rqst *rqstp,
6829 struct xdr_stream *xdr,
6830 void *res)
6831{
6832 struct compound_hdr hdr;
6833 int status;
6834
6835 status = decode_compound_hdr(xdr, &hdr);
6836 if (!status)
6837 status = decode_destroy_clientid(xdr, res);
6838 return status;
6839}
6840
6841/*
6642 * Decode SEQUENCE response 6842 * Decode SEQUENCE response
6643 */ 6843 */
6644static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp, 6844static int nfs4_xdr_dec_sequence(struct rpc_rqst *rqstp,
@@ -7085,6 +7285,9 @@ struct rpc_procinfo nfs4_procedures[] = {
7085 PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid), 7285 PROC(TEST_STATEID, enc_test_stateid, dec_test_stateid),
7086 PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid), 7286 PROC(FREE_STATEID, enc_free_stateid, dec_free_stateid),
7087 PROC(GETDEVICELIST, enc_getdevicelist, dec_getdevicelist), 7287 PROC(GETDEVICELIST, enc_getdevicelist, dec_getdevicelist),
7288 PROC(BIND_CONN_TO_SESSION,
7289 enc_bind_conn_to_session, dec_bind_conn_to_session),
7290 PROC(DESTROY_CLIENTID, enc_destroy_clientid, dec_destroy_clientid),
7088#endif /* CONFIG_NFS_V4_1 */ 7291#endif /* CONFIG_NFS_V4_1 */
7089}; 7292};
7090 7293
diff --git a/fs/nfs/objlayout/objio_osd.c b/fs/nfs/objlayout/objio_osd.c
index 4bff4a3dab46..b47277baebab 100644
--- a/fs/nfs/objlayout/objio_osd.c
+++ b/fs/nfs/objlayout/objio_osd.c
@@ -211,7 +211,7 @@ static void copy_single_comp(struct ore_components *oc, unsigned c,
211 memcpy(ocomp->cred, src_comp->oc_cap.cred, sizeof(ocomp->cred)); 211 memcpy(ocomp->cred, src_comp->oc_cap.cred, sizeof(ocomp->cred));
212} 212}
213 213
214int __alloc_objio_seg(unsigned numdevs, gfp_t gfp_flags, 214static int __alloc_objio_seg(unsigned numdevs, gfp_t gfp_flags,
215 struct objio_segment **pseg) 215 struct objio_segment **pseg)
216{ 216{
217/* This is the in memory structure of the objio_segment 217/* This is the in memory structure of the objio_segment
@@ -440,11 +440,12 @@ static void _read_done(struct ore_io_state *ios, void *private)
440 440
441int objio_read_pagelist(struct nfs_read_data *rdata) 441int objio_read_pagelist(struct nfs_read_data *rdata)
442{ 442{
443 struct nfs_pgio_header *hdr = rdata->header;
443 struct objio_state *objios; 444 struct objio_state *objios;
444 int ret; 445 int ret;
445 446
446 ret = objio_alloc_io_state(NFS_I(rdata->inode)->layout, true, 447 ret = objio_alloc_io_state(NFS_I(hdr->inode)->layout, true,
447 rdata->lseg, rdata->args.pages, rdata->args.pgbase, 448 hdr->lseg, rdata->args.pages, rdata->args.pgbase,
448 rdata->args.offset, rdata->args.count, rdata, 449 rdata->args.offset, rdata->args.count, rdata,
449 GFP_KERNEL, &objios); 450 GFP_KERNEL, &objios);
450 if (unlikely(ret)) 451 if (unlikely(ret))
@@ -483,12 +484,12 @@ static struct page *__r4w_get_page(void *priv, u64 offset, bool *uptodate)
483{ 484{
484 struct objio_state *objios = priv; 485 struct objio_state *objios = priv;
485 struct nfs_write_data *wdata = objios->oir.rpcdata; 486 struct nfs_write_data *wdata = objios->oir.rpcdata;
487 struct address_space *mapping = wdata->header->inode->i_mapping;
486 pgoff_t index = offset / PAGE_SIZE; 488 pgoff_t index = offset / PAGE_SIZE;
487 struct page *page = find_get_page(wdata->inode->i_mapping, index); 489 struct page *page = find_get_page(mapping, index);
488 490
489 if (!page) { 491 if (!page) {
490 page = find_or_create_page(wdata->inode->i_mapping, 492 page = find_or_create_page(mapping, index, GFP_NOFS);
491 index, GFP_NOFS);
492 if (unlikely(!page)) { 493 if (unlikely(!page)) {
493 dprintk("%s: grab_cache_page Failed index=0x%lx\n", 494 dprintk("%s: grab_cache_page Failed index=0x%lx\n",
494 __func__, index); 495 __func__, index);
@@ -518,11 +519,12 @@ static const struct _ore_r4w_op _r4w_op = {
518 519
519int objio_write_pagelist(struct nfs_write_data *wdata, int how) 520int objio_write_pagelist(struct nfs_write_data *wdata, int how)
520{ 521{
522 struct nfs_pgio_header *hdr = wdata->header;
521 struct objio_state *objios; 523 struct objio_state *objios;
522 int ret; 524 int ret;
523 525
524 ret = objio_alloc_io_state(NFS_I(wdata->inode)->layout, false, 526 ret = objio_alloc_io_state(NFS_I(hdr->inode)->layout, false,
525 wdata->lseg, wdata->args.pages, wdata->args.pgbase, 527 hdr->lseg, wdata->args.pages, wdata->args.pgbase,
526 wdata->args.offset, wdata->args.count, wdata, GFP_NOFS, 528 wdata->args.offset, wdata->args.count, wdata, GFP_NOFS,
527 &objios); 529 &objios);
528 if (unlikely(ret)) 530 if (unlikely(ret))
diff --git a/fs/nfs/objlayout/objlayout.c b/fs/nfs/objlayout/objlayout.c
index 595c5fc21a19..874613545301 100644
--- a/fs/nfs/objlayout/objlayout.c
+++ b/fs/nfs/objlayout/objlayout.c
@@ -258,7 +258,7 @@ objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
258 if (status >= 0) 258 if (status >= 0)
259 rdata->res.count = status; 259 rdata->res.count = status;
260 else 260 else
261 rdata->pnfs_error = status; 261 rdata->header->pnfs_error = status;
262 objlayout_iodone(oir); 262 objlayout_iodone(oir);
263 /* must not use oir after this point */ 263 /* must not use oir after this point */
264 264
@@ -279,12 +279,14 @@ objlayout_read_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
279enum pnfs_try_status 279enum pnfs_try_status
280objlayout_read_pagelist(struct nfs_read_data *rdata) 280objlayout_read_pagelist(struct nfs_read_data *rdata)
281{ 281{
282 struct nfs_pgio_header *hdr = rdata->header;
283 struct inode *inode = hdr->inode;
282 loff_t offset = rdata->args.offset; 284 loff_t offset = rdata->args.offset;
283 size_t count = rdata->args.count; 285 size_t count = rdata->args.count;
284 int err; 286 int err;
285 loff_t eof; 287 loff_t eof;
286 288
287 eof = i_size_read(rdata->inode); 289 eof = i_size_read(inode);
288 if (unlikely(offset + count > eof)) { 290 if (unlikely(offset + count > eof)) {
289 if (offset >= eof) { 291 if (offset >= eof) {
290 err = 0; 292 err = 0;
@@ -297,17 +299,17 @@ objlayout_read_pagelist(struct nfs_read_data *rdata)
297 } 299 }
298 300
299 rdata->res.eof = (offset + count) >= eof; 301 rdata->res.eof = (offset + count) >= eof;
300 _fix_verify_io_params(rdata->lseg, &rdata->args.pages, 302 _fix_verify_io_params(hdr->lseg, &rdata->args.pages,
301 &rdata->args.pgbase, 303 &rdata->args.pgbase,
302 rdata->args.offset, rdata->args.count); 304 rdata->args.offset, rdata->args.count);
303 305
304 dprintk("%s: inode(%lx) offset 0x%llx count 0x%Zx eof=%d\n", 306 dprintk("%s: inode(%lx) offset 0x%llx count 0x%Zx eof=%d\n",
305 __func__, rdata->inode->i_ino, offset, count, rdata->res.eof); 307 __func__, inode->i_ino, offset, count, rdata->res.eof);
306 308
307 err = objio_read_pagelist(rdata); 309 err = objio_read_pagelist(rdata);
308 out: 310 out:
309 if (unlikely(err)) { 311 if (unlikely(err)) {
310 rdata->pnfs_error = err; 312 hdr->pnfs_error = err;
311 dprintk("%s: Returned Error %d\n", __func__, err); 313 dprintk("%s: Returned Error %d\n", __func__, err);
312 return PNFS_NOT_ATTEMPTED; 314 return PNFS_NOT_ATTEMPTED;
313 } 315 }
@@ -340,7 +342,7 @@ objlayout_write_done(struct objlayout_io_res *oir, ssize_t status, bool sync)
340 wdata->res.count = status; 342 wdata->res.count = status;
341 wdata->verf.committed = oir->committed; 343 wdata->verf.committed = oir->committed;
342 } else { 344 } else {
343 wdata->pnfs_error = status; 345 wdata->header->pnfs_error = status;
344 } 346 }
345 objlayout_iodone(oir); 347 objlayout_iodone(oir);
346 /* must not use oir after this point */ 348 /* must not use oir after this point */
@@ -363,15 +365,16 @@ enum pnfs_try_status
363objlayout_write_pagelist(struct nfs_write_data *wdata, 365objlayout_write_pagelist(struct nfs_write_data *wdata,
364 int how) 366 int how)
365{ 367{
368 struct nfs_pgio_header *hdr = wdata->header;
366 int err; 369 int err;
367 370
368 _fix_verify_io_params(wdata->lseg, &wdata->args.pages, 371 _fix_verify_io_params(hdr->lseg, &wdata->args.pages,
369 &wdata->args.pgbase, 372 &wdata->args.pgbase,
370 wdata->args.offset, wdata->args.count); 373 wdata->args.offset, wdata->args.count);
371 374
372 err = objio_write_pagelist(wdata, how); 375 err = objio_write_pagelist(wdata, how);
373 if (unlikely(err)) { 376 if (unlikely(err)) {
374 wdata->pnfs_error = err; 377 hdr->pnfs_error = err;
375 dprintk("%s: Returned Error %d\n", __func__, err); 378 dprintk("%s: Returned Error %d\n", __func__, err);
376 return PNFS_NOT_ATTEMPTED; 379 return PNFS_NOT_ATTEMPTED;
377 } 380 }
diff --git a/fs/nfs/pagelist.c b/fs/nfs/pagelist.c
index d21fceaa9f62..aed913c833f4 100644
--- a/fs/nfs/pagelist.c
+++ b/fs/nfs/pagelist.c
@@ -26,6 +26,47 @@
26 26
27static struct kmem_cache *nfs_page_cachep; 27static struct kmem_cache *nfs_page_cachep;
28 28
29bool nfs_pgarray_set(struct nfs_page_array *p, unsigned int pagecount)
30{
31 p->npages = pagecount;
32 if (pagecount <= ARRAY_SIZE(p->page_array))
33 p->pagevec = p->page_array;
34 else {
35 p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL);
36 if (!p->pagevec)
37 p->npages = 0;
38 }
39 return p->pagevec != NULL;
40}
41
42void nfs_pgheader_init(struct nfs_pageio_descriptor *desc,
43 struct nfs_pgio_header *hdr,
44 void (*release)(struct nfs_pgio_header *hdr))
45{
46 hdr->req = nfs_list_entry(desc->pg_list.next);
47 hdr->inode = desc->pg_inode;
48 hdr->cred = hdr->req->wb_context->cred;
49 hdr->io_start = req_offset(hdr->req);
50 hdr->good_bytes = desc->pg_count;
51 hdr->dreq = desc->pg_dreq;
52 hdr->release = release;
53 hdr->completion_ops = desc->pg_completion_ops;
54 if (hdr->completion_ops->init_hdr)
55 hdr->completion_ops->init_hdr(hdr);
56}
57
58void nfs_set_pgio_error(struct nfs_pgio_header *hdr, int error, loff_t pos)
59{
60 spin_lock(&hdr->lock);
61 if (pos < hdr->io_start + hdr->good_bytes) {
62 set_bit(NFS_IOHDR_ERROR, &hdr->flags);
63 clear_bit(NFS_IOHDR_EOF, &hdr->flags);
64 hdr->good_bytes = pos - hdr->io_start;
65 hdr->error = error;
66 }
67 spin_unlock(&hdr->lock);
68}
69
29static inline struct nfs_page * 70static inline struct nfs_page *
30nfs_page_alloc(void) 71nfs_page_alloc(void)
31{ 72{
@@ -76,12 +117,8 @@ nfs_create_request(struct nfs_open_context *ctx, struct inode *inode,
76 * long write-back delay. This will be adjusted in 117 * long write-back delay. This will be adjusted in
77 * update_nfs_request below if the region is not locked. */ 118 * update_nfs_request below if the region is not locked. */
78 req->wb_page = page; 119 req->wb_page = page;
79 atomic_set(&req->wb_complete, 0);
80 req->wb_index = page->index; 120 req->wb_index = page->index;
81 page_cache_get(page); 121 page_cache_get(page);
82 BUG_ON(PagePrivate(page));
83 BUG_ON(!PageLocked(page));
84 BUG_ON(page->mapping->host != inode);
85 req->wb_offset = offset; 122 req->wb_offset = offset;
86 req->wb_pgbase = offset; 123 req->wb_pgbase = offset;
87 req->wb_bytes = count; 124 req->wb_bytes = count;
@@ -104,6 +141,15 @@ void nfs_unlock_request(struct nfs_page *req)
104 clear_bit(PG_BUSY, &req->wb_flags); 141 clear_bit(PG_BUSY, &req->wb_flags);
105 smp_mb__after_clear_bit(); 142 smp_mb__after_clear_bit();
106 wake_up_bit(&req->wb_flags, PG_BUSY); 143 wake_up_bit(&req->wb_flags, PG_BUSY);
144}
145
146/**
147 * nfs_unlock_and_release_request - Unlock request and release the nfs_page
148 * @req:
149 */
150void nfs_unlock_and_release_request(struct nfs_page *req)
151{
152 nfs_unlock_request(req);
107 nfs_release_request(req); 153 nfs_release_request(req);
108} 154}
109 155
@@ -203,6 +249,7 @@ EXPORT_SYMBOL_GPL(nfs_generic_pg_test);
203void nfs_pageio_init(struct nfs_pageio_descriptor *desc, 249void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
204 struct inode *inode, 250 struct inode *inode,
205 const struct nfs_pageio_ops *pg_ops, 251 const struct nfs_pageio_ops *pg_ops,
252 const struct nfs_pgio_completion_ops *compl_ops,
206 size_t bsize, 253 size_t bsize,
207 int io_flags) 254 int io_flags)
208{ 255{
@@ -215,9 +262,11 @@ void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
215 desc->pg_recoalesce = 0; 262 desc->pg_recoalesce = 0;
216 desc->pg_inode = inode; 263 desc->pg_inode = inode;
217 desc->pg_ops = pg_ops; 264 desc->pg_ops = pg_ops;
265 desc->pg_completion_ops = compl_ops;
218 desc->pg_ioflags = io_flags; 266 desc->pg_ioflags = io_flags;
219 desc->pg_error = 0; 267 desc->pg_error = 0;
220 desc->pg_lseg = NULL; 268 desc->pg_lseg = NULL;
269 desc->pg_dreq = NULL;
221} 270}
222 271
223/** 272/**
@@ -241,12 +290,12 @@ static bool nfs_can_coalesce_requests(struct nfs_page *prev,
241 return false; 290 return false;
242 if (req->wb_context->state != prev->wb_context->state) 291 if (req->wb_context->state != prev->wb_context->state)
243 return false; 292 return false;
244 if (req->wb_index != (prev->wb_index + 1))
245 return false;
246 if (req->wb_pgbase != 0) 293 if (req->wb_pgbase != 0)
247 return false; 294 return false;
248 if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE) 295 if (prev->wb_pgbase + prev->wb_bytes != PAGE_CACHE_SIZE)
249 return false; 296 return false;
297 if (req_offset(req) != req_offset(prev) + prev->wb_bytes)
298 return false;
250 return pgio->pg_ops->pg_test(pgio, prev, req); 299 return pgio->pg_ops->pg_test(pgio, prev, req);
251} 300}
252 301
diff --git a/fs/nfs/pnfs.c b/fs/nfs/pnfs.c
index 38512bcd2e98..b8323aa7b543 100644
--- a/fs/nfs/pnfs.c
+++ b/fs/nfs/pnfs.c
@@ -395,6 +395,9 @@ mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
395 dprintk("%s:Begin lo %p\n", __func__, lo); 395 dprintk("%s:Begin lo %p\n", __func__, lo);
396 396
397 if (list_empty(&lo->plh_segs)) { 397 if (list_empty(&lo->plh_segs)) {
398 /* Reset MDS Threshold I/O counters */
399 NFS_I(lo->plh_inode)->write_io = 0;
400 NFS_I(lo->plh_inode)->read_io = 0;
398 if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) 401 if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
399 put_layout_hdr_locked(lo); 402 put_layout_hdr_locked(lo);
400 return 0; 403 return 0;
@@ -455,6 +458,7 @@ pnfs_destroy_layout(struct nfs_inode *nfsi)
455 spin_unlock(&nfsi->vfs_inode.i_lock); 458 spin_unlock(&nfsi->vfs_inode.i_lock);
456 pnfs_free_lseg_list(&tmp_list); 459 pnfs_free_lseg_list(&tmp_list);
457} 460}
461EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
458 462
459/* 463/*
460 * Called by the state manger to remove all layouts established under an 464 * Called by the state manger to remove all layouts established under an
@@ -692,6 +696,7 @@ out:
692 dprintk("<-- %s status: %d\n", __func__, status); 696 dprintk("<-- %s status: %d\n", __func__, status);
693 return status; 697 return status;
694} 698}
699EXPORT_SYMBOL_GPL(_pnfs_return_layout);
695 700
696bool pnfs_roc(struct inode *ino) 701bool pnfs_roc(struct inode *ino)
697{ 702{
@@ -931,6 +936,81 @@ pnfs_find_lseg(struct pnfs_layout_hdr *lo,
931} 936}
932 937
933/* 938/*
939 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
940 * to the MDS or over pNFS
941 *
942 * The nfs_inode read_io and write_io fields are cumulative counters reset
943 * when there are no layout segments. Note that in pnfs_update_layout iomode
944 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
945 * WRITE request.
946 *
947 * A return of true means use MDS I/O.
948 *
949 * From rfc 5661:
950 * If a file's size is smaller than the file size threshold, data accesses
951 * SHOULD be sent to the metadata server. If an I/O request has a length that
952 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
953 * server. If both file size and I/O size are provided, the client SHOULD
954 * reach or exceed both thresholds before sending its read or write
955 * requests to the data server.
956 */
957static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
958 struct inode *ino, int iomode)
959{
960 struct nfs4_threshold *t = ctx->mdsthreshold;
961 struct nfs_inode *nfsi = NFS_I(ino);
962 loff_t fsize = i_size_read(ino);
963 bool size = false, size_set = false, io = false, io_set = false, ret = false;
964
965 if (t == NULL)
966 return ret;
967
968 dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
969 __func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);
970
971 switch (iomode) {
972 case IOMODE_READ:
973 if (t->bm & THRESHOLD_RD) {
974 dprintk("%s fsize %llu\n", __func__, fsize);
975 size_set = true;
976 if (fsize < t->rd_sz)
977 size = true;
978 }
979 if (t->bm & THRESHOLD_RD_IO) {
980 dprintk("%s nfsi->read_io %llu\n", __func__,
981 nfsi->read_io);
982 io_set = true;
983 if (nfsi->read_io < t->rd_io_sz)
984 io = true;
985 }
986 break;
987 case IOMODE_RW:
988 if (t->bm & THRESHOLD_WR) {
989 dprintk("%s fsize %llu\n", __func__, fsize);
990 size_set = true;
991 if (fsize < t->wr_sz)
992 size = true;
993 }
994 if (t->bm & THRESHOLD_WR_IO) {
995 dprintk("%s nfsi->write_io %llu\n", __func__,
996 nfsi->write_io);
997 io_set = true;
998 if (nfsi->write_io < t->wr_io_sz)
999 io = true;
1000 }
1001 break;
1002 }
1003 if (size_set && io_set) {
1004 if (size && io)
1005 ret = true;
1006 } else if (size || io)
1007 ret = true;
1008
1009 dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
1010 return ret;
1011}
1012
1013/*
934 * Layout segment is retreived from the server if not cached. 1014 * Layout segment is retreived from the server if not cached.
935 * The appropriate layout segment is referenced and returned to the caller. 1015 * The appropriate layout segment is referenced and returned to the caller.
936 */ 1016 */
@@ -957,6 +1037,10 @@ pnfs_update_layout(struct inode *ino,
957 1037
958 if (!pnfs_enabled_sb(NFS_SERVER(ino))) 1038 if (!pnfs_enabled_sb(NFS_SERVER(ino)))
959 return NULL; 1039 return NULL;
1040
1041 if (pnfs_within_mdsthreshold(ctx, ino, iomode))
1042 return NULL;
1043
960 spin_lock(&ino->i_lock); 1044 spin_lock(&ino->i_lock);
961 lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags); 1045 lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
962 if (lo == NULL) { 1046 if (lo == NULL) {
@@ -1082,6 +1166,10 @@ pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *r
1082{ 1166{
1083 BUG_ON(pgio->pg_lseg != NULL); 1167 BUG_ON(pgio->pg_lseg != NULL);
1084 1168
1169 if (req->wb_offset != req->wb_pgbase) {
1170 nfs_pageio_reset_read_mds(pgio);
1171 return;
1172 }
1085 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 1173 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1086 req->wb_context, 1174 req->wb_context,
1087 req_offset(req), 1175 req_offset(req),
@@ -1100,6 +1188,10 @@ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *
1100{ 1188{
1101 BUG_ON(pgio->pg_lseg != NULL); 1189 BUG_ON(pgio->pg_lseg != NULL);
1102 1190
1191 if (req->wb_offset != req->wb_pgbase) {
1192 nfs_pageio_reset_write_mds(pgio);
1193 return;
1194 }
1103 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode, 1195 pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
1104 req->wb_context, 1196 req->wb_context,
1105 req_offset(req), 1197 req_offset(req),
@@ -1113,26 +1205,31 @@ pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *
1113EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write); 1205EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
1114 1206
1115bool 1207bool
1116pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode) 1208pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
1209 const struct nfs_pgio_completion_ops *compl_ops)
1117{ 1210{
1118 struct nfs_server *server = NFS_SERVER(inode); 1211 struct nfs_server *server = NFS_SERVER(inode);
1119 struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld; 1212 struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
1120 1213
1121 if (ld == NULL) 1214 if (ld == NULL)
1122 return false; 1215 return false;
1123 nfs_pageio_init(pgio, inode, ld->pg_read_ops, server->rsize, 0); 1216 nfs_pageio_init(pgio, inode, ld->pg_read_ops, compl_ops,
1217 server->rsize, 0);
1124 return true; 1218 return true;
1125} 1219}
1126 1220
1127bool 1221bool
1128pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags) 1222pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode,
1223 int ioflags,
1224 const struct nfs_pgio_completion_ops *compl_ops)
1129{ 1225{
1130 struct nfs_server *server = NFS_SERVER(inode); 1226 struct nfs_server *server = NFS_SERVER(inode);
1131 struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld; 1227 struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;
1132 1228
1133 if (ld == NULL) 1229 if (ld == NULL)
1134 return false; 1230 return false;
1135 nfs_pageio_init(pgio, inode, ld->pg_write_ops, server->wsize, ioflags); 1231 nfs_pageio_init(pgio, inode, ld->pg_write_ops, compl_ops,
1232 server->wsize, ioflags);
1136 return true; 1233 return true;
1137} 1234}
1138 1235
@@ -1162,13 +1259,15 @@ pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
1162} 1259}
1163EXPORT_SYMBOL_GPL(pnfs_generic_pg_test); 1260EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
1164 1261
1165static int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head) 1262int pnfs_write_done_resend_to_mds(struct inode *inode,
1263 struct list_head *head,
1264 const struct nfs_pgio_completion_ops *compl_ops)
1166{ 1265{
1167 struct nfs_pageio_descriptor pgio; 1266 struct nfs_pageio_descriptor pgio;
1168 LIST_HEAD(failed); 1267 LIST_HEAD(failed);
1169 1268
1170 /* Resend all requests through the MDS */ 1269 /* Resend all requests through the MDS */
1171 nfs_pageio_init_write_mds(&pgio, inode, FLUSH_STABLE); 1270 nfs_pageio_init_write_mds(&pgio, inode, FLUSH_STABLE, compl_ops);
1172 while (!list_empty(head)) { 1271 while (!list_empty(head)) {
1173 struct nfs_page *req = nfs_list_entry(head->next); 1272 struct nfs_page *req = nfs_list_entry(head->next);
1174 1273
@@ -1188,30 +1287,37 @@ static int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *
1188 } 1287 }
1189 return 0; 1288 return 0;
1190} 1289}
1290EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
1291
1292static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
1293{
1294 struct nfs_pgio_header *hdr = data->header;
1295
1296 dprintk("pnfs write error = %d\n", hdr->pnfs_error);
1297 if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
1298 PNFS_LAYOUTRET_ON_ERROR) {
1299 clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
1300 pnfs_return_layout(hdr->inode);
1301 }
1302 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
1303 data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
1304 &hdr->pages,
1305 hdr->completion_ops);
1306}
1191 1307
1192/* 1308/*
1193 * Called by non rpc-based layout drivers 1309 * Called by non rpc-based layout drivers
1194 */ 1310 */
1195void pnfs_ld_write_done(struct nfs_write_data *data) 1311void pnfs_ld_write_done(struct nfs_write_data *data)
1196{ 1312{
1197 if (likely(!data->pnfs_error)) { 1313 struct nfs_pgio_header *hdr = data->header;
1314
1315 if (!hdr->pnfs_error) {
1198 pnfs_set_layoutcommit(data); 1316 pnfs_set_layoutcommit(data);
1199 data->mds_ops->rpc_call_done(&data->task, data); 1317 hdr->mds_ops->rpc_call_done(&data->task, data);
1200 } else { 1318 } else
1201 dprintk("pnfs write error = %d\n", data->pnfs_error); 1319 pnfs_ld_handle_write_error(data);
1202 if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags & 1320 hdr->mds_ops->rpc_release(data);
1203 PNFS_LAYOUTRET_ON_ERROR) {
1204 /* Don't lo_commit on error, Server will needs to
1205 * preform a file recovery.
1206 */
1207 clear_bit(NFS_INO_LAYOUTCOMMIT,
1208 &NFS_I(data->inode)->flags);
1209 pnfs_return_layout(data->inode);
1210 }
1211 data->task.tk_status = pnfs_write_done_resend_to_mds(data->inode, &data->pages);
1212 }
1213 put_lseg(data->lseg);
1214 data->mds_ops->rpc_release(data);
1215} 1321}
1216EXPORT_SYMBOL_GPL(pnfs_ld_write_done); 1322EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
1217 1323
@@ -1219,12 +1325,13 @@ static void
1219pnfs_write_through_mds(struct nfs_pageio_descriptor *desc, 1325pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
1220 struct nfs_write_data *data) 1326 struct nfs_write_data *data)
1221{ 1327{
1222 list_splice_tail_init(&data->pages, &desc->pg_list); 1328 struct nfs_pgio_header *hdr = data->header;
1223 if (data->req && list_empty(&data->req->wb_list)) 1329
1224 nfs_list_add_request(data->req, &desc->pg_list); 1330 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1225 nfs_pageio_reset_write_mds(desc); 1331 list_splice_tail_init(&hdr->pages, &desc->pg_list);
1226 desc->pg_recoalesce = 1; 1332 nfs_pageio_reset_write_mds(desc);
1227 put_lseg(data->lseg); 1333 desc->pg_recoalesce = 1;
1334 }
1228 nfs_writedata_release(data); 1335 nfs_writedata_release(data);
1229} 1336}
1230 1337
@@ -1234,23 +1341,18 @@ pnfs_try_to_write_data(struct nfs_write_data *wdata,
1234 struct pnfs_layout_segment *lseg, 1341 struct pnfs_layout_segment *lseg,
1235 int how) 1342 int how)
1236{ 1343{
1237 struct inode *inode = wdata->inode; 1344 struct nfs_pgio_header *hdr = wdata->header;
1345 struct inode *inode = hdr->inode;
1238 enum pnfs_try_status trypnfs; 1346 enum pnfs_try_status trypnfs;
1239 struct nfs_server *nfss = NFS_SERVER(inode); 1347 struct nfs_server *nfss = NFS_SERVER(inode);
1240 1348
1241 wdata->mds_ops = call_ops; 1349 hdr->mds_ops = call_ops;
1242 wdata->lseg = get_lseg(lseg);
1243 1350
1244 dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__, 1351 dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
1245 inode->i_ino, wdata->args.count, wdata->args.offset, how); 1352 inode->i_ino, wdata->args.count, wdata->args.offset, how);
1246
1247 trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how); 1353 trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
1248 if (trypnfs == PNFS_NOT_ATTEMPTED) { 1354 if (trypnfs != PNFS_NOT_ATTEMPTED)
1249 put_lseg(wdata->lseg);
1250 wdata->lseg = NULL;
1251 } else
1252 nfs_inc_stats(inode, NFSIOS_PNFS_WRITE); 1355 nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
1253
1254 dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs); 1356 dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
1255 return trypnfs; 1357 return trypnfs;
1256} 1358}
@@ -1266,7 +1368,7 @@ pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *he
1266 while (!list_empty(head)) { 1368 while (!list_empty(head)) {
1267 enum pnfs_try_status trypnfs; 1369 enum pnfs_try_status trypnfs;
1268 1370
1269 data = list_entry(head->next, struct nfs_write_data, list); 1371 data = list_first_entry(head, struct nfs_write_data, list);
1270 list_del_init(&data->list); 1372 list_del_init(&data->list);
1271 1373
1272 trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how); 1374 trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
@@ -1276,43 +1378,82 @@ pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *he
1276 put_lseg(lseg); 1378 put_lseg(lseg);
1277} 1379}
1278 1380
1381static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
1382{
1383 put_lseg(hdr->lseg);
1384 nfs_writehdr_free(hdr);
1385}
1386
1279int 1387int
1280pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc) 1388pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
1281{ 1389{
1282 LIST_HEAD(head); 1390 struct nfs_write_header *whdr;
1391 struct nfs_pgio_header *hdr;
1283 int ret; 1392 int ret;
1284 1393
1285 ret = nfs_generic_flush(desc, &head); 1394 whdr = nfs_writehdr_alloc();
1286 if (ret != 0) { 1395 if (!whdr) {
1396 desc->pg_completion_ops->error_cleanup(&desc->pg_list);
1287 put_lseg(desc->pg_lseg); 1397 put_lseg(desc->pg_lseg);
1288 desc->pg_lseg = NULL; 1398 desc->pg_lseg = NULL;
1289 return ret; 1399 return -ENOMEM;
1290 } 1400 }
1291 pnfs_do_multiple_writes(desc, &head, desc->pg_ioflags); 1401 hdr = &whdr->header;
1292 return 0; 1402 nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
1403 hdr->lseg = get_lseg(desc->pg_lseg);
1404 atomic_inc(&hdr->refcnt);
1405 ret = nfs_generic_flush(desc, hdr);
1406 if (ret != 0) {
1407 put_lseg(desc->pg_lseg);
1408 desc->pg_lseg = NULL;
1409 } else
1410 pnfs_do_multiple_writes(desc, &hdr->rpc_list, desc->pg_ioflags);
1411 if (atomic_dec_and_test(&hdr->refcnt))
1412 hdr->completion_ops->completion(hdr);
1413 return ret;
1293} 1414}
1294EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages); 1415EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
1295 1416
1296static void pnfs_ld_handle_read_error(struct nfs_read_data *data) 1417int pnfs_read_done_resend_to_mds(struct inode *inode,
1418 struct list_head *head,
1419 const struct nfs_pgio_completion_ops *compl_ops)
1297{ 1420{
1298 struct nfs_pageio_descriptor pgio; 1421 struct nfs_pageio_descriptor pgio;
1422 LIST_HEAD(failed);
1299 1423
1300 put_lseg(data->lseg); 1424 /* Resend all requests through the MDS */
1301 data->lseg = NULL; 1425 nfs_pageio_init_read_mds(&pgio, inode, compl_ops);
1302 dprintk("pnfs write error = %d\n", data->pnfs_error); 1426 while (!list_empty(head)) {
1303 if (NFS_SERVER(data->inode)->pnfs_curr_ld->flags & 1427 struct nfs_page *req = nfs_list_entry(head->next);
1304 PNFS_LAYOUTRET_ON_ERROR)
1305 pnfs_return_layout(data->inode);
1306
1307 nfs_pageio_init_read_mds(&pgio, data->inode);
1308
1309 while (!list_empty(&data->pages)) {
1310 struct nfs_page *req = nfs_list_entry(data->pages.next);
1311 1428
1312 nfs_list_remove_request(req); 1429 nfs_list_remove_request(req);
1313 nfs_pageio_add_request(&pgio, req); 1430 if (!nfs_pageio_add_request(&pgio, req))
1431 nfs_list_add_request(req, &failed);
1314 } 1432 }
1315 nfs_pageio_complete(&pgio); 1433 nfs_pageio_complete(&pgio);
1434
1435 if (!list_empty(&failed)) {
1436 list_move(&failed, head);
1437 return -EIO;
1438 }
1439 return 0;
1440}
1441EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
1442
1443static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
1444{
1445 struct nfs_pgio_header *hdr = data->header;
1446
1447 dprintk("pnfs read error = %d\n", hdr->pnfs_error);
1448 if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
1449 PNFS_LAYOUTRET_ON_ERROR) {
1450 clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
1451 pnfs_return_layout(hdr->inode);
1452 }
1453 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
1454 data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
1455 &hdr->pages,
1456 hdr->completion_ops);
1316} 1457}
1317 1458
1318/* 1459/*
@@ -1320,13 +1461,14 @@ static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
1320 */ 1461 */
1321void pnfs_ld_read_done(struct nfs_read_data *data) 1462void pnfs_ld_read_done(struct nfs_read_data *data)
1322{ 1463{
1323 if (likely(!data->pnfs_error)) { 1464 struct nfs_pgio_header *hdr = data->header;
1465
1466 if (likely(!hdr->pnfs_error)) {
1324 __nfs4_read_done_cb(data); 1467 __nfs4_read_done_cb(data);
1325 data->mds_ops->rpc_call_done(&data->task, data); 1468 hdr->mds_ops->rpc_call_done(&data->task, data);
1326 } else 1469 } else
1327 pnfs_ld_handle_read_error(data); 1470 pnfs_ld_handle_read_error(data);
1328 put_lseg(data->lseg); 1471 hdr->mds_ops->rpc_release(data);
1329 data->mds_ops->rpc_release(data);
1330} 1472}
1331EXPORT_SYMBOL_GPL(pnfs_ld_read_done); 1473EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
1332 1474
@@ -1334,11 +1476,13 @@ static void
1334pnfs_read_through_mds(struct nfs_pageio_descriptor *desc, 1476pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
1335 struct nfs_read_data *data) 1477 struct nfs_read_data *data)
1336{ 1478{
1337 list_splice_tail_init(&data->pages, &desc->pg_list); 1479 struct nfs_pgio_header *hdr = data->header;
1338 if (data->req && list_empty(&data->req->wb_list)) 1480
1339 nfs_list_add_request(data->req, &desc->pg_list); 1481 if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
1340 nfs_pageio_reset_read_mds(desc); 1482 list_splice_tail_init(&hdr->pages, &desc->pg_list);
1341 desc->pg_recoalesce = 1; 1483 nfs_pageio_reset_read_mds(desc);
1484 desc->pg_recoalesce = 1;
1485 }
1342 nfs_readdata_release(data); 1486 nfs_readdata_release(data);
1343} 1487}
1344 1488
@@ -1350,23 +1494,19 @@ pnfs_try_to_read_data(struct nfs_read_data *rdata,
1350 const struct rpc_call_ops *call_ops, 1494 const struct rpc_call_ops *call_ops,
1351 struct pnfs_layout_segment *lseg) 1495 struct pnfs_layout_segment *lseg)
1352{ 1496{
1353 struct inode *inode = rdata->inode; 1497 struct nfs_pgio_header *hdr = rdata->header;
1498 struct inode *inode = hdr->inode;
1354 struct nfs_server *nfss = NFS_SERVER(inode); 1499 struct nfs_server *nfss = NFS_SERVER(inode);
1355 enum pnfs_try_status trypnfs; 1500 enum pnfs_try_status trypnfs;
1356 1501
1357 rdata->mds_ops = call_ops; 1502 hdr->mds_ops = call_ops;
1358 rdata->lseg = get_lseg(lseg);
1359 1503
1360 dprintk("%s: Reading ino:%lu %u@%llu\n", 1504 dprintk("%s: Reading ino:%lu %u@%llu\n",
1361 __func__, inode->i_ino, rdata->args.count, rdata->args.offset); 1505 __func__, inode->i_ino, rdata->args.count, rdata->args.offset);
1362 1506
1363 trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata); 1507 trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
1364 if (trypnfs == PNFS_NOT_ATTEMPTED) { 1508 if (trypnfs != PNFS_NOT_ATTEMPTED)
1365 put_lseg(rdata->lseg);
1366 rdata->lseg = NULL;
1367 } else {
1368 nfs_inc_stats(inode, NFSIOS_PNFS_READ); 1509 nfs_inc_stats(inode, NFSIOS_PNFS_READ);
1369 }
1370 dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs); 1510 dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
1371 return trypnfs; 1511 return trypnfs;
1372} 1512}
@@ -1382,7 +1522,7 @@ pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *hea
1382 while (!list_empty(head)) { 1522 while (!list_empty(head)) {
1383 enum pnfs_try_status trypnfs; 1523 enum pnfs_try_status trypnfs;
1384 1524
1385 data = list_entry(head->next, struct nfs_read_data, list); 1525 data = list_first_entry(head, struct nfs_read_data, list);
1386 list_del_init(&data->list); 1526 list_del_init(&data->list);
1387 1527
1388 trypnfs = pnfs_try_to_read_data(data, call_ops, lseg); 1528 trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
@@ -1392,20 +1532,40 @@ pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *hea
1392 put_lseg(lseg); 1532 put_lseg(lseg);
1393} 1533}
1394 1534
1535static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
1536{
1537 put_lseg(hdr->lseg);
1538 nfs_readhdr_free(hdr);
1539}
1540
1395int 1541int
1396pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc) 1542pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
1397{ 1543{
1398 LIST_HEAD(head); 1544 struct nfs_read_header *rhdr;
1545 struct nfs_pgio_header *hdr;
1399 int ret; 1546 int ret;
1400 1547
1401 ret = nfs_generic_pagein(desc, &head); 1548 rhdr = nfs_readhdr_alloc();
1402 if (ret != 0) { 1549 if (!rhdr) {
1550 desc->pg_completion_ops->error_cleanup(&desc->pg_list);
1551 ret = -ENOMEM;
1403 put_lseg(desc->pg_lseg); 1552 put_lseg(desc->pg_lseg);
1404 desc->pg_lseg = NULL; 1553 desc->pg_lseg = NULL;
1405 return ret; 1554 return ret;
1406 } 1555 }
1407 pnfs_do_multiple_reads(desc, &head); 1556 hdr = &rhdr->header;
1408 return 0; 1557 nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
1558 hdr->lseg = get_lseg(desc->pg_lseg);
1559 atomic_inc(&hdr->refcnt);
1560 ret = nfs_generic_pagein(desc, hdr);
1561 if (ret != 0) {
1562 put_lseg(desc->pg_lseg);
1563 desc->pg_lseg = NULL;
1564 } else
1565 pnfs_do_multiple_reads(desc, &hdr->rpc_list);
1566 if (atomic_dec_and_test(&hdr->refcnt))
1567 hdr->completion_ops->completion(hdr);
1568 return ret;
1409} 1569}
1410EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages); 1570EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
1411 1571
@@ -1438,30 +1598,32 @@ EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
1438void 1598void
1439pnfs_set_layoutcommit(struct nfs_write_data *wdata) 1599pnfs_set_layoutcommit(struct nfs_write_data *wdata)
1440{ 1600{
1441 struct nfs_inode *nfsi = NFS_I(wdata->inode); 1601 struct nfs_pgio_header *hdr = wdata->header;
1602 struct inode *inode = hdr->inode;
1603 struct nfs_inode *nfsi = NFS_I(inode);
1442 loff_t end_pos = wdata->mds_offset + wdata->res.count; 1604 loff_t end_pos = wdata->mds_offset + wdata->res.count;
1443 bool mark_as_dirty = false; 1605 bool mark_as_dirty = false;
1444 1606
1445 spin_lock(&nfsi->vfs_inode.i_lock); 1607 spin_lock(&inode->i_lock);
1446 if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) { 1608 if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
1447 mark_as_dirty = true; 1609 mark_as_dirty = true;
1448 dprintk("%s: Set layoutcommit for inode %lu ", 1610 dprintk("%s: Set layoutcommit for inode %lu ",
1449 __func__, wdata->inode->i_ino); 1611 __func__, inode->i_ino);
1450 } 1612 }
1451 if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &wdata->lseg->pls_flags)) { 1613 if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
1452 /* references matched in nfs4_layoutcommit_release */ 1614 /* references matched in nfs4_layoutcommit_release */
1453 get_lseg(wdata->lseg); 1615 get_lseg(hdr->lseg);
1454 } 1616 }
1455 if (end_pos > nfsi->layout->plh_lwb) 1617 if (end_pos > nfsi->layout->plh_lwb)
1456 nfsi->layout->plh_lwb = end_pos; 1618 nfsi->layout->plh_lwb = end_pos;
1457 spin_unlock(&nfsi->vfs_inode.i_lock); 1619 spin_unlock(&inode->i_lock);
1458 dprintk("%s: lseg %p end_pos %llu\n", 1620 dprintk("%s: lseg %p end_pos %llu\n",
1459 __func__, wdata->lseg, nfsi->layout->plh_lwb); 1621 __func__, hdr->lseg, nfsi->layout->plh_lwb);
1460 1622
1461 /* if pnfs_layoutcommit_inode() runs between inode locks, the next one 1623 /* if pnfs_layoutcommit_inode() runs between inode locks, the next one
1462 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */ 1624 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
1463 if (mark_as_dirty) 1625 if (mark_as_dirty)
1464 mark_inode_dirty_sync(wdata->inode); 1626 mark_inode_dirty_sync(inode);
1465} 1627}
1466EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit); 1628EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
1467 1629
@@ -1550,3 +1712,15 @@ out_free:
1550 kfree(data); 1712 kfree(data);
1551 goto out; 1713 goto out;
1552} 1714}
1715
1716struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
1717{
1718 struct nfs4_threshold *thp;
1719
1720 thp = kzalloc(sizeof(*thp), GFP_NOFS);
1721 if (!thp) {
1722 dprintk("%s mdsthreshold allocation failed\n", __func__);
1723 return NULL;
1724 }
1725 return thp;
1726}
diff --git a/fs/nfs/pnfs.h b/fs/nfs/pnfs.h
index 442ebf68eeec..29fd23c0efdc 100644
--- a/fs/nfs/pnfs.h
+++ b/fs/nfs/pnfs.h
@@ -63,6 +63,7 @@ enum {
63 NFS_LAYOUT_BULK_RECALL, /* bulk recall affecting layout */ 63 NFS_LAYOUT_BULK_RECALL, /* bulk recall affecting layout */
64 NFS_LAYOUT_ROC, /* some lseg had roc bit set */ 64 NFS_LAYOUT_ROC, /* some lseg had roc bit set */
65 NFS_LAYOUT_DESTROYED, /* no new use of layout allowed */ 65 NFS_LAYOUT_DESTROYED, /* no new use of layout allowed */
66 NFS_LAYOUT_INVALID, /* layout is being destroyed */
66}; 67};
67 68
68enum layoutdriver_policy_flags { 69enum layoutdriver_policy_flags {
@@ -94,11 +95,20 @@ struct pnfs_layoutdriver_type {
94 const struct nfs_pageio_ops *pg_read_ops; 95 const struct nfs_pageio_ops *pg_read_ops;
95 const struct nfs_pageio_ops *pg_write_ops; 96 const struct nfs_pageio_ops *pg_write_ops;
96 97
98 struct pnfs_ds_commit_info *(*get_ds_info) (struct inode *inode);
97 void (*mark_request_commit) (struct nfs_page *req, 99 void (*mark_request_commit) (struct nfs_page *req,
98 struct pnfs_layout_segment *lseg); 100 struct pnfs_layout_segment *lseg,
99 void (*clear_request_commit) (struct nfs_page *req); 101 struct nfs_commit_info *cinfo);
100 int (*scan_commit_lists) (struct inode *inode, int max, spinlock_t *lock); 102 void (*clear_request_commit) (struct nfs_page *req,
101 int (*commit_pagelist)(struct inode *inode, struct list_head *mds_pages, int how); 103 struct nfs_commit_info *cinfo);
104 int (*scan_commit_lists) (struct nfs_commit_info *cinfo,
105 int max);
106 void (*recover_commit_reqs) (struct list_head *list,
107 struct nfs_commit_info *cinfo);
108 int (*commit_pagelist)(struct inode *inode,
109 struct list_head *mds_pages,
110 int how,
111 struct nfs_commit_info *cinfo);
102 112
103 /* 113 /*
104 * Return PNFS_ATTEMPTED to indicate the layout code has attempted 114 * Return PNFS_ATTEMPTED to indicate the layout code has attempted
@@ -168,8 +178,10 @@ extern int nfs4_proc_layoutreturn(struct nfs4_layoutreturn *lrp);
168void get_layout_hdr(struct pnfs_layout_hdr *lo); 178void get_layout_hdr(struct pnfs_layout_hdr *lo);
169void put_lseg(struct pnfs_layout_segment *lseg); 179void put_lseg(struct pnfs_layout_segment *lseg);
170 180
171bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *, struct inode *); 181bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *, struct inode *,
172bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *, struct inode *, int); 182 const struct nfs_pgio_completion_ops *);
183bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *, struct inode *,
184 int, const struct nfs_pgio_completion_ops *);
173 185
174void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, u32); 186void set_pnfs_layoutdriver(struct nfs_server *, const struct nfs_fh *, u32);
175void unset_pnfs_layoutdriver(struct nfs_server *); 187void unset_pnfs_layoutdriver(struct nfs_server *);
@@ -211,6 +223,11 @@ struct pnfs_layout_segment *pnfs_update_layout(struct inode *ino,
211 gfp_t gfp_flags); 223 gfp_t gfp_flags);
212 224
213void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp); 225void nfs4_deviceid_mark_client_invalid(struct nfs_client *clp);
226int pnfs_read_done_resend_to_mds(struct inode *inode, struct list_head *head,
227 const struct nfs_pgio_completion_ops *compl_ops);
228int pnfs_write_done_resend_to_mds(struct inode *inode, struct list_head *head,
229 const struct nfs_pgio_completion_ops *compl_ops);
230struct nfs4_threshold *pnfs_mdsthreshold_alloc(void);
214 231
215/* nfs4_deviceid_flags */ 232/* nfs4_deviceid_flags */
216enum { 233enum {
@@ -261,49 +278,66 @@ static inline int pnfs_enabled_sb(struct nfs_server *nfss)
261} 278}
262 279
263static inline int 280static inline int
264pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how) 281pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how,
282 struct nfs_commit_info *cinfo)
265{ 283{
266 if (!test_and_clear_bit(NFS_INO_PNFS_COMMIT, &NFS_I(inode)->flags)) 284 if (cinfo->ds == NULL || cinfo->ds->ncommitting == 0)
267 return PNFS_NOT_ATTEMPTED; 285 return PNFS_NOT_ATTEMPTED;
268 return NFS_SERVER(inode)->pnfs_curr_ld->commit_pagelist(inode, mds_pages, how); 286 return NFS_SERVER(inode)->pnfs_curr_ld->commit_pagelist(inode, mds_pages, how, cinfo);
287}
288
289static inline struct pnfs_ds_commit_info *
290pnfs_get_ds_info(struct inode *inode)
291{
292 struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
293
294 if (ld == NULL || ld->get_ds_info == NULL)
295 return NULL;
296 return ld->get_ds_info(inode);
269} 297}
270 298
271static inline bool 299static inline bool
272pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg) 300pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
301 struct nfs_commit_info *cinfo)
273{ 302{
274 struct inode *inode = req->wb_context->dentry->d_inode; 303 struct inode *inode = req->wb_context->dentry->d_inode;
275 struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; 304 struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
276 305
277 if (lseg == NULL || ld->mark_request_commit == NULL) 306 if (lseg == NULL || ld->mark_request_commit == NULL)
278 return false; 307 return false;
279 ld->mark_request_commit(req, lseg); 308 ld->mark_request_commit(req, lseg, cinfo);
280 return true; 309 return true;
281} 310}
282 311
283static inline bool 312static inline bool
284pnfs_clear_request_commit(struct nfs_page *req) 313pnfs_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo)
285{ 314{
286 struct inode *inode = req->wb_context->dentry->d_inode; 315 struct inode *inode = req->wb_context->dentry->d_inode;
287 struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; 316 struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
288 317
289 if (ld == NULL || ld->clear_request_commit == NULL) 318 if (ld == NULL || ld->clear_request_commit == NULL)
290 return false; 319 return false;
291 ld->clear_request_commit(req); 320 ld->clear_request_commit(req, cinfo);
292 return true; 321 return true;
293} 322}
294 323
295static inline int 324static inline int
296pnfs_scan_commit_lists(struct inode *inode, int max, spinlock_t *lock) 325pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo,
326 int max)
297{ 327{
298 struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld; 328 if (cinfo->ds == NULL || cinfo->ds->nwritten == 0)
299 int ret;
300
301 if (ld == NULL || ld->scan_commit_lists == NULL)
302 return 0; 329 return 0;
303 ret = ld->scan_commit_lists(inode, max, lock); 330 else
304 if (ret != 0) 331 return NFS_SERVER(inode)->pnfs_curr_ld->scan_commit_lists(cinfo, max);
305 set_bit(NFS_INO_PNFS_COMMIT, &NFS_I(inode)->flags); 332}
306 return ret; 333
334static inline void
335pnfs_recover_commit_reqs(struct inode *inode, struct list_head *list,
336 struct nfs_commit_info *cinfo)
337{
338 if (cinfo->ds == NULL || cinfo->ds->nwritten == 0)
339 return;
340 NFS_SERVER(inode)->pnfs_curr_ld->recover_commit_reqs(list, cinfo);
307} 341}
308 342
309/* Should the pNFS client commit and return the layout upon a setattr */ 343/* Should the pNFS client commit and return the layout upon a setattr */
@@ -327,6 +361,14 @@ static inline int pnfs_return_layout(struct inode *ino)
327 return 0; 361 return 0;
328} 362}
329 363
364static inline bool
365pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src,
366 struct nfs_server *nfss)
367{
368 return (dst && src && src->bm != 0 &&
369 nfss->pnfs_curr_ld->id == src->l_type);
370}
371
330#ifdef NFS_DEBUG 372#ifdef NFS_DEBUG
331void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id); 373void nfs4_print_deviceid(const struct nfs4_deviceid *dev_id);
332#else 374#else
@@ -396,45 +438,74 @@ static inline void unset_pnfs_layoutdriver(struct nfs_server *s)
396{ 438{
397} 439}
398 440
399static inline bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode) 441static inline bool pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
442 const struct nfs_pgio_completion_ops *compl_ops)
400{ 443{
401 return false; 444 return false;
402} 445}
403 446
404static inline bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags) 447static inline bool pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode, int ioflags,
448 const struct nfs_pgio_completion_ops *compl_ops)
405{ 449{
406 return false; 450 return false;
407} 451}
408 452
409static inline int 453static inline int
410pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how) 454pnfs_commit_list(struct inode *inode, struct list_head *mds_pages, int how,
455 struct nfs_commit_info *cinfo)
411{ 456{
412 return PNFS_NOT_ATTEMPTED; 457 return PNFS_NOT_ATTEMPTED;
413} 458}
414 459
460static inline struct pnfs_ds_commit_info *
461pnfs_get_ds_info(struct inode *inode)
462{
463 return NULL;
464}
465
415static inline bool 466static inline bool
416pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg) 467pnfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
468 struct nfs_commit_info *cinfo)
417{ 469{
418 return false; 470 return false;
419} 471}
420 472
421static inline bool 473static inline bool
422pnfs_clear_request_commit(struct nfs_page *req) 474pnfs_clear_request_commit(struct nfs_page *req, struct nfs_commit_info *cinfo)
423{ 475{
424 return false; 476 return false;
425} 477}
426 478
427static inline int 479static inline int
428pnfs_scan_commit_lists(struct inode *inode, int max, spinlock_t *lock) 480pnfs_scan_commit_lists(struct inode *inode, struct nfs_commit_info *cinfo,
481 int max)
429{ 482{
430 return 0; 483 return 0;
431} 484}
432 485
486static inline void
487pnfs_recover_commit_reqs(struct inode *inode, struct list_head *list,
488 struct nfs_commit_info *cinfo)
489{
490}
491
433static inline int pnfs_layoutcommit_inode(struct inode *inode, bool sync) 492static inline int pnfs_layoutcommit_inode(struct inode *inode, bool sync)
434{ 493{
435 return 0; 494 return 0;
436} 495}
437 496
497static inline bool
498pnfs_use_threshold(struct nfs4_threshold **dst, struct nfs4_threshold *src,
499 struct nfs_server *nfss)
500{
501 return false;
502}
503
504static inline struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
505{
506 return NULL;
507}
508
438#endif /* CONFIG_NFS_V4_1 */ 509#endif /* CONFIG_NFS_V4_1 */
439 510
440#endif /* FS_NFS_PNFS_H */ 511#endif /* FS_NFS_PNFS_H */
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index d6408b6437de..a706b6bcc286 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -178,7 +178,7 @@ nfs_proc_setattr(struct dentry *dentry, struct nfs_fattr *fattr,
178} 178}
179 179
180static int 180static int
181nfs_proc_lookup(struct rpc_clnt *clnt, struct inode *dir, struct qstr *name, 181nfs_proc_lookup(struct inode *dir, struct qstr *name,
182 struct nfs_fh *fhandle, struct nfs_fattr *fattr) 182 struct nfs_fh *fhandle, struct nfs_fattr *fattr)
183{ 183{
184 struct nfs_diropargs arg = { 184 struct nfs_diropargs arg = {
@@ -640,12 +640,14 @@ nfs_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle,
640 640
641static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data) 641static int nfs_read_done(struct rpc_task *task, struct nfs_read_data *data)
642{ 642{
643 struct inode *inode = data->header->inode;
644
643 if (nfs_async_handle_expired_key(task)) 645 if (nfs_async_handle_expired_key(task))
644 return -EAGAIN; 646 return -EAGAIN;
645 647
646 nfs_invalidate_atime(data->inode); 648 nfs_invalidate_atime(inode);
647 if (task->tk_status >= 0) { 649 if (task->tk_status >= 0) {
648 nfs_refresh_inode(data->inode, data->res.fattr); 650 nfs_refresh_inode(inode, data->res.fattr);
649 /* Emulate the eof flag, which isn't normally needed in NFSv2 651 /* Emulate the eof flag, which isn't normally needed in NFSv2
650 * as it is guaranteed to always return the file attributes 652 * as it is guaranteed to always return the file attributes
651 */ 653 */
@@ -667,11 +669,13 @@ static void nfs_proc_read_rpc_prepare(struct rpc_task *task, struct nfs_read_dat
667 669
668static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data) 670static int nfs_write_done(struct rpc_task *task, struct nfs_write_data *data)
669{ 671{
672 struct inode *inode = data->header->inode;
673
670 if (nfs_async_handle_expired_key(task)) 674 if (nfs_async_handle_expired_key(task))
671 return -EAGAIN; 675 return -EAGAIN;
672 676
673 if (task->tk_status >= 0) 677 if (task->tk_status >= 0)
674 nfs_post_op_update_inode_force_wcc(data->inode, data->res.fattr); 678 nfs_post_op_update_inode_force_wcc(inode, data->res.fattr);
675 return 0; 679 return 0;
676} 680}
677 681
@@ -687,8 +691,13 @@ static void nfs_proc_write_rpc_prepare(struct rpc_task *task, struct nfs_write_d
687 rpc_call_start(task); 691 rpc_call_start(task);
688} 692}
689 693
694static void nfs_proc_commit_rpc_prepare(struct rpc_task *task, struct nfs_commit_data *data)
695{
696 BUG();
697}
698
690static void 699static void
691nfs_proc_commit_setup(struct nfs_write_data *data, struct rpc_message *msg) 700nfs_proc_commit_setup(struct nfs_commit_data *data, struct rpc_message *msg)
692{ 701{
693 BUG(); 702 BUG();
694} 703}
@@ -732,6 +741,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
732 .file_inode_ops = &nfs_file_inode_operations, 741 .file_inode_ops = &nfs_file_inode_operations,
733 .file_ops = &nfs_file_operations, 742 .file_ops = &nfs_file_operations,
734 .getroot = nfs_proc_get_root, 743 .getroot = nfs_proc_get_root,
744 .submount = nfs_submount,
735 .getattr = nfs_proc_getattr, 745 .getattr = nfs_proc_getattr,
736 .setattr = nfs_proc_setattr, 746 .setattr = nfs_proc_setattr,
737 .lookup = nfs_proc_lookup, 747 .lookup = nfs_proc_lookup,
@@ -763,6 +773,7 @@ const struct nfs_rpc_ops nfs_v2_clientops = {
763 .write_rpc_prepare = nfs_proc_write_rpc_prepare, 773 .write_rpc_prepare = nfs_proc_write_rpc_prepare,
764 .write_done = nfs_write_done, 774 .write_done = nfs_write_done,
765 .commit_setup = nfs_proc_commit_setup, 775 .commit_setup = nfs_proc_commit_setup,
776 .commit_rpc_prepare = nfs_proc_commit_rpc_prepare,
766 .lock = nfs_proc_lock, 777 .lock = nfs_proc_lock,
767 .lock_check_bounds = nfs_lock_check_bounds, 778 .lock_check_bounds = nfs_lock_check_bounds,
768 .close_context = nfs_close_context, 779 .close_context = nfs_close_context,
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 0a4be28c2ea3..86ced7836214 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -30,43 +30,73 @@
30#define NFSDBG_FACILITY NFSDBG_PAGECACHE 30#define NFSDBG_FACILITY NFSDBG_PAGECACHE
31 31
32static const struct nfs_pageio_ops nfs_pageio_read_ops; 32static const struct nfs_pageio_ops nfs_pageio_read_ops;
33static const struct rpc_call_ops nfs_read_partial_ops; 33static const struct rpc_call_ops nfs_read_common_ops;
34static const struct rpc_call_ops nfs_read_full_ops; 34static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops;
35 35
36static struct kmem_cache *nfs_rdata_cachep; 36static struct kmem_cache *nfs_rdata_cachep;
37 37
38struct nfs_read_data *nfs_readdata_alloc(unsigned int pagecount) 38struct nfs_read_header *nfs_readhdr_alloc(void)
39{ 39{
40 struct nfs_read_data *p; 40 struct nfs_read_header *rhdr;
41 41
42 p = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL); 42 rhdr = kmem_cache_zalloc(nfs_rdata_cachep, GFP_KERNEL);
43 if (p) { 43 if (rhdr) {
44 INIT_LIST_HEAD(&p->pages); 44 struct nfs_pgio_header *hdr = &rhdr->header;
45 p->npages = pagecount; 45
46 if (pagecount <= ARRAY_SIZE(p->page_array)) 46 INIT_LIST_HEAD(&hdr->pages);
47 p->pagevec = p->page_array; 47 INIT_LIST_HEAD(&hdr->rpc_list);
48 else { 48 spin_lock_init(&hdr->lock);
49 p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_KERNEL); 49 atomic_set(&hdr->refcnt, 0);
50 if (!p->pagevec) { 50 }
51 kmem_cache_free(nfs_rdata_cachep, p); 51 return rhdr;
52 p = NULL; 52}
53 } 53
54 } 54static struct nfs_read_data *nfs_readdata_alloc(struct nfs_pgio_header *hdr,
55 unsigned int pagecount)
56{
57 struct nfs_read_data *data, *prealloc;
58
59 prealloc = &container_of(hdr, struct nfs_read_header, header)->rpc_data;
60 if (prealloc->header == NULL)
61 data = prealloc;
62 else
63 data = kzalloc(sizeof(*data), GFP_KERNEL);
64 if (!data)
65 goto out;
66
67 if (nfs_pgarray_set(&data->pages, pagecount)) {
68 data->header = hdr;
69 atomic_inc(&hdr->refcnt);
70 } else {
71 if (data != prealloc)
72 kfree(data);
73 data = NULL;
55 } 74 }
56 return p; 75out:
76 return data;
57} 77}
58 78
59void nfs_readdata_free(struct nfs_read_data *p) 79void nfs_readhdr_free(struct nfs_pgio_header *hdr)
60{ 80{
61 if (p && (p->pagevec != &p->page_array[0])) 81 struct nfs_read_header *rhdr = container_of(hdr, struct nfs_read_header, header);
62 kfree(p->pagevec); 82
63 kmem_cache_free(nfs_rdata_cachep, p); 83 kmem_cache_free(nfs_rdata_cachep, rhdr);
64} 84}
65 85
66void nfs_readdata_release(struct nfs_read_data *rdata) 86void nfs_readdata_release(struct nfs_read_data *rdata)
67{ 87{
88 struct nfs_pgio_header *hdr = rdata->header;
89 struct nfs_read_header *read_header = container_of(hdr, struct nfs_read_header, header);
90
68 put_nfs_open_context(rdata->args.context); 91 put_nfs_open_context(rdata->args.context);
69 nfs_readdata_free(rdata); 92 if (rdata->pages.pagevec != rdata->pages.page_array)
93 kfree(rdata->pages.pagevec);
94 if (rdata != &read_header->rpc_data)
95 kfree(rdata);
96 else
97 rdata->header = NULL;
98 if (atomic_dec_and_test(&hdr->refcnt))
99 hdr->completion_ops->completion(hdr);
70} 100}
71 101
72static 102static
@@ -78,39 +108,11 @@ int nfs_return_empty_page(struct page *page)
78 return 0; 108 return 0;
79} 109}
80 110
81static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
82{
83 unsigned int remainder = data->args.count - data->res.count;
84 unsigned int base = data->args.pgbase + data->res.count;
85 unsigned int pglen;
86 struct page **pages;
87
88 if (data->res.eof == 0 || remainder == 0)
89 return;
90 /*
91 * Note: "remainder" can never be negative, since we check for
92 * this in the XDR code.
93 */
94 pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
95 base &= ~PAGE_CACHE_MASK;
96 pglen = PAGE_CACHE_SIZE - base;
97 for (;;) {
98 if (remainder <= pglen) {
99 zero_user(*pages, base, remainder);
100 break;
101 }
102 zero_user(*pages, base, pglen);
103 pages++;
104 remainder -= pglen;
105 pglen = PAGE_CACHE_SIZE;
106 base = 0;
107 }
108}
109
110void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio, 111void nfs_pageio_init_read_mds(struct nfs_pageio_descriptor *pgio,
111 struct inode *inode) 112 struct inode *inode,
113 const struct nfs_pgio_completion_ops *compl_ops)
112{ 114{
113 nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops, 115 nfs_pageio_init(pgio, inode, &nfs_pageio_read_ops, compl_ops,
114 NFS_SERVER(inode)->rsize, 0); 116 NFS_SERVER(inode)->rsize, 0);
115} 117}
116 118
@@ -121,11 +123,12 @@ void nfs_pageio_reset_read_mds(struct nfs_pageio_descriptor *pgio)
121} 123}
122EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds); 124EXPORT_SYMBOL_GPL(nfs_pageio_reset_read_mds);
123 125
124static void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, 126void nfs_pageio_init_read(struct nfs_pageio_descriptor *pgio,
125 struct inode *inode) 127 struct inode *inode,
128 const struct nfs_pgio_completion_ops *compl_ops)
126{ 129{
127 if (!pnfs_pageio_init_read(pgio, inode)) 130 if (!pnfs_pageio_init_read(pgio, inode, compl_ops))
128 nfs_pageio_init_read_mds(pgio, inode); 131 nfs_pageio_init_read_mds(pgio, inode, compl_ops);
129} 132}
130 133
131int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode, 134int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
@@ -146,9 +149,10 @@ int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
146 if (len < PAGE_CACHE_SIZE) 149 if (len < PAGE_CACHE_SIZE)
147 zero_user_segment(page, len, PAGE_CACHE_SIZE); 150 zero_user_segment(page, len, PAGE_CACHE_SIZE);
148 151
149 nfs_pageio_init_read(&pgio, inode); 152 nfs_pageio_init_read(&pgio, inode, &nfs_async_read_completion_ops);
150 nfs_pageio_add_request(&pgio, new); 153 nfs_pageio_add_request(&pgio, new);
151 nfs_pageio_complete(&pgio); 154 nfs_pageio_complete(&pgio);
155 NFS_I(inode)->read_io += pgio.pg_bytes_written;
152 return 0; 156 return 0;
153} 157}
154 158
@@ -169,16 +173,49 @@ static void nfs_readpage_release(struct nfs_page *req)
169 nfs_release_request(req); 173 nfs_release_request(req);
170} 174}
171 175
172int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt, 176/* Note io was page aligned */
173 const struct rpc_call_ops *call_ops) 177static void nfs_read_completion(struct nfs_pgio_header *hdr)
178{
179 unsigned long bytes = 0;
180
181 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
182 goto out;
183 while (!list_empty(&hdr->pages)) {
184 struct nfs_page *req = nfs_list_entry(hdr->pages.next);
185 struct page *page = req->wb_page;
186
187 if (test_bit(NFS_IOHDR_EOF, &hdr->flags)) {
188 if (bytes > hdr->good_bytes)
189 zero_user(page, 0, PAGE_SIZE);
190 else if (hdr->good_bytes - bytes < PAGE_SIZE)
191 zero_user_segment(page,
192 hdr->good_bytes & ~PAGE_MASK,
193 PAGE_SIZE);
194 }
195 bytes += req->wb_bytes;
196 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags)) {
197 if (bytes <= hdr->good_bytes)
198 SetPageUptodate(page);
199 } else
200 SetPageUptodate(page);
201 nfs_list_remove_request(req);
202 nfs_readpage_release(req);
203 }
204out:
205 hdr->release(hdr);
206}
207
208int nfs_initiate_read(struct rpc_clnt *clnt,
209 struct nfs_read_data *data,
210 const struct rpc_call_ops *call_ops, int flags)
174{ 211{
175 struct inode *inode = data->inode; 212 struct inode *inode = data->header->inode;
176 int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0; 213 int swap_flags = IS_SWAPFILE(inode) ? NFS_RPC_SWAPFLAGS : 0;
177 struct rpc_task *task; 214 struct rpc_task *task;
178 struct rpc_message msg = { 215 struct rpc_message msg = {
179 .rpc_argp = &data->args, 216 .rpc_argp = &data->args,
180 .rpc_resp = &data->res, 217 .rpc_resp = &data->res,
181 .rpc_cred = data->cred, 218 .rpc_cred = data->header->cred,
182 }; 219 };
183 struct rpc_task_setup task_setup_data = { 220 struct rpc_task_setup task_setup_data = {
184 .task = &data->task, 221 .task = &data->task,
@@ -187,7 +224,7 @@ int nfs_initiate_read(struct nfs_read_data *data, struct rpc_clnt *clnt,
187 .callback_ops = call_ops, 224 .callback_ops = call_ops,
188 .callback_data = data, 225 .callback_data = data,
189 .workqueue = nfsiod_workqueue, 226 .workqueue = nfsiod_workqueue,
190 .flags = RPC_TASK_ASYNC | swap_flags, 227 .flags = RPC_TASK_ASYNC | swap_flags | flags,
191 }; 228 };
192 229
193 /* Set up the initial task struct. */ 230 /* Set up the initial task struct. */
@@ -212,19 +249,15 @@ EXPORT_SYMBOL_GPL(nfs_initiate_read);
212/* 249/*
213 * Set up the NFS read request struct 250 * Set up the NFS read request struct
214 */ 251 */
215static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data, 252static void nfs_read_rpcsetup(struct nfs_read_data *data,
216 unsigned int count, unsigned int offset) 253 unsigned int count, unsigned int offset)
217{ 254{
218 struct inode *inode = req->wb_context->dentry->d_inode; 255 struct nfs_page *req = data->header->req;
219
220 data->req = req;
221 data->inode = inode;
222 data->cred = req->wb_context->cred;
223 256
224 data->args.fh = NFS_FH(inode); 257 data->args.fh = NFS_FH(data->header->inode);
225 data->args.offset = req_offset(req) + offset; 258 data->args.offset = req_offset(req) + offset;
226 data->args.pgbase = req->wb_pgbase + offset; 259 data->args.pgbase = req->wb_pgbase + offset;
227 data->args.pages = data->pagevec; 260 data->args.pages = data->pages.pagevec;
228 data->args.count = count; 261 data->args.count = count;
229 data->args.context = get_nfs_open_context(req->wb_context); 262 data->args.context = get_nfs_open_context(req->wb_context);
230 data->args.lock_context = req->wb_lock_context; 263 data->args.lock_context = req->wb_lock_context;
@@ -238,9 +271,9 @@ static void nfs_read_rpcsetup(struct nfs_page *req, struct nfs_read_data *data,
238static int nfs_do_read(struct nfs_read_data *data, 271static int nfs_do_read(struct nfs_read_data *data,
239 const struct rpc_call_ops *call_ops) 272 const struct rpc_call_ops *call_ops)
240{ 273{
241 struct inode *inode = data->args.context->dentry->d_inode; 274 struct inode *inode = data->header->inode;
242 275
243 return nfs_initiate_read(data, NFS_CLIENT(inode), call_ops); 276 return nfs_initiate_read(NFS_CLIENT(inode), data, call_ops, 0);
244} 277}
245 278
246static int 279static int
@@ -253,7 +286,7 @@ nfs_do_multiple_reads(struct list_head *head,
253 while (!list_empty(head)) { 286 while (!list_empty(head)) {
254 int ret2; 287 int ret2;
255 288
256 data = list_entry(head->next, struct nfs_read_data, list); 289 data = list_first_entry(head, struct nfs_read_data, list);
257 list_del_init(&data->list); 290 list_del_init(&data->list);
258 291
259 ret2 = nfs_do_read(data, call_ops); 292 ret2 = nfs_do_read(data, call_ops);
@@ -275,6 +308,24 @@ nfs_async_read_error(struct list_head *head)
275 } 308 }
276} 309}
277 310
311static const struct nfs_pgio_completion_ops nfs_async_read_completion_ops = {
312 .error_cleanup = nfs_async_read_error,
313 .completion = nfs_read_completion,
314};
315
316static void nfs_pagein_error(struct nfs_pageio_descriptor *desc,
317 struct nfs_pgio_header *hdr)
318{
319 set_bit(NFS_IOHDR_REDO, &hdr->flags);
320 while (!list_empty(&hdr->rpc_list)) {
321 struct nfs_read_data *data = list_first_entry(&hdr->rpc_list,
322 struct nfs_read_data, list);
323 list_del(&data->list);
324 nfs_readdata_release(data);
325 }
326 desc->pg_completion_ops->error_cleanup(&desc->pg_list);
327}
328
278/* 329/*
279 * Generate multiple requests to fill a single page. 330 * Generate multiple requests to fill a single page.
280 * 331 *
@@ -288,93 +339,95 @@ nfs_async_read_error(struct list_head *head)
288 * won't see the new data until our attribute cache is updated. This is more 339 * won't see the new data until our attribute cache is updated. This is more
289 * or less conventional NFS client behavior. 340 * or less conventional NFS client behavior.
290 */ 341 */
291static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc, struct list_head *res) 342static int nfs_pagein_multi(struct nfs_pageio_descriptor *desc,
343 struct nfs_pgio_header *hdr)
292{ 344{
293 struct nfs_page *req = nfs_list_entry(desc->pg_list.next); 345 struct nfs_page *req = hdr->req;
294 struct page *page = req->wb_page; 346 struct page *page = req->wb_page;
295 struct nfs_read_data *data; 347 struct nfs_read_data *data;
296 size_t rsize = desc->pg_bsize, nbytes; 348 size_t rsize = desc->pg_bsize, nbytes;
297 unsigned int offset; 349 unsigned int offset;
298 int requests = 0;
299 int ret = 0;
300
301 nfs_list_remove_request(req);
302 350
303 offset = 0; 351 offset = 0;
304 nbytes = desc->pg_count; 352 nbytes = desc->pg_count;
305 do { 353 do {
306 size_t len = min(nbytes,rsize); 354 size_t len = min(nbytes,rsize);
307 355
308 data = nfs_readdata_alloc(1); 356 data = nfs_readdata_alloc(hdr, 1);
309 if (!data) 357 if (!data) {
310 goto out_bad; 358 nfs_pagein_error(desc, hdr);
311 data->pagevec[0] = page; 359 return -ENOMEM;
312 nfs_read_rpcsetup(req, data, len, offset); 360 }
313 list_add(&data->list, res); 361 data->pages.pagevec[0] = page;
314 requests++; 362 nfs_read_rpcsetup(data, len, offset);
363 list_add(&data->list, &hdr->rpc_list);
315 nbytes -= len; 364 nbytes -= len;
316 offset += len; 365 offset += len;
317 } while(nbytes != 0); 366 } while (nbytes != 0);
318 atomic_set(&req->wb_complete, requests); 367
319 desc->pg_rpc_callops = &nfs_read_partial_ops; 368 nfs_list_remove_request(req);
320 return ret; 369 nfs_list_add_request(req, &hdr->pages);
321out_bad: 370 desc->pg_rpc_callops = &nfs_read_common_ops;
322 while (!list_empty(res)) { 371 return 0;
323 data = list_entry(res->next, struct nfs_read_data, list);
324 list_del(&data->list);
325 nfs_readdata_release(data);
326 }
327 nfs_readpage_release(req);
328 return -ENOMEM;
329} 372}
330 373
331static int nfs_pagein_one(struct nfs_pageio_descriptor *desc, struct list_head *res) 374static int nfs_pagein_one(struct nfs_pageio_descriptor *desc,
375 struct nfs_pgio_header *hdr)
332{ 376{
333 struct nfs_page *req; 377 struct nfs_page *req;
334 struct page **pages; 378 struct page **pages;
335 struct nfs_read_data *data; 379 struct nfs_read_data *data;
336 struct list_head *head = &desc->pg_list; 380 struct list_head *head = &desc->pg_list;
337 int ret = 0;
338 381
339 data = nfs_readdata_alloc(nfs_page_array_len(desc->pg_base, 382 data = nfs_readdata_alloc(hdr, nfs_page_array_len(desc->pg_base,
340 desc->pg_count)); 383 desc->pg_count));
341 if (!data) { 384 if (!data) {
342 nfs_async_read_error(head); 385 nfs_pagein_error(desc, hdr);
343 ret = -ENOMEM; 386 return -ENOMEM;
344 goto out;
345 } 387 }
346 388
347 pages = data->pagevec; 389 pages = data->pages.pagevec;
348 while (!list_empty(head)) { 390 while (!list_empty(head)) {
349 req = nfs_list_entry(head->next); 391 req = nfs_list_entry(head->next);
350 nfs_list_remove_request(req); 392 nfs_list_remove_request(req);
351 nfs_list_add_request(req, &data->pages); 393 nfs_list_add_request(req, &hdr->pages);
352 *pages++ = req->wb_page; 394 *pages++ = req->wb_page;
353 } 395 }
354 req = nfs_list_entry(data->pages.next);
355 396
356 nfs_read_rpcsetup(req, data, desc->pg_count, 0); 397 nfs_read_rpcsetup(data, desc->pg_count, 0);
357 list_add(&data->list, res); 398 list_add(&data->list, &hdr->rpc_list);
358 desc->pg_rpc_callops = &nfs_read_full_ops; 399 desc->pg_rpc_callops = &nfs_read_common_ops;
359out: 400 return 0;
360 return ret;
361} 401}
362 402
363int nfs_generic_pagein(struct nfs_pageio_descriptor *desc, struct list_head *head) 403int nfs_generic_pagein(struct nfs_pageio_descriptor *desc,
404 struct nfs_pgio_header *hdr)
364{ 405{
365 if (desc->pg_bsize < PAGE_CACHE_SIZE) 406 if (desc->pg_bsize < PAGE_CACHE_SIZE)
366 return nfs_pagein_multi(desc, head); 407 return nfs_pagein_multi(desc, hdr);
367 return nfs_pagein_one(desc, head); 408 return nfs_pagein_one(desc, hdr);
368} 409}
369 410
370static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc) 411static int nfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
371{ 412{
372 LIST_HEAD(head); 413 struct nfs_read_header *rhdr;
414 struct nfs_pgio_header *hdr;
373 int ret; 415 int ret;
374 416
375 ret = nfs_generic_pagein(desc, &head); 417 rhdr = nfs_readhdr_alloc();
418 if (!rhdr) {
419 desc->pg_completion_ops->error_cleanup(&desc->pg_list);
420 return -ENOMEM;
421 }
422 hdr = &rhdr->header;
423 nfs_pgheader_init(desc, hdr, nfs_readhdr_free);
424 atomic_inc(&hdr->refcnt);
425 ret = nfs_generic_pagein(desc, hdr);
376 if (ret == 0) 426 if (ret == 0)
377 ret = nfs_do_multiple_reads(&head, desc->pg_rpc_callops); 427 ret = nfs_do_multiple_reads(&hdr->rpc_list,
428 desc->pg_rpc_callops);
429 if (atomic_dec_and_test(&hdr->refcnt))
430 hdr->completion_ops->completion(hdr);
378 return ret; 431 return ret;
379} 432}
380 433
@@ -389,20 +442,21 @@ static const struct nfs_pageio_ops nfs_pageio_read_ops = {
389 */ 442 */
390int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data) 443int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
391{ 444{
445 struct inode *inode = data->header->inode;
392 int status; 446 int status;
393 447
394 dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid, 448 dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid,
395 task->tk_status); 449 task->tk_status);
396 450
397 status = NFS_PROTO(data->inode)->read_done(task, data); 451 status = NFS_PROTO(inode)->read_done(task, data);
398 if (status != 0) 452 if (status != 0)
399 return status; 453 return status;
400 454
401 nfs_add_stats(data->inode, NFSIOS_SERVERREADBYTES, data->res.count); 455 nfs_add_stats(inode, NFSIOS_SERVERREADBYTES, data->res.count);
402 456
403 if (task->tk_status == -ESTALE) { 457 if (task->tk_status == -ESTALE) {
404 set_bit(NFS_INO_STALE, &NFS_I(data->inode)->flags); 458 set_bit(NFS_INO_STALE, &NFS_I(inode)->flags);
405 nfs_mark_for_revalidate(data->inode); 459 nfs_mark_for_revalidate(inode);
406 } 460 }
407 return 0; 461 return 0;
408} 462}
@@ -412,15 +466,13 @@ static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data
412 struct nfs_readargs *argp = &data->args; 466 struct nfs_readargs *argp = &data->args;
413 struct nfs_readres *resp = &data->res; 467 struct nfs_readres *resp = &data->res;
414 468
415 if (resp->eof || resp->count == argp->count)
416 return;
417
418 /* This is a short read! */ 469 /* This is a short read! */
419 nfs_inc_stats(data->inode, NFSIOS_SHORTREAD); 470 nfs_inc_stats(data->header->inode, NFSIOS_SHORTREAD);
420 /* Has the server at least made some progress? */ 471 /* Has the server at least made some progress? */
421 if (resp->count == 0) 472 if (resp->count == 0) {
473 nfs_set_pgio_error(data->header, -EIO, argp->offset);
422 return; 474 return;
423 475 }
424 /* Yes, so retry the read at the end of the data */ 476 /* Yes, so retry the read at the end of the data */
425 data->mds_offset += resp->count; 477 data->mds_offset += resp->count;
426 argp->offset += resp->count; 478 argp->offset += resp->count;
@@ -429,114 +481,46 @@ static void nfs_readpage_retry(struct rpc_task *task, struct nfs_read_data *data
429 rpc_restart_call_prepare(task); 481 rpc_restart_call_prepare(task);
430} 482}
431 483
432/* 484static void nfs_readpage_result_common(struct rpc_task *task, void *calldata)
433 * Handle a read reply that fills part of a page.
434 */
435static void nfs_readpage_result_partial(struct rpc_task *task, void *calldata)
436{ 485{
437 struct nfs_read_data *data = calldata; 486 struct nfs_read_data *data = calldata;
438 487 struct nfs_pgio_header *hdr = data->header;
488
489 /* Note the only returns of nfs_readpage_result are 0 and -EAGAIN */
439 if (nfs_readpage_result(task, data) != 0) 490 if (nfs_readpage_result(task, data) != 0)
440 return; 491 return;
441 if (task->tk_status < 0) 492 if (task->tk_status < 0)
442 return; 493 nfs_set_pgio_error(hdr, task->tk_status, data->args.offset);
443 494 else if (data->res.eof) {
444 nfs_readpage_truncate_uninitialised_page(data); 495 loff_t bound;
445 nfs_readpage_retry(task, data); 496
497 bound = data->args.offset + data->res.count;
498 spin_lock(&hdr->lock);
499 if (bound < hdr->io_start + hdr->good_bytes) {
500 set_bit(NFS_IOHDR_EOF, &hdr->flags);
501 clear_bit(NFS_IOHDR_ERROR, &hdr->flags);
502 hdr->good_bytes = bound - hdr->io_start;
503 }
504 spin_unlock(&hdr->lock);
505 } else if (data->res.count != data->args.count)
506 nfs_readpage_retry(task, data);
446} 507}
447 508
448static void nfs_readpage_release_partial(void *calldata) 509static void nfs_readpage_release_common(void *calldata)
449{ 510{
450 struct nfs_read_data *data = calldata;
451 struct nfs_page *req = data->req;
452 struct page *page = req->wb_page;
453 int status = data->task.tk_status;
454
455 if (status < 0)
456 set_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags);
457
458 if (atomic_dec_and_test(&req->wb_complete)) {
459 if (!test_bit(PG_PARTIAL_READ_FAILED, &req->wb_flags))
460 SetPageUptodate(page);
461 nfs_readpage_release(req);
462 }
463 nfs_readdata_release(calldata); 511 nfs_readdata_release(calldata);
464} 512}
465 513
466void nfs_read_prepare(struct rpc_task *task, void *calldata) 514void nfs_read_prepare(struct rpc_task *task, void *calldata)
467{ 515{
468 struct nfs_read_data *data = calldata; 516 struct nfs_read_data *data = calldata;
469 NFS_PROTO(data->inode)->read_rpc_prepare(task, data); 517 NFS_PROTO(data->header->inode)->read_rpc_prepare(task, data);
470}
471
472static const struct rpc_call_ops nfs_read_partial_ops = {
473 .rpc_call_prepare = nfs_read_prepare,
474 .rpc_call_done = nfs_readpage_result_partial,
475 .rpc_release = nfs_readpage_release_partial,
476};
477
478static void nfs_readpage_set_pages_uptodate(struct nfs_read_data *data)
479{
480 unsigned int count = data->res.count;
481 unsigned int base = data->args.pgbase;
482 struct page **pages;
483
484 if (data->res.eof)
485 count = data->args.count;
486 if (unlikely(count == 0))
487 return;
488 pages = &data->args.pages[base >> PAGE_CACHE_SHIFT];
489 base &= ~PAGE_CACHE_MASK;
490 count += base;
491 for (;count >= PAGE_CACHE_SIZE; count -= PAGE_CACHE_SIZE, pages++)
492 SetPageUptodate(*pages);
493 if (count == 0)
494 return;
495 /* Was this a short read? */
496 if (data->res.eof || data->res.count == data->args.count)
497 SetPageUptodate(*pages);
498}
499
500/*
501 * This is the callback from RPC telling us whether a reply was
502 * received or some error occurred (timeout or socket shutdown).
503 */
504static void nfs_readpage_result_full(struct rpc_task *task, void *calldata)
505{
506 struct nfs_read_data *data = calldata;
507
508 if (nfs_readpage_result(task, data) != 0)
509 return;
510 if (task->tk_status < 0)
511 return;
512 /*
513 * Note: nfs_readpage_retry may change the values of
514 * data->args. In the multi-page case, we therefore need
515 * to ensure that we call nfs_readpage_set_pages_uptodate()
516 * first.
517 */
518 nfs_readpage_truncate_uninitialised_page(data);
519 nfs_readpage_set_pages_uptodate(data);
520 nfs_readpage_retry(task, data);
521}
522
523static void nfs_readpage_release_full(void *calldata)
524{
525 struct nfs_read_data *data = calldata;
526
527 while (!list_empty(&data->pages)) {
528 struct nfs_page *req = nfs_list_entry(data->pages.next);
529
530 nfs_list_remove_request(req);
531 nfs_readpage_release(req);
532 }
533 nfs_readdata_release(calldata);
534} 518}
535 519
536static const struct rpc_call_ops nfs_read_full_ops = { 520static const struct rpc_call_ops nfs_read_common_ops = {
537 .rpc_call_prepare = nfs_read_prepare, 521 .rpc_call_prepare = nfs_read_prepare,
538 .rpc_call_done = nfs_readpage_result_full, 522 .rpc_call_done = nfs_readpage_result_common,
539 .rpc_release = nfs_readpage_release_full, 523 .rpc_release = nfs_readpage_release_common,
540}; 524};
541 525
542/* 526/*
@@ -668,11 +652,12 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
668 if (ret == 0) 652 if (ret == 0)
669 goto read_complete; /* all pages were read */ 653 goto read_complete; /* all pages were read */
670 654
671 nfs_pageio_init_read(&pgio, inode); 655 nfs_pageio_init_read(&pgio, inode, &nfs_async_read_completion_ops);
672 656
673 ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc); 657 ret = read_cache_pages(mapping, pages, readpage_async_filler, &desc);
674 658
675 nfs_pageio_complete(&pgio); 659 nfs_pageio_complete(&pgio);
660 NFS_I(inode)->read_io += pgio.pg_bytes_written;
676 npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 661 npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
677 nfs_add_stats(inode, NFSIOS_READPAGES, npages); 662 nfs_add_stats(inode, NFSIOS_READPAGES, npages);
678read_complete: 663read_complete:
@@ -684,7 +669,7 @@ out:
684int __init nfs_init_readpagecache(void) 669int __init nfs_init_readpagecache(void)
685{ 670{
686 nfs_rdata_cachep = kmem_cache_create("nfs_read_data", 671 nfs_rdata_cachep = kmem_cache_create("nfs_read_data",
687 sizeof(struct nfs_read_data), 672 sizeof(struct nfs_read_header),
688 0, SLAB_HWCACHE_ALIGN, 673 0, SLAB_HWCACHE_ALIGN,
689 NULL); 674 NULL);
690 if (nfs_rdata_cachep == NULL) 675 if (nfs_rdata_cachep == NULL)
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 4ac7fca7e4bf..ff656c022684 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -66,6 +66,7 @@
66#include "pnfs.h" 66#include "pnfs.h"
67 67
68#define NFSDBG_FACILITY NFSDBG_VFS 68#define NFSDBG_FACILITY NFSDBG_VFS
69#define NFS_TEXT_DATA 1
69 70
70#ifdef CONFIG_NFS_V3 71#ifdef CONFIG_NFS_V3
71#define NFS_DEFAULT_VERSION 3 72#define NFS_DEFAULT_VERSION 3
@@ -277,12 +278,22 @@ static match_table_t nfs_vers_tokens = {
277 { Opt_vers_err, NULL } 278 { Opt_vers_err, NULL }
278}; 279};
279 280
281struct nfs_mount_info {
282 void (*fill_super)(struct super_block *, struct nfs_mount_info *);
283 int (*set_security)(struct super_block *, struct dentry *, struct nfs_mount_info *);
284 struct nfs_parsed_mount_data *parsed;
285 struct nfs_clone_mount *cloned;
286 struct nfs_fh *mntfh;
287};
288
280static void nfs_umount_begin(struct super_block *); 289static void nfs_umount_begin(struct super_block *);
281static int nfs_statfs(struct dentry *, struct kstatfs *); 290static int nfs_statfs(struct dentry *, struct kstatfs *);
282static int nfs_show_options(struct seq_file *, struct dentry *); 291static int nfs_show_options(struct seq_file *, struct dentry *);
283static int nfs_show_devname(struct seq_file *, struct dentry *); 292static int nfs_show_devname(struct seq_file *, struct dentry *);
284static int nfs_show_path(struct seq_file *, struct dentry *); 293static int nfs_show_path(struct seq_file *, struct dentry *);
285static int nfs_show_stats(struct seq_file *, struct dentry *); 294static int nfs_show_stats(struct seq_file *, struct dentry *);
295static struct dentry *nfs_fs_mount_common(struct file_system_type *,
296 struct nfs_server *, int, const char *, struct nfs_mount_info *);
286static struct dentry *nfs_fs_mount(struct file_system_type *, 297static struct dentry *nfs_fs_mount(struct file_system_type *,
287 int, const char *, void *); 298 int, const char *, void *);
288static struct dentry *nfs_xdev_mount(struct file_system_type *fs_type, 299static struct dentry *nfs_xdev_mount(struct file_system_type *fs_type,
@@ -323,12 +334,11 @@ static const struct super_operations nfs_sops = {
323}; 334};
324 335
325#ifdef CONFIG_NFS_V4 336#ifdef CONFIG_NFS_V4
326static int nfs4_validate_text_mount_data(void *options, 337static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *);
338static int nfs4_validate_mount_data(void *options,
327 struct nfs_parsed_mount_data *args, const char *dev_name); 339 struct nfs_parsed_mount_data *args, const char *dev_name);
328static struct dentry *nfs4_try_mount(int flags, const char *dev_name, 340static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
329 struct nfs_parsed_mount_data *data); 341 struct nfs_mount_info *mount_info);
330static struct dentry *nfs4_mount(struct file_system_type *fs_type,
331 int flags, const char *dev_name, void *raw_data);
332static struct dentry *nfs4_remote_mount(struct file_system_type *fs_type, 342static struct dentry *nfs4_remote_mount(struct file_system_type *fs_type,
333 int flags, const char *dev_name, void *raw_data); 343 int flags, const char *dev_name, void *raw_data);
334static struct dentry *nfs4_xdev_mount(struct file_system_type *fs_type, 344static struct dentry *nfs4_xdev_mount(struct file_system_type *fs_type,
@@ -342,7 +352,7 @@ static void nfs4_kill_super(struct super_block *sb);
342static struct file_system_type nfs4_fs_type = { 352static struct file_system_type nfs4_fs_type = {
343 .owner = THIS_MODULE, 353 .owner = THIS_MODULE,
344 .name = "nfs4", 354 .name = "nfs4",
345 .mount = nfs4_mount, 355 .mount = nfs_fs_mount,
346 .kill_sb = nfs4_kill_super, 356 .kill_sb = nfs4_kill_super,
347 .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA, 357 .fs_flags = FS_RENAME_DOES_D_MOVE|FS_REVAL_DOT|FS_BINARY_MOUNTDATA,
348}; 358};
@@ -786,8 +796,8 @@ static void show_pnfs(struct seq_file *m, struct nfs_server *server)
786 796
787static void show_implementation_id(struct seq_file *m, struct nfs_server *nfss) 797static void show_implementation_id(struct seq_file *m, struct nfs_server *nfss)
788{ 798{
789 if (nfss->nfs_client && nfss->nfs_client->impl_id) { 799 if (nfss->nfs_client && nfss->nfs_client->cl_implid) {
790 struct nfs41_impl_id *impl_id = nfss->nfs_client->impl_id; 800 struct nfs41_impl_id *impl_id = nfss->nfs_client->cl_implid;
791 seq_printf(m, "\n\timpl_id:\tname='%s',domain='%s'," 801 seq_printf(m, "\n\timpl_id:\tname='%s',domain='%s',"
792 "date='%llu,%u'", 802 "date='%llu,%u'",
793 impl_id->name, impl_id->domain, 803 impl_id->name, impl_id->domain,
@@ -938,7 +948,7 @@ static void nfs_umount_begin(struct super_block *sb)
938 rpc_killall_tasks(rpc); 948 rpc_killall_tasks(rpc);
939} 949}
940 950
941static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int version) 951static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(void)
942{ 952{
943 struct nfs_parsed_mount_data *data; 953 struct nfs_parsed_mount_data *data;
944 954
@@ -953,8 +963,8 @@ static struct nfs_parsed_mount_data *nfs_alloc_parsed_mount_data(unsigned int ve
953 data->nfs_server.protocol = XPRT_TRANSPORT_TCP; 963 data->nfs_server.protocol = XPRT_TRANSPORT_TCP;
954 data->auth_flavors[0] = RPC_AUTH_UNIX; 964 data->auth_flavors[0] = RPC_AUTH_UNIX;
955 data->auth_flavor_len = 1; 965 data->auth_flavor_len = 1;
956 data->version = version;
957 data->minorversion = 0; 966 data->minorversion = 0;
967 data->need_mount = true;
958 data->net = current->nsproxy->net_ns; 968 data->net = current->nsproxy->net_ns;
959 security_init_mnt_opts(&data->lsm_opts); 969 security_init_mnt_opts(&data->lsm_opts);
960 } 970 }
@@ -1674,8 +1684,8 @@ static int nfs_walk_authlist(struct nfs_parsed_mount_data *args,
1674 * Use the remote server's MOUNT service to request the NFS file handle 1684 * Use the remote server's MOUNT service to request the NFS file handle
1675 * corresponding to the provided path. 1685 * corresponding to the provided path.
1676 */ 1686 */
1677static int nfs_try_mount(struct nfs_parsed_mount_data *args, 1687static int nfs_request_mount(struct nfs_parsed_mount_data *args,
1678 struct nfs_fh *root_fh) 1688 struct nfs_fh *root_fh)
1679{ 1689{
1680 rpc_authflavor_t server_authlist[NFS_MAX_SECFLAVORS]; 1690 rpc_authflavor_t server_authlist[NFS_MAX_SECFLAVORS];
1681 unsigned int server_authlist_len = ARRAY_SIZE(server_authlist); 1691 unsigned int server_authlist_len = ARRAY_SIZE(server_authlist);
@@ -1738,6 +1748,26 @@ static int nfs_try_mount(struct nfs_parsed_mount_data *args,
1738 return nfs_walk_authlist(args, &request); 1748 return nfs_walk_authlist(args, &request);
1739} 1749}
1740 1750
1751static struct dentry *nfs_try_mount(int flags, const char *dev_name,
1752 struct nfs_mount_info *mount_info)
1753{
1754 int status;
1755 struct nfs_server *server;
1756
1757 if (mount_info->parsed->need_mount) {
1758 status = nfs_request_mount(mount_info->parsed, mount_info->mntfh);
1759 if (status)
1760 return ERR_PTR(status);
1761 }
1762
1763 /* Get a volume representation */
1764 server = nfs_create_server(mount_info->parsed, mount_info->mntfh);
1765 if (IS_ERR(server))
1766 return ERR_CAST(server);
1767
1768 return nfs_fs_mount_common(&nfs_fs_type, server, flags, dev_name, mount_info);
1769}
1770
1741/* 1771/*
1742 * Split "dev_name" into "hostname:export_path". 1772 * Split "dev_name" into "hostname:export_path".
1743 * 1773 *
@@ -1826,10 +1856,10 @@ out_path:
1826 * + breaking back: trying proto=udp after proto=tcp, v2 after v3, 1856 * + breaking back: trying proto=udp after proto=tcp, v2 after v3,
1827 * mountproto=tcp after mountproto=udp, and so on 1857 * mountproto=tcp after mountproto=udp, and so on
1828 */ 1858 */
1829static int nfs_validate_mount_data(void *options, 1859static int nfs23_validate_mount_data(void *options,
1830 struct nfs_parsed_mount_data *args, 1860 struct nfs_parsed_mount_data *args,
1831 struct nfs_fh *mntfh, 1861 struct nfs_fh *mntfh,
1832 const char *dev_name) 1862 const char *dev_name)
1833{ 1863{
1834 struct nfs_mount_data *data = (struct nfs_mount_data *)options; 1864 struct nfs_mount_data *data = (struct nfs_mount_data *)options;
1835 struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address; 1865 struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
@@ -1883,6 +1913,7 @@ static int nfs_validate_mount_data(void *options,
1883 args->acregmax = data->acregmax; 1913 args->acregmax = data->acregmax;
1884 args->acdirmin = data->acdirmin; 1914 args->acdirmin = data->acdirmin;
1885 args->acdirmax = data->acdirmax; 1915 args->acdirmax = data->acdirmax;
1916 args->need_mount = false;
1886 1917
1887 memcpy(sap, &data->addr, sizeof(data->addr)); 1918 memcpy(sap, &data->addr, sizeof(data->addr));
1888 args->nfs_server.addrlen = sizeof(data->addr); 1919 args->nfs_server.addrlen = sizeof(data->addr);
@@ -1934,43 +1965,8 @@ static int nfs_validate_mount_data(void *options,
1934 } 1965 }
1935 1966
1936 break; 1967 break;
1937 default: { 1968 default:
1938 int status; 1969 return NFS_TEXT_DATA;
1939
1940 if (nfs_parse_mount_options((char *)options, args) == 0)
1941 return -EINVAL;
1942
1943 if (!nfs_verify_server_address(sap))
1944 goto out_no_address;
1945
1946 if (args->version == 4)
1947#ifdef CONFIG_NFS_V4
1948 return nfs4_validate_text_mount_data(options,
1949 args, dev_name);
1950#else
1951 goto out_v4_not_compiled;
1952#endif
1953
1954 nfs_set_port(sap, &args->nfs_server.port, 0);
1955
1956 nfs_set_mount_transport_protocol(args);
1957
1958 status = nfs_parse_devname(dev_name,
1959 &args->nfs_server.hostname,
1960 PAGE_SIZE,
1961 &args->nfs_server.export_path,
1962 NFS_MAXPATHLEN);
1963 if (!status)
1964 status = nfs_try_mount(args, mntfh);
1965
1966 kfree(args->nfs_server.export_path);
1967 args->nfs_server.export_path = NULL;
1968
1969 if (status)
1970 return status;
1971
1972 break;
1973 }
1974 } 1970 }
1975 1971
1976#ifndef CONFIG_NFS_V3 1972#ifndef CONFIG_NFS_V3
@@ -1999,12 +1995,6 @@ out_v3_not_compiled:
1999 return -EPROTONOSUPPORT; 1995 return -EPROTONOSUPPORT;
2000#endif /* !CONFIG_NFS_V3 */ 1996#endif /* !CONFIG_NFS_V3 */
2001 1997
2002#ifndef CONFIG_NFS_V4
2003out_v4_not_compiled:
2004 dfprintk(MOUNT, "NFS: NFSv4 is not compiled into kernel\n");
2005 return -EPROTONOSUPPORT;
2006#endif /* !CONFIG_NFS_V4 */
2007
2008out_nomem: 1998out_nomem:
2009 dfprintk(MOUNT, "NFS: not enough memory to handle mount options\n"); 1999 dfprintk(MOUNT, "NFS: not enough memory to handle mount options\n");
2010 return -ENOMEM; 2000 return -ENOMEM;
@@ -2018,6 +2008,82 @@ out_invalid_fh:
2018 return -EINVAL; 2008 return -EINVAL;
2019} 2009}
2020 2010
2011#ifdef CONFIG_NFS_V4
2012static int nfs_validate_mount_data(struct file_system_type *fs_type,
2013 void *options,
2014 struct nfs_parsed_mount_data *args,
2015 struct nfs_fh *mntfh,
2016 const char *dev_name)
2017{
2018 if (fs_type == &nfs_fs_type)
2019 return nfs23_validate_mount_data(options, args, mntfh, dev_name);
2020 return nfs4_validate_mount_data(options, args, dev_name);
2021}
2022#else
2023static int nfs_validate_mount_data(struct file_system_type *fs_type,
2024 void *options,
2025 struct nfs_parsed_mount_data *args,
2026 struct nfs_fh *mntfh,
2027 const char *dev_name)
2028{
2029 return nfs23_validate_mount_data(options, args, mntfh, dev_name);
2030}
2031#endif
2032
2033static int nfs_validate_text_mount_data(void *options,
2034 struct nfs_parsed_mount_data *args,
2035 const char *dev_name)
2036{
2037 int port = 0;
2038 int max_namelen = PAGE_SIZE;
2039 int max_pathlen = NFS_MAXPATHLEN;
2040 struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
2041
2042 if (nfs_parse_mount_options((char *)options, args) == 0)
2043 return -EINVAL;
2044
2045 if (!nfs_verify_server_address(sap))
2046 goto out_no_address;
2047
2048 if (args->version == 4) {
2049#ifdef CONFIG_NFS_V4
2050 port = NFS_PORT;
2051 max_namelen = NFS4_MAXNAMLEN;
2052 max_pathlen = NFS4_MAXPATHLEN;
2053 nfs_validate_transport_protocol(args);
2054 nfs4_validate_mount_flags(args);
2055#else
2056 goto out_v4_not_compiled;
2057#endif /* CONFIG_NFS_V4 */
2058 } else
2059 nfs_set_mount_transport_protocol(args);
2060
2061 nfs_set_port(sap, &args->nfs_server.port, port);
2062
2063 if (args->auth_flavor_len > 1)
2064 goto out_bad_auth;
2065
2066 return nfs_parse_devname(dev_name,
2067 &args->nfs_server.hostname,
2068 max_namelen,
2069 &args->nfs_server.export_path,
2070 max_pathlen);
2071
2072#ifndef CONFIG_NFS_V4
2073out_v4_not_compiled:
2074 dfprintk(MOUNT, "NFS: NFSv4 is not compiled into kernel\n");
2075 return -EPROTONOSUPPORT;
2076#endif /* !CONFIG_NFS_V4 */
2077
2078out_no_address:
2079 dfprintk(MOUNT, "NFS: mount program didn't pass remote address\n");
2080 return -EINVAL;
2081
2082out_bad_auth:
2083 dfprintk(MOUNT, "NFS: Too many RPC auth flavours specified\n");
2084 return -EINVAL;
2085}
2086
2021static int 2087static int
2022nfs_compare_remount_data(struct nfs_server *nfss, 2088nfs_compare_remount_data(struct nfs_server *nfss,
2023 struct nfs_parsed_mount_data *data) 2089 struct nfs_parsed_mount_data *data)
@@ -2129,8 +2195,9 @@ static inline void nfs_initialise_sb(struct super_block *sb)
2129 * Finish setting up an NFS2/3 superblock 2195 * Finish setting up an NFS2/3 superblock
2130 */ 2196 */
2131static void nfs_fill_super(struct super_block *sb, 2197static void nfs_fill_super(struct super_block *sb,
2132 struct nfs_parsed_mount_data *data) 2198 struct nfs_mount_info *mount_info)
2133{ 2199{
2200 struct nfs_parsed_mount_data *data = mount_info->parsed;
2134 struct nfs_server *server = NFS_SB(sb); 2201 struct nfs_server *server = NFS_SB(sb);
2135 2202
2136 sb->s_blocksize_bits = 0; 2203 sb->s_blocksize_bits = 0;
@@ -2154,8 +2221,9 @@ static void nfs_fill_super(struct super_block *sb,
2154 * Finish setting up a cloned NFS2/3 superblock 2221 * Finish setting up a cloned NFS2/3 superblock
2155 */ 2222 */
2156static void nfs_clone_super(struct super_block *sb, 2223static void nfs_clone_super(struct super_block *sb,
2157 const struct super_block *old_sb) 2224 struct nfs_mount_info *mount_info)
2158{ 2225{
2226 const struct super_block *old_sb = mount_info->cloned->sb;
2159 struct nfs_server *server = NFS_SB(sb); 2227 struct nfs_server *server = NFS_SB(sb);
2160 2228
2161 sb->s_blocksize_bits = old_sb->s_blocksize_bits; 2229 sb->s_blocksize_bits = old_sb->s_blocksize_bits;
@@ -2278,52 +2346,70 @@ static int nfs_compare_super(struct super_block *sb, void *data)
2278 return nfs_compare_mount_options(sb, server, mntflags); 2346 return nfs_compare_mount_options(sb, server, mntflags);
2279} 2347}
2280 2348
2349#ifdef CONFIG_NFS_FSCACHE
2350static void nfs_get_cache_cookie(struct super_block *sb,
2351 struct nfs_parsed_mount_data *parsed,
2352 struct nfs_clone_mount *cloned)
2353{
2354 char *uniq = NULL;
2355 int ulen = 0;
2356
2357 if (parsed && parsed->fscache_uniq) {
2358 uniq = parsed->fscache_uniq;
2359 ulen = strlen(parsed->fscache_uniq);
2360 } else if (cloned) {
2361 struct nfs_server *mnt_s = NFS_SB(cloned->sb);
2362 if (mnt_s->fscache_key) {
2363 uniq = mnt_s->fscache_key->key.uniquifier;
2364 ulen = mnt_s->fscache_key->key.uniq_len;
2365 };
2366 }
2367
2368 nfs_fscache_get_super_cookie(sb, uniq, ulen);
2369}
2370#else
2371static void nfs_get_cache_cookie(struct super_block *sb,
2372 struct nfs_parsed_mount_data *parsed,
2373 struct nfs_clone_mount *cloned)
2374{
2375}
2376#endif
2377
2281static int nfs_bdi_register(struct nfs_server *server) 2378static int nfs_bdi_register(struct nfs_server *server)
2282{ 2379{
2283 return bdi_register_dev(&server->backing_dev_info, server->s_dev); 2380 return bdi_register_dev(&server->backing_dev_info, server->s_dev);
2284} 2381}
2285 2382
2286static struct dentry *nfs_fs_mount(struct file_system_type *fs_type, 2383static int nfs_set_sb_security(struct super_block *s, struct dentry *mntroot,
2287 int flags, const char *dev_name, void *raw_data) 2384 struct nfs_mount_info *mount_info)
2385{
2386 return security_sb_set_mnt_opts(s, &mount_info->parsed->lsm_opts);
2387}
2388
2389static int nfs_clone_sb_security(struct super_block *s, struct dentry *mntroot,
2390 struct nfs_mount_info *mount_info)
2391{
2392 /* clone any lsm security options from the parent to the new sb */
2393 security_sb_clone_mnt_opts(mount_info->cloned->sb, s);
2394 if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops)
2395 return -ESTALE;
2396 return 0;
2397}
2398
2399static struct dentry *nfs_fs_mount_common(struct file_system_type *fs_type,
2400 struct nfs_server *server,
2401 int flags, const char *dev_name,
2402 struct nfs_mount_info *mount_info)
2288{ 2403{
2289 struct nfs_server *server = NULL;
2290 struct super_block *s; 2404 struct super_block *s;
2291 struct nfs_parsed_mount_data *data;
2292 struct nfs_fh *mntfh;
2293 struct dentry *mntroot = ERR_PTR(-ENOMEM); 2405 struct dentry *mntroot = ERR_PTR(-ENOMEM);
2294 int (*compare_super)(struct super_block *, void *) = nfs_compare_super; 2406 int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
2295 struct nfs_sb_mountdata sb_mntdata = { 2407 struct nfs_sb_mountdata sb_mntdata = {
2296 .mntflags = flags, 2408 .mntflags = flags,
2409 .server = server,
2297 }; 2410 };
2298 int error; 2411 int error;
2299 2412
2300 data = nfs_alloc_parsed_mount_data(NFS_DEFAULT_VERSION);
2301 mntfh = nfs_alloc_fhandle();
2302 if (data == NULL || mntfh == NULL)
2303 goto out;
2304
2305 /* Validate the mount data */
2306 error = nfs_validate_mount_data(raw_data, data, mntfh, dev_name);
2307 if (error < 0) {
2308 mntroot = ERR_PTR(error);
2309 goto out;
2310 }
2311
2312#ifdef CONFIG_NFS_V4
2313 if (data->version == 4) {
2314 mntroot = nfs4_try_mount(flags, dev_name, data);
2315 goto out;
2316 }
2317#endif /* CONFIG_NFS_V4 */
2318
2319 /* Get a volume representation */
2320 server = nfs_create_server(data, mntfh);
2321 if (IS_ERR(server)) {
2322 mntroot = ERR_CAST(server);
2323 goto out;
2324 }
2325 sb_mntdata.server = server;
2326
2327 if (server->flags & NFS_MOUNT_UNSHARED) 2413 if (server->flags & NFS_MOUNT_UNSHARED)
2328 compare_super = NULL; 2414 compare_super = NULL;
2329 2415
@@ -2351,23 +2437,21 @@ static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
2351 2437
2352 if (!s->s_root) { 2438 if (!s->s_root) {
2353 /* initial superblock/root creation */ 2439 /* initial superblock/root creation */
2354 nfs_fill_super(s, data); 2440 mount_info->fill_super(s, mount_info);
2355 nfs_fscache_get_super_cookie(s, data->fscache_uniq, NULL); 2441 nfs_get_cache_cookie(s, mount_info->parsed, mount_info->cloned);
2356 } 2442 }
2357 2443
2358 mntroot = nfs_get_root(s, mntfh, dev_name); 2444 mntroot = nfs_get_root(s, mount_info->mntfh, dev_name);
2359 if (IS_ERR(mntroot)) 2445 if (IS_ERR(mntroot))
2360 goto error_splat_super; 2446 goto error_splat_super;
2361 2447
2362 error = security_sb_set_mnt_opts(s, &data->lsm_opts); 2448 error = mount_info->set_security(s, mntroot, mount_info);
2363 if (error) 2449 if (error)
2364 goto error_splat_root; 2450 goto error_splat_root;
2365 2451
2366 s->s_flags |= MS_ACTIVE; 2452 s->s_flags |= MS_ACTIVE;
2367 2453
2368out: 2454out:
2369 nfs_free_parsed_mount_data(data);
2370 nfs_free_fhandle(mntfh);
2371 return mntroot; 2455 return mntroot;
2372 2456
2373out_err_nosb: 2457out_err_nosb:
@@ -2385,6 +2469,43 @@ error_splat_bdi:
2385 goto out; 2469 goto out;
2386} 2470}
2387 2471
2472static struct dentry *nfs_fs_mount(struct file_system_type *fs_type,
2473 int flags, const char *dev_name, void *raw_data)
2474{
2475 struct nfs_mount_info mount_info = {
2476 .fill_super = nfs_fill_super,
2477 .set_security = nfs_set_sb_security,
2478 };
2479 struct dentry *mntroot = ERR_PTR(-ENOMEM);
2480 int error;
2481
2482 mount_info.parsed = nfs_alloc_parsed_mount_data();
2483 mount_info.mntfh = nfs_alloc_fhandle();
2484 if (mount_info.parsed == NULL || mount_info.mntfh == NULL)
2485 goto out;
2486
2487 /* Validate the mount data */
2488 error = nfs_validate_mount_data(fs_type, raw_data, mount_info.parsed, mount_info.mntfh, dev_name);
2489 if (error == NFS_TEXT_DATA)
2490 error = nfs_validate_text_mount_data(raw_data, mount_info.parsed, dev_name);
2491 if (error < 0) {
2492 mntroot = ERR_PTR(error);
2493 goto out;
2494 }
2495
2496#ifdef CONFIG_NFS_V4
2497 if (mount_info.parsed->version == 4)
2498 mntroot = nfs4_try_mount(flags, dev_name, &mount_info);
2499 else
2500#endif /* CONFIG_NFS_V4 */
2501 mntroot = nfs_try_mount(flags, dev_name, &mount_info);
2502
2503out:
2504 nfs_free_parsed_mount_data(mount_info.parsed);
2505 nfs_free_fhandle(mount_info.mntfh);
2506 return mntroot;
2507}
2508
2388/* 2509/*
2389 * Ensure that we unregister the bdi before kill_anon_super 2510 * Ensure that we unregister the bdi before kill_anon_super
2390 * releases the device name 2511 * releases the device name
@@ -2409,93 +2530,51 @@ static void nfs_kill_super(struct super_block *s)
2409} 2530}
2410 2531
2411/* 2532/*
2412 * Clone an NFS2/3 server record on xdev traversal (FSID-change) 2533 * Clone an NFS2/3/4 server record on xdev traversal (FSID-change)
2413 */ 2534 */
2414static struct dentry * 2535static struct dentry *
2415nfs_xdev_mount(struct file_system_type *fs_type, int flags, 2536nfs_xdev_mount_common(struct file_system_type *fs_type, int flags,
2416 const char *dev_name, void *raw_data) 2537 const char *dev_name, struct nfs_mount_info *mount_info)
2417{ 2538{
2418 struct nfs_clone_mount *data = raw_data; 2539 struct nfs_clone_mount *data = mount_info->cloned;
2419 struct super_block *s;
2420 struct nfs_server *server; 2540 struct nfs_server *server;
2421 struct dentry *mntroot; 2541 struct dentry *mntroot = ERR_PTR(-ENOMEM);
2422 int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
2423 struct nfs_sb_mountdata sb_mntdata = {
2424 .mntflags = flags,
2425 };
2426 int error; 2542 int error;
2427 2543
2428 dprintk("--> nfs_xdev_mount()\n"); 2544 dprintk("--> nfs_xdev_mount_common()\n");
2545
2546 mount_info->mntfh = data->fh;
2429 2547
2430 /* create a new volume representation */ 2548 /* create a new volume representation */
2431 server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor); 2549 server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
2432 if (IS_ERR(server)) { 2550 if (IS_ERR(server)) {
2433 error = PTR_ERR(server); 2551 error = PTR_ERR(server);
2434 goto out_err_noserver; 2552 goto out_err;
2435 }
2436 sb_mntdata.server = server;
2437
2438 if (server->flags & NFS_MOUNT_UNSHARED)
2439 compare_super = NULL;
2440
2441 /* -o noac implies -o sync */
2442 if (server->flags & NFS_MOUNT_NOAC)
2443 sb_mntdata.mntflags |= MS_SYNCHRONOUS;
2444
2445 /* Get a superblock - note that we may end up sharing one that already exists */
2446 s = sget(&nfs_fs_type, compare_super, nfs_set_super, &sb_mntdata);
2447 if (IS_ERR(s)) {
2448 error = PTR_ERR(s);
2449 goto out_err_nosb;
2450 }
2451
2452 if (s->s_fs_info != server) {
2453 nfs_free_server(server);
2454 server = NULL;
2455 } else {
2456 error = nfs_bdi_register(server);
2457 if (error)
2458 goto error_splat_bdi;
2459 }
2460
2461 if (!s->s_root) {
2462 /* initial superblock/root creation */
2463 nfs_clone_super(s, data->sb);
2464 nfs_fscache_get_super_cookie(s, NULL, data);
2465 }
2466
2467 mntroot = nfs_get_root(s, data->fh, dev_name);
2468 if (IS_ERR(mntroot)) {
2469 error = PTR_ERR(mntroot);
2470 goto error_splat_super;
2471 }
2472 if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops) {
2473 dput(mntroot);
2474 error = -ESTALE;
2475 goto error_splat_super;
2476 } 2553 }
2477 2554
2478 s->s_flags |= MS_ACTIVE; 2555 mntroot = nfs_fs_mount_common(fs_type, server, flags, dev_name, mount_info);
2479 2556 dprintk("<-- nfs_xdev_mount_common() = 0\n");
2480 /* clone any lsm security options from the parent to the new sb */ 2557out:
2481 security_sb_clone_mnt_opts(data->sb, s);
2482
2483 dprintk("<-- nfs_xdev_mount() = 0\n");
2484 return mntroot; 2558 return mntroot;
2485 2559
2486out_err_nosb: 2560out_err:
2487 nfs_free_server(server); 2561 dprintk("<-- nfs_xdev_mount_common() = %d [error]\n", error);
2488out_err_noserver: 2562 goto out;
2489 dprintk("<-- nfs_xdev_mount() = %d [error]\n", error); 2563}
2490 return ERR_PTR(error);
2491 2564
2492error_splat_super: 2565/*
2493 if (server && !s->s_root) 2566 * Clone an NFS2/3 server record on xdev traversal (FSID-change)
2494 bdi_unregister(&server->backing_dev_info); 2567 */
2495error_splat_bdi: 2568static struct dentry *
2496 deactivate_locked_super(s); 2569nfs_xdev_mount(struct file_system_type *fs_type, int flags,
2497 dprintk("<-- nfs_xdev_mount() = %d [splat]\n", error); 2570 const char *dev_name, void *raw_data)
2498 return ERR_PTR(error); 2571{
2572 struct nfs_mount_info mount_info = {
2573 .fill_super = nfs_clone_super,
2574 .set_security = nfs_clone_sb_security,
2575 .cloned = raw_data,
2576 };
2577 return nfs_xdev_mount_common(&nfs_fs_type, flags, dev_name, &mount_info);
2499} 2578}
2500 2579
2501#ifdef CONFIG_NFS_V4 2580#ifdef CONFIG_NFS_V4
@@ -2504,8 +2583,9 @@ error_splat_bdi:
2504 * Finish setting up a cloned NFS4 superblock 2583 * Finish setting up a cloned NFS4 superblock
2505 */ 2584 */
2506static void nfs4_clone_super(struct super_block *sb, 2585static void nfs4_clone_super(struct super_block *sb,
2507 const struct super_block *old_sb) 2586 struct nfs_mount_info *mount_info)
2508{ 2587{
2588 const struct super_block *old_sb = mount_info->cloned->sb;
2509 sb->s_blocksize_bits = old_sb->s_blocksize_bits; 2589 sb->s_blocksize_bits = old_sb->s_blocksize_bits;
2510 sb->s_blocksize = old_sb->s_blocksize; 2590 sb->s_blocksize = old_sb->s_blocksize;
2511 sb->s_maxbytes = old_sb->s_maxbytes; 2591 sb->s_maxbytes = old_sb->s_maxbytes;
@@ -2523,7 +2603,8 @@ static void nfs4_clone_super(struct super_block *sb,
2523/* 2603/*
2524 * Set up an NFS4 superblock 2604 * Set up an NFS4 superblock
2525 */ 2605 */
2526static void nfs4_fill_super(struct super_block *sb) 2606static void nfs4_fill_super(struct super_block *sb,
2607 struct nfs_mount_info *mount_info)
2527{ 2608{
2528 sb->s_time_gran = 1; 2609 sb->s_time_gran = 1;
2529 sb->s_op = &nfs4_sops; 2610 sb->s_op = &nfs4_sops;
@@ -2542,37 +2623,6 @@ static void nfs4_validate_mount_flags(struct nfs_parsed_mount_data *args)
2542 NFS_MOUNT_LOCAL_FLOCK|NFS_MOUNT_LOCAL_FCNTL); 2623 NFS_MOUNT_LOCAL_FLOCK|NFS_MOUNT_LOCAL_FCNTL);
2543} 2624}
2544 2625
2545static int nfs4_validate_text_mount_data(void *options,
2546 struct nfs_parsed_mount_data *args,
2547 const char *dev_name)
2548{
2549 struct sockaddr *sap = (struct sockaddr *)&args->nfs_server.address;
2550
2551 nfs_set_port(sap, &args->nfs_server.port, NFS_PORT);
2552
2553 nfs_validate_transport_protocol(args);
2554
2555 nfs4_validate_mount_flags(args);
2556
2557 if (args->version != 4) {
2558 dfprintk(MOUNT,
2559 "NFS4: Illegal mount version\n");
2560 return -EINVAL;
2561 }
2562
2563 if (args->auth_flavor_len > 1) {
2564 dfprintk(MOUNT,
2565 "NFS4: Too many RPC auth flavours specified\n");
2566 return -EINVAL;
2567 }
2568
2569 return nfs_parse_devname(dev_name,
2570 &args->nfs_server.hostname,
2571 NFS4_MAXNAMLEN,
2572 &args->nfs_server.export_path,
2573 NFS4_MAXPATHLEN);
2574}
2575
2576/* 2626/*
2577 * Validate NFSv4 mount options 2627 * Validate NFSv4 mount options
2578 */ 2628 */
@@ -2643,13 +2693,7 @@ static int nfs4_validate_mount_data(void *options,
2643 2693
2644 break; 2694 break;
2645 default: 2695 default:
2646 if (nfs_parse_mount_options((char *)options, args) == 0) 2696 return NFS_TEXT_DATA;
2647 return -EINVAL;
2648
2649 if (!nfs_verify_server_address(sap))
2650 return -EINVAL;
2651
2652 return nfs4_validate_text_mount_data(options, args, dev_name);
2653 } 2697 }
2654 2698
2655 return 0; 2699 return 0;
@@ -2673,91 +2717,26 @@ out_no_address:
2673 */ 2717 */
2674static struct dentry * 2718static struct dentry *
2675nfs4_remote_mount(struct file_system_type *fs_type, int flags, 2719nfs4_remote_mount(struct file_system_type *fs_type, int flags,
2676 const char *dev_name, void *raw_data) 2720 const char *dev_name, void *info)
2677{ 2721{
2678 struct nfs_parsed_mount_data *data = raw_data; 2722 struct nfs_mount_info *mount_info = info;
2679 struct super_block *s;
2680 struct nfs_server *server; 2723 struct nfs_server *server;
2681 struct nfs_fh *mntfh; 2724 struct dentry *mntroot = ERR_PTR(-ENOMEM);
2682 struct dentry *mntroot;
2683 int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
2684 struct nfs_sb_mountdata sb_mntdata = {
2685 .mntflags = flags,
2686 };
2687 int error = -ENOMEM;
2688 2725
2689 mntfh = nfs_alloc_fhandle(); 2726 mount_info->fill_super = nfs4_fill_super;
2690 if (data == NULL || mntfh == NULL) 2727 mount_info->set_security = nfs_set_sb_security;
2691 goto out;
2692 2728
2693 /* Get a volume representation */ 2729 /* Get a volume representation */
2694 server = nfs4_create_server(data, mntfh); 2730 server = nfs4_create_server(mount_info->parsed, mount_info->mntfh);
2695 if (IS_ERR(server)) { 2731 if (IS_ERR(server)) {
2696 error = PTR_ERR(server); 2732 mntroot = ERR_CAST(server);
2697 goto out; 2733 goto out;
2698 } 2734 }
2699 sb_mntdata.server = server;
2700 2735
2701 if (server->flags & NFS4_MOUNT_UNSHARED) 2736 mntroot = nfs_fs_mount_common(fs_type, server, flags, dev_name, mount_info);
2702 compare_super = NULL;
2703
2704 /* -o noac implies -o sync */
2705 if (server->flags & NFS_MOUNT_NOAC)
2706 sb_mntdata.mntflags |= MS_SYNCHRONOUS;
2707
2708 /* Get a superblock - note that we may end up sharing one that already exists */
2709 s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
2710 if (IS_ERR(s)) {
2711 error = PTR_ERR(s);
2712 goto out_free;
2713 }
2714
2715 if (s->s_fs_info != server) {
2716 nfs_free_server(server);
2717 server = NULL;
2718 } else {
2719 error = nfs_bdi_register(server);
2720 if (error)
2721 goto error_splat_bdi;
2722 }
2723
2724 if (!s->s_root) {
2725 /* initial superblock/root creation */
2726 nfs4_fill_super(s);
2727 nfs_fscache_get_super_cookie(s, data->fscache_uniq, NULL);
2728 }
2729
2730 mntroot = nfs4_get_root(s, mntfh, dev_name);
2731 if (IS_ERR(mntroot)) {
2732 error = PTR_ERR(mntroot);
2733 goto error_splat_super;
2734 }
2735
2736 error = security_sb_set_mnt_opts(s, &data->lsm_opts);
2737 if (error)
2738 goto error_splat_root;
2739
2740 s->s_flags |= MS_ACTIVE;
2741
2742 nfs_free_fhandle(mntfh);
2743 return mntroot;
2744 2737
2745out: 2738out:
2746 nfs_free_fhandle(mntfh); 2739 return mntroot;
2747 return ERR_PTR(error);
2748
2749out_free:
2750 nfs_free_server(server);
2751 goto out;
2752
2753error_splat_root:
2754 dput(mntroot);
2755error_splat_super:
2756 if (server && !s->s_root)
2757 bdi_unregister(&server->backing_dev_info);
2758error_splat_bdi:
2759 deactivate_locked_super(s);
2760 goto out;
2761} 2740}
2762 2741
2763static struct vfsmount *nfs_do_root_mount(struct file_system_type *fs_type, 2742static struct vfsmount *nfs_do_root_mount(struct file_system_type *fs_type,
@@ -2869,17 +2848,18 @@ static struct dentry *nfs_follow_remote_path(struct vfsmount *root_mnt,
2869} 2848}
2870 2849
2871static struct dentry *nfs4_try_mount(int flags, const char *dev_name, 2850static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
2872 struct nfs_parsed_mount_data *data) 2851 struct nfs_mount_info *mount_info)
2873{ 2852{
2874 char *export_path; 2853 char *export_path;
2875 struct vfsmount *root_mnt; 2854 struct vfsmount *root_mnt;
2876 struct dentry *res; 2855 struct dentry *res;
2856 struct nfs_parsed_mount_data *data = mount_info->parsed;
2877 2857
2878 dfprintk(MOUNT, "--> nfs4_try_mount()\n"); 2858 dfprintk(MOUNT, "--> nfs4_try_mount()\n");
2879 2859
2880 export_path = data->nfs_server.export_path; 2860 export_path = data->nfs_server.export_path;
2881 data->nfs_server.export_path = "/"; 2861 data->nfs_server.export_path = "/";
2882 root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, data, 2862 root_mnt = nfs_do_root_mount(&nfs4_remote_fs_type, flags, mount_info,
2883 data->nfs_server.hostname); 2863 data->nfs_server.hostname);
2884 data->nfs_server.export_path = export_path; 2864 data->nfs_server.export_path = export_path;
2885 2865
@@ -2891,38 +2871,6 @@ static struct dentry *nfs4_try_mount(int flags, const char *dev_name,
2891 return res; 2871 return res;
2892} 2872}
2893 2873
2894/*
2895 * Get the superblock for an NFS4 mountpoint
2896 */
2897static struct dentry *nfs4_mount(struct file_system_type *fs_type,
2898 int flags, const char *dev_name, void *raw_data)
2899{
2900 struct nfs_parsed_mount_data *data;
2901 int error = -ENOMEM;
2902 struct dentry *res = ERR_PTR(-ENOMEM);
2903
2904 data = nfs_alloc_parsed_mount_data(4);
2905 if (data == NULL)
2906 goto out;
2907
2908 /* Validate the mount data */
2909 error = nfs4_validate_mount_data(raw_data, data, dev_name);
2910 if (error < 0) {
2911 res = ERR_PTR(error);
2912 goto out;
2913 }
2914
2915 res = nfs4_try_mount(flags, dev_name, data);
2916 if (IS_ERR(res))
2917 error = PTR_ERR(res);
2918
2919out:
2920 nfs_free_parsed_mount_data(data);
2921 dprintk("<-- nfs4_mount() = %d%s\n", error,
2922 error != 0 ? " [error]" : "");
2923 return res;
2924}
2925
2926static void nfs4_kill_super(struct super_block *sb) 2874static void nfs4_kill_super(struct super_block *sb)
2927{ 2875{
2928 struct nfs_server *server = NFS_SB(sb); 2876 struct nfs_server *server = NFS_SB(sb);
@@ -2942,181 +2890,43 @@ static struct dentry *
2942nfs4_xdev_mount(struct file_system_type *fs_type, int flags, 2890nfs4_xdev_mount(struct file_system_type *fs_type, int flags,
2943 const char *dev_name, void *raw_data) 2891 const char *dev_name, void *raw_data)
2944{ 2892{
2945 struct nfs_clone_mount *data = raw_data; 2893 struct nfs_mount_info mount_info = {
2946 struct super_block *s; 2894 .fill_super = nfs4_clone_super,
2947 struct nfs_server *server; 2895 .set_security = nfs_clone_sb_security,
2948 struct dentry *mntroot; 2896 .cloned = raw_data,
2949 int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
2950 struct nfs_sb_mountdata sb_mntdata = {
2951 .mntflags = flags,
2952 }; 2897 };
2953 int error; 2898 return nfs_xdev_mount_common(&nfs4_fs_type, flags, dev_name, &mount_info);
2954
2955 dprintk("--> nfs4_xdev_mount()\n");
2956
2957 /* create a new volume representation */
2958 server = nfs_clone_server(NFS_SB(data->sb), data->fh, data->fattr, data->authflavor);
2959 if (IS_ERR(server)) {
2960 error = PTR_ERR(server);
2961 goto out_err_noserver;
2962 }
2963 sb_mntdata.server = server;
2964
2965 if (server->flags & NFS4_MOUNT_UNSHARED)
2966 compare_super = NULL;
2967
2968 /* -o noac implies -o sync */
2969 if (server->flags & NFS_MOUNT_NOAC)
2970 sb_mntdata.mntflags |= MS_SYNCHRONOUS;
2971
2972 /* Get a superblock - note that we may end up sharing one that already exists */
2973 s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
2974 if (IS_ERR(s)) {
2975 error = PTR_ERR(s);
2976 goto out_err_nosb;
2977 }
2978
2979 if (s->s_fs_info != server) {
2980 nfs_free_server(server);
2981 server = NULL;
2982 } else {
2983 error = nfs_bdi_register(server);
2984 if (error)
2985 goto error_splat_bdi;
2986 }
2987
2988 if (!s->s_root) {
2989 /* initial superblock/root creation */
2990 nfs4_clone_super(s, data->sb);
2991 nfs_fscache_get_super_cookie(s, NULL, data);
2992 }
2993
2994 mntroot = nfs4_get_root(s, data->fh, dev_name);
2995 if (IS_ERR(mntroot)) {
2996 error = PTR_ERR(mntroot);
2997 goto error_splat_super;
2998 }
2999 if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops) {
3000 dput(mntroot);
3001 error = -ESTALE;
3002 goto error_splat_super;
3003 }
3004
3005 s->s_flags |= MS_ACTIVE;
3006
3007 security_sb_clone_mnt_opts(data->sb, s);
3008
3009 dprintk("<-- nfs4_xdev_mount() = 0\n");
3010 return mntroot;
3011
3012out_err_nosb:
3013 nfs_free_server(server);
3014out_err_noserver:
3015 dprintk("<-- nfs4_xdev_mount() = %d [error]\n", error);
3016 return ERR_PTR(error);
3017
3018error_splat_super:
3019 if (server && !s->s_root)
3020 bdi_unregister(&server->backing_dev_info);
3021error_splat_bdi:
3022 deactivate_locked_super(s);
3023 dprintk("<-- nfs4_xdev_mount() = %d [splat]\n", error);
3024 return ERR_PTR(error);
3025} 2899}
3026 2900
3027static struct dentry * 2901static struct dentry *
3028nfs4_remote_referral_mount(struct file_system_type *fs_type, int flags, 2902nfs4_remote_referral_mount(struct file_system_type *fs_type, int flags,
3029 const char *dev_name, void *raw_data) 2903 const char *dev_name, void *raw_data)
3030{ 2904{
3031 struct nfs_clone_mount *data = raw_data; 2905 struct nfs_mount_info mount_info = {
3032 struct super_block *s; 2906 .fill_super = nfs4_fill_super,
3033 struct nfs_server *server; 2907 .set_security = nfs_clone_sb_security,
3034 struct dentry *mntroot; 2908 .cloned = raw_data,
3035 struct nfs_fh *mntfh;
3036 int (*compare_super)(struct super_block *, void *) = nfs_compare_super;
3037 struct nfs_sb_mountdata sb_mntdata = {
3038 .mntflags = flags,
3039 }; 2909 };
3040 int error = -ENOMEM; 2910 struct nfs_server *server;
2911 struct dentry *mntroot = ERR_PTR(-ENOMEM);
3041 2912
3042 dprintk("--> nfs4_referral_get_sb()\n"); 2913 dprintk("--> nfs4_referral_get_sb()\n");
3043 2914
3044 mntfh = nfs_alloc_fhandle(); 2915 mount_info.mntfh = nfs_alloc_fhandle();
3045 if (mntfh == NULL) 2916 if (mount_info.cloned == NULL || mount_info.mntfh == NULL)
3046 goto out_err_nofh; 2917 goto out;
3047 2918
3048 /* create a new volume representation */ 2919 /* create a new volume representation */
3049 server = nfs4_create_referral_server(data, mntfh); 2920 server = nfs4_create_referral_server(mount_info.cloned, mount_info.mntfh);
3050 if (IS_ERR(server)) { 2921 if (IS_ERR(server)) {
3051 error = PTR_ERR(server); 2922 mntroot = ERR_CAST(server);
3052 goto out_err_noserver; 2923 goto out;
3053 }
3054 sb_mntdata.server = server;
3055
3056 if (server->flags & NFS4_MOUNT_UNSHARED)
3057 compare_super = NULL;
3058
3059 /* -o noac implies -o sync */
3060 if (server->flags & NFS_MOUNT_NOAC)
3061 sb_mntdata.mntflags |= MS_SYNCHRONOUS;
3062
3063 /* Get a superblock - note that we may end up sharing one that already exists */
3064 s = sget(&nfs4_fs_type, compare_super, nfs_set_super, &sb_mntdata);
3065 if (IS_ERR(s)) {
3066 error = PTR_ERR(s);
3067 goto out_err_nosb;
3068 }
3069
3070 if (s->s_fs_info != server) {
3071 nfs_free_server(server);
3072 server = NULL;
3073 } else {
3074 error = nfs_bdi_register(server);
3075 if (error)
3076 goto error_splat_bdi;
3077 }
3078
3079 if (!s->s_root) {
3080 /* initial superblock/root creation */
3081 nfs4_fill_super(s);
3082 nfs_fscache_get_super_cookie(s, NULL, data);
3083 }
3084
3085 mntroot = nfs4_get_root(s, mntfh, dev_name);
3086 if (IS_ERR(mntroot)) {
3087 error = PTR_ERR(mntroot);
3088 goto error_splat_super;
3089 }
3090 if (mntroot->d_inode->i_op != NFS_SB(s)->nfs_client->rpc_ops->dir_inode_ops) {
3091 dput(mntroot);
3092 error = -ESTALE;
3093 goto error_splat_super;
3094 } 2924 }
3095 2925
3096 s->s_flags |= MS_ACTIVE; 2926 mntroot = nfs_fs_mount_common(&nfs4_fs_type, server, flags, dev_name, &mount_info);
3097 2927out:
3098 security_sb_clone_mnt_opts(data->sb, s); 2928 nfs_free_fhandle(mount_info.mntfh);
3099
3100 nfs_free_fhandle(mntfh);
3101 dprintk("<-- nfs4_referral_get_sb() = 0\n");
3102 return mntroot; 2929 return mntroot;
3103
3104out_err_nosb:
3105 nfs_free_server(server);
3106out_err_noserver:
3107 nfs_free_fhandle(mntfh);
3108out_err_nofh:
3109 dprintk("<-- nfs4_referral_get_sb() = %d [error]\n", error);
3110 return ERR_PTR(error);
3111
3112error_splat_super:
3113 if (server && !s->s_root)
3114 bdi_unregister(&server->backing_dev_info);
3115error_splat_bdi:
3116 deactivate_locked_super(s);
3117 nfs_free_fhandle(mntfh);
3118 dprintk("<-- nfs4_referral_get_sb() = %d [splat]\n", error);
3119 return ERR_PTR(error);
3120} 2930}
3121 2931
3122/* 2932/*
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index c07462320f6b..e6fe3d69d14c 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -39,20 +39,20 @@
39/* 39/*
40 * Local function declarations 40 * Local function declarations
41 */ 41 */
42static void nfs_pageio_init_write(struct nfs_pageio_descriptor *desc,
43 struct inode *inode, int ioflags);
44static void nfs_redirty_request(struct nfs_page *req); 42static void nfs_redirty_request(struct nfs_page *req);
45static const struct rpc_call_ops nfs_write_partial_ops; 43static const struct rpc_call_ops nfs_write_common_ops;
46static const struct rpc_call_ops nfs_write_full_ops;
47static const struct rpc_call_ops nfs_commit_ops; 44static const struct rpc_call_ops nfs_commit_ops;
45static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops;
46static const struct nfs_commit_completion_ops nfs_commit_completion_ops;
48 47
49static struct kmem_cache *nfs_wdata_cachep; 48static struct kmem_cache *nfs_wdata_cachep;
50static mempool_t *nfs_wdata_mempool; 49static mempool_t *nfs_wdata_mempool;
50static struct kmem_cache *nfs_cdata_cachep;
51static mempool_t *nfs_commit_mempool; 51static mempool_t *nfs_commit_mempool;
52 52
53struct nfs_write_data *nfs_commitdata_alloc(void) 53struct nfs_commit_data *nfs_commitdata_alloc(void)
54{ 54{
55 struct nfs_write_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS); 55 struct nfs_commit_data *p = mempool_alloc(nfs_commit_mempool, GFP_NOFS);
56 56
57 if (p) { 57 if (p) {
58 memset(p, 0, sizeof(*p)); 58 memset(p, 0, sizeof(*p));
@@ -62,46 +62,73 @@ struct nfs_write_data *nfs_commitdata_alloc(void)
62} 62}
63EXPORT_SYMBOL_GPL(nfs_commitdata_alloc); 63EXPORT_SYMBOL_GPL(nfs_commitdata_alloc);
64 64
65void nfs_commit_free(struct nfs_write_data *p) 65void nfs_commit_free(struct nfs_commit_data *p)
66{ 66{
67 if (p && (p->pagevec != &p->page_array[0]))
68 kfree(p->pagevec);
69 mempool_free(p, nfs_commit_mempool); 67 mempool_free(p, nfs_commit_mempool);
70} 68}
71EXPORT_SYMBOL_GPL(nfs_commit_free); 69EXPORT_SYMBOL_GPL(nfs_commit_free);
72 70
73struct nfs_write_data *nfs_writedata_alloc(unsigned int pagecount) 71struct nfs_write_header *nfs_writehdr_alloc(void)
74{ 72{
75 struct nfs_write_data *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS); 73 struct nfs_write_header *p = mempool_alloc(nfs_wdata_mempool, GFP_NOFS);
76 74
77 if (p) { 75 if (p) {
76 struct nfs_pgio_header *hdr = &p->header;
77
78 memset(p, 0, sizeof(*p)); 78 memset(p, 0, sizeof(*p));
79 INIT_LIST_HEAD(&p->pages); 79 INIT_LIST_HEAD(&hdr->pages);
80 p->npages = pagecount; 80 INIT_LIST_HEAD(&hdr->rpc_list);
81 if (pagecount <= ARRAY_SIZE(p->page_array)) 81 spin_lock_init(&hdr->lock);
82 p->pagevec = p->page_array; 82 atomic_set(&hdr->refcnt, 0);
83 else {
84 p->pagevec = kcalloc(pagecount, sizeof(struct page *), GFP_NOFS);
85 if (!p->pagevec) {
86 mempool_free(p, nfs_wdata_mempool);
87 p = NULL;
88 }
89 }
90 } 83 }
91 return p; 84 return p;
92} 85}
93 86
94void nfs_writedata_free(struct nfs_write_data *p) 87static struct nfs_write_data *nfs_writedata_alloc(struct nfs_pgio_header *hdr,
88 unsigned int pagecount)
89{
90 struct nfs_write_data *data, *prealloc;
91
92 prealloc = &container_of(hdr, struct nfs_write_header, header)->rpc_data;
93 if (prealloc->header == NULL)
94 data = prealloc;
95 else
96 data = kzalloc(sizeof(*data), GFP_KERNEL);
97 if (!data)
98 goto out;
99
100 if (nfs_pgarray_set(&data->pages, pagecount)) {
101 data->header = hdr;
102 atomic_inc(&hdr->refcnt);
103 } else {
104 if (data != prealloc)
105 kfree(data);
106 data = NULL;
107 }
108out:
109 return data;
110}
111
112void nfs_writehdr_free(struct nfs_pgio_header *hdr)
95{ 113{
96 if (p && (p->pagevec != &p->page_array[0])) 114 struct nfs_write_header *whdr = container_of(hdr, struct nfs_write_header, header);
97 kfree(p->pagevec); 115 mempool_free(whdr, nfs_wdata_mempool);
98 mempool_free(p, nfs_wdata_mempool);
99} 116}
100 117
101void nfs_writedata_release(struct nfs_write_data *wdata) 118void nfs_writedata_release(struct nfs_write_data *wdata)
102{ 119{
120 struct nfs_pgio_header *hdr = wdata->header;
121 struct nfs_write_header *write_header = container_of(hdr, struct nfs_write_header, header);
122
103 put_nfs_open_context(wdata->args.context); 123 put_nfs_open_context(wdata->args.context);
104 nfs_writedata_free(wdata); 124 if (wdata->pages.pagevec != wdata->pages.page_array)
125 kfree(wdata->pages.pagevec);
126 if (wdata != &write_header->rpc_data)
127 kfree(wdata);
128 else
129 wdata->header = NULL;
130 if (atomic_dec_and_test(&hdr->refcnt))
131 hdr->completion_ops->completion(hdr);
105} 132}
106 133
107static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error) 134static void nfs_context_set_write_error(struct nfs_open_context *ctx, int error)
@@ -203,7 +230,6 @@ static int nfs_set_page_writeback(struct page *page)
203 struct inode *inode = page->mapping->host; 230 struct inode *inode = page->mapping->host;
204 struct nfs_server *nfss = NFS_SERVER(inode); 231 struct nfs_server *nfss = NFS_SERVER(inode);
205 232
206 page_cache_get(page);
207 if (atomic_long_inc_return(&nfss->writeback) > 233 if (atomic_long_inc_return(&nfss->writeback) >
208 NFS_CONGESTION_ON_THRESH) { 234 NFS_CONGESTION_ON_THRESH) {
209 set_bdi_congested(&nfss->backing_dev_info, 235 set_bdi_congested(&nfss->backing_dev_info,
@@ -219,7 +245,6 @@ static void nfs_end_page_writeback(struct page *page)
219 struct nfs_server *nfss = NFS_SERVER(inode); 245 struct nfs_server *nfss = NFS_SERVER(inode);
220 246
221 end_page_writeback(page); 247 end_page_writeback(page);
222 page_cache_release(page);
223 if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH) 248 if (atomic_long_dec_return(&nfss->writeback) < NFS_CONGESTION_OFF_THRESH)
224 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC); 249 clear_bdi_congested(&nfss->backing_dev_info, BLK_RW_ASYNC);
225} 250}
@@ -235,10 +260,10 @@ static struct nfs_page *nfs_find_and_lock_request(struct page *page, bool nonblo
235 req = nfs_page_find_request_locked(page); 260 req = nfs_page_find_request_locked(page);
236 if (req == NULL) 261 if (req == NULL)
237 break; 262 break;
238 if (nfs_lock_request_dontget(req)) 263 if (nfs_lock_request(req))
239 break; 264 break;
240 /* Note: If we hold the page lock, as is the case in nfs_writepage, 265 /* Note: If we hold the page lock, as is the case in nfs_writepage,
241 * then the call to nfs_lock_request_dontget() will always 266 * then the call to nfs_lock_request() will always
242 * succeed provided that someone hasn't already marked the 267 * succeed provided that someone hasn't already marked the
243 * request as dirty (in which case we don't care). 268 * request as dirty (in which case we don't care).
244 */ 269 */
@@ -310,7 +335,8 @@ static int nfs_writepage_locked(struct page *page, struct writeback_control *wbc
310 struct nfs_pageio_descriptor pgio; 335 struct nfs_pageio_descriptor pgio;
311 int err; 336 int err;
312 337
313 nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc)); 338 nfs_pageio_init_write(&pgio, page->mapping->host, wb_priority(wbc),
339 &nfs_async_write_completion_ops);
314 err = nfs_do_writepage(page, wbc, &pgio); 340 err = nfs_do_writepage(page, wbc, &pgio);
315 nfs_pageio_complete(&pgio); 341 nfs_pageio_complete(&pgio);
316 if (err < 0) 342 if (err < 0)
@@ -353,7 +379,8 @@ int nfs_writepages(struct address_space *mapping, struct writeback_control *wbc)
353 379
354 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES); 380 nfs_inc_stats(inode, NFSIOS_VFSWRITEPAGES);
355 381
356 nfs_pageio_init_write(&pgio, inode, wb_priority(wbc)); 382 nfs_pageio_init_write(&pgio, inode, wb_priority(wbc),
383 &nfs_async_write_completion_ops);
357 err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio); 384 err = write_cache_pages(mapping, wbc, nfs_writepages_callback, &pgio);
358 nfs_pageio_complete(&pgio); 385 nfs_pageio_complete(&pgio);
359 386
@@ -379,7 +406,7 @@ static void nfs_inode_add_request(struct inode *inode, struct nfs_page *req)
379 struct nfs_inode *nfsi = NFS_I(inode); 406 struct nfs_inode *nfsi = NFS_I(inode);
380 407
381 /* Lock the request! */ 408 /* Lock the request! */
382 nfs_lock_request_dontget(req); 409 nfs_lock_request(req);
383 410
384 spin_lock(&inode->i_lock); 411 spin_lock(&inode->i_lock);
385 if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE)) 412 if (!nfsi->npages && nfs_have_delegation(inode, FMODE_WRITE))
@@ -421,65 +448,88 @@ nfs_mark_request_dirty(struct nfs_page *req)
421/** 448/**
422 * nfs_request_add_commit_list - add request to a commit list 449 * nfs_request_add_commit_list - add request to a commit list
423 * @req: pointer to a struct nfs_page 450 * @req: pointer to a struct nfs_page
424 * @head: commit list head 451 * @dst: commit list head
452 * @cinfo: holds list lock and accounting info
425 * 453 *
426 * This sets the PG_CLEAN bit, updates the inode global count of 454 * This sets the PG_CLEAN bit, updates the cinfo count of
427 * number of outstanding requests requiring a commit as well as 455 * number of outstanding requests requiring a commit as well as
428 * the MM page stats. 456 * the MM page stats.
429 * 457 *
430 * The caller must _not_ hold the inode->i_lock, but must be 458 * The caller must _not_ hold the cinfo->lock, but must be
431 * holding the nfs_page lock. 459 * holding the nfs_page lock.
432 */ 460 */
433void 461void
434nfs_request_add_commit_list(struct nfs_page *req, struct list_head *head) 462nfs_request_add_commit_list(struct nfs_page *req, struct list_head *dst,
463 struct nfs_commit_info *cinfo)
435{ 464{
436 struct inode *inode = req->wb_context->dentry->d_inode;
437
438 set_bit(PG_CLEAN, &(req)->wb_flags); 465 set_bit(PG_CLEAN, &(req)->wb_flags);
439 spin_lock(&inode->i_lock); 466 spin_lock(cinfo->lock);
440 nfs_list_add_request(req, head); 467 nfs_list_add_request(req, dst);
441 NFS_I(inode)->ncommit++; 468 cinfo->mds->ncommit++;
442 spin_unlock(&inode->i_lock); 469 spin_unlock(cinfo->lock);
443 inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); 470 if (!cinfo->dreq) {
444 inc_bdi_stat(req->wb_page->mapping->backing_dev_info, BDI_RECLAIMABLE); 471 inc_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
445 __mark_inode_dirty(inode, I_DIRTY_DATASYNC); 472 inc_bdi_stat(req->wb_page->mapping->backing_dev_info,
473 BDI_RECLAIMABLE);
474 __mark_inode_dirty(req->wb_context->dentry->d_inode,
475 I_DIRTY_DATASYNC);
476 }
446} 477}
447EXPORT_SYMBOL_GPL(nfs_request_add_commit_list); 478EXPORT_SYMBOL_GPL(nfs_request_add_commit_list);
448 479
449/** 480/**
450 * nfs_request_remove_commit_list - Remove request from a commit list 481 * nfs_request_remove_commit_list - Remove request from a commit list
451 * @req: pointer to a nfs_page 482 * @req: pointer to a nfs_page
483 * @cinfo: holds list lock and accounting info
452 * 484 *
453 * This clears the PG_CLEAN bit, and updates the inode global count of 485 * This clears the PG_CLEAN bit, and updates the cinfo's count of
454 * number of outstanding requests requiring a commit 486 * number of outstanding requests requiring a commit
455 * It does not update the MM page stats. 487 * It does not update the MM page stats.
456 * 488 *
457 * The caller _must_ hold the inode->i_lock and the nfs_page lock. 489 * The caller _must_ hold the cinfo->lock and the nfs_page lock.
458 */ 490 */
459void 491void
460nfs_request_remove_commit_list(struct nfs_page *req) 492nfs_request_remove_commit_list(struct nfs_page *req,
493 struct nfs_commit_info *cinfo)
461{ 494{
462 struct inode *inode = req->wb_context->dentry->d_inode;
463
464 if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags)) 495 if (!test_and_clear_bit(PG_CLEAN, &(req)->wb_flags))
465 return; 496 return;
466 nfs_list_remove_request(req); 497 nfs_list_remove_request(req);
467 NFS_I(inode)->ncommit--; 498 cinfo->mds->ncommit--;
468} 499}
469EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list); 500EXPORT_SYMBOL_GPL(nfs_request_remove_commit_list);
470 501
502static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
503 struct inode *inode)
504{
505 cinfo->lock = &inode->i_lock;
506 cinfo->mds = &NFS_I(inode)->commit_info;
507 cinfo->ds = pnfs_get_ds_info(inode);
508 cinfo->dreq = NULL;
509 cinfo->completion_ops = &nfs_commit_completion_ops;
510}
511
512void nfs_init_cinfo(struct nfs_commit_info *cinfo,
513 struct inode *inode,
514 struct nfs_direct_req *dreq)
515{
516 if (dreq)
517 nfs_init_cinfo_from_dreq(cinfo, dreq);
518 else
519 nfs_init_cinfo_from_inode(cinfo, inode);
520}
521EXPORT_SYMBOL_GPL(nfs_init_cinfo);
471 522
472/* 523/*
473 * Add a request to the inode's commit list. 524 * Add a request to the inode's commit list.
474 */ 525 */
475static void 526void
476nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg) 527nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
528 struct nfs_commit_info *cinfo)
477{ 529{
478 struct inode *inode = req->wb_context->dentry->d_inode; 530 if (pnfs_mark_request_commit(req, lseg, cinfo))
479
480 if (pnfs_mark_request_commit(req, lseg))
481 return; 531 return;
482 nfs_request_add_commit_list(req, &NFS_I(inode)->commit_list); 532 nfs_request_add_commit_list(req, &cinfo->mds->list, cinfo);
483} 533}
484 534
485static void 535static void
@@ -494,11 +544,13 @@ nfs_clear_request_commit(struct nfs_page *req)
494{ 544{
495 if (test_bit(PG_CLEAN, &req->wb_flags)) { 545 if (test_bit(PG_CLEAN, &req->wb_flags)) {
496 struct inode *inode = req->wb_context->dentry->d_inode; 546 struct inode *inode = req->wb_context->dentry->d_inode;
547 struct nfs_commit_info cinfo;
497 548
498 if (!pnfs_clear_request_commit(req)) { 549 nfs_init_cinfo_from_inode(&cinfo, inode);
499 spin_lock(&inode->i_lock); 550 if (!pnfs_clear_request_commit(req, &cinfo)) {
500 nfs_request_remove_commit_list(req); 551 spin_lock(cinfo.lock);
501 spin_unlock(&inode->i_lock); 552 nfs_request_remove_commit_list(req, &cinfo);
553 spin_unlock(cinfo.lock);
502 } 554 }
503 nfs_clear_page_commit(req->wb_page); 555 nfs_clear_page_commit(req->wb_page);
504 } 556 }
@@ -508,28 +560,25 @@ static inline
508int nfs_write_need_commit(struct nfs_write_data *data) 560int nfs_write_need_commit(struct nfs_write_data *data)
509{ 561{
510 if (data->verf.committed == NFS_DATA_SYNC) 562 if (data->verf.committed == NFS_DATA_SYNC)
511 return data->lseg == NULL; 563 return data->header->lseg == NULL;
512 else 564 return data->verf.committed != NFS_FILE_SYNC;
513 return data->verf.committed != NFS_FILE_SYNC;
514} 565}
515 566
516static inline 567#else
517int nfs_reschedule_unstable_write(struct nfs_page *req, 568static void nfs_init_cinfo_from_inode(struct nfs_commit_info *cinfo,
518 struct nfs_write_data *data) 569 struct inode *inode)
519{ 570{
520 if (test_and_clear_bit(PG_NEED_COMMIT, &req->wb_flags)) {
521 nfs_mark_request_commit(req, data->lseg);
522 return 1;
523 }
524 if (test_and_clear_bit(PG_NEED_RESCHED, &req->wb_flags)) {
525 nfs_mark_request_dirty(req);
526 return 1;
527 }
528 return 0;
529} 571}
530#else 572
531static void 573void nfs_init_cinfo(struct nfs_commit_info *cinfo,
532nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg) 574 struct inode *inode,
575 struct nfs_direct_req *dreq)
576{
577}
578
579void
580nfs_mark_request_commit(struct nfs_page *req, struct pnfs_layout_segment *lseg,
581 struct nfs_commit_info *cinfo)
533{ 582{
534} 583}
535 584
@@ -544,25 +593,57 @@ int nfs_write_need_commit(struct nfs_write_data *data)
544 return 0; 593 return 0;
545} 594}
546 595
547static inline 596#endif
548int nfs_reschedule_unstable_write(struct nfs_page *req, 597
549 struct nfs_write_data *data) 598static void nfs_write_completion(struct nfs_pgio_header *hdr)
550{ 599{
551 return 0; 600 struct nfs_commit_info cinfo;
601 unsigned long bytes = 0;
602
603 if (test_bit(NFS_IOHDR_REDO, &hdr->flags))
604 goto out;
605 nfs_init_cinfo_from_inode(&cinfo, hdr->inode);
606 while (!list_empty(&hdr->pages)) {
607 struct nfs_page *req = nfs_list_entry(hdr->pages.next);
608
609 bytes += req->wb_bytes;
610 nfs_list_remove_request(req);
611 if (test_bit(NFS_IOHDR_ERROR, &hdr->flags) &&
612 (hdr->good_bytes < bytes)) {
613 nfs_set_pageerror(req->wb_page);
614 nfs_context_set_write_error(req->wb_context, hdr->error);
615 goto remove_req;
616 }
617 if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags)) {
618 nfs_mark_request_dirty(req);
619 goto next;
620 }
621 if (test_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags)) {
622 nfs_mark_request_commit(req, hdr->lseg, &cinfo);
623 goto next;
624 }
625remove_req:
626 nfs_inode_remove_request(req);
627next:
628 nfs_unlock_request(req);
629 nfs_end_page_writeback(req->wb_page);
630 nfs_release_request(req);
631 }
632out:
633 hdr->release(hdr);
552} 634}
553#endif
554 635
555#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 636#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
556static int 637static unsigned long
557nfs_need_commit(struct nfs_inode *nfsi) 638nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
558{ 639{
559 return nfsi->ncommit > 0; 640 return cinfo->mds->ncommit;
560} 641}
561 642
562/* i_lock held by caller */ 643/* cinfo->lock held by caller */
563static int 644int
564nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max, 645nfs_scan_commit_list(struct list_head *src, struct list_head *dst,
565 spinlock_t *lock) 646 struct nfs_commit_info *cinfo, int max)
566{ 647{
567 struct nfs_page *req, *tmp; 648 struct nfs_page *req, *tmp;
568 int ret = 0; 649 int ret = 0;
@@ -570,12 +651,13 @@ nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max,
570 list_for_each_entry_safe(req, tmp, src, wb_list) { 651 list_for_each_entry_safe(req, tmp, src, wb_list) {
571 if (!nfs_lock_request(req)) 652 if (!nfs_lock_request(req))
572 continue; 653 continue;
573 if (cond_resched_lock(lock)) 654 kref_get(&req->wb_kref);
655 if (cond_resched_lock(cinfo->lock))
574 list_safe_reset_next(req, tmp, wb_list); 656 list_safe_reset_next(req, tmp, wb_list);
575 nfs_request_remove_commit_list(req); 657 nfs_request_remove_commit_list(req, cinfo);
576 nfs_list_add_request(req, dst); 658 nfs_list_add_request(req, dst);
577 ret++; 659 ret++;
578 if (ret == max) 660 if ((ret == max) && !cinfo->dreq)
579 break; 661 break;
580 } 662 }
581 return ret; 663 return ret;
@@ -584,37 +666,38 @@ nfs_scan_commit_list(struct list_head *src, struct list_head *dst, int max,
584/* 666/*
585 * nfs_scan_commit - Scan an inode for commit requests 667 * nfs_scan_commit - Scan an inode for commit requests
586 * @inode: NFS inode to scan 668 * @inode: NFS inode to scan
587 * @dst: destination list 669 * @dst: mds destination list
670 * @cinfo: mds and ds lists of reqs ready to commit
588 * 671 *
589 * Moves requests from the inode's 'commit' request list. 672 * Moves requests from the inode's 'commit' request list.
590 * The requests are *not* checked to ensure that they form a contiguous set. 673 * The requests are *not* checked to ensure that they form a contiguous set.
591 */ 674 */
592static int 675int
593nfs_scan_commit(struct inode *inode, struct list_head *dst) 676nfs_scan_commit(struct inode *inode, struct list_head *dst,
677 struct nfs_commit_info *cinfo)
594{ 678{
595 struct nfs_inode *nfsi = NFS_I(inode);
596 int ret = 0; 679 int ret = 0;
597 680
598 spin_lock(&inode->i_lock); 681 spin_lock(cinfo->lock);
599 if (nfsi->ncommit > 0) { 682 if (cinfo->mds->ncommit > 0) {
600 const int max = INT_MAX; 683 const int max = INT_MAX;
601 684
602 ret = nfs_scan_commit_list(&nfsi->commit_list, dst, max, 685 ret = nfs_scan_commit_list(&cinfo->mds->list, dst,
603 &inode->i_lock); 686 cinfo, max);
604 ret += pnfs_scan_commit_lists(inode, max - ret, 687 ret += pnfs_scan_commit_lists(inode, cinfo, max - ret);
605 &inode->i_lock);
606 } 688 }
607 spin_unlock(&inode->i_lock); 689 spin_unlock(cinfo->lock);
608 return ret; 690 return ret;
609} 691}
610 692
611#else 693#else
612static inline int nfs_need_commit(struct nfs_inode *nfsi) 694static unsigned long nfs_reqs_to_commit(struct nfs_commit_info *cinfo)
613{ 695{
614 return 0; 696 return 0;
615} 697}
616 698
617static inline int nfs_scan_commit(struct inode *inode, struct list_head *dst) 699int nfs_scan_commit(struct inode *inode, struct list_head *dst,
700 struct nfs_commit_info *cinfo)
618{ 701{
619 return 0; 702 return 0;
620} 703}
@@ -659,7 +742,7 @@ static struct nfs_page *nfs_try_to_update_request(struct inode *inode,
659 || end < req->wb_offset) 742 || end < req->wb_offset)
660 goto out_flushme; 743 goto out_flushme;
661 744
662 if (nfs_lock_request_dontget(req)) 745 if (nfs_lock_request(req))
663 break; 746 break;
664 747
665 /* The request is locked, so wait and then retry */ 748 /* The request is locked, so wait and then retry */
@@ -729,7 +812,7 @@ static int nfs_writepage_setup(struct nfs_open_context *ctx, struct page *page,
729 nfs_grow_file(page, offset, count); 812 nfs_grow_file(page, offset, count);
730 nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes); 813 nfs_mark_uptodate(page, req->wb_pgbase, req->wb_bytes);
731 nfs_mark_request_dirty(req); 814 nfs_mark_request_dirty(req);
732 nfs_unlock_request(req); 815 nfs_unlock_and_release_request(req);
733 return 0; 816 return 0;
734} 817}
735 818
@@ -766,10 +849,14 @@ int nfs_flush_incompatible(struct file *file, struct page *page)
766 * the PageUptodate() flag. In this case, we will need to turn off 849 * the PageUptodate() flag. In this case, we will need to turn off
767 * write optimisations that depend on the page contents being correct. 850 * write optimisations that depend on the page contents being correct.
768 */ 851 */
769static int nfs_write_pageuptodate(struct page *page, struct inode *inode) 852static bool nfs_write_pageuptodate(struct page *page, struct inode *inode)
770{ 853{
771 return PageUptodate(page) && 854 if (nfs_have_delegated_attributes(inode))
772 !(NFS_I(inode)->cache_validity & (NFS_INO_REVAL_PAGECACHE|NFS_INO_INVALID_DATA)); 855 goto out;
856 if (NFS_I(inode)->cache_validity & NFS_INO_REVAL_PAGECACHE)
857 return false;
858out:
859 return PageUptodate(page) != 0;
773} 860}
774 861
775/* 862/*
@@ -815,17 +902,6 @@ int nfs_updatepage(struct file *file, struct page *page,
815 return status; 902 return status;
816} 903}
817 904
818static void nfs_writepage_release(struct nfs_page *req,
819 struct nfs_write_data *data)
820{
821 struct page *page = req->wb_page;
822
823 if (PageError(req->wb_page) || !nfs_reschedule_unstable_write(req, data))
824 nfs_inode_remove_request(req);
825 nfs_unlock_request(req);
826 nfs_end_page_writeback(page);
827}
828
829static int flush_task_priority(int how) 905static int flush_task_priority(int how)
830{ 906{
831 switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) { 907 switch (how & (FLUSH_HIGHPRI|FLUSH_LOWPRI)) {
@@ -837,18 +913,18 @@ static int flush_task_priority(int how)
837 return RPC_PRIORITY_NORMAL; 913 return RPC_PRIORITY_NORMAL;
838} 914}
839 915
840int nfs_initiate_write(struct nfs_write_data *data, 916int nfs_initiate_write(struct rpc_clnt *clnt,
841 struct rpc_clnt *clnt, 917 struct nfs_write_data *data,
842 const struct rpc_call_ops *call_ops, 918 const struct rpc_call_ops *call_ops,
843 int how) 919 int how, int flags)
844{ 920{
845 struct inode *inode = data->inode; 921 struct inode *inode = data->header->inode;
846 int priority = flush_task_priority(how); 922 int priority = flush_task_priority(how);
847 struct rpc_task *task; 923 struct rpc_task *task;
848 struct rpc_message msg = { 924 struct rpc_message msg = {
849 .rpc_argp = &data->args, 925 .rpc_argp = &data->args,
850 .rpc_resp = &data->res, 926 .rpc_resp = &data->res,
851 .rpc_cred = data->cred, 927 .rpc_cred = data->header->cred,
852 }; 928 };
853 struct rpc_task_setup task_setup_data = { 929 struct rpc_task_setup task_setup_data = {
854 .rpc_client = clnt, 930 .rpc_client = clnt,
@@ -857,7 +933,7 @@ int nfs_initiate_write(struct nfs_write_data *data,
857 .callback_ops = call_ops, 933 .callback_ops = call_ops,
858 .callback_data = data, 934 .callback_data = data,
859 .workqueue = nfsiod_workqueue, 935 .workqueue = nfsiod_workqueue,
860 .flags = RPC_TASK_ASYNC, 936 .flags = RPC_TASK_ASYNC | flags,
861 .priority = priority, 937 .priority = priority,
862 }; 938 };
863 int ret = 0; 939 int ret = 0;
@@ -892,26 +968,21 @@ EXPORT_SYMBOL_GPL(nfs_initiate_write);
892/* 968/*
893 * Set up the argument/result storage required for the RPC call. 969 * Set up the argument/result storage required for the RPC call.
894 */ 970 */
895static void nfs_write_rpcsetup(struct nfs_page *req, 971static void nfs_write_rpcsetup(struct nfs_write_data *data,
896 struct nfs_write_data *data,
897 unsigned int count, unsigned int offset, 972 unsigned int count, unsigned int offset,
898 int how) 973 int how, struct nfs_commit_info *cinfo)
899{ 974{
900 struct inode *inode = req->wb_context->dentry->d_inode; 975 struct nfs_page *req = data->header->req;
901 976
902 /* Set up the RPC argument and reply structs 977 /* Set up the RPC argument and reply structs
903 * NB: take care not to mess about with data->commit et al. */ 978 * NB: take care not to mess about with data->commit et al. */
904 979
905 data->req = req; 980 data->args.fh = NFS_FH(data->header->inode);
906 data->inode = inode = req->wb_context->dentry->d_inode;
907 data->cred = req->wb_context->cred;
908
909 data->args.fh = NFS_FH(inode);
910 data->args.offset = req_offset(req) + offset; 981 data->args.offset = req_offset(req) + offset;
911 /* pnfs_set_layoutcommit needs this */ 982 /* pnfs_set_layoutcommit needs this */
912 data->mds_offset = data->args.offset; 983 data->mds_offset = data->args.offset;
913 data->args.pgbase = req->wb_pgbase + offset; 984 data->args.pgbase = req->wb_pgbase + offset;
914 data->args.pages = data->pagevec; 985 data->args.pages = data->pages.pagevec;
915 data->args.count = count; 986 data->args.count = count;
916 data->args.context = get_nfs_open_context(req->wb_context); 987 data->args.context = get_nfs_open_context(req->wb_context);
917 data->args.lock_context = req->wb_lock_context; 988 data->args.lock_context = req->wb_lock_context;
@@ -920,7 +991,7 @@ static void nfs_write_rpcsetup(struct nfs_page *req,
920 case 0: 991 case 0:
921 break; 992 break;
922 case FLUSH_COND_STABLE: 993 case FLUSH_COND_STABLE:
923 if (nfs_need_commit(NFS_I(inode))) 994 if (nfs_reqs_to_commit(cinfo))
924 break; 995 break;
925 default: 996 default:
926 data->args.stable = NFS_FILE_SYNC; 997 data->args.stable = NFS_FILE_SYNC;
@@ -936,9 +1007,9 @@ static int nfs_do_write(struct nfs_write_data *data,
936 const struct rpc_call_ops *call_ops, 1007 const struct rpc_call_ops *call_ops,
937 int how) 1008 int how)
938{ 1009{
939 struct inode *inode = data->args.context->dentry->d_inode; 1010 struct inode *inode = data->header->inode;
940 1011
941 return nfs_initiate_write(data, NFS_CLIENT(inode), call_ops, how); 1012 return nfs_initiate_write(NFS_CLIENT(inode), data, call_ops, how, 0);
942} 1013}
943 1014
944static int nfs_do_multiple_writes(struct list_head *head, 1015static int nfs_do_multiple_writes(struct list_head *head,
@@ -951,7 +1022,7 @@ static int nfs_do_multiple_writes(struct list_head *head,
951 while (!list_empty(head)) { 1022 while (!list_empty(head)) {
952 int ret2; 1023 int ret2;
953 1024
954 data = list_entry(head->next, struct nfs_write_data, list); 1025 data = list_first_entry(head, struct nfs_write_data, list);
955 list_del_init(&data->list); 1026 list_del_init(&data->list);
956 1027
957 ret2 = nfs_do_write(data, call_ops, how); 1028 ret2 = nfs_do_write(data, call_ops, how);
@@ -967,31 +1038,60 @@ static int nfs_do_multiple_writes(struct list_head *head,
967 */ 1038 */
968static void nfs_redirty_request(struct nfs_page *req) 1039static void nfs_redirty_request(struct nfs_page *req)
969{ 1040{
970 struct page *page = req->wb_page;
971
972 nfs_mark_request_dirty(req); 1041 nfs_mark_request_dirty(req);
973 nfs_unlock_request(req); 1042 nfs_unlock_request(req);
974 nfs_end_page_writeback(page); 1043 nfs_end_page_writeback(req->wb_page);
1044 nfs_release_request(req);
1045}
1046
1047static void nfs_async_write_error(struct list_head *head)
1048{
1049 struct nfs_page *req;
1050
1051 while (!list_empty(head)) {
1052 req = nfs_list_entry(head->next);
1053 nfs_list_remove_request(req);
1054 nfs_redirty_request(req);
1055 }
1056}
1057
1058static const struct nfs_pgio_completion_ops nfs_async_write_completion_ops = {
1059 .error_cleanup = nfs_async_write_error,
1060 .completion = nfs_write_completion,
1061};
1062
1063static void nfs_flush_error(struct nfs_pageio_descriptor *desc,
1064 struct nfs_pgio_header *hdr)
1065{
1066 set_bit(NFS_IOHDR_REDO, &hdr->flags);
1067 while (!list_empty(&hdr->rpc_list)) {
1068 struct nfs_write_data *data = list_first_entry(&hdr->rpc_list,
1069 struct nfs_write_data, list);
1070 list_del(&data->list);
1071 nfs_writedata_release(data);
1072 }
1073 desc->pg_completion_ops->error_cleanup(&desc->pg_list);
975} 1074}
976 1075
977/* 1076/*
978 * Generate multiple small requests to write out a single 1077 * Generate multiple small requests to write out a single
979 * contiguous dirty area on one page. 1078 * contiguous dirty area on one page.
980 */ 1079 */
981static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head *res) 1080static int nfs_flush_multi(struct nfs_pageio_descriptor *desc,
1081 struct nfs_pgio_header *hdr)
982{ 1082{
983 struct nfs_page *req = nfs_list_entry(desc->pg_list.next); 1083 struct nfs_page *req = hdr->req;
984 struct page *page = req->wb_page; 1084 struct page *page = req->wb_page;
985 struct nfs_write_data *data; 1085 struct nfs_write_data *data;
986 size_t wsize = desc->pg_bsize, nbytes; 1086 size_t wsize = desc->pg_bsize, nbytes;
987 unsigned int offset; 1087 unsigned int offset;
988 int requests = 0; 1088 int requests = 0;
989 int ret = 0; 1089 struct nfs_commit_info cinfo;
990 1090
991 nfs_list_remove_request(req); 1091 nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
992 1092
993 if ((desc->pg_ioflags & FLUSH_COND_STABLE) && 1093 if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
994 (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit || 1094 (desc->pg_moreio || nfs_reqs_to_commit(&cinfo) ||
995 desc->pg_count > wsize)) 1095 desc->pg_count > wsize))
996 desc->pg_ioflags &= ~FLUSH_COND_STABLE; 1096 desc->pg_ioflags &= ~FLUSH_COND_STABLE;
997 1097
@@ -1001,28 +1101,22 @@ static int nfs_flush_multi(struct nfs_pageio_descriptor *desc, struct list_head
1001 do { 1101 do {
1002 size_t len = min(nbytes, wsize); 1102 size_t len = min(nbytes, wsize);
1003 1103
1004 data = nfs_writedata_alloc(1); 1104 data = nfs_writedata_alloc(hdr, 1);
1005 if (!data) 1105 if (!data) {
1006 goto out_bad; 1106 nfs_flush_error(desc, hdr);
1007 data->pagevec[0] = page; 1107 return -ENOMEM;
1008 nfs_write_rpcsetup(req, data, len, offset, desc->pg_ioflags); 1108 }
1009 list_add(&data->list, res); 1109 data->pages.pagevec[0] = page;
1110 nfs_write_rpcsetup(data, len, offset, desc->pg_ioflags, &cinfo);
1111 list_add(&data->list, &hdr->rpc_list);
1010 requests++; 1112 requests++;
1011 nbytes -= len; 1113 nbytes -= len;
1012 offset += len; 1114 offset += len;
1013 } while (nbytes != 0); 1115 } while (nbytes != 0);
1014 atomic_set(&req->wb_complete, requests); 1116 nfs_list_remove_request(req);
1015 desc->pg_rpc_callops = &nfs_write_partial_ops; 1117 nfs_list_add_request(req, &hdr->pages);
1016 return ret; 1118 desc->pg_rpc_callops = &nfs_write_common_ops;
1017 1119 return 0;
1018out_bad:
1019 while (!list_empty(res)) {
1020 data = list_entry(res->next, struct nfs_write_data, list);
1021 list_del(&data->list);
1022 nfs_writedata_release(data);
1023 }
1024 nfs_redirty_request(req);
1025 return -ENOMEM;
1026} 1120}
1027 1121
1028/* 1122/*
@@ -1033,62 +1127,71 @@ out_bad:
1033 * This is the case if nfs_updatepage detects a conflicting request 1127 * This is the case if nfs_updatepage detects a conflicting request
1034 * that has been written but not committed. 1128 * that has been written but not committed.
1035 */ 1129 */
1036static int nfs_flush_one(struct nfs_pageio_descriptor *desc, struct list_head *res) 1130static int nfs_flush_one(struct nfs_pageio_descriptor *desc,
1131 struct nfs_pgio_header *hdr)
1037{ 1132{
1038 struct nfs_page *req; 1133 struct nfs_page *req;
1039 struct page **pages; 1134 struct page **pages;
1040 struct nfs_write_data *data; 1135 struct nfs_write_data *data;
1041 struct list_head *head = &desc->pg_list; 1136 struct list_head *head = &desc->pg_list;
1042 int ret = 0; 1137 struct nfs_commit_info cinfo;
1043 1138
1044 data = nfs_writedata_alloc(nfs_page_array_len(desc->pg_base, 1139 data = nfs_writedata_alloc(hdr, nfs_page_array_len(desc->pg_base,
1045 desc->pg_count)); 1140 desc->pg_count));
1046 if (!data) { 1141 if (!data) {
1047 while (!list_empty(head)) { 1142 nfs_flush_error(desc, hdr);
1048 req = nfs_list_entry(head->next); 1143 return -ENOMEM;
1049 nfs_list_remove_request(req);
1050 nfs_redirty_request(req);
1051 }
1052 ret = -ENOMEM;
1053 goto out;
1054 } 1144 }
1055 pages = data->pagevec; 1145
1146 nfs_init_cinfo(&cinfo, desc->pg_inode, desc->pg_dreq);
1147 pages = data->pages.pagevec;
1056 while (!list_empty(head)) { 1148 while (!list_empty(head)) {
1057 req = nfs_list_entry(head->next); 1149 req = nfs_list_entry(head->next);
1058 nfs_list_remove_request(req); 1150 nfs_list_remove_request(req);
1059 nfs_list_add_request(req, &data->pages); 1151 nfs_list_add_request(req, &hdr->pages);
1060 *pages++ = req->wb_page; 1152 *pages++ = req->wb_page;
1061 } 1153 }
1062 req = nfs_list_entry(data->pages.next);
1063 1154
1064 if ((desc->pg_ioflags & FLUSH_COND_STABLE) && 1155 if ((desc->pg_ioflags & FLUSH_COND_STABLE) &&
1065 (desc->pg_moreio || NFS_I(desc->pg_inode)->ncommit)) 1156 (desc->pg_moreio || nfs_reqs_to_commit(&cinfo)))
1066 desc->pg_ioflags &= ~FLUSH_COND_STABLE; 1157 desc->pg_ioflags &= ~FLUSH_COND_STABLE;
1067 1158
1068 /* Set up the argument struct */ 1159 /* Set up the argument struct */
1069 nfs_write_rpcsetup(req, data, desc->pg_count, 0, desc->pg_ioflags); 1160 nfs_write_rpcsetup(data, desc->pg_count, 0, desc->pg_ioflags, &cinfo);
1070 list_add(&data->list, res); 1161 list_add(&data->list, &hdr->rpc_list);
1071 desc->pg_rpc_callops = &nfs_write_full_ops; 1162 desc->pg_rpc_callops = &nfs_write_common_ops;
1072out: 1163 return 0;
1073 return ret;
1074} 1164}
1075 1165
1076int nfs_generic_flush(struct nfs_pageio_descriptor *desc, struct list_head *head) 1166int nfs_generic_flush(struct nfs_pageio_descriptor *desc,
1167 struct nfs_pgio_header *hdr)
1077{ 1168{
1078 if (desc->pg_bsize < PAGE_CACHE_SIZE) 1169 if (desc->pg_bsize < PAGE_CACHE_SIZE)
1079 return nfs_flush_multi(desc, head); 1170 return nfs_flush_multi(desc, hdr);
1080 return nfs_flush_one(desc, head); 1171 return nfs_flush_one(desc, hdr);
1081} 1172}
1082 1173
1083static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc) 1174static int nfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
1084{ 1175{
1085 LIST_HEAD(head); 1176 struct nfs_write_header *whdr;
1177 struct nfs_pgio_header *hdr;
1086 int ret; 1178 int ret;
1087 1179
1088 ret = nfs_generic_flush(desc, &head); 1180 whdr = nfs_writehdr_alloc();
1181 if (!whdr) {
1182 desc->pg_completion_ops->error_cleanup(&desc->pg_list);
1183 return -ENOMEM;
1184 }
1185 hdr = &whdr->header;
1186 nfs_pgheader_init(desc, hdr, nfs_writehdr_free);
1187 atomic_inc(&hdr->refcnt);
1188 ret = nfs_generic_flush(desc, hdr);
1089 if (ret == 0) 1189 if (ret == 0)
1090 ret = nfs_do_multiple_writes(&head, desc->pg_rpc_callops, 1190 ret = nfs_do_multiple_writes(&hdr->rpc_list,
1091 desc->pg_ioflags); 1191 desc->pg_rpc_callops,
1192 desc->pg_ioflags);
1193 if (atomic_dec_and_test(&hdr->refcnt))
1194 hdr->completion_ops->completion(hdr);
1092 return ret; 1195 return ret;
1093} 1196}
1094 1197
@@ -1098,9 +1201,10 @@ static const struct nfs_pageio_ops nfs_pageio_write_ops = {
1098}; 1201};
1099 1202
1100void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio, 1203void nfs_pageio_init_write_mds(struct nfs_pageio_descriptor *pgio,
1101 struct inode *inode, int ioflags) 1204 struct inode *inode, int ioflags,
1205 const struct nfs_pgio_completion_ops *compl_ops)
1102{ 1206{
1103 nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops, 1207 nfs_pageio_init(pgio, inode, &nfs_pageio_write_ops, compl_ops,
1104 NFS_SERVER(inode)->wsize, ioflags); 1208 NFS_SERVER(inode)->wsize, ioflags);
1105} 1209}
1106 1210
@@ -1111,80 +1215,27 @@ void nfs_pageio_reset_write_mds(struct nfs_pageio_descriptor *pgio)
1111} 1215}
1112EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds); 1216EXPORT_SYMBOL_GPL(nfs_pageio_reset_write_mds);
1113 1217
1114static void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, 1218void nfs_pageio_init_write(struct nfs_pageio_descriptor *pgio,
1115 struct inode *inode, int ioflags) 1219 struct inode *inode, int ioflags,
1220 const struct nfs_pgio_completion_ops *compl_ops)
1116{ 1221{
1117 if (!pnfs_pageio_init_write(pgio, inode, ioflags)) 1222 if (!pnfs_pageio_init_write(pgio, inode, ioflags, compl_ops))
1118 nfs_pageio_init_write_mds(pgio, inode, ioflags); 1223 nfs_pageio_init_write_mds(pgio, inode, ioflags, compl_ops);
1119} 1224}
1120 1225
1121/* 1226void nfs_write_prepare(struct rpc_task *task, void *calldata)
1122 * Handle a write reply that flushed part of a page.
1123 */
1124static void nfs_writeback_done_partial(struct rpc_task *task, void *calldata)
1125{ 1227{
1126 struct nfs_write_data *data = calldata; 1228 struct nfs_write_data *data = calldata;
1127 1229 NFS_PROTO(data->header->inode)->write_rpc_prepare(task, data);
1128 dprintk("NFS: %5u write(%s/%lld %d@%lld)",
1129 task->tk_pid,
1130 data->req->wb_context->dentry->d_inode->i_sb->s_id,
1131 (long long)
1132 NFS_FILEID(data->req->wb_context->dentry->d_inode),
1133 data->req->wb_bytes, (long long)req_offset(data->req));
1134
1135 nfs_writeback_done(task, data);
1136} 1230}
1137 1231
1138static void nfs_writeback_release_partial(void *calldata) 1232void nfs_commit_prepare(struct rpc_task *task, void *calldata)
1139{ 1233{
1140 struct nfs_write_data *data = calldata; 1234 struct nfs_commit_data *data = calldata;
1141 struct nfs_page *req = data->req;
1142 struct page *page = req->wb_page;
1143 int status = data->task.tk_status;
1144 1235
1145 if (status < 0) { 1236 NFS_PROTO(data->inode)->commit_rpc_prepare(task, data);
1146 nfs_set_pageerror(page);
1147 nfs_context_set_write_error(req->wb_context, status);
1148 dprintk(", error = %d\n", status);
1149 goto out;
1150 }
1151
1152 if (nfs_write_need_commit(data)) {
1153 struct inode *inode = page->mapping->host;
1154
1155 spin_lock(&inode->i_lock);
1156 if (test_bit(PG_NEED_RESCHED, &req->wb_flags)) {
1157 /* Do nothing we need to resend the writes */
1158 } else if (!test_and_set_bit(PG_NEED_COMMIT, &req->wb_flags)) {
1159 memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
1160 dprintk(" defer commit\n");
1161 } else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf))) {
1162 set_bit(PG_NEED_RESCHED, &req->wb_flags);
1163 clear_bit(PG_NEED_COMMIT, &req->wb_flags);
1164 dprintk(" server reboot detected\n");
1165 }
1166 spin_unlock(&inode->i_lock);
1167 } else
1168 dprintk(" OK\n");
1169
1170out:
1171 if (atomic_dec_and_test(&req->wb_complete))
1172 nfs_writepage_release(req, data);
1173 nfs_writedata_release(calldata);
1174} 1237}
1175 1238
1176void nfs_write_prepare(struct rpc_task *task, void *calldata)
1177{
1178 struct nfs_write_data *data = calldata;
1179 NFS_PROTO(data->inode)->write_rpc_prepare(task, data);
1180}
1181
1182static const struct rpc_call_ops nfs_write_partial_ops = {
1183 .rpc_call_prepare = nfs_write_prepare,
1184 .rpc_call_done = nfs_writeback_done_partial,
1185 .rpc_release = nfs_writeback_release_partial,
1186};
1187
1188/* 1239/*
1189 * Handle a write reply that flushes a whole page. 1240 * Handle a write reply that flushes a whole page.
1190 * 1241 *
@@ -1192,59 +1243,37 @@ static const struct rpc_call_ops nfs_write_partial_ops = {
1192 * writebacks since the page->count is kept > 1 for as long 1243 * writebacks since the page->count is kept > 1 for as long
1193 * as the page has a write request pending. 1244 * as the page has a write request pending.
1194 */ 1245 */
1195static void nfs_writeback_done_full(struct rpc_task *task, void *calldata) 1246static void nfs_writeback_done_common(struct rpc_task *task, void *calldata)
1196{ 1247{
1197 struct nfs_write_data *data = calldata; 1248 struct nfs_write_data *data = calldata;
1198 1249
1199 nfs_writeback_done(task, data); 1250 nfs_writeback_done(task, data);
1200} 1251}
1201 1252
1202static void nfs_writeback_release_full(void *calldata) 1253static void nfs_writeback_release_common(void *calldata)
1203{ 1254{
1204 struct nfs_write_data *data = calldata; 1255 struct nfs_write_data *data = calldata;
1256 struct nfs_pgio_header *hdr = data->header;
1205 int status = data->task.tk_status; 1257 int status = data->task.tk_status;
1258 struct nfs_page *req = hdr->req;
1206 1259
1207 /* Update attributes as result of writeback. */ 1260 if ((status >= 0) && nfs_write_need_commit(data)) {
1208 while (!list_empty(&data->pages)) { 1261 spin_lock(&hdr->lock);
1209 struct nfs_page *req = nfs_list_entry(data->pages.next); 1262 if (test_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags))
1210 struct page *page = req->wb_page; 1263 ; /* Do nothing */
1211 1264 else if (!test_and_set_bit(NFS_IOHDR_NEED_COMMIT, &hdr->flags))
1212 nfs_list_remove_request(req);
1213
1214 dprintk("NFS: %5u write (%s/%lld %d@%lld)",
1215 data->task.tk_pid,
1216 req->wb_context->dentry->d_inode->i_sb->s_id,
1217 (long long)NFS_FILEID(req->wb_context->dentry->d_inode),
1218 req->wb_bytes,
1219 (long long)req_offset(req));
1220
1221 if (status < 0) {
1222 nfs_set_pageerror(page);
1223 nfs_context_set_write_error(req->wb_context, status);
1224 dprintk(", error = %d\n", status);
1225 goto remove_request;
1226 }
1227
1228 if (nfs_write_need_commit(data)) {
1229 memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf)); 1265 memcpy(&req->wb_verf, &data->verf, sizeof(req->wb_verf));
1230 nfs_mark_request_commit(req, data->lseg); 1266 else if (memcmp(&req->wb_verf, &data->verf, sizeof(req->wb_verf)))
1231 dprintk(" marked for commit\n"); 1267 set_bit(NFS_IOHDR_NEED_RESCHED, &hdr->flags);
1232 goto next; 1268 spin_unlock(&hdr->lock);
1233 }
1234 dprintk(" OK\n");
1235remove_request:
1236 nfs_inode_remove_request(req);
1237 next:
1238 nfs_unlock_request(req);
1239 nfs_end_page_writeback(page);
1240 } 1269 }
1241 nfs_writedata_release(calldata); 1270 nfs_writedata_release(data);
1242} 1271}
1243 1272
1244static const struct rpc_call_ops nfs_write_full_ops = { 1273static const struct rpc_call_ops nfs_write_common_ops = {
1245 .rpc_call_prepare = nfs_write_prepare, 1274 .rpc_call_prepare = nfs_write_prepare,
1246 .rpc_call_done = nfs_writeback_done_full, 1275 .rpc_call_done = nfs_writeback_done_common,
1247 .rpc_release = nfs_writeback_release_full, 1276 .rpc_release = nfs_writeback_release_common,
1248}; 1277};
1249 1278
1250 1279
@@ -1255,6 +1284,7 @@ void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
1255{ 1284{
1256 struct nfs_writeargs *argp = &data->args; 1285 struct nfs_writeargs *argp = &data->args;
1257 struct nfs_writeres *resp = &data->res; 1286 struct nfs_writeres *resp = &data->res;
1287 struct inode *inode = data->header->inode;
1258 int status; 1288 int status;
1259 1289
1260 dprintk("NFS: %5u nfs_writeback_done (status %d)\n", 1290 dprintk("NFS: %5u nfs_writeback_done (status %d)\n",
@@ -1267,10 +1297,10 @@ void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
1267 * another writer had changed the file, but some applications 1297 * another writer had changed the file, but some applications
1268 * depend on tighter cache coherency when writing. 1298 * depend on tighter cache coherency when writing.
1269 */ 1299 */
1270 status = NFS_PROTO(data->inode)->write_done(task, data); 1300 status = NFS_PROTO(inode)->write_done(task, data);
1271 if (status != 0) 1301 if (status != 0)
1272 return; 1302 return;
1273 nfs_add_stats(data->inode, NFSIOS_SERVERWRITTENBYTES, resp->count); 1303 nfs_add_stats(inode, NFSIOS_SERVERWRITTENBYTES, resp->count);
1274 1304
1275#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 1305#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
1276 if (resp->verf->committed < argp->stable && task->tk_status >= 0) { 1306 if (resp->verf->committed < argp->stable && task->tk_status >= 0) {
@@ -1288,46 +1318,47 @@ void nfs_writeback_done(struct rpc_task *task, struct nfs_write_data *data)
1288 if (time_before(complain, jiffies)) { 1318 if (time_before(complain, jiffies)) {
1289 dprintk("NFS: faulty NFS server %s:" 1319 dprintk("NFS: faulty NFS server %s:"
1290 " (committed = %d) != (stable = %d)\n", 1320 " (committed = %d) != (stable = %d)\n",
1291 NFS_SERVER(data->inode)->nfs_client->cl_hostname, 1321 NFS_SERVER(inode)->nfs_client->cl_hostname,
1292 resp->verf->committed, argp->stable); 1322 resp->verf->committed, argp->stable);
1293 complain = jiffies + 300 * HZ; 1323 complain = jiffies + 300 * HZ;
1294 } 1324 }
1295 } 1325 }
1296#endif 1326#endif
1297 /* Is this a short write? */ 1327 if (task->tk_status < 0)
1298 if (task->tk_status >= 0 && resp->count < argp->count) { 1328 nfs_set_pgio_error(data->header, task->tk_status, argp->offset);
1329 else if (resp->count < argp->count) {
1299 static unsigned long complain; 1330 static unsigned long complain;
1300 1331
1301 nfs_inc_stats(data->inode, NFSIOS_SHORTWRITE); 1332 /* This a short write! */
1333 nfs_inc_stats(inode, NFSIOS_SHORTWRITE);
1302 1334
1303 /* Has the server at least made some progress? */ 1335 /* Has the server at least made some progress? */
1304 if (resp->count != 0) { 1336 if (resp->count == 0) {
1305 /* Was this an NFSv2 write or an NFSv3 stable write? */ 1337 if (time_before(complain, jiffies)) {
1306 if (resp->verf->committed != NFS_UNSTABLE) { 1338 printk(KERN_WARNING
1307 /* Resend from where the server left off */ 1339 "NFS: Server wrote zero bytes, expected %u.\n",
1308 data->mds_offset += resp->count; 1340 argp->count);
1309 argp->offset += resp->count; 1341 complain = jiffies + 300 * HZ;
1310 argp->pgbase += resp->count;
1311 argp->count -= resp->count;
1312 } else {
1313 /* Resend as a stable write in order to avoid
1314 * headaches in the case of a server crash.
1315 */
1316 argp->stable = NFS_FILE_SYNC;
1317 } 1342 }
1318 rpc_restart_call_prepare(task); 1343 nfs_set_pgio_error(data->header, -EIO, argp->offset);
1344 task->tk_status = -EIO;
1319 return; 1345 return;
1320 } 1346 }
1321 if (time_before(complain, jiffies)) { 1347 /* Was this an NFSv2 write or an NFSv3 stable write? */
1322 printk(KERN_WARNING 1348 if (resp->verf->committed != NFS_UNSTABLE) {
1323 "NFS: Server wrote zero bytes, expected %u.\n", 1349 /* Resend from where the server left off */
1324 argp->count); 1350 data->mds_offset += resp->count;
1325 complain = jiffies + 300 * HZ; 1351 argp->offset += resp->count;
1352 argp->pgbase += resp->count;
1353 argp->count -= resp->count;
1354 } else {
1355 /* Resend as a stable write in order to avoid
1356 * headaches in the case of a server crash.
1357 */
1358 argp->stable = NFS_FILE_SYNC;
1326 } 1359 }
1327 /* Can't do anything about it except throw an error. */ 1360 rpc_restart_call_prepare(task);
1328 task->tk_status = -EIO;
1329 } 1361 }
1330 return;
1331} 1362}
1332 1363
1333 1364
@@ -1347,26 +1378,23 @@ static int nfs_commit_set_lock(struct nfs_inode *nfsi, int may_wait)
1347 return (ret < 0) ? ret : 1; 1378 return (ret < 0) ? ret : 1;
1348} 1379}
1349 1380
1350void nfs_commit_clear_lock(struct nfs_inode *nfsi) 1381static void nfs_commit_clear_lock(struct nfs_inode *nfsi)
1351{ 1382{
1352 clear_bit(NFS_INO_COMMIT, &nfsi->flags); 1383 clear_bit(NFS_INO_COMMIT, &nfsi->flags);
1353 smp_mb__after_clear_bit(); 1384 smp_mb__after_clear_bit();
1354 wake_up_bit(&nfsi->flags, NFS_INO_COMMIT); 1385 wake_up_bit(&nfsi->flags, NFS_INO_COMMIT);
1355} 1386}
1356EXPORT_SYMBOL_GPL(nfs_commit_clear_lock);
1357 1387
1358void nfs_commitdata_release(void *data) 1388void nfs_commitdata_release(struct nfs_commit_data *data)
1359{ 1389{
1360 struct nfs_write_data *wdata = data; 1390 put_nfs_open_context(data->context);
1361 1391 nfs_commit_free(data);
1362 put_nfs_open_context(wdata->args.context);
1363 nfs_commit_free(wdata);
1364} 1392}
1365EXPORT_SYMBOL_GPL(nfs_commitdata_release); 1393EXPORT_SYMBOL_GPL(nfs_commitdata_release);
1366 1394
1367int nfs_initiate_commit(struct nfs_write_data *data, struct rpc_clnt *clnt, 1395int nfs_initiate_commit(struct rpc_clnt *clnt, struct nfs_commit_data *data,
1368 const struct rpc_call_ops *call_ops, 1396 const struct rpc_call_ops *call_ops,
1369 int how) 1397 int how, int flags)
1370{ 1398{
1371 struct rpc_task *task; 1399 struct rpc_task *task;
1372 int priority = flush_task_priority(how); 1400 int priority = flush_task_priority(how);
@@ -1382,7 +1410,7 @@ int nfs_initiate_commit(struct nfs_write_data *data, struct rpc_clnt *clnt,
1382 .callback_ops = call_ops, 1410 .callback_ops = call_ops,
1383 .callback_data = data, 1411 .callback_data = data,
1384 .workqueue = nfsiod_workqueue, 1412 .workqueue = nfsiod_workqueue,
1385 .flags = RPC_TASK_ASYNC, 1413 .flags = RPC_TASK_ASYNC | flags,
1386 .priority = priority, 1414 .priority = priority,
1387 }; 1415 };
1388 /* Set up the initial task struct. */ 1416 /* Set up the initial task struct. */
@@ -1403,9 +1431,10 @@ EXPORT_SYMBOL_GPL(nfs_initiate_commit);
1403/* 1431/*
1404 * Set up the argument/result storage required for the RPC call. 1432 * Set up the argument/result storage required for the RPC call.
1405 */ 1433 */
1406void nfs_init_commit(struct nfs_write_data *data, 1434void nfs_init_commit(struct nfs_commit_data *data,
1407 struct list_head *head, 1435 struct list_head *head,
1408 struct pnfs_layout_segment *lseg) 1436 struct pnfs_layout_segment *lseg,
1437 struct nfs_commit_info *cinfo)
1409{ 1438{
1410 struct nfs_page *first = nfs_list_entry(head->next); 1439 struct nfs_page *first = nfs_list_entry(head->next);
1411 struct inode *inode = first->wb_context->dentry->d_inode; 1440 struct inode *inode = first->wb_context->dentry->d_inode;
@@ -1419,13 +1448,14 @@ void nfs_init_commit(struct nfs_write_data *data,
1419 data->cred = first->wb_context->cred; 1448 data->cred = first->wb_context->cred;
1420 data->lseg = lseg; /* reference transferred */ 1449 data->lseg = lseg; /* reference transferred */
1421 data->mds_ops = &nfs_commit_ops; 1450 data->mds_ops = &nfs_commit_ops;
1451 data->completion_ops = cinfo->completion_ops;
1452 data->dreq = cinfo->dreq;
1422 1453
1423 data->args.fh = NFS_FH(data->inode); 1454 data->args.fh = NFS_FH(data->inode);
1424 /* Note: we always request a commit of the entire inode */ 1455 /* Note: we always request a commit of the entire inode */
1425 data->args.offset = 0; 1456 data->args.offset = 0;
1426 data->args.count = 0; 1457 data->args.count = 0;
1427 data->args.context = get_nfs_open_context(first->wb_context); 1458 data->context = get_nfs_open_context(first->wb_context);
1428 data->res.count = 0;
1429 data->res.fattr = &data->fattr; 1459 data->res.fattr = &data->fattr;
1430 data->res.verf = &data->verf; 1460 data->res.verf = &data->verf;
1431 nfs_fattr_init(&data->fattr); 1461 nfs_fattr_init(&data->fattr);
@@ -1433,18 +1463,21 @@ void nfs_init_commit(struct nfs_write_data *data,
1433EXPORT_SYMBOL_GPL(nfs_init_commit); 1463EXPORT_SYMBOL_GPL(nfs_init_commit);
1434 1464
1435void nfs_retry_commit(struct list_head *page_list, 1465void nfs_retry_commit(struct list_head *page_list,
1436 struct pnfs_layout_segment *lseg) 1466 struct pnfs_layout_segment *lseg,
1467 struct nfs_commit_info *cinfo)
1437{ 1468{
1438 struct nfs_page *req; 1469 struct nfs_page *req;
1439 1470
1440 while (!list_empty(page_list)) { 1471 while (!list_empty(page_list)) {
1441 req = nfs_list_entry(page_list->next); 1472 req = nfs_list_entry(page_list->next);
1442 nfs_list_remove_request(req); 1473 nfs_list_remove_request(req);
1443 nfs_mark_request_commit(req, lseg); 1474 nfs_mark_request_commit(req, lseg, cinfo);
1444 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS); 1475 if (!cinfo->dreq) {
1445 dec_bdi_stat(req->wb_page->mapping->backing_dev_info, 1476 dec_zone_page_state(req->wb_page, NR_UNSTABLE_NFS);
1446 BDI_RECLAIMABLE); 1477 dec_bdi_stat(req->wb_page->mapping->backing_dev_info,
1447 nfs_unlock_request(req); 1478 BDI_RECLAIMABLE);
1479 }
1480 nfs_unlock_and_release_request(req);
1448 } 1481 }
1449} 1482}
1450EXPORT_SYMBOL_GPL(nfs_retry_commit); 1483EXPORT_SYMBOL_GPL(nfs_retry_commit);
@@ -1453,9 +1486,10 @@ EXPORT_SYMBOL_GPL(nfs_retry_commit);
1453 * Commit dirty pages 1486 * Commit dirty pages
1454 */ 1487 */
1455static int 1488static int
1456nfs_commit_list(struct inode *inode, struct list_head *head, int how) 1489nfs_commit_list(struct inode *inode, struct list_head *head, int how,
1490 struct nfs_commit_info *cinfo)
1457{ 1491{
1458 struct nfs_write_data *data; 1492 struct nfs_commit_data *data;
1459 1493
1460 data = nfs_commitdata_alloc(); 1494 data = nfs_commitdata_alloc();
1461 1495
@@ -1463,11 +1497,13 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
1463 goto out_bad; 1497 goto out_bad;
1464 1498
1465 /* Set up the argument struct */ 1499 /* Set up the argument struct */
1466 nfs_init_commit(data, head, NULL); 1500 nfs_init_commit(data, head, NULL, cinfo);
1467 return nfs_initiate_commit(data, NFS_CLIENT(inode), data->mds_ops, how); 1501 atomic_inc(&cinfo->mds->rpcs_out);
1502 return nfs_initiate_commit(NFS_CLIENT(inode), data, data->mds_ops,
1503 how, 0);
1468 out_bad: 1504 out_bad:
1469 nfs_retry_commit(head, NULL); 1505 nfs_retry_commit(head, NULL, cinfo);
1470 nfs_commit_clear_lock(NFS_I(inode)); 1506 cinfo->completion_ops->error_cleanup(NFS_I(inode));
1471 return -ENOMEM; 1507 return -ENOMEM;
1472} 1508}
1473 1509
@@ -1476,7 +1512,7 @@ nfs_commit_list(struct inode *inode, struct list_head *head, int how)
1476 */ 1512 */
1477static void nfs_commit_done(struct rpc_task *task, void *calldata) 1513static void nfs_commit_done(struct rpc_task *task, void *calldata)
1478{ 1514{
1479 struct nfs_write_data *data = calldata; 1515 struct nfs_commit_data *data = calldata;
1480 1516
1481 dprintk("NFS: %5u nfs_commit_done (status %d)\n", 1517 dprintk("NFS: %5u nfs_commit_done (status %d)\n",
1482 task->tk_pid, task->tk_status); 1518 task->tk_pid, task->tk_status);
@@ -1485,10 +1521,11 @@ static void nfs_commit_done(struct rpc_task *task, void *calldata)
1485 NFS_PROTO(data->inode)->commit_done(task, data); 1521 NFS_PROTO(data->inode)->commit_done(task, data);
1486} 1522}
1487 1523
1488void nfs_commit_release_pages(struct nfs_write_data *data) 1524static void nfs_commit_release_pages(struct nfs_commit_data *data)
1489{ 1525{
1490 struct nfs_page *req; 1526 struct nfs_page *req;
1491 int status = data->task.tk_status; 1527 int status = data->task.tk_status;
1528 struct nfs_commit_info cinfo;
1492 1529
1493 while (!list_empty(&data->pages)) { 1530 while (!list_empty(&data->pages)) {
1494 req = nfs_list_entry(data->pages.next); 1531 req = nfs_list_entry(data->pages.next);
@@ -1519,42 +1556,59 @@ void nfs_commit_release_pages(struct nfs_write_data *data)
1519 dprintk(" mismatch\n"); 1556 dprintk(" mismatch\n");
1520 nfs_mark_request_dirty(req); 1557 nfs_mark_request_dirty(req);
1521 next: 1558 next:
1522 nfs_unlock_request(req); 1559 nfs_unlock_and_release_request(req);
1523 } 1560 }
1561 nfs_init_cinfo(&cinfo, data->inode, data->dreq);
1562 if (atomic_dec_and_test(&cinfo.mds->rpcs_out))
1563 nfs_commit_clear_lock(NFS_I(data->inode));
1524} 1564}
1525EXPORT_SYMBOL_GPL(nfs_commit_release_pages);
1526 1565
1527static void nfs_commit_release(void *calldata) 1566static void nfs_commit_release(void *calldata)
1528{ 1567{
1529 struct nfs_write_data *data = calldata; 1568 struct nfs_commit_data *data = calldata;
1530 1569
1531 nfs_commit_release_pages(data); 1570 data->completion_ops->completion(data);
1532 nfs_commit_clear_lock(NFS_I(data->inode));
1533 nfs_commitdata_release(calldata); 1571 nfs_commitdata_release(calldata);
1534} 1572}
1535 1573
1536static const struct rpc_call_ops nfs_commit_ops = { 1574static const struct rpc_call_ops nfs_commit_ops = {
1537 .rpc_call_prepare = nfs_write_prepare, 1575 .rpc_call_prepare = nfs_commit_prepare,
1538 .rpc_call_done = nfs_commit_done, 1576 .rpc_call_done = nfs_commit_done,
1539 .rpc_release = nfs_commit_release, 1577 .rpc_release = nfs_commit_release,
1540}; 1578};
1541 1579
1580static const struct nfs_commit_completion_ops nfs_commit_completion_ops = {
1581 .completion = nfs_commit_release_pages,
1582 .error_cleanup = nfs_commit_clear_lock,
1583};
1584
1585int nfs_generic_commit_list(struct inode *inode, struct list_head *head,
1586 int how, struct nfs_commit_info *cinfo)
1587{
1588 int status;
1589
1590 status = pnfs_commit_list(inode, head, how, cinfo);
1591 if (status == PNFS_NOT_ATTEMPTED)
1592 status = nfs_commit_list(inode, head, how, cinfo);
1593 return status;
1594}
1595
1542int nfs_commit_inode(struct inode *inode, int how) 1596int nfs_commit_inode(struct inode *inode, int how)
1543{ 1597{
1544 LIST_HEAD(head); 1598 LIST_HEAD(head);
1599 struct nfs_commit_info cinfo;
1545 int may_wait = how & FLUSH_SYNC; 1600 int may_wait = how & FLUSH_SYNC;
1546 int res; 1601 int res;
1547 1602
1548 res = nfs_commit_set_lock(NFS_I(inode), may_wait); 1603 res = nfs_commit_set_lock(NFS_I(inode), may_wait);
1549 if (res <= 0) 1604 if (res <= 0)
1550 goto out_mark_dirty; 1605 goto out_mark_dirty;
1551 res = nfs_scan_commit(inode, &head); 1606 nfs_init_cinfo_from_inode(&cinfo, inode);
1607 res = nfs_scan_commit(inode, &head, &cinfo);
1552 if (res) { 1608 if (res) {
1553 int error; 1609 int error;
1554 1610
1555 error = pnfs_commit_list(inode, &head, how); 1611 error = nfs_generic_commit_list(inode, &head, how, &cinfo);
1556 if (error == PNFS_NOT_ATTEMPTED)
1557 error = nfs_commit_list(inode, &head, how);
1558 if (error < 0) 1612 if (error < 0)
1559 return error; 1613 return error;
1560 if (!may_wait) 1614 if (!may_wait)
@@ -1585,14 +1639,14 @@ static int nfs_commit_unstable_pages(struct inode *inode, struct writeback_contr
1585 int ret = 0; 1639 int ret = 0;
1586 1640
1587 /* no commits means nothing needs to be done */ 1641 /* no commits means nothing needs to be done */
1588 if (!nfsi->ncommit) 1642 if (!nfsi->commit_info.ncommit)
1589 return ret; 1643 return ret;
1590 1644
1591 if (wbc->sync_mode == WB_SYNC_NONE) { 1645 if (wbc->sync_mode == WB_SYNC_NONE) {
1592 /* Don't commit yet if this is a non-blocking flush and there 1646 /* Don't commit yet if this is a non-blocking flush and there
1593 * are a lot of outstanding writes for this mapping. 1647 * are a lot of outstanding writes for this mapping.
1594 */ 1648 */
1595 if (nfsi->ncommit <= (nfsi->npages >> 1)) 1649 if (nfsi->commit_info.ncommit <= (nfsi->npages >> 1))
1596 goto out_mark_dirty; 1650 goto out_mark_dirty;
1597 1651
1598 /* don't wait for the COMMIT response */ 1652 /* don't wait for the COMMIT response */
@@ -1665,7 +1719,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
1665 req = nfs_page_find_request(page); 1719 req = nfs_page_find_request(page);
1666 if (req == NULL) 1720 if (req == NULL)
1667 break; 1721 break;
1668 if (nfs_lock_request_dontget(req)) { 1722 if (nfs_lock_request(req)) {
1669 nfs_clear_request_commit(req); 1723 nfs_clear_request_commit(req);
1670 nfs_inode_remove_request(req); 1724 nfs_inode_remove_request(req);
1671 /* 1725 /*
@@ -1673,7 +1727,7 @@ int nfs_wb_page_cancel(struct inode *inode, struct page *page)
1673 * page as being dirty 1727 * page as being dirty
1674 */ 1728 */
1675 cancel_dirty_page(page, PAGE_CACHE_SIZE); 1729 cancel_dirty_page(page, PAGE_CACHE_SIZE);
1676 nfs_unlock_request(req); 1730 nfs_unlock_and_release_request(req);
1677 break; 1731 break;
1678 } 1732 }
1679 ret = nfs_wait_on_request(req); 1733 ret = nfs_wait_on_request(req);
@@ -1742,7 +1796,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
1742int __init nfs_init_writepagecache(void) 1796int __init nfs_init_writepagecache(void)
1743{ 1797{
1744 nfs_wdata_cachep = kmem_cache_create("nfs_write_data", 1798 nfs_wdata_cachep = kmem_cache_create("nfs_write_data",
1745 sizeof(struct nfs_write_data), 1799 sizeof(struct nfs_write_header),
1746 0, SLAB_HWCACHE_ALIGN, 1800 0, SLAB_HWCACHE_ALIGN,
1747 NULL); 1801 NULL);
1748 if (nfs_wdata_cachep == NULL) 1802 if (nfs_wdata_cachep == NULL)
@@ -1753,6 +1807,13 @@ int __init nfs_init_writepagecache(void)
1753 if (nfs_wdata_mempool == NULL) 1807 if (nfs_wdata_mempool == NULL)
1754 return -ENOMEM; 1808 return -ENOMEM;
1755 1809
1810 nfs_cdata_cachep = kmem_cache_create("nfs_commit_data",
1811 sizeof(struct nfs_commit_data),
1812 0, SLAB_HWCACHE_ALIGN,
1813 NULL);
1814 if (nfs_cdata_cachep == NULL)
1815 return -ENOMEM;
1816
1756 nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT, 1817 nfs_commit_mempool = mempool_create_slab_pool(MIN_POOL_COMMIT,
1757 nfs_wdata_cachep); 1818 nfs_wdata_cachep);
1758 if (nfs_commit_mempool == NULL) 1819 if (nfs_commit_mempool == NULL)
diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c
index 8f7b95ac1f7e..7cc64465ec26 100644
--- a/fs/nilfs2/inode.c
+++ b/fs/nilfs2/inode.c
@@ -734,7 +734,7 @@ void nilfs_evict_inode(struct inode *inode)
734 if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) { 734 if (inode->i_nlink || !ii->i_root || unlikely(is_bad_inode(inode))) {
735 if (inode->i_data.nrpages) 735 if (inode->i_data.nrpages)
736 truncate_inode_pages(&inode->i_data, 0); 736 truncate_inode_pages(&inode->i_data, 0);
737 end_writeback(inode); 737 clear_inode(inode);
738 nilfs_clear_inode(inode); 738 nilfs_clear_inode(inode);
739 return; 739 return;
740 } 740 }
@@ -746,7 +746,7 @@ void nilfs_evict_inode(struct inode *inode)
746 /* TODO: some of the following operations may fail. */ 746 /* TODO: some of the following operations may fail. */
747 nilfs_truncate_bmap(ii, 0); 747 nilfs_truncate_bmap(ii, 0);
748 nilfs_mark_inode_dirty(inode); 748 nilfs_mark_inode_dirty(inode);
749 end_writeback(inode); 749 clear_inode(inode);
750 750
751 ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino); 751 ret = nilfs_ifile_delete_inode(ii->i_root->ifile, inode->i_ino);
752 if (!ret) 752 if (!ret)
diff --git a/fs/ntfs/inode.c b/fs/ntfs/inode.c
index 2eaa66652944..c6dbd3db6ca8 100644
--- a/fs/ntfs/inode.c
+++ b/fs/ntfs/inode.c
@@ -2258,7 +2258,7 @@ void ntfs_evict_big_inode(struct inode *vi)
2258 ntfs_inode *ni = NTFS_I(vi); 2258 ntfs_inode *ni = NTFS_I(vi);
2259 2259
2260 truncate_inode_pages(&vi->i_data, 0); 2260 truncate_inode_pages(&vi->i_data, 0);
2261 end_writeback(vi); 2261 clear_inode(vi);
2262 2262
2263#ifdef NTFS_RW 2263#ifdef NTFS_RW
2264 if (NInoDirty(ni)) { 2264 if (NInoDirty(ni)) {
diff --git a/fs/ocfs2/dlmfs/dlmfs.c b/fs/ocfs2/dlmfs/dlmfs.c
index 3b5825ef3193..e31d6ae013ab 100644
--- a/fs/ocfs2/dlmfs/dlmfs.c
+++ b/fs/ocfs2/dlmfs/dlmfs.c
@@ -367,7 +367,7 @@ static void dlmfs_evict_inode(struct inode *inode)
367 int status; 367 int status;
368 struct dlmfs_inode_private *ip; 368 struct dlmfs_inode_private *ip;
369 369
370 end_writeback(inode); 370 clear_inode(inode);
371 371
372 mlog(0, "inode %lu\n", inode->i_ino); 372 mlog(0, "inode %lu\n", inode->i_ino);
373 373
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 17454a904d7b..735514ca400f 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -1069,7 +1069,7 @@ static void ocfs2_clear_inode(struct inode *inode)
1069 int status; 1069 int status;
1070 struct ocfs2_inode_info *oi = OCFS2_I(inode); 1070 struct ocfs2_inode_info *oi = OCFS2_I(inode);
1071 1071
1072 end_writeback(inode); 1072 clear_inode(inode);
1073 trace_ocfs2_clear_inode((unsigned long long)oi->ip_blkno, 1073 trace_ocfs2_clear_inode((unsigned long long)oi->ip_blkno,
1074 inode->i_nlink); 1074 inode->i_nlink);
1075 1075
diff --git a/fs/omfs/inode.c b/fs/omfs/inode.c
index dbc842222589..e6213b3725d1 100644
--- a/fs/omfs/inode.c
+++ b/fs/omfs/inode.c
@@ -184,7 +184,7 @@ int omfs_sync_inode(struct inode *inode)
184static void omfs_evict_inode(struct inode *inode) 184static void omfs_evict_inode(struct inode *inode)
185{ 185{
186 truncate_inode_pages(&inode->i_data, 0); 186 truncate_inode_pages(&inode->i_data, 0);
187 end_writeback(inode); 187 clear_inode(inode);
188 188
189 if (inode->i_nlink) 189 if (inode->i_nlink)
190 return; 190 return;
diff --git a/fs/proc/base.c b/fs/proc/base.c
index d2d3108a611c..d7d711876b6a 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -411,12 +411,13 @@ static const struct file_operations proc_lstats_operations = {
411 411
412static int proc_oom_score(struct task_struct *task, char *buffer) 412static int proc_oom_score(struct task_struct *task, char *buffer)
413{ 413{
414 unsigned long totalpages = totalram_pages + total_swap_pages;
414 unsigned long points = 0; 415 unsigned long points = 0;
415 416
416 read_lock(&tasklist_lock); 417 read_lock(&tasklist_lock);
417 if (pid_alive(task)) 418 if (pid_alive(task))
418 points = oom_badness(task, NULL, NULL, 419 points = oom_badness(task, NULL, NULL, totalpages) *
419 totalram_pages + total_swap_pages); 420 1000 / totalpages;
420 read_unlock(&tasklist_lock); 421 read_unlock(&tasklist_lock);
421 return sprintf(buffer, "%lu\n", points); 422 return sprintf(buffer, "%lu\n", points);
422} 423}
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 554ecc54799f..7ac817b64a71 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -33,7 +33,7 @@ static void proc_evict_inode(struct inode *inode)
33 const struct proc_ns_operations *ns_ops; 33 const struct proc_ns_operations *ns_ops;
34 34
35 truncate_inode_pages(&inode->i_data, 0); 35 truncate_inode_pages(&inode->i_data, 0);
36 end_writeback(inode); 36 clear_inode(inode);
37 37
38 /* Stop tracking associated processes */ 38 /* Stop tracking associated processes */
39 put_pid(PROC_I(inode)->pid); 39 put_pid(PROC_I(inode)->pid);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 1030a716d155..7faaf2acc570 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -784,7 +784,7 @@ static int pagemap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
784 784
785 /* find the first VMA at or above 'addr' */ 785 /* find the first VMA at or above 'addr' */
786 vma = find_vma(walk->mm, addr); 786 vma = find_vma(walk->mm, addr);
787 if (pmd_trans_huge_lock(pmd, vma) == 1) { 787 if (vma && pmd_trans_huge_lock(pmd, vma) == 1) {
788 for (; addr != end; addr += PAGE_SIZE) { 788 for (; addr != end; addr += PAGE_SIZE) {
789 unsigned long offset; 789 unsigned long offset;
790 790
diff --git a/fs/pstore/inode.c b/fs/pstore/inode.c
index 19507889bb7f..aeb19e68e086 100644
--- a/fs/pstore/inode.c
+++ b/fs/pstore/inode.c
@@ -85,7 +85,7 @@ static void pstore_evict_inode(struct inode *inode)
85 struct pstore_private *p = inode->i_private; 85 struct pstore_private *p = inode->i_private;
86 unsigned long flags; 86 unsigned long flags;
87 87
88 end_writeback(inode); 88 clear_inode(inode);
89 if (p) { 89 if (p) {
90 spin_lock_irqsave(&allpstore_lock, flags); 90 spin_lock_irqsave(&allpstore_lock, flags);
91 list_del(&p->list); 91 list_del(&p->list);
diff --git a/fs/quota/dquot.c b/fs/quota/dquot.c
index d69a1d1d7e15..10cbe841cb7e 100644
--- a/fs/quota/dquot.c
+++ b/fs/quota/dquot.c
@@ -116,15 +116,15 @@
116 * spinlock to internal buffers before writing. 116 * spinlock to internal buffers before writing.
117 * 117 *
118 * Lock ordering (including related VFS locks) is the following: 118 * Lock ordering (including related VFS locks) is the following:
119 * i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock > 119 * dqonoff_mutex > i_mutex > journal_lock > dqptr_sem > dquot->dq_lock >
120 * dqio_mutex 120 * dqio_mutex
121 * dqonoff_mutex > i_mutex comes from dquot_quota_sync, dquot_enable, etc.
121 * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem > 122 * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
122 * dqptr_sem. But filesystem has to count with the fact that functions such as 123 * dqptr_sem. But filesystem has to count with the fact that functions such as
123 * dquot_alloc_space() acquire dqptr_sem and they usually have to be called 124 * dquot_alloc_space() acquire dqptr_sem and they usually have to be called
124 * from inside a transaction to keep filesystem consistency after a crash. Also 125 * from inside a transaction to keep filesystem consistency after a crash. Also
125 * filesystems usually want to do some IO on dquot from ->mark_dirty which is 126 * filesystems usually want to do some IO on dquot from ->mark_dirty which is
126 * called with dqptr_sem held. 127 * called with dqptr_sem held.
127 * i_mutex on quota files is special (it's below dqio_mutex)
128 */ 128 */
129 129
130static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock); 130static __cacheline_aligned_in_smp DEFINE_SPINLOCK(dq_list_lock);
@@ -638,7 +638,7 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
638 dqstats_inc(DQST_SYNCS); 638 dqstats_inc(DQST_SYNCS);
639 mutex_unlock(&dqopt->dqonoff_mutex); 639 mutex_unlock(&dqopt->dqonoff_mutex);
640 640
641 if (!wait || (sb_dqopt(sb)->flags & DQUOT_QUOTA_SYS_FILE)) 641 if (!wait || (dqopt->flags & DQUOT_QUOTA_SYS_FILE))
642 return 0; 642 return 0;
643 643
644 /* This is not very clever (and fast) but currently I don't know about 644 /* This is not very clever (and fast) but currently I don't know about
@@ -652,18 +652,17 @@ int dquot_quota_sync(struct super_block *sb, int type, int wait)
652 * Now when everything is written we can discard the pagecache so 652 * Now when everything is written we can discard the pagecache so
653 * that userspace sees the changes. 653 * that userspace sees the changes.
654 */ 654 */
655 mutex_lock(&sb_dqopt(sb)->dqonoff_mutex); 655 mutex_lock(&dqopt->dqonoff_mutex);
656 for (cnt = 0; cnt < MAXQUOTAS; cnt++) { 656 for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
657 if (type != -1 && cnt != type) 657 if (type != -1 && cnt != type)
658 continue; 658 continue;
659 if (!sb_has_quota_active(sb, cnt)) 659 if (!sb_has_quota_active(sb, cnt))
660 continue; 660 continue;
661 mutex_lock_nested(&sb_dqopt(sb)->files[cnt]->i_mutex, 661 mutex_lock(&dqopt->files[cnt]->i_mutex);
662 I_MUTEX_QUOTA); 662 truncate_inode_pages(&dqopt->files[cnt]->i_data, 0);
663 truncate_inode_pages(&sb_dqopt(sb)->files[cnt]->i_data, 0); 663 mutex_unlock(&dqopt->files[cnt]->i_mutex);
664 mutex_unlock(&sb_dqopt(sb)->files[cnt]->i_mutex);
665 } 664 }
666 mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex); 665 mutex_unlock(&dqopt->dqonoff_mutex);
667 666
668 return 0; 667 return 0;
669} 668}
@@ -907,14 +906,14 @@ static void add_dquot_ref(struct super_block *sb, int type)
907 spin_unlock(&inode->i_lock); 906 spin_unlock(&inode->i_lock);
908 continue; 907 continue;
909 } 908 }
910#ifdef CONFIG_QUOTA_DEBUG
911 if (unlikely(inode_get_rsv_space(inode) > 0))
912 reserved = 1;
913#endif
914 __iget(inode); 909 __iget(inode);
915 spin_unlock(&inode->i_lock); 910 spin_unlock(&inode->i_lock);
916 spin_unlock(&inode_sb_list_lock); 911 spin_unlock(&inode_sb_list_lock);
917 912
913#ifdef CONFIG_QUOTA_DEBUG
914 if (unlikely(inode_get_rsv_space(inode) > 0))
915 reserved = 1;
916#endif
918 iput(old_inode); 917 iput(old_inode);
919 __dquot_initialize(inode, type); 918 __dquot_initialize(inode, type);
920 919
@@ -2037,8 +2036,7 @@ int dquot_disable(struct super_block *sb, int type, unsigned int flags)
2037 /* If quota was reenabled in the meantime, we have 2036 /* If quota was reenabled in the meantime, we have
2038 * nothing to do */ 2037 * nothing to do */
2039 if (!sb_has_quota_loaded(sb, cnt)) { 2038 if (!sb_has_quota_loaded(sb, cnt)) {
2040 mutex_lock_nested(&toputinode[cnt]->i_mutex, 2039 mutex_lock(&toputinode[cnt]->i_mutex);
2041 I_MUTEX_QUOTA);
2042 toputinode[cnt]->i_flags &= ~(S_IMMUTABLE | 2040 toputinode[cnt]->i_flags &= ~(S_IMMUTABLE |
2043 S_NOATIME | S_NOQUOTA); 2041 S_NOATIME | S_NOQUOTA);
2044 truncate_inode_pages(&toputinode[cnt]->i_data, 2042 truncate_inode_pages(&toputinode[cnt]->i_data,
@@ -2133,7 +2131,7 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
2133 /* We don't want quota and atime on quota files (deadlocks 2131 /* We don't want quota and atime on quota files (deadlocks
2134 * possible) Also nobody should write to the file - we use 2132 * possible) Also nobody should write to the file - we use
2135 * special IO operations which ignore the immutable bit. */ 2133 * special IO operations which ignore the immutable bit. */
2136 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); 2134 mutex_lock(&inode->i_mutex);
2137 oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE | 2135 oldflags = inode->i_flags & (S_NOATIME | S_IMMUTABLE |
2138 S_NOQUOTA); 2136 S_NOQUOTA);
2139 inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE; 2137 inode->i_flags |= S_NOQUOTA | S_NOATIME | S_IMMUTABLE;
@@ -2180,7 +2178,7 @@ out_file_init:
2180 iput(inode); 2178 iput(inode);
2181out_lock: 2179out_lock:
2182 if (oldflags != -1) { 2180 if (oldflags != -1) {
2183 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA); 2181 mutex_lock(&inode->i_mutex);
2184 /* Set the flags back (in the case of accidental quotaon() 2182 /* Set the flags back (in the case of accidental quotaon()
2185 * on a wrong file we don't want to mess up the flags) */ 2183 * on a wrong file we don't want to mess up the flags) */
2186 inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE); 2184 inode->i_flags &= ~(S_NOATIME | S_NOQUOTA | S_IMMUTABLE);
diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c
index 494c315c7417..59d06871a850 100644
--- a/fs/reiserfs/inode.c
+++ b/fs/reiserfs/inode.c
@@ -76,14 +76,14 @@ void reiserfs_evict_inode(struct inode *inode)
76 ; 76 ;
77 } 77 }
78 out: 78 out:
79 end_writeback(inode); /* note this must go after the journal_end to prevent deadlock */ 79 clear_inode(inode); /* note this must go after the journal_end to prevent deadlock */
80 dquot_drop(inode); 80 dquot_drop(inode);
81 inode->i_blocks = 0; 81 inode->i_blocks = 0;
82 reiserfs_write_unlock_once(inode->i_sb, depth); 82 reiserfs_write_unlock_once(inode->i_sb, depth);
83 return; 83 return;
84 84
85no_delete: 85no_delete:
86 end_writeback(inode); 86 clear_inode(inode);
87 dquot_drop(inode); 87 dquot_drop(inode);
88} 88}
89 89
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index 8b7616ef06d8..c07b7d709447 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -2270,7 +2270,6 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
2270 (unsigned long long)off, (unsigned long long)len); 2270 (unsigned long long)off, (unsigned long long)len);
2271 return -EIO; 2271 return -EIO;
2272 } 2272 }
2273 mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
2274 while (towrite > 0) { 2273 while (towrite > 0) {
2275 tocopy = sb->s_blocksize - offset < towrite ? 2274 tocopy = sb->s_blocksize - offset < towrite ?
2276 sb->s_blocksize - offset : towrite; 2275 sb->s_blocksize - offset : towrite;
@@ -2302,16 +2301,13 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type,
2302 blk++; 2301 blk++;
2303 } 2302 }
2304out: 2303out:
2305 if (len == towrite) { 2304 if (len == towrite)
2306 mutex_unlock(&inode->i_mutex);
2307 return err; 2305 return err;
2308 }
2309 if (inode->i_size < off + len - towrite) 2306 if (inode->i_size < off + len - towrite)
2310 i_size_write(inode, off + len - towrite); 2307 i_size_write(inode, off + len - towrite);
2311 inode->i_version++; 2308 inode->i_version++;
2312 inode->i_mtime = inode->i_ctime = CURRENT_TIME; 2309 inode->i_mtime = inode->i_ctime = CURRENT_TIME;
2313 mark_inode_dirty(inode); 2310 mark_inode_dirty(inode);
2314 mutex_unlock(&inode->i_mutex);
2315 return len - towrite; 2311 return len - towrite;
2316} 2312}
2317 2313
diff --git a/fs/sysfs/inode.c b/fs/sysfs/inode.c
index 907c2b3af758..0ce3ccf7f401 100644
--- a/fs/sysfs/inode.c
+++ b/fs/sysfs/inode.c
@@ -310,7 +310,7 @@ void sysfs_evict_inode(struct inode *inode)
310 struct sysfs_dirent *sd = inode->i_private; 310 struct sysfs_dirent *sd = inode->i_private;
311 311
312 truncate_inode_pages(&inode->i_data, 0); 312 truncate_inode_pages(&inode->i_data, 0);
313 end_writeback(inode); 313 clear_inode(inode);
314 sysfs_put(sd); 314 sysfs_put(sd);
315} 315}
316 316
diff --git a/fs/sysv/inode.c b/fs/sysv/inode.c
index 3da5ce25faf0..08d0b2568cd3 100644
--- a/fs/sysv/inode.c
+++ b/fs/sysv/inode.c
@@ -316,7 +316,7 @@ static void sysv_evict_inode(struct inode *inode)
316 sysv_truncate(inode); 316 sysv_truncate(inode);
317 } 317 }
318 invalidate_inode_buffers(inode); 318 invalidate_inode_buffers(inode);
319 end_writeback(inode); 319 clear_inode(inode);
320 if (!inode->i_nlink) 320 if (!inode->i_nlink)
321 sysv_free_inode(inode); 321 sysv_free_inode(inode);
322} 322}
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 001acccac0d6..5862dd9d2784 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -378,7 +378,7 @@ out:
378 smp_wmb(); 378 smp_wmb();
379 } 379 }
380done: 380done:
381 end_writeback(inode); 381 clear_inode(inode);
382} 382}
383 383
384static void ubifs_dirty_inode(struct inode *inode, int flags) 384static void ubifs_dirty_inode(struct inode *inode, int flags)
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 7d7528008359..873e1bab9c4c 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -80,7 +80,7 @@ void udf_evict_inode(struct inode *inode)
80 } else 80 } else
81 truncate_inode_pages(&inode->i_data, 0); 81 truncate_inode_pages(&inode->i_data, 0);
82 invalidate_inode_buffers(inode); 82 invalidate_inode_buffers(inode);
83 end_writeback(inode); 83 clear_inode(inode);
84 if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB && 84 if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB &&
85 inode->i_size != iinfo->i_lenExtents) { 85 inode->i_size != iinfo->i_lenExtents) {
86 udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n", 86 udf_warn(inode->i_sb, "Inode %lu (mode %o) has inode size %llu different from extent length %llu. Filesystem need not be standards compliant.\n",
diff --git a/fs/ufs/inode.c b/fs/ufs/inode.c
index 7cdd3953d67e..dd7c89d8a1c1 100644
--- a/fs/ufs/inode.c
+++ b/fs/ufs/inode.c
@@ -895,7 +895,7 @@ void ufs_evict_inode(struct inode * inode)
895 } 895 }
896 896
897 invalidate_inode_buffers(inode); 897 invalidate_inode_buffers(inode);
898 end_writeback(inode); 898 clear_inode(inode);
899 899
900 if (want_delete) { 900 if (want_delete) {
901 lock_ufs(inode->i_sb); 901 lock_ufs(inode->i_sb);
diff --git a/fs/xfs/xfs_super.c b/fs/xfs/xfs_super.c
index 2fcfd5b0b046..0d9de41a7151 100644
--- a/fs/xfs/xfs_super.c
+++ b/fs/xfs/xfs_super.c
@@ -932,7 +932,7 @@ xfs_fs_evict_inode(
932 trace_xfs_evict_inode(ip); 932 trace_xfs_evict_inode(ip);
933 933
934 truncate_inode_pages(&inode->i_data, 0); 934 truncate_inode_pages(&inode->i_data, 0);
935 end_writeback(inode); 935 clear_inode(inode);
936 XFS_STATS_INC(vn_rele); 936 XFS_STATS_INC(vn_rele);
937 XFS_STATS_INC(vn_remove); 937 XFS_STATS_INC(vn_remove);
938 XFS_STATS_DEC(vn_active); 938 XFS_STATS_DEC(vn_active);
diff --git a/include/asm-generic/dma-coherent.h b/include/asm-generic/dma-coherent.h
index 85a3ffaa0242..abfb2682de7f 100644
--- a/include/asm-generic/dma-coherent.h
+++ b/include/asm-generic/dma-coherent.h
@@ -3,13 +3,15 @@
3 3
4#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT 4#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
5/* 5/*
6 * These two functions are only for dma allocator. 6 * These three functions are only for dma allocator.
7 * Don't use them in device drivers. 7 * Don't use them in device drivers.
8 */ 8 */
9int dma_alloc_from_coherent(struct device *dev, ssize_t size, 9int dma_alloc_from_coherent(struct device *dev, ssize_t size,
10 dma_addr_t *dma_handle, void **ret); 10 dma_addr_t *dma_handle, void **ret);
11int dma_release_from_coherent(struct device *dev, int order, void *vaddr); 11int dma_release_from_coherent(struct device *dev, int order, void *vaddr);
12 12
13int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
14 void *cpu_addr, size_t size, int *ret);
13/* 15/*
14 * Standard interface 16 * Standard interface
15 */ 17 */
diff --git a/include/asm-generic/dma-contiguous.h b/include/asm-generic/dma-contiguous.h
new file mode 100644
index 000000000000..c544356b374b
--- /dev/null
+++ b/include/asm-generic/dma-contiguous.h
@@ -0,0 +1,28 @@
1#ifndef ASM_DMA_CONTIGUOUS_H
2#define ASM_DMA_CONTIGUOUS_H
3
4#ifdef __KERNEL__
5#ifdef CONFIG_CMA
6
7#include <linux/device.h>
8#include <linux/dma-contiguous.h>
9
10static inline struct cma *dev_get_cma_area(struct device *dev)
11{
12 if (dev && dev->cma_area)
13 return dev->cma_area;
14 return dma_contiguous_default_area;
15}
16
17static inline void dev_set_cma_area(struct device *dev, struct cma *cma)
18{
19 if (dev)
20 dev->cma_area = cma;
21 if (!dev || !dma_contiguous_default_area)
22 dma_contiguous_default_area = cma;
23}
24
25#endif
26#endif
27
28#endif
diff --git a/include/asm-generic/kvm_para.h b/include/asm-generic/kvm_para.h
new file mode 100644
index 000000000000..5cba37f9eae1
--- /dev/null
+++ b/include/asm-generic/kvm_para.h
@@ -0,0 +1,22 @@
1#ifndef _ASM_GENERIC_KVM_PARA_H
2#define _ASM_GENERIC_KVM_PARA_H
3
4#ifdef __KERNEL__
5
6/*
7 * This function is used by architectures that support kvm to avoid issuing
8 * false soft lockup messages.
9 */
10static inline bool kvm_check_and_clear_guest_paused(void)
11{
12 return false;
13}
14
15static inline unsigned int kvm_arch_para_features(void)
16{
17 return 0;
18}
19
 20#endif /* __KERNEL__ */
21
22#endif
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 125c54e98517..6f2b45a9b6bc 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -158,9 +158,8 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
158#endif 158#endif
159 159
160#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH 160#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
161extern pmd_t pmdp_splitting_flush(struct vm_area_struct *vma, 161extern void pmdp_splitting_flush(struct vm_area_struct *vma,
162 unsigned long address, 162 unsigned long address, pmd_t *pmdp);
163 pmd_t *pmdp);
164#endif 163#endif
165 164
166#ifndef __HAVE_ARCH_PTE_SAME 165#ifndef __HAVE_ARCH_PTE_SAME
@@ -446,6 +445,18 @@ static inline int pmd_write(pmd_t pmd)
446#endif /* __HAVE_ARCH_PMD_WRITE */ 445#endif /* __HAVE_ARCH_PMD_WRITE */
447#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 446#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
448 447
448#ifndef pmd_read_atomic
449static inline pmd_t pmd_read_atomic(pmd_t *pmdp)
450{
451 /*
452 * Depend on compiler for an atomic pmd read. NOTE: this is
453 * only going to work, if the pmdval_t isn't larger than
454 * an unsigned long.
455 */
456 return *pmdp;
457}
458#endif
459
449/* 460/*
450 * This function is meant to be used by sites walking pagetables with 461 * This function is meant to be used by sites walking pagetables with
451 * the mmap_sem hold in read mode to protect against MADV_DONTNEED and 462 * the mmap_sem hold in read mode to protect against MADV_DONTNEED and
@@ -459,11 +470,17 @@ static inline int pmd_write(pmd_t pmd)
459 * undefined so behaving like if the pmd was none is safe (because it 470 * undefined so behaving like if the pmd was none is safe (because it
460 * can return none anyway). The compiler level barrier() is critically 471 * can return none anyway). The compiler level barrier() is critically
461 * important to compute the two checks atomically on the same pmdval. 472 * important to compute the two checks atomically on the same pmdval.
473 *
474 * For 32bit kernels with a 64bit large pmd_t this automatically takes
475 * care of reading the pmd atomically to avoid SMP race conditions
 476 * against pmd_populate() when the mmap_sem is held for reading by the
477 * caller (a special atomic read not done by "gcc" as in the generic
478 * version above, is also needed when THP is disabled because the page
479 * fault can populate the pmd from under us).
462 */ 480 */
463static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd) 481static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
464{ 482{
465 /* depend on compiler for an atomic pmd read */ 483 pmd_t pmdval = pmd_read_atomic(pmd);
466 pmd_t pmdval = *pmd;
467 /* 484 /*
468 * The barrier will stabilize the pmdval in a register or on 485 * The barrier will stabilize the pmdval in a register or on
469 * the stack so that it will stop changing under the code. 486 * the stack so that it will stop changing under the code.
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
new file mode 100644
index 000000000000..3f21f1b72e45
--- /dev/null
+++ b/include/asm-generic/word-at-a-time.h
@@ -0,0 +1,52 @@
1#ifndef _ASM_WORD_AT_A_TIME_H
2#define _ASM_WORD_AT_A_TIME_H
3
4/*
5 * This says "generic", but it's actually big-endian only.
6 * Little-endian can use more efficient versions of these
7 * interfaces, see for example
8 * arch/x86/include/asm/word-at-a-time.h
9 * for those.
10 */
11
12#include <linux/kernel.h>
13
14struct word_at_a_time {
15 const unsigned long high_bits, low_bits;
16};
17
18#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }
19
20/* Bit set in the bytes that have a zero */
21static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
22{
23 unsigned long mask = (val & c->low_bits) + c->low_bits;
24 return ~(mask | rhs);
25}
26
27#define create_zero_mask(mask) (mask)
28
29static inline long find_zero(unsigned long mask)
30{
31 long byte = 0;
32#ifdef CONFIG_64BIT
33 if (mask >> 32)
34 mask >>= 32;
35 else
36 byte = 4;
37#endif
38 if (mask >> 16)
39 mask >>= 16;
40 else
41 byte += 2;
42 return (mask >> 8) ? byte : byte + 1;
43}
44
45static inline bool has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
46{
47 unsigned long rhs = val | c->low_bits;
48 *data = rhs;
49 return (val + c->high_bits) & ~rhs;
50}
51
52#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/include/linux/Kbuild b/include/linux/Kbuild
index 4cd59b95858f..7185b8f15ced 100644
--- a/include/linux/Kbuild
+++ b/include/linux/Kbuild
@@ -225,6 +225,7 @@ header-y += kd.h
225header-y += kdev_t.h 225header-y += kdev_t.h
226header-y += kernel.h 226header-y += kernel.h
227header-y += kernelcapi.h 227header-y += kernelcapi.h
228header-y += kernel-page-flags.h
228header-y += keyboard.h 229header-y += keyboard.h
229header-y += keyctl.h 230header-y += keyctl.h
230header-y += l2tp.h 231header-y += l2tp.h
diff --git a/include/linux/amba/pl08x.h b/include/linux/amba/pl08x.h
index e64ce2cfee99..02549017212a 100644
--- a/include/linux/amba/pl08x.h
+++ b/include/linux/amba/pl08x.h
@@ -92,6 +92,8 @@ struct pl08x_bus_data {
92 * right now 92 * right now
93 * @serving: the virtual channel currently being served by this physical 93 * @serving: the virtual channel currently being served by this physical
94 * channel 94 * channel
95 * @locked: channel unavailable for the system, e.g. dedicated to secure
96 * world
95 */ 97 */
96struct pl08x_phy_chan { 98struct pl08x_phy_chan {
97 unsigned int id; 99 unsigned int id;
@@ -99,6 +101,7 @@ struct pl08x_phy_chan {
99 spinlock_t lock; 101 spinlock_t lock;
100 int signal; 102 int signal;
101 struct pl08x_dma_chan *serving; 103 struct pl08x_dma_chan *serving;
104 bool locked;
102}; 105};
103 106
104/** 107/**
diff --git a/include/linux/apple_bl.h b/include/linux/apple_bl.h
index 47bedc0eee69..0a95e730fcea 100644
--- a/include/linux/apple_bl.h
+++ b/include/linux/apple_bl.h
@@ -5,7 +5,7 @@
5#ifndef _LINUX_APPLE_BL_H 5#ifndef _LINUX_APPLE_BL_H
6#define _LINUX_APPLE_BL_H 6#define _LINUX_APPLE_BL_H
7 7
8#ifdef CONFIG_BACKLIGHT_APPLE 8#if defined(CONFIG_BACKLIGHT_APPLE) || defined(CONFIG_BACKLIGHT_APPLE_MODULE)
9 9
10extern int apple_bl_register(void); 10extern int apple_bl_register(void);
11extern void apple_bl_unregister(void); 11extern void apple_bl_unregister(void);
diff --git a/include/linux/bootmem.h b/include/linux/bootmem.h
index 1a0cd270bb7a..324fe08ea3b1 100644
--- a/include/linux/bootmem.h
+++ b/include/linux/bootmem.h
@@ -135,9 +135,6 @@ extern void *__alloc_bootmem_low_node(pg_data_t *pgdat,
135extern int reserve_bootmem_generic(unsigned long addr, unsigned long size, 135extern int reserve_bootmem_generic(unsigned long addr, unsigned long size,
136 int flags); 136 int flags);
137 137
138extern void *alloc_bootmem_section(unsigned long size,
139 unsigned long section_nr);
140
141#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP 138#ifdef CONFIG_HAVE_ARCH_ALLOC_REMAP
142extern void *alloc_remap(int nid, unsigned long size); 139extern void *alloc_remap(int nid, unsigned long size);
143#else 140#else
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 72961c39576a..aaac4bba6f5c 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -30,6 +30,13 @@ struct pt_regs;
30#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); })) 30#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
31#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); })) 31#define BUILD_BUG_ON_NULL(e) ((void *)sizeof(struct { int:-!!(e); }))
32 32
33/*
34 * BUILD_BUG_ON_INVALID() permits the compiler to check the validity of the
35 * expression but avoids the generation of any code, even if that expression
36 * has side-effects.
37 */
38#define BUILD_BUG_ON_INVALID(e) ((void)(sizeof((__force long)(e))))
39
33/** 40/**
34 * BUILD_BUG_ON - break compile if a condition is true. 41 * BUILD_BUG_ON - break compile if a condition is true.
35 * @condition: the condition which the compiler should know is false. 42 * @condition: the condition which the compiler should know is false.
diff --git a/include/linux/clk-private.h b/include/linux/clk-private.h
index 5e4312b6f5cc..eb3f84bc5325 100644
--- a/include/linux/clk-private.h
+++ b/include/linux/clk-private.h
@@ -30,7 +30,7 @@ struct clk {
30 const struct clk_ops *ops; 30 const struct clk_ops *ops;
31 struct clk_hw *hw; 31 struct clk_hw *hw;
32 struct clk *parent; 32 struct clk *parent;
33 char **parent_names; 33 const char **parent_names;
34 struct clk **parents; 34 struct clk **parents;
35 u8 num_parents; 35 u8 num_parents;
36 unsigned long rate; 36 unsigned long rate;
@@ -55,12 +55,22 @@ struct clk {
55 * alternative macro for static initialization 55 * alternative macro for static initialization
56 */ 56 */
57 57
58extern struct clk_ops clk_fixed_rate_ops; 58#define DEFINE_CLK(_name, _ops, _flags, _parent_names, \
59 _parents) \
60 static struct clk _name = { \
61 .name = #_name, \
62 .ops = &_ops, \
63 .hw = &_name##_hw.hw, \
64 .parent_names = _parent_names, \
65 .num_parents = ARRAY_SIZE(_parent_names), \
66 .parents = _parents, \
67 .flags = _flags, \
68 }
59 69
60#define DEFINE_CLK_FIXED_RATE(_name, _flags, _rate, \ 70#define DEFINE_CLK_FIXED_RATE(_name, _flags, _rate, \
61 _fixed_rate_flags) \ 71 _fixed_rate_flags) \
62 static struct clk _name; \ 72 static struct clk _name; \
63 static char *_name##_parent_names[] = {}; \ 73 static const char *_name##_parent_names[] = {}; \
64 static struct clk_fixed_rate _name##_hw = { \ 74 static struct clk_fixed_rate _name##_hw = { \
65 .hw = { \ 75 .hw = { \
66 .clk = &_name, \ 76 .clk = &_name, \
@@ -68,23 +78,14 @@ extern struct clk_ops clk_fixed_rate_ops;
68 .fixed_rate = _rate, \ 78 .fixed_rate = _rate, \
69 .flags = _fixed_rate_flags, \ 79 .flags = _fixed_rate_flags, \
70 }; \ 80 }; \
71 static struct clk _name = { \ 81 DEFINE_CLK(_name, clk_fixed_rate_ops, _flags, \
72 .name = #_name, \ 82 _name##_parent_names, NULL);
73 .ops = &clk_fixed_rate_ops, \
74 .hw = &_name##_hw.hw, \
75 .parent_names = _name##_parent_names, \
76 .num_parents = \
77 ARRAY_SIZE(_name##_parent_names), \
78 .flags = _flags, \
79 };
80
81extern struct clk_ops clk_gate_ops;
82 83
83#define DEFINE_CLK_GATE(_name, _parent_name, _parent_ptr, \ 84#define DEFINE_CLK_GATE(_name, _parent_name, _parent_ptr, \
84 _flags, _reg, _bit_idx, \ 85 _flags, _reg, _bit_idx, \
85 _gate_flags, _lock) \ 86 _gate_flags, _lock) \
86 static struct clk _name; \ 87 static struct clk _name; \
87 static char *_name##_parent_names[] = { \ 88 static const char *_name##_parent_names[] = { \
88 _parent_name, \ 89 _parent_name, \
89 }; \ 90 }; \
90 static struct clk *_name##_parents[] = { \ 91 static struct clk *_name##_parents[] = { \
@@ -99,24 +100,14 @@ extern struct clk_ops clk_gate_ops;
99 .flags = _gate_flags, \ 100 .flags = _gate_flags, \
100 .lock = _lock, \ 101 .lock = _lock, \
101 }; \ 102 }; \
102 static struct clk _name = { \ 103 DEFINE_CLK(_name, clk_gate_ops, _flags, \
103 .name = #_name, \ 104 _name##_parent_names, _name##_parents);
104 .ops = &clk_gate_ops, \
105 .hw = &_name##_hw.hw, \
106 .parent_names = _name##_parent_names, \
107 .num_parents = \
108 ARRAY_SIZE(_name##_parent_names), \
109 .parents = _name##_parents, \
110 .flags = _flags, \
111 };
112
113extern struct clk_ops clk_divider_ops;
114 105
115#define DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \ 106#define DEFINE_CLK_DIVIDER(_name, _parent_name, _parent_ptr, \
116 _flags, _reg, _shift, _width, \ 107 _flags, _reg, _shift, _width, \
117 _divider_flags, _lock) \ 108 _divider_flags, _lock) \
118 static struct clk _name; \ 109 static struct clk _name; \
119 static char *_name##_parent_names[] = { \ 110 static const char *_name##_parent_names[] = { \
120 _parent_name, \ 111 _parent_name, \
121 }; \ 112 }; \
122 static struct clk *_name##_parents[] = { \ 113 static struct clk *_name##_parents[] = { \
@@ -132,18 +123,8 @@ extern struct clk_ops clk_divider_ops;
132 .flags = _divider_flags, \ 123 .flags = _divider_flags, \
133 .lock = _lock, \ 124 .lock = _lock, \
134 }; \ 125 }; \
135 static struct clk _name = { \ 126 DEFINE_CLK(_name, clk_divider_ops, _flags, \
136 .name = #_name, \ 127 _name##_parent_names, _name##_parents);
137 .ops = &clk_divider_ops, \
138 .hw = &_name##_hw.hw, \
139 .parent_names = _name##_parent_names, \
140 .num_parents = \
141 ARRAY_SIZE(_name##_parent_names), \
142 .parents = _name##_parents, \
143 .flags = _flags, \
144 };
145
146extern struct clk_ops clk_mux_ops;
147 128
148#define DEFINE_CLK_MUX(_name, _parent_names, _parents, _flags, \ 129#define DEFINE_CLK_MUX(_name, _parent_names, _parents, _flags, \
149 _reg, _shift, _width, \ 130 _reg, _shift, _width, \
@@ -159,16 +140,28 @@ extern struct clk_ops clk_mux_ops;
159 .flags = _mux_flags, \ 140 .flags = _mux_flags, \
160 .lock = _lock, \ 141 .lock = _lock, \
161 }; \ 142 }; \
162 static struct clk _name = { \ 143 DEFINE_CLK(_name, clk_mux_ops, _flags, _parent_names, \
163 .name = #_name, \ 144 _parents);
164 .ops = &clk_mux_ops, \ 145
165 .hw = &_name##_hw.hw, \ 146#define DEFINE_CLK_FIXED_FACTOR(_name, _parent_name, \
166 .parent_names = _parent_names, \ 147 _parent_ptr, _flags, \
167 .num_parents = \ 148 _mult, _div) \
168 ARRAY_SIZE(_parent_names), \ 149 static struct clk _name; \
169 .parents = _parents, \ 150 static const char *_name##_parent_names[] = { \
170 .flags = _flags, \ 151 _parent_name, \
171 }; 152 }; \
153 static struct clk *_name##_parents[] = { \
154 _parent_ptr, \
155 }; \
156 static struct clk_fixed_factor _name##_hw = { \
157 .hw = { \
158 .clk = &_name, \
159 }, \
160 .mult = _mult, \
161 .div = _div, \
162 }; \
163 DEFINE_CLK(_name, clk_fixed_factor_ops, _flags, \
164 _name##_parent_names, _name##_parents);
172 165
173/** 166/**
174 * __clk_init - initialize the data structures in a struct clk 167 * __clk_init - initialize the data structures in a struct clk
@@ -189,8 +182,12 @@ extern struct clk_ops clk_mux_ops;
189 * 182 *
190 * It is not necessary to call clk_register if __clk_init is used directly with 183 * It is not necessary to call clk_register if __clk_init is used directly with
191 * statically initialized clock data. 184 * statically initialized clock data.
185 *
186 * Returns 0 on success, otherwise an error code.
192 */ 187 */
193void __clk_init(struct device *dev, struct clk *clk); 188int __clk_init(struct device *dev, struct clk *clk);
189
190struct clk *__clk_register(struct device *dev, struct clk_hw *hw);
194 191
195#endif /* CONFIG_COMMON_CLK */ 192#endif /* CONFIG_COMMON_CLK */
196#endif /* CLK_PRIVATE_H */ 193#endif /* CLK_PRIVATE_H */
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index 5508897ad376..4a0b483986c3 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -15,19 +15,6 @@
15 15
16#ifdef CONFIG_COMMON_CLK 16#ifdef CONFIG_COMMON_CLK
17 17
18/**
19 * struct clk_hw - handle for traversing from a struct clk to its corresponding
20 * hardware-specific structure. struct clk_hw should be declared within struct
21 * clk_foo and then referenced by the struct clk instance that uses struct
22 * clk_foo's clk_ops
23 *
24 * clk: pointer to the struct clk instance that points back to this struct
25 * clk_hw instance
26 */
27struct clk_hw {
28 struct clk *clk;
29};
30
31/* 18/*
32 * flags used across common struct clk. these flags should only affect the 19 * flags used across common struct clk. these flags should only affect the
33 * top-level framework. custom flags for dealing with hardware specifics 20 * top-level framework. custom flags for dealing with hardware specifics
@@ -39,6 +26,8 @@ struct clk_hw {
39#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */ 26#define CLK_IGNORE_UNUSED BIT(3) /* do not gate even if unused */
40#define CLK_IS_ROOT BIT(4) /* root clk, has no parent */ 27#define CLK_IS_ROOT BIT(4) /* root clk, has no parent */
41 28
29struct clk_hw;
30
42/** 31/**
43 * struct clk_ops - Callback operations for hardware clocks; these are to 32 * struct clk_ops - Callback operations for hardware clocks; these are to
44 * be provided by the clock implementation, and will be called by drivers 33 * be provided by the clock implementation, and will be called by drivers
@@ -88,19 +77,11 @@ struct clk_hw {
88 * array index into the value programmed into the hardware. 77 * array index into the value programmed into the hardware.
89 * Returns 0 on success, -EERROR otherwise. 78 * Returns 0 on success, -EERROR otherwise.
90 * 79 *
91 * @set_rate: Change the rate of this clock. If this callback returns 80 * @set_rate: Change the rate of this clock. The requested rate is specified
92 * CLK_SET_RATE_PARENT, the rate change will be propagated to the 81 * by the second argument, which should typically be the return
93 * parent clock (which may propagate again if the parent clock 82 * of .round_rate call. The third argument gives the parent rate
94 * also sets this flag). The requested rate of the parent is 83 * which is likely helpful for most .set_rate implementation.
95 * passed back from the callback in the second 'unsigned long *' 84 * Returns 0 on success, -EERROR otherwise.
96 * argument. Note that it is up to the hardware clock's set_rate
97 * implementation to insure that clocks do not run out of spec
98 * when propgating the call to set_rate up to the parent. One way
99 * to do this is to gate the clock (via clk_disable and/or
100 * clk_unprepare) before calling clk_set_rate, then ungating it
101 * afterward. If your clock also has the CLK_GATE_SET_RATE flag
102 * set then this will insure safety. Returns 0 on success,
103 * -EERROR otherwise.
104 * 85 *
105 * The clk_enable/clk_disable and clk_prepare/clk_unprepare pairs allow 86 * The clk_enable/clk_disable and clk_prepare/clk_unprepare pairs allow
106 * implementations to split any work between atomic (enable) and sleepable 87 * implementations to split any work between atomic (enable) and sleepable
@@ -125,10 +106,46 @@ struct clk_ops {
125 unsigned long *); 106 unsigned long *);
126 int (*set_parent)(struct clk_hw *hw, u8 index); 107 int (*set_parent)(struct clk_hw *hw, u8 index);
127 u8 (*get_parent)(struct clk_hw *hw); 108 u8 (*get_parent)(struct clk_hw *hw);
128 int (*set_rate)(struct clk_hw *hw, unsigned long); 109 int (*set_rate)(struct clk_hw *hw, unsigned long,
110 unsigned long);
129 void (*init)(struct clk_hw *hw); 111 void (*init)(struct clk_hw *hw);
130}; 112};
131 113
114/**
115 * struct clk_init_data - holds init data that's common to all clocks and is
116 * shared between the clock provider and the common clock framework.
117 *
118 * @name: clock name
119 * @ops: operations this clock supports
120 * @parent_names: array of string names for all possible parents
121 * @num_parents: number of possible parents
122 * @flags: framework-level hints and quirks
123 */
124struct clk_init_data {
125 const char *name;
126 const struct clk_ops *ops;
127 const char **parent_names;
128 u8 num_parents;
129 unsigned long flags;
130};
131
132/**
133 * struct clk_hw - handle for traversing from a struct clk to its corresponding
134 * hardware-specific structure. struct clk_hw should be declared within struct
135 * clk_foo and then referenced by the struct clk instance that uses struct
136 * clk_foo's clk_ops
137 *
138 * @clk: pointer to the struct clk instance that points back to this struct
139 * clk_hw instance
140 *
141 * @init: pointer to struct clk_init_data that contains the init data shared
142 * with the common clock framework.
143 */
144struct clk_hw {
145 struct clk *clk;
146 struct clk_init_data *init;
147};
148
132/* 149/*
133 * DOC: Basic clock implementations common to many platforms 150 * DOC: Basic clock implementations common to many platforms
134 * 151 *
@@ -149,6 +166,7 @@ struct clk_fixed_rate {
149 u8 flags; 166 u8 flags;
150}; 167};
151 168
169extern const struct clk_ops clk_fixed_rate_ops;
152struct clk *clk_register_fixed_rate(struct device *dev, const char *name, 170struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
153 const char *parent_name, unsigned long flags, 171 const char *parent_name, unsigned long flags,
154 unsigned long fixed_rate); 172 unsigned long fixed_rate);
@@ -165,7 +183,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
165 * Clock which can gate its output. Implements .enable & .disable 183 * Clock which can gate its output. Implements .enable & .disable
166 * 184 *
167 * Flags: 185 * Flags:
168 * CLK_GATE_SET_DISABLE - by default this clock sets the bit at bit_idx to 186 * CLK_GATE_SET_TO_DISABLE - by default this clock sets the bit at bit_idx to
169 * enable the clock. Setting this flag does the opposite: setting the bit 187 * enable the clock. Setting this flag does the opposite: setting the bit
170 * disable the clock and clearing it enables the clock 188 * disable the clock and clearing it enables the clock
171 */ 189 */
@@ -175,11 +193,11 @@ struct clk_gate {
175 u8 bit_idx; 193 u8 bit_idx;
176 u8 flags; 194 u8 flags;
177 spinlock_t *lock; 195 spinlock_t *lock;
178 char *parent[1];
179}; 196};
180 197
181#define CLK_GATE_SET_TO_DISABLE BIT(0) 198#define CLK_GATE_SET_TO_DISABLE BIT(0)
182 199
200extern const struct clk_ops clk_gate_ops;
183struct clk *clk_register_gate(struct device *dev, const char *name, 201struct clk *clk_register_gate(struct device *dev, const char *name,
184 const char *parent_name, unsigned long flags, 202 const char *parent_name, unsigned long flags,
185 void __iomem *reg, u8 bit_idx, 203 void __iomem *reg, u8 bit_idx,
@@ -212,12 +230,12 @@ struct clk_divider {
212 u8 width; 230 u8 width;
213 u8 flags; 231 u8 flags;
214 spinlock_t *lock; 232 spinlock_t *lock;
215 char *parent[1];
216}; 233};
217 234
218#define CLK_DIVIDER_ONE_BASED BIT(0) 235#define CLK_DIVIDER_ONE_BASED BIT(0)
219#define CLK_DIVIDER_POWER_OF_TWO BIT(1) 236#define CLK_DIVIDER_POWER_OF_TWO BIT(1)
220 237
238extern const struct clk_ops clk_divider_ops;
221struct clk *clk_register_divider(struct device *dev, const char *name, 239struct clk *clk_register_divider(struct device *dev, const char *name,
222 const char *parent_name, unsigned long flags, 240 const char *parent_name, unsigned long flags,
223 void __iomem *reg, u8 shift, u8 width, 241 void __iomem *reg, u8 shift, u8 width,
@@ -238,7 +256,7 @@ struct clk *clk_register_divider(struct device *dev, const char *name,
238 * 256 *
239 * Flags: 257 * Flags:
240 * CLK_MUX_INDEX_ONE - register index starts at 1, not 0 258 * CLK_MUX_INDEX_ONE - register index starts at 1, not 0
241 * CLK_MUX_INDEX_BITWISE - register index is a single bit (power of two) 259 * CLK_MUX_INDEX_BIT - register index is a single bit (power of two)
242 */ 260 */
243struct clk_mux { 261struct clk_mux {
244 struct clk_hw hw; 262 struct clk_hw hw;
@@ -252,29 +270,49 @@ struct clk_mux {
252#define CLK_MUX_INDEX_ONE BIT(0) 270#define CLK_MUX_INDEX_ONE BIT(0)
253#define CLK_MUX_INDEX_BIT BIT(1) 271#define CLK_MUX_INDEX_BIT BIT(1)
254 272
273extern const struct clk_ops clk_mux_ops;
255struct clk *clk_register_mux(struct device *dev, const char *name, 274struct clk *clk_register_mux(struct device *dev, const char *name,
256 char **parent_names, u8 num_parents, unsigned long flags, 275 const char **parent_names, u8 num_parents, unsigned long flags,
257 void __iomem *reg, u8 shift, u8 width, 276 void __iomem *reg, u8 shift, u8 width,
258 u8 clk_mux_flags, spinlock_t *lock); 277 u8 clk_mux_flags, spinlock_t *lock);
259 278
260/** 279/**
280 * struct clk_fixed_factor - fixed multiplier and divider clock
281 *
282 * @hw: handle between common and hardware-specific interfaces
283 * @mult: multiplier
284 * @div: divider
285 *
286 * Clock with a fixed multiplier and divider. The output frequency is the
287 * parent clock rate divided by div and multiplied by mult.
288 * Implements .recalc_rate, .set_rate and .round_rate
289 */
290
291struct clk_fixed_factor {
292 struct clk_hw hw;
293 unsigned int mult;
294 unsigned int div;
295};
296
297extern struct clk_ops clk_fixed_factor_ops;
298struct clk *clk_register_fixed_factor(struct device *dev, const char *name,
299 const char *parent_name, unsigned long flags,
300 unsigned int mult, unsigned int div);
301
302/**
261 * clk_register - allocate a new clock, register it and return an opaque cookie 303 * clk_register - allocate a new clock, register it and return an opaque cookie
262 * @dev: device that is registering this clock 304 * @dev: device that is registering this clock
263 * @name: clock name
264 * @ops: operations this clock supports
265 * @hw: link to hardware-specific clock data 305 * @hw: link to hardware-specific clock data
266 * @parent_names: array of string names for all possible parents
267 * @num_parents: number of possible parents
268 * @flags: framework-level hints and quirks
269 * 306 *
270 * clk_register is the primary interface for populating the clock tree with new 307 * clk_register is the primary interface for populating the clock tree with new
271 * clock nodes. It returns a pointer to the newly allocated struct clk which 308 * clock nodes. It returns a pointer to the newly allocated struct clk which
272 * cannot be dereferenced by driver code but may be used in conjuction with the 309 * cannot be dereferenced by driver code but may be used in conjuction with the
273 * rest of the clock API. 310 * rest of the clock API. In the event of an error clk_register will return an
311 * error code; drivers must test for an error code after calling clk_register.
274 */ 312 */
275struct clk *clk_register(struct device *dev, const char *name, 313struct clk *clk_register(struct device *dev, struct clk_hw *hw);
276 const struct clk_ops *ops, struct clk_hw *hw, 314
277 char **parent_names, u8 num_parents, unsigned long flags); 315void clk_unregister(struct clk *clk);
278 316
279/* helper functions */ 317/* helper functions */
280const char *__clk_get_name(struct clk *clk); 318const char *__clk_get_name(struct clk *clk);
diff --git a/include/linux/clk.h b/include/linux/clk.h
index 70cf722ac3af..ad5c43e8ae8a 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -81,7 +81,7 @@ int clk_notifier_register(struct clk *clk, struct notifier_block *nb);
81 81
82int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb); 82int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb);
83 83
84#endif /* !CONFIG_COMMON_CLK */ 84#endif
85 85
86/** 86/**
87 * clk_get - lookup and obtain a reference to a clock producer. 87 * clk_get - lookup and obtain a reference to a clock producer.
@@ -252,7 +252,7 @@ void devm_clk_put(struct device *dev, struct clk *clk);
252 * Returns rounded clock rate in Hz, or negative errno. 252 * Returns rounded clock rate in Hz, or negative errno.
253 */ 253 */
254long clk_round_rate(struct clk *clk, unsigned long rate); 254long clk_round_rate(struct clk *clk, unsigned long rate);
255 255
256/** 256/**
257 * clk_set_rate - set the clock rate for a clock source 257 * clk_set_rate - set the clock rate for a clock source
258 * @clk: clock source 258 * @clk: clock source
@@ -261,7 +261,7 @@ long clk_round_rate(struct clk *clk, unsigned long rate);
261 * Returns success (0) or negative errno. 261 * Returns success (0) or negative errno.
262 */ 262 */
263int clk_set_rate(struct clk *clk, unsigned long rate); 263int clk_set_rate(struct clk *clk, unsigned long rate);
264 264
265/** 265/**
266 * clk_set_parent - set the parent clock source for this clock 266 * clk_set_parent - set the parent clock source for this clock
267 * @clk: clock source 267 * @clk: clock source
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 51a90b7f2d60..e988037abd2a 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -1,6 +1,8 @@
1#ifndef _LINUX_COMPACTION_H 1#ifndef _LINUX_COMPACTION_H
2#define _LINUX_COMPACTION_H 2#define _LINUX_COMPACTION_H
3 3
4#include <linux/node.h>
5
4/* Return values for compact_zone() and try_to_compact_pages() */ 6/* Return values for compact_zone() and try_to_compact_pages() */
5/* compaction didn't start as it was not possible or direct reclaim was more suitable */ 7/* compaction didn't start as it was not possible or direct reclaim was more suitable */
6#define COMPACT_SKIPPED 0 8#define COMPACT_SKIPPED 0
@@ -11,6 +13,23 @@
11/* The full zone was compacted */ 13/* The full zone was compacted */
12#define COMPACT_COMPLETE 3 14#define COMPACT_COMPLETE 3
13 15
16/*
17 * compaction supports three modes
18 *
19 * COMPACT_ASYNC_MOVABLE uses asynchronous migration and only scans
20 * MIGRATE_MOVABLE pageblocks as migration sources and targets.
21 * COMPACT_ASYNC_UNMOVABLE uses asynchronous migration and only scans
22 * MIGRATE_MOVABLE pageblocks as migration sources.
23 * MIGRATE_UNMOVABLE pageblocks are scanned as potential migration
 24 * targets and converts them to MIGRATE_MOVABLE if possible
25 * COMPACT_SYNC uses synchronous migration and scans all pageblocks
26 */
27enum compact_mode {
28 COMPACT_ASYNC_MOVABLE,
29 COMPACT_ASYNC_UNMOVABLE,
30 COMPACT_SYNC,
31};
32
14#ifdef CONFIG_COMPACTION 33#ifdef CONFIG_COMPACTION
15extern int sysctl_compact_memory; 34extern int sysctl_compact_memory;
16extern int sysctl_compaction_handler(struct ctl_table *table, int write, 35extern int sysctl_compaction_handler(struct ctl_table *table, int write,
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index ae36b72c22f3..66c434f5dd1e 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -93,6 +93,10 @@ struct dentry *debugfs_create_regset32(const char *name, umode_t mode,
93int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs, 93int debugfs_print_regs32(struct seq_file *s, const struct debugfs_reg32 *regs,
94 int nregs, void __iomem *base, char *prefix); 94 int nregs, void __iomem *base, char *prefix);
95 95
96struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
97 struct dentry *parent,
98 u32 *array, u32 elements);
99
96bool debugfs_initialized(void); 100bool debugfs_initialized(void);
97 101
98#else 102#else
@@ -219,6 +223,13 @@ static inline bool debugfs_initialized(void)
219 return false; 223 return false;
220} 224}
221 225
226static inline struct dentry *debugfs_create_u32_array(const char *name, umode_t mode,
227 struct dentry *parent,
228 u32 *array, u32 elements)
229{
230 return ERR_PTR(-ENODEV);
231}
232
222#endif 233#endif
223 234
224#endif 235#endif
diff --git a/include/linux/device.h b/include/linux/device.h
index e04f5776f6d0..161d96241b1b 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -667,6 +667,10 @@ struct device {
667 667
668 struct dma_coherent_mem *dma_mem; /* internal for coherent mem 668 struct dma_coherent_mem *dma_mem; /* internal for coherent mem
669 override */ 669 override */
670#ifdef CONFIG_CMA
671 struct cma *cma_area; /* contiguous memory area for dma
672 allocations */
673#endif
670 /* arch specific additions */ 674 /* arch specific additions */
671 struct dev_archdata archdata; 675 struct dev_archdata archdata;
672 676
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index 3efbfc2145c3..eb48f3816df9 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -61,6 +61,13 @@ struct dma_buf_attachment;
61 * This Callback must not sleep. 61 * This Callback must not sleep.
62 * @kmap: maps a page from the buffer into kernel address space. 62 * @kmap: maps a page from the buffer into kernel address space.
63 * @kunmap: [optional] unmaps a page from the buffer. 63 * @kunmap: [optional] unmaps a page from the buffer.
64 * @mmap: used to expose the backing storage to userspace. Note that the
65 * mapping needs to be coherent - if the exporter doesn't directly
66 * support this, it needs to fake coherency by shooting down any ptes
67 * when transitioning away from the cpu domain.
68 * @vmap: [optional] creates a virtual mapping for the buffer into kernel
69 * address space. Same restrictions as for vmap and friends apply.
70 * @vunmap: [optional] unmaps a vmap from the buffer
64 */ 71 */
65struct dma_buf_ops { 72struct dma_buf_ops {
66 int (*attach)(struct dma_buf *, struct device *, 73 int (*attach)(struct dma_buf *, struct device *,
@@ -92,6 +99,11 @@ struct dma_buf_ops {
92 void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *); 99 void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
93 void *(*kmap)(struct dma_buf *, unsigned long); 100 void *(*kmap)(struct dma_buf *, unsigned long);
94 void (*kunmap)(struct dma_buf *, unsigned long, void *); 101 void (*kunmap)(struct dma_buf *, unsigned long, void *);
102
103 int (*mmap)(struct dma_buf *, struct vm_area_struct *vma);
104
105 void *(*vmap)(struct dma_buf *);
106 void (*vunmap)(struct dma_buf *, void *vaddr);
95}; 107};
96 108
97/** 109/**
@@ -167,6 +179,11 @@ void *dma_buf_kmap_atomic(struct dma_buf *, unsigned long);
167void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *); 179void dma_buf_kunmap_atomic(struct dma_buf *, unsigned long, void *);
168void *dma_buf_kmap(struct dma_buf *, unsigned long); 180void *dma_buf_kmap(struct dma_buf *, unsigned long);
169void dma_buf_kunmap(struct dma_buf *, unsigned long, void *); 181void dma_buf_kunmap(struct dma_buf *, unsigned long, void *);
182
183int dma_buf_mmap(struct dma_buf *, struct vm_area_struct *,
184 unsigned long);
185void *dma_buf_vmap(struct dma_buf *);
186void dma_buf_vunmap(struct dma_buf *, void *vaddr);
170#else 187#else
171 188
172static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf, 189static inline struct dma_buf_attachment *dma_buf_attach(struct dma_buf *dmabuf,
@@ -248,6 +265,22 @@ static inline void dma_buf_kunmap(struct dma_buf *dmabuf,
248 unsigned long pnum, void *vaddr) 265 unsigned long pnum, void *vaddr)
249{ 266{
250} 267}
268
269static inline int dma_buf_mmap(struct dma_buf *dmabuf,
270 struct vm_area_struct *vma,
271 unsigned long pgoff)
272{
273 return -ENODEV;
274}
275
276static inline void *dma_buf_vmap(struct dma_buf *dmabuf)
277{
278 return NULL;
279}
280
281static inline void dma_buf_vunmap(struct dma_buf *dmabuf, void *vaddr)
282{
283}
251#endif /* CONFIG_DMA_SHARED_BUFFER */ 284#endif /* CONFIG_DMA_SHARED_BUFFER */
252 285
253#endif /* __DMA_BUF_H__ */ 286#endif /* __DMA_BUF_H__ */
diff --git a/include/linux/dma-contiguous.h b/include/linux/dma-contiguous.h
new file mode 100644
index 000000000000..2f303e4b7ed3
--- /dev/null
+++ b/include/linux/dma-contiguous.h
@@ -0,0 +1,110 @@
1#ifndef __LINUX_CMA_H
2#define __LINUX_CMA_H
3
4/*
5 * Contiguous Memory Allocator for DMA mapping framework
6 * Copyright (c) 2010-2011 by Samsung Electronics.
7 * Written by:
8 * Marek Szyprowski <m.szyprowski@samsung.com>
9 * Michal Nazarewicz <mina86@mina86.com>
10 *
11 * This program is free software; you can redistribute it and/or
12 * modify it under the terms of the GNU General Public License as
13 * published by the Free Software Foundation; either version 2 of the
14 * License or (at your optional) any later version of the license.
15 */
16
17/*
18 * Contiguous Memory Allocator
19 *
20 * The Contiguous Memory Allocator (CMA) makes it possible to
21 * allocate big contiguous chunks of memory after the system has
22 * booted.
23 *
24 * Why is it needed?
25 *
26 * Various devices on embedded systems have no scatter-getter and/or
27 * IO map support and require contiguous blocks of memory to
28 * operate. They include devices such as cameras, hardware video
29 * coders, etc.
30 *
31 * Such devices often require big memory buffers (a full HD frame
32 * is, for instance, more then 2 mega pixels large, i.e. more than 6
33 * MB of memory), which makes mechanisms such as kmalloc() or
34 * alloc_page() ineffective.
35 *
36 * At the same time, a solution where a big memory region is
37 * reserved for a device is suboptimal since often more memory is
38 * reserved then strictly required and, moreover, the memory is
39 * inaccessible to page system even if device drivers don't use it.
40 *
41 * CMA tries to solve this issue by operating on memory regions
42 * where only movable pages can be allocated from. This way, kernel
43 * can use the memory for pagecache and when device driver requests
44 * it, allocated pages can be migrated.
45 *
46 * Driver usage
47 *
48 * CMA should not be used by the device drivers directly. It is
49 * only a helper framework for dma-mapping subsystem.
50 *
51 * For more information, see kernel-docs in drivers/base/dma-contiguous.c
52 */
53
54#ifdef __KERNEL__
55
56struct cma;
57struct page;
58struct device;
59
60#ifdef CONFIG_CMA
61
62/*
63 * There is always at least global CMA area and a few optional device
64 * private areas configured in kernel .config.
65 */
66#define MAX_CMA_AREAS (1 + CONFIG_CMA_AREAS)
67
68extern struct cma *dma_contiguous_default_area;
69
70void dma_contiguous_reserve(phys_addr_t addr_limit);
71int dma_declare_contiguous(struct device *dev, unsigned long size,
72 phys_addr_t base, phys_addr_t limit);
73
74struct page *dma_alloc_from_contiguous(struct device *dev, int count,
75 unsigned int order);
76bool dma_release_from_contiguous(struct device *dev, struct page *pages,
77 int count);
78
79#else
80
81#define MAX_CMA_AREAS (0)
82
83static inline void dma_contiguous_reserve(phys_addr_t limit) { }
84
85static inline
86int dma_declare_contiguous(struct device *dev, unsigned long size,
87 phys_addr_t base, phys_addr_t limit)
88{
89 return -ENOSYS;
90}
91
92static inline
93struct page *dma_alloc_from_contiguous(struct device *dev, int count,
94 unsigned int order)
95{
96 return NULL;
97}
98
99static inline
100bool dma_release_from_contiguous(struct device *dev, struct page *pages,
101 int count)
102{
103 return false;
104}
105
106#endif
107
108#endif
109
110#endif
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index f9a2e5e67a54..d3fec584e8c3 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -615,11 +615,13 @@ static inline int dmaengine_slave_config(struct dma_chan *chan,
615} 615}
616 616
617static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single( 617static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_single(
618 struct dma_chan *chan, void *buf, size_t len, 618 struct dma_chan *chan, dma_addr_t buf, size_t len,
619 enum dma_transfer_direction dir, unsigned long flags) 619 enum dma_transfer_direction dir, unsigned long flags)
620{ 620{
621 struct scatterlist sg; 621 struct scatterlist sg;
622 sg_init_one(&sg, buf, len); 622 sg_init_table(&sg, 1);
623 sg_dma_address(&sg) = buf;
624 sg_dma_len(&sg) = len;
623 625
624 return chan->device->device_prep_slave_sg(chan, &sg, 1, 626 return chan->device->device_prep_slave_sg(chan, &sg, 1,
625 dir, flags, NULL); 627 dir, flags, NULL);
diff --git a/include/linux/edac.h b/include/linux/edac.h
index c621d762bb2c..91ba3bae42ee 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -71,6 +71,25 @@ enum dev_type {
71#define DEV_FLAG_X64 BIT(DEV_X64) 71#define DEV_FLAG_X64 BIT(DEV_X64)
72 72
73/** 73/**
74 * enum hw_event_mc_err_type - type of the detected error
75 *
76 * @HW_EVENT_ERR_CORRECTED: Corrected Error - Indicates that an ECC
77 * corrected error was detected
78 * @HW_EVENT_ERR_UNCORRECTED: Uncorrected Error - Indicates an error that
79 * can't be corrected by ECC, but it is not
80 * fatal (maybe it is on an unused memory area,
81 * or the memory controller could recover from
82 * it for example, by re-trying the operation).
83 * @HW_EVENT_ERR_FATAL: Fatal Error - Uncorrected error that could not
84 * be recovered.
85 */
86enum hw_event_mc_err_type {
87 HW_EVENT_ERR_CORRECTED,
88 HW_EVENT_ERR_UNCORRECTED,
89 HW_EVENT_ERR_FATAL,
90};
91
92/**
74 * enum mem_type - memory types. For a more detailed reference, please see 93 * enum mem_type - memory types. For a more detailed reference, please see
75 * http://en.wikipedia.org/wiki/DRAM 94 * http://en.wikipedia.org/wiki/DRAM
76 * 95 *
@@ -313,38 +332,141 @@ enum scrub_type {
313 */ 332 */
314 333
315/** 334/**
335 * enum edac_mc_layer - memory controller hierarchy layer
336 *
337 * @EDAC_MC_LAYER_BRANCH: memory layer is named "branch"
338 * @EDAC_MC_LAYER_CHANNEL: memory layer is named "channel"
339 * @EDAC_MC_LAYER_SLOT: memory layer is named "slot"
340 * @EDAC_MC_LAYER_CHIP_SELECT: memory layer is named "chip select"
341 *
342 * This enum is used by the drivers to tell edac_mc_sysfs what name should
343 * be used when describing a memory stick location.
344 */
345enum edac_mc_layer_type {
346 EDAC_MC_LAYER_BRANCH,
347 EDAC_MC_LAYER_CHANNEL,
348 EDAC_MC_LAYER_SLOT,
349 EDAC_MC_LAYER_CHIP_SELECT,
350};
351
352/**
353 * struct edac_mc_layer - describes the memory controller hierarchy
354 * @layer: layer type
355 * @size: number of components per layer. For example,
356 * if the channel layer has two channels, size = 2
357 * @is_virt_csrow: This layer is part of the "csrow" when old API
358 * compatibility mode is enabled. Otherwise, it is
359 * a channel
360 */
361struct edac_mc_layer {
362 enum edac_mc_layer_type type;
363 unsigned size;
364 bool is_virt_csrow;
365};
366
367/*
368 * Maximum number of layers used by the memory controller to uniquely
369 * identify a single memory stick.
370 * NOTE: Changing this constant requires not only to change the constant
371 * below, but also to change the existing code at the core, as there are
372 * some code there that are optimized for 3 layers.
373 */
374#define EDAC_MAX_LAYERS 3
375
376/**
377 * EDAC_DIMM_PTR - Macro responsible to find a pointer inside a pointer array
378 * for the element given by [layer0,layer1,layer2] position
379 *
380 * @layers: a struct edac_mc_layer array, describing how many elements
381 * were allocated for each layer
382 * @var: name of the var where we want to get the pointer
383 * (like mci->dimms)
384 * @n_layers: Number of layers at the @layers array
385 * @layer0: layer0 position
386 * @layer1: layer1 position. Unused if n_layers < 2
387 * @layer2: layer2 position. Unused if n_layers < 3
388 *
389 * For 1 layer, this macro returns &var[layer0]
390 * For 2 layers, this macro is similar to allocate a bi-dimensional array
391 * and to return "&var[layer0][layer1]"
392 * For 3 layers, this macro is similar to allocate a tri-dimensional array
393 * and to return "&var[layer0][layer1][layer2]"
394 *
395 * A loop could be used here to make it more generic, but, as we only have
396 * 3 layers, this is a little faster.
397 * By design, layers can never be 0 or more than 3. If that ever happens,
398 * a NULL is returned, causing an OOPS during the memory allocation routine,
399 * with would point to the developer that he's doing something wrong.
400 */
401#define EDAC_DIMM_PTR(layers, var, nlayers, layer0, layer1, layer2) ({ \
402 typeof(var) __p; \
403 if ((nlayers) == 1) \
404 __p = &var[layer0]; \
405 else if ((nlayers) == 2) \
406 __p = &var[(layer1) + ((layers[1]).size * (layer0))]; \
407 else if ((nlayers) == 3) \
408 __p = &var[(layer2) + ((layers[2]).size * ((layer1) + \
409 ((layers[1]).size * (layer0))))]; \
410 else \
411 __p = NULL; \
412 __p; \
413})
414
415
416/* FIXME: add the proper per-location error counts */
417struct dimm_info {
418 char label[EDAC_MC_LABEL_LEN + 1]; /* DIMM label on motherboard */
419
420 /* Memory location data */
421 unsigned location[EDAC_MAX_LAYERS];
422
423 struct mem_ctl_info *mci; /* the parent */
424
425 u32 grain; /* granularity of reported error in bytes */
426 enum dev_type dtype; /* memory device type */
427 enum mem_type mtype; /* memory dimm type */
428 enum edac_type edac_mode; /* EDAC mode for this dimm */
429
430 u32 nr_pages; /* number of pages on this dimm */
431
432 unsigned csrow, cschannel; /* Points to the old API data */
433};
434
435/**
316 * struct rank_info - contains the information for one DIMM rank 436 * struct rank_info - contains the information for one DIMM rank
317 * 437 *
318 * @chan_idx: channel number where the rank is (typically, 0 or 1) 438 * @chan_idx: channel number where the rank is (typically, 0 or 1)
319 * @ce_count: number of correctable errors for this rank 439 * @ce_count: number of correctable errors for this rank
320 * @label: DIMM label. Different ranks for the same DIMM should be
321 * filled, on userspace, with the same label.
322 * FIXME: The core currently won't enforce it.
323 * @csrow: A pointer to the chip select row structure (the parent 440 * @csrow: A pointer to the chip select row structure (the parent
324 * structure). The location of the rank is given by 441 * structure). The location of the rank is given by
325 * the (csrow->csrow_idx, chan_idx) vector. 442 * the (csrow->csrow_idx, chan_idx) vector.
443 * @dimm: A pointer to the DIMM structure, where the DIMM label
444 * information is stored.
445 *
446 * FIXME: Currently, the EDAC core model will assume one DIMM per rank.
447 * This is a bad assumption, but it makes this patch easier. Later
448 * patches in this series will fix this issue.
326 */ 449 */
327struct rank_info { 450struct rank_info {
328 int chan_idx; 451 int chan_idx;
329 u32 ce_count; 452 struct csrow_info *csrow;
330 char label[EDAC_MC_LABEL_LEN + 1]; 453 struct dimm_info *dimm;
331 struct csrow_info *csrow; /* the parent */ 454
455 u32 ce_count; /* Correctable Errors for this csrow */
332}; 456};
333 457
334struct csrow_info { 458struct csrow_info {
335 unsigned long first_page; /* first page number in dimm */ 459 /* Used only by edac_mc_find_csrow_by_page() */
336 unsigned long last_page; /* last page number in dimm */ 460 unsigned long first_page; /* first page number in csrow */
461 unsigned long last_page; /* last page number in csrow */
337 unsigned long page_mask; /* used for interleaving - 462 unsigned long page_mask; /* used for interleaving -
338 * 0UL for non intlv 463 * 0UL for non intlv */
339 */ 464
340 u32 nr_pages; /* number of pages in csrow */ 465 int csrow_idx; /* the chip-select row */
341 u32 grain; /* granularity of reported error in bytes */ 466
342 int csrow_idx; /* the chip-select row */
343 enum dev_type dtype; /* memory device type */
344 u32 ue_count; /* Uncorrectable Errors for this csrow */ 467 u32 ue_count; /* Uncorrectable Errors for this csrow */
345 u32 ce_count; /* Correctable Errors for this csrow */ 468 u32 ce_count; /* Correctable Errors for this csrow */
346 enum mem_type mtype; /* memory csrow type */ 469
347 enum edac_type edac_mode; /* EDAC mode for this csrow */
348 struct mem_ctl_info *mci; /* the parent */ 470 struct mem_ctl_info *mci; /* the parent */
349 471
350 struct kobject kobj; /* sysfs kobject for this csrow */ 472 struct kobject kobj; /* sysfs kobject for this csrow */
@@ -426,8 +548,20 @@ struct mem_ctl_info {
426 unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci, 548 unsigned long (*ctl_page_to_phys) (struct mem_ctl_info * mci,
427 unsigned long page); 549 unsigned long page);
428 int mc_idx; 550 int mc_idx;
429 int nr_csrows;
430 struct csrow_info *csrows; 551 struct csrow_info *csrows;
552 unsigned nr_csrows, num_cschannel;
553
554 /* Memory Controller hierarchy */
555 unsigned n_layers;
556 struct edac_mc_layer *layers;
557 bool mem_is_per_rank;
558
559 /*
560 * DIMM info. Will eventually remove the entire csrows_info some day
561 */
562 unsigned tot_dimms;
563 struct dimm_info *dimms;
564
431 /* 565 /*
432 * FIXME - what about controllers on other busses? - IDs must be 566 * FIXME - what about controllers on other busses? - IDs must be
433 * unique. dev pointer should be sufficiently unique, but 567 * unique. dev pointer should be sufficiently unique, but
@@ -440,12 +574,16 @@ struct mem_ctl_info {
440 const char *dev_name; 574 const char *dev_name;
441 char proc_name[MC_PROC_NAME_MAX_LEN + 1]; 575 char proc_name[MC_PROC_NAME_MAX_LEN + 1];
442 void *pvt_info; 576 void *pvt_info;
443 u32 ue_noinfo_count; /* Uncorrectable Errors w/o info */
444 u32 ce_noinfo_count; /* Correctable Errors w/o info */
445 u32 ue_count; /* Total Uncorrectable Errors for this MC */
446 u32 ce_count; /* Total Correctable Errors for this MC */
447 unsigned long start_time; /* mci load start time (in jiffies) */ 577 unsigned long start_time; /* mci load start time (in jiffies) */
448 578
579 /*
580 * drivers shouldn't access those fields directly, as the core
581 * already handles that.
582 */
583 u32 ce_noinfo_count, ue_noinfo_count;
584 u32 ue_mc, ce_mc;
585 u32 *ce_per_layer[EDAC_MAX_LAYERS], *ue_per_layer[EDAC_MAX_LAYERS];
586
449 struct completion complete; 587 struct completion complete;
450 588
451 /* edac sysfs device control */ 589 /* edac sysfs device control */
@@ -458,7 +596,7 @@ struct mem_ctl_info {
458 * by the low level driver. 596 * by the low level driver.
459 * 597 *
460 * Set by the low level driver to provide attributes at the 598 * Set by the low level driver to provide attributes at the
461 * controller level, same level as 'ue_count' and 'ce_count' above. 599 * controller level.
462 * An array of structures, NULL terminated 600 * An array of structures, NULL terminated
463 * 601 *
464 * If attributes are desired, then set to array of attributes 602 * If attributes are desired, then set to array of attributes
diff --git a/include/linux/fb.h b/include/linux/fb.h
index d31cb682e173..a3229d7ab9f2 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -554,6 +554,10 @@ struct fb_cursor_user {
554#define FB_EVENT_FB_UNBIND 0x0E 554#define FB_EVENT_FB_UNBIND 0x0E
555/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga switcheroo */ 555/* CONSOLE-SPECIFIC: remap all consoles to new fb - for vga switcheroo */
556#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F 556#define FB_EVENT_REMAP_ALL_CONSOLE 0x0F
557/* A hardware display blank early change occured */
558#define FB_EARLY_EVENT_BLANK 0x10
559/* A hardware display blank revert early change occured */
560#define FB_R_EARLY_EVENT_BLANK 0x11
557 561
558struct fb_event { 562struct fb_event {
559 struct fb_info *info; 563 struct fb_info *info;
diff --git a/include/linux/fs.h b/include/linux/fs.h
index c0e53372b082..038076b27ea4 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -1681,7 +1681,6 @@ struct inode_operations {
1681 ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t); 1681 ssize_t (*getxattr) (struct dentry *, const char *, void *, size_t);
1682 ssize_t (*listxattr) (struct dentry *, char *, size_t); 1682 ssize_t (*listxattr) (struct dentry *, char *, size_t);
1683 int (*removexattr) (struct dentry *, const char *); 1683 int (*removexattr) (struct dentry *, const char *);
1684 void (*truncate_range)(struct inode *, loff_t, loff_t);
1685 int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start, 1684 int (*fiemap)(struct inode *, struct fiemap_extent_info *, u64 start,
1686 u64 len); 1685 u64 len);
1687} ____cacheline_aligned; 1686} ____cacheline_aligned;
@@ -1764,8 +1763,8 @@ struct super_operations {
1764 * I_FREEING Set when inode is about to be freed but still has dirty 1763 * I_FREEING Set when inode is about to be freed but still has dirty
1765 * pages or buffers attached or the inode itself is still 1764 * pages or buffers attached or the inode itself is still
1766 * dirty. 1765 * dirty.
1767 * I_CLEAR Added by end_writeback(). In this state the inode is clean 1766 * I_CLEAR Added by clear_inode(). In this state the inode is
1768 * and can be destroyed. Inode keeps I_FREEING. 1767 * clean and can be destroyed. Inode keeps I_FREEING.
1769 * 1768 *
1770 * Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are 1769 * Inodes that are I_WILL_FREE, I_FREEING or I_CLEAR are
1771 * prohibited for many purposes. iget() must wait for 1770 * prohibited for many purposes. iget() must wait for
@@ -1773,9 +1772,10 @@ struct super_operations {
1773 * anew. Other functions will just ignore such inodes, 1772 * anew. Other functions will just ignore such inodes,
1774 * if appropriate. I_NEW is used for waiting. 1773 * if appropriate. I_NEW is used for waiting.
1775 * 1774 *
1776 * I_SYNC Synchonized write of dirty inode data. The bits is 1775 * I_SYNC Writeback of inode is running. The bit is set during
1777 * set during data writeback, and cleared with a wakeup 1776 * data writeback, and cleared with a wakeup on the bit
1778 * on the bit address once it is done. 1777 * address once it is done. The bit is also used to pin
1778 * the inode in memory for flusher thread.
1779 * 1779 *
1780 * I_REFERENCED Marks the inode as recently references on the LRU list. 1780 * I_REFERENCED Marks the inode as recently references on the LRU list.
1781 * 1781 *
@@ -2349,7 +2349,7 @@ extern unsigned int get_next_ino(void);
2349 2349
2350extern void __iget(struct inode * inode); 2350extern void __iget(struct inode * inode);
2351extern void iget_failed(struct inode *); 2351extern void iget_failed(struct inode *);
2352extern void end_writeback(struct inode *); 2352extern void clear_inode(struct inode *);
2353extern void __destroy_inode(struct inode *); 2353extern void __destroy_inode(struct inode *);
2354extern struct inode *new_inode_pseudo(struct super_block *sb); 2354extern struct inode *new_inode_pseudo(struct super_block *sb);
2355extern struct inode *new_inode(struct super_block *sb); 2355extern struct inode *new_inode(struct super_block *sb);
diff --git a/include/linux/fsl/mxs-dma.h b/include/linux/fsl/mxs-dma.h
index 203d7c4a3e11..55d870238399 100644
--- a/include/linux/fsl/mxs-dma.h
+++ b/include/linux/fsl/mxs-dma.h
@@ -15,14 +15,6 @@ struct mxs_dma_data {
15 int chan_irq; 15 int chan_irq;
16}; 16};
17 17
18static inline int mxs_dma_is_apbh(struct dma_chan *chan) 18extern int mxs_dma_is_apbh(struct dma_chan *chan);
19{ 19extern int mxs_dma_is_apbx(struct dma_chan *chan);
20 return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbh");
21}
22
23static inline int mxs_dma_is_apbx(struct dma_chan *chan)
24{
25 return !strcmp(dev_name(chan->device->dev), "mxs-dma-apbx");
26}
27
28#endif /* __MACH_MXS_DMA_H__ */ 20#endif /* __MACH_MXS_DMA_H__ */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 581e74b7df95..1e49be49d324 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -391,4 +391,16 @@ static inline bool pm_suspended_storage(void)
391} 391}
392#endif /* CONFIG_PM_SLEEP */ 392#endif /* CONFIG_PM_SLEEP */
393 393
394#ifdef CONFIG_CMA
395
396/* The below functions must be run on a range from a single zone. */
397extern int alloc_contig_range(unsigned long start, unsigned long end,
398 unsigned migratetype);
399extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
400
401/* CMA stuff */
402extern void init_cma_reserved_pageblock(struct page *page);
403
404#endif
405
394#endif /* __LINUX_GFP_H */ 406#endif /* __LINUX_GFP_H */
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index c8af7a2efb52..4c59b1131187 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -59,6 +59,8 @@ extern pmd_t *page_check_address_pmd(struct page *page,
59#define HPAGE_PMD_MASK HPAGE_MASK 59#define HPAGE_PMD_MASK HPAGE_MASK
60#define HPAGE_PMD_SIZE HPAGE_SIZE 60#define HPAGE_PMD_SIZE HPAGE_SIZE
61 61
62extern bool is_vma_temporary_stack(struct vm_area_struct *vma);
63
62#define transparent_hugepage_enabled(__vma) \ 64#define transparent_hugepage_enabled(__vma) \
63 ((transparent_hugepage_flags & \ 65 ((transparent_hugepage_flags & \
64 (1<<TRANSPARENT_HUGEPAGE_FLAG) || \ 66 (1<<TRANSPARENT_HUGEPAGE_FLAG) || \
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 000837e126e6..d5d6bbe2259e 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -284,6 +284,14 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h)
284 284
285#include <asm/hugetlb.h> 285#include <asm/hugetlb.h>
286 286
287#ifndef arch_make_huge_pte
288static inline pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma,
289 struct page *page, int writable)
290{
291 return entry;
292}
293#endif
294
287static inline struct hstate *page_hstate(struct page *page) 295static inline struct hstate *page_hstate(struct page *page)
288{ 296{
289 return size_to_hstate(PAGE_SIZE << compound_order(page)); 297 return size_to_hstate(PAGE_SIZE << compound_order(page));
diff --git a/include/linux/gpio-i2cmux.h b/include/linux/i2c-mux-gpio.h
index 4a333bb0bd0d..a36343a37ebc 100644
--- a/include/linux/gpio-i2cmux.h
+++ b/include/linux/i2c-mux-gpio.h
@@ -1,5 +1,5 @@
1/* 1/*
2 * gpio-i2cmux interface to platform code 2 * i2c-mux-gpio interface to platform code
3 * 3 *
4 * Peter Korsgaard <peter.korsgaard@barco.com> 4 * Peter Korsgaard <peter.korsgaard@barco.com>
5 * 5 *
@@ -8,14 +8,14 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11#ifndef _LINUX_GPIO_I2CMUX_H 11#ifndef _LINUX_I2C_MUX_GPIO_H
12#define _LINUX_GPIO_I2CMUX_H 12#define _LINUX_I2C_MUX_GPIO_H
13 13
14/* MUX has no specific idle mode */ 14/* MUX has no specific idle mode */
15#define GPIO_I2CMUX_NO_IDLE ((unsigned)-1) 15#define I2C_MUX_GPIO_NO_IDLE ((unsigned)-1)
16 16
17/** 17/**
18 * struct gpio_i2cmux_platform_data - Platform-dependent data for gpio-i2cmux 18 * struct i2c_mux_gpio_platform_data - Platform-dependent data for i2c-mux-gpio
19 * @parent: Parent I2C bus adapter number 19 * @parent: Parent I2C bus adapter number
20 * @base_nr: Base I2C bus number to number adapters from or zero for dynamic 20 * @base_nr: Base I2C bus number to number adapters from or zero for dynamic
21 * @values: Array of bitmasks of GPIO settings (low/high) for each 21 * @values: Array of bitmasks of GPIO settings (low/high) for each
@@ -25,7 +25,7 @@
25 * @n_gpios: Number of GPIOs used to control MUX 25 * @n_gpios: Number of GPIOs used to control MUX
26 * @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used 26 * @idle: Bitmask to write to MUX when idle or GPIO_I2CMUX_NO_IDLE if not used
27 */ 27 */
28struct gpio_i2cmux_platform_data { 28struct i2c_mux_gpio_platform_data {
29 int parent; 29 int parent;
30 int base_nr; 30 int base_nr;
31 const unsigned *values; 31 const unsigned *values;
@@ -35,4 +35,4 @@ struct gpio_i2cmux_platform_data {
35 unsigned idle; 35 unsigned idle;
36}; 36};
37 37
38#endif /* _LINUX_GPIO_I2CMUX_H */ 38#endif /* _LINUX_I2C_MUX_GPIO_H */
diff --git a/include/linux/i2c-mux.h b/include/linux/i2c-mux.h
index 747f0cde4164..c79083830014 100644
--- a/include/linux/i2c-mux.h
+++ b/include/linux/i2c-mux.h
@@ -34,7 +34,8 @@
34 * mux control. 34 * mux control.
35 */ 35 */
36struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent, 36struct i2c_adapter *i2c_add_mux_adapter(struct i2c_adapter *parent,
37 void *mux_dev, u32 force_nr, u32 chan_id, 37 struct device *mux_dev,
38 void *mux_priv, u32 force_nr, u32 chan_id,
38 int (*select) (struct i2c_adapter *, 39 int (*select) (struct i2c_adapter *,
39 void *mux_dev, u32 chan_id), 40 void *mux_dev, u32 chan_id),
40 int (*deselect) (struct i2c_adapter *, 41 int (*deselect) (struct i2c_adapter *,
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 195d8b3d9cfb..b66cb601435f 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -232,6 +232,7 @@ struct i2c_client {
232#define to_i2c_client(d) container_of(d, struct i2c_client, dev) 232#define to_i2c_client(d) container_of(d, struct i2c_client, dev)
233 233
234extern struct i2c_client *i2c_verify_client(struct device *dev); 234extern struct i2c_client *i2c_verify_client(struct device *dev);
235extern struct i2c_adapter *i2c_verify_adapter(struct device *dev);
235 236
236static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj) 237static inline struct i2c_client *kobj_to_i2c_client(struct kobject *kobj)
237{ 238{
diff --git a/include/linux/jbd.h b/include/linux/jbd.h
index d211732b9e99..c8f32975f0e4 100644
--- a/include/linux/jbd.h
+++ b/include/linux/jbd.h
@@ -479,12 +479,6 @@ struct transaction_s
479 * How many handles used this transaction? [t_handle_lock] 479 * How many handles used this transaction? [t_handle_lock]
480 */ 480 */
481 int t_handle_count; 481 int t_handle_count;
482
483 /*
484 * This transaction is being forced and some process is
485 * waiting for it to finish.
486 */
487 unsigned int t_synchronous_commit:1;
488}; 482};
489 483
490/** 484/**
@@ -531,6 +525,8 @@ struct transaction_s
531 * transaction 525 * transaction
532 * @j_commit_request: Sequence number of the most recent transaction wanting 526 * @j_commit_request: Sequence number of the most recent transaction wanting
533 * commit 527 * commit
528 * @j_commit_waited: Sequence number of the most recent transaction someone
529 * is waiting for to commit.
534 * @j_uuid: Uuid of client object. 530 * @j_uuid: Uuid of client object.
535 * @j_task: Pointer to the current commit thread for this journal 531 * @j_task: Pointer to the current commit thread for this journal
536 * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a 532 * @j_max_transaction_buffers: Maximum number of metadata buffers to allow in a
@@ -696,6 +692,13 @@ struct journal_s
696 tid_t j_commit_request; 692 tid_t j_commit_request;
697 693
698 /* 694 /*
695 * Sequence number of the most recent transaction someone is waiting
696 * for to commit.
697 * [j_state_lock]
698 */
699 tid_t j_commit_waited;
700
701 /*
699 * Journal uuid: identifies the object (filesystem, LVM volume etc) 702 * Journal uuid: identifies the object (filesystem, LVM volume etc)
700 * backed by this journal. This will eventually be replaced by an array 703 * backed by this journal. This will eventually be replaced by an array
701 * of uuids, allowing us to index multiple devices within a single 704 * of uuids, allowing us to index multiple devices within a single
@@ -861,7 +864,8 @@ extern int journal_destroy (journal_t *);
861extern int journal_recover (journal_t *journal); 864extern int journal_recover (journal_t *journal);
862extern int journal_wipe (journal_t *, int); 865extern int journal_wipe (journal_t *, int);
863extern int journal_skip_recovery (journal_t *); 866extern int journal_skip_recovery (journal_t *);
864extern void journal_update_superblock (journal_t *, int); 867extern void journal_update_sb_log_tail (journal_t *, tid_t, unsigned int,
868 int);
865extern void journal_abort (journal_t *, int); 869extern void journal_abort (journal_t *, int);
866extern int journal_errno (journal_t *); 870extern int journal_errno (journal_t *);
867extern void journal_ack_err (journal_t *); 871extern void journal_ack_err (journal_t *);
diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
index 387571959dd9..6883e197acb9 100644
--- a/include/linux/kallsyms.h
+++ b/include/linux/kallsyms.h
@@ -36,6 +36,7 @@ const char *kallsyms_lookup(unsigned long addr,
36 36
37/* Look up a kernel symbol and return it in a text buffer. */ 37/* Look up a kernel symbol and return it in a text buffer. */
38extern int sprint_symbol(char *buffer, unsigned long address); 38extern int sprint_symbol(char *buffer, unsigned long address);
39extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
39extern int sprint_backtrace(char *buffer, unsigned long address); 40extern int sprint_backtrace(char *buffer, unsigned long address);
40 41
41/* Look up a kernel symbol and print it to the kernel messages. */ 42/* Look up a kernel symbol and print it to the kernel messages. */
@@ -80,6 +81,12 @@ static inline int sprint_symbol(char *buffer, unsigned long addr)
80 return 0; 81 return 0;
81} 82}
82 83
84static inline int sprint_symbol_no_offset(char *buffer, unsigned long addr)
85{
86 *buffer = '\0';
87 return 0;
88}
89
83static inline int sprint_backtrace(char *buffer, unsigned long addr) 90static inline int sprint_backtrace(char *buffer, unsigned long addr)
84{ 91{
85 *buffer = '\0'; 92 *buffer = '\0';
diff --git a/include/linux/kernel-page-flags.h b/include/linux/kernel-page-flags.h
index 26a65711676f..a1bdf6966357 100644
--- a/include/linux/kernel-page-flags.h
+++ b/include/linux/kernel-page-flags.h
@@ -32,6 +32,8 @@
32#define KPF_KSM 21 32#define KPF_KSM 21
33#define KPF_THP 22 33#define KPF_THP 22
34 34
35#ifdef __KERNEL__
36
35/* kernel hacking assistances 37/* kernel hacking assistances
36 * WARNING: subject to change, never rely on them! 38 * WARNING: subject to change, never rely on them!
37 */ 39 */
@@ -44,4 +46,6 @@
44#define KPF_ARCH 38 46#define KPF_ARCH 38
45#define KPF_UNCACHED 39 47#define KPF_UNCACHED 39
46 48
49#endif /* __KERNEL__ */
50
47#endif /* LINUX_KERNEL_PAGE_FLAGS_H */ 51#endif /* LINUX_KERNEL_PAGE_FLAGS_H */
diff --git a/include/linux/kvm.h b/include/linux/kvm.h
index 6c322a90b92f..09f2b3aa2da7 100644
--- a/include/linux/kvm.h
+++ b/include/linux/kvm.h
@@ -449,6 +449,30 @@ struct kvm_ppc_pvinfo {
449 __u8 pad[108]; 449 __u8 pad[108];
450}; 450};
451 451
452/* for KVM_PPC_GET_SMMU_INFO */
453#define KVM_PPC_PAGE_SIZES_MAX_SZ 8
454
455struct kvm_ppc_one_page_size {
456 __u32 page_shift; /* Page shift (or 0) */
457 __u32 pte_enc; /* Encoding in the HPTE (>>12) */
458};
459
460struct kvm_ppc_one_seg_page_size {
461 __u32 page_shift; /* Base page shift of segment (or 0) */
462 __u32 slb_enc; /* SLB encoding for BookS */
463 struct kvm_ppc_one_page_size enc[KVM_PPC_PAGE_SIZES_MAX_SZ];
464};
465
466#define KVM_PPC_PAGE_SIZES_REAL 0x00000001
467#define KVM_PPC_1T_SEGMENTS 0x00000002
468
469struct kvm_ppc_smmu_info {
470 __u64 flags;
471 __u32 slb_size;
472 __u32 pad;
473 struct kvm_ppc_one_seg_page_size sps[KVM_PPC_PAGE_SIZES_MAX_SZ];
474};
475
452#define KVMIO 0xAE 476#define KVMIO 0xAE
453 477
454/* machine type bits, to be used as argument to KVM_CREATE_VM */ 478/* machine type bits, to be used as argument to KVM_CREATE_VM */
@@ -589,6 +613,10 @@ struct kvm_ppc_pvinfo {
589#define KVM_CAP_S390_UCONTROL 73 613#define KVM_CAP_S390_UCONTROL 73
590#define KVM_CAP_SYNC_REGS 74 614#define KVM_CAP_SYNC_REGS 74
591#define KVM_CAP_PCI_2_3 75 615#define KVM_CAP_PCI_2_3 75
616#define KVM_CAP_KVMCLOCK_CTRL 76
617#define KVM_CAP_SIGNAL_MSI 77
618#define KVM_CAP_PPC_GET_SMMU_INFO 78
619#define KVM_CAP_S390_COW 79
592 620
593#ifdef KVM_CAP_IRQ_ROUTING 621#ifdef KVM_CAP_IRQ_ROUTING
594 622
@@ -714,6 +742,14 @@ struct kvm_one_reg {
714 __u64 addr; 742 __u64 addr;
715}; 743};
716 744
745struct kvm_msi {
746 __u32 address_lo;
747 __u32 address_hi;
748 __u32 data;
749 __u32 flags;
750 __u8 pad[16];
751};
752
717/* 753/*
718 * ioctls for VM fds 754 * ioctls for VM fds
719 */ 755 */
@@ -788,6 +824,10 @@ struct kvm_s390_ucas_mapping {
788/* Available with KVM_CAP_PCI_2_3 */ 824/* Available with KVM_CAP_PCI_2_3 */
789#define KVM_ASSIGN_SET_INTX_MASK _IOW(KVMIO, 0xa4, \ 825#define KVM_ASSIGN_SET_INTX_MASK _IOW(KVMIO, 0xa4, \
790 struct kvm_assigned_pci_dev) 826 struct kvm_assigned_pci_dev)
827/* Available with KVM_CAP_SIGNAL_MSI */
828#define KVM_SIGNAL_MSI _IOW(KVMIO, 0xa5, struct kvm_msi)
829/* Available with KVM_CAP_PPC_GET_SMMU_INFO */
830#define KVM_PPC_GET_SMMU_INFO _IOR(KVMIO, 0xa6, struct kvm_ppc_smmu_info)
791 831
792/* 832/*
793 * ioctls for vcpu fds 833 * ioctls for vcpu fds
@@ -859,6 +899,8 @@ struct kvm_s390_ucas_mapping {
859/* Available with KVM_CAP_ONE_REG */ 899/* Available with KVM_CAP_ONE_REG */
860#define KVM_GET_ONE_REG _IOW(KVMIO, 0xab, struct kvm_one_reg) 900#define KVM_GET_ONE_REG _IOW(KVMIO, 0xab, struct kvm_one_reg)
861#define KVM_SET_ONE_REG _IOW(KVMIO, 0xac, struct kvm_one_reg) 901#define KVM_SET_ONE_REG _IOW(KVMIO, 0xac, struct kvm_one_reg)
902/* VM is being stopped by host */
903#define KVM_KVMCLOCK_CTRL _IO(KVMIO, 0xad)
862 904
863#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0) 905#define KVM_DEV_ASSIGN_ENABLE_IOMMU (1 << 0)
864#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1) 906#define KVM_DEV_ASSIGN_PCI_2_3 (1 << 1)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 72cbf08d45fb..c4464356b35b 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -35,6 +35,20 @@
35#endif 35#endif
36 36
37/* 37/*
38 * If we support unaligned MMIO, at most one fragment will be split into two:
39 */
40#ifdef KVM_UNALIGNED_MMIO
41# define KVM_EXTRA_MMIO_FRAGMENTS 1
42#else
43# define KVM_EXTRA_MMIO_FRAGMENTS 0
44#endif
45
46#define KVM_USER_MMIO_SIZE 8
47
48#define KVM_MAX_MMIO_FRAGMENTS \
49 (KVM_MMIO_SIZE / KVM_USER_MMIO_SIZE + KVM_EXTRA_MMIO_FRAGMENTS)
50
51/*
38 * vcpu->requests bit members 52 * vcpu->requests bit members
39 */ 53 */
40#define KVM_REQ_TLB_FLUSH 0 54#define KVM_REQ_TLB_FLUSH 0
@@ -68,10 +82,11 @@ struct kvm_io_range {
68 struct kvm_io_device *dev; 82 struct kvm_io_device *dev;
69}; 83};
70 84
85#define NR_IOBUS_DEVS 1000
86
71struct kvm_io_bus { 87struct kvm_io_bus {
72 int dev_count; 88 int dev_count;
73#define NR_IOBUS_DEVS 300 89 struct kvm_io_range range[];
74 struct kvm_io_range range[NR_IOBUS_DEVS];
75}; 90};
76 91
77enum kvm_bus { 92enum kvm_bus {
@@ -113,7 +128,18 @@ int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
113enum { 128enum {
114 OUTSIDE_GUEST_MODE, 129 OUTSIDE_GUEST_MODE,
115 IN_GUEST_MODE, 130 IN_GUEST_MODE,
116 EXITING_GUEST_MODE 131 EXITING_GUEST_MODE,
132 READING_SHADOW_PAGE_TABLES,
133};
134
135/*
136 * Sometimes a large or cross-page mmio needs to be broken up into separate
137 * exits for userspace servicing.
138 */
139struct kvm_mmio_fragment {
140 gpa_t gpa;
141 void *data;
142 unsigned len;
117}; 143};
118 144
119struct kvm_vcpu { 145struct kvm_vcpu {
@@ -143,10 +169,9 @@ struct kvm_vcpu {
143 int mmio_needed; 169 int mmio_needed;
144 int mmio_read_completed; 170 int mmio_read_completed;
145 int mmio_is_write; 171 int mmio_is_write;
146 int mmio_size; 172 int mmio_cur_fragment;
147 int mmio_index; 173 int mmio_nr_fragments;
148 unsigned char mmio_data[KVM_MMIO_SIZE]; 174 struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
149 gpa_t mmio_phys_addr;
150#endif 175#endif
151 176
152#ifdef CONFIG_KVM_ASYNC_PF 177#ifdef CONFIG_KVM_ASYNC_PF
@@ -178,8 +203,6 @@ struct kvm_memory_slot {
178 unsigned long flags; 203 unsigned long flags;
179 unsigned long *rmap; 204 unsigned long *rmap;
180 unsigned long *dirty_bitmap; 205 unsigned long *dirty_bitmap;
181 unsigned long *dirty_bitmap_head;
182 unsigned long nr_dirty_pages;
183 struct kvm_arch_memory_slot arch; 206 struct kvm_arch_memory_slot arch;
184 unsigned long userspace_addr; 207 unsigned long userspace_addr;
185 int user_alloc; 208 int user_alloc;
@@ -438,6 +461,8 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
438 gfn_t gfn); 461 gfn_t gfn);
439 462
440void kvm_vcpu_block(struct kvm_vcpu *vcpu); 463void kvm_vcpu_block(struct kvm_vcpu *vcpu);
464void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
465bool kvm_vcpu_yield_to(struct kvm_vcpu *target);
441void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu); 466void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
442void kvm_resched(struct kvm_vcpu *vcpu); 467void kvm_resched(struct kvm_vcpu *vcpu);
443void kvm_load_guest_fpu(struct kvm_vcpu *vcpu); 468void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
@@ -506,6 +531,7 @@ int kvm_arch_hardware_setup(void);
506void kvm_arch_hardware_unsetup(void); 531void kvm_arch_hardware_unsetup(void);
507void kvm_arch_check_processor_compat(void *rtn); 532void kvm_arch_check_processor_compat(void *rtn);
508int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu); 533int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
534int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
509 535
510void kvm_free_physmem(struct kvm *kvm); 536void kvm_free_physmem(struct kvm *kvm);
511 537
@@ -521,6 +547,15 @@ static inline void kvm_arch_free_vm(struct kvm *kvm)
521} 547}
522#endif 548#endif
523 549
550static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
551{
552#ifdef __KVM_HAVE_ARCH_WQP
553 return vcpu->arch.wqp;
554#else
555 return &vcpu->wq;
556#endif
557}
558
524int kvm_arch_init_vm(struct kvm *kvm, unsigned long type); 559int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
525void kvm_arch_destroy_vm(struct kvm *kvm); 560void kvm_arch_destroy_vm(struct kvm *kvm);
526void kvm_free_all_assigned_devices(struct kvm *kvm); 561void kvm_free_all_assigned_devices(struct kvm *kvm);
@@ -769,6 +804,8 @@ int kvm_set_irq_routing(struct kvm *kvm,
769 unsigned flags); 804 unsigned flags);
770void kvm_free_irq_routing(struct kvm *kvm); 805void kvm_free_irq_routing(struct kvm *kvm);
771 806
807int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);
808
772#else 809#else
773 810
774static inline void kvm_free_irq_routing(struct kvm *kvm) {} 811static inline void kvm_free_irq_routing(struct kvm *kvm) {}
diff --git a/include/linux/lcd.h b/include/linux/lcd.h
index 8877123f2d6e..e00c3b0ebc6b 100644
--- a/include/linux/lcd.h
+++ b/include/linux/lcd.h
@@ -40,6 +40,16 @@ struct lcd_ops {
40 /* Get the LCD panel power status (0: full on, 1..3: controller 40 /* Get the LCD panel power status (0: full on, 1..3: controller
41 power on, flat panel power off, 4: full off), see FB_BLANK_XXX */ 41 power on, flat panel power off, 4: full off), see FB_BLANK_XXX */
42 int (*get_power)(struct lcd_device *); 42 int (*get_power)(struct lcd_device *);
43 /*
44 * Enable or disable power to the LCD(0: on; 4: off, see FB_BLANK_XXX)
45 * and this callback would be called proir to fb driver's callback.
46 *
47 * P.S. note that if early_set_power is not NULL then early fb notifier
48 * would be registered.
49 */
50 int (*early_set_power)(struct lcd_device *, int power);
51 /* revert the effects of the early blank event. */
52 int (*r_early_set_power)(struct lcd_device *, int power);
43 /* Enable or disable power to the LCD (0: on; 4: off, see FB_BLANK_XXX) */ 53 /* Enable or disable power to the LCD (0: on; 4: off, see FB_BLANK_XXX) */
44 int (*set_power)(struct lcd_device *, int power); 54 int (*set_power)(struct lcd_device *, int power);
45 /* Get the current contrast setting (0-max_contrast) */ 55 /* Get the current contrast setting (0-max_contrast) */
diff --git a/include/linux/led-lm3530.h b/include/linux/led-lm3530.h
index eeae6e742471..4b133479d6ea 100644
--- a/include/linux/led-lm3530.h
+++ b/include/linux/led-lm3530.h
@@ -92,7 +92,7 @@ struct lm3530_pwm_data {
92 * @als2_resistor_sel: internal resistance from ALS2 input to ground 92 * @als2_resistor_sel: internal resistance from ALS2 input to ground
93 * @als_vmin: als input voltage calibrated for max brightness in mV 93 * @als_vmin: als input voltage calibrated for max brightness in mV
94 * @als_vmax: als input voltage calibrated for min brightness in mV 94 * @als_vmax: als input voltage calibrated for min brightness in mV
95 * @brt_val: brightness value (0-255) 95 * @brt_val: brightness value (0-127)
96 * @pwm_data: PWM control functions (only valid when the mode is PWM) 96 * @pwm_data: PWM control functions (only valid when the mode is PWM)
97 */ 97 */
98struct lm3530_platform_data { 98struct lm3530_platform_data {
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 5884def15a24..39eee41d8c6f 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -73,6 +73,8 @@ struct led_classdev {
73 struct led_trigger *trigger; 73 struct led_trigger *trigger;
74 struct list_head trig_list; 74 struct list_head trig_list;
75 void *trigger_data; 75 void *trigger_data;
76 /* true if activated - deactivate routine uses it to do cleanup */
77 bool activated;
76#endif 78#endif
77}; 79};
78 80
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index f94efd2f6c27..83e7ba90d6e5 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -63,12 +63,7 @@ extern int mem_cgroup_cache_charge(struct page *page, struct mm_struct *mm,
63 gfp_t gfp_mask); 63 gfp_t gfp_mask);
64 64
65struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *); 65struct lruvec *mem_cgroup_zone_lruvec(struct zone *, struct mem_cgroup *);
66struct lruvec *mem_cgroup_lru_add_list(struct zone *, struct page *, 66struct lruvec *mem_cgroup_page_lruvec(struct page *, struct zone *);
67 enum lru_list);
68void mem_cgroup_lru_del_list(struct page *, enum lru_list);
69void mem_cgroup_lru_del(struct page *);
70struct lruvec *mem_cgroup_lru_move_lists(struct zone *, struct page *,
71 enum lru_list, enum lru_list);
72 67
73/* For coalescing uncharge for reducing memcg' overhead*/ 68/* For coalescing uncharge for reducing memcg' overhead*/
74extern void mem_cgroup_uncharge_start(void); 69extern void mem_cgroup_uncharge_start(void);
@@ -79,6 +74,8 @@ extern void mem_cgroup_uncharge_cache_page(struct page *page);
79 74
80extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask, 75extern void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
81 int order); 76 int order);
77bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
78 struct mem_cgroup *memcg);
82int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg); 79int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg);
83 80
84extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page); 81extern struct mem_cgroup *try_get_mem_cgroup_from_page(struct page *page);
@@ -92,10 +89,13 @@ static inline
92int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup) 89int mm_match_cgroup(const struct mm_struct *mm, const struct mem_cgroup *cgroup)
93{ 90{
94 struct mem_cgroup *memcg; 91 struct mem_cgroup *memcg;
92 int match;
93
95 rcu_read_lock(); 94 rcu_read_lock();
96 memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner)); 95 memcg = mem_cgroup_from_task(rcu_dereference((mm)->owner));
96 match = __mem_cgroup_same_or_subtree(cgroup, memcg);
97 rcu_read_unlock(); 97 rcu_read_unlock();
98 return cgroup == memcg; 98 return match;
99} 99}
100 100
101extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg); 101extern struct cgroup_subsys_state *mem_cgroup_css(struct mem_cgroup *memcg);
@@ -114,17 +114,11 @@ void mem_cgroup_iter_break(struct mem_cgroup *, struct mem_cgroup *);
114/* 114/*
115 * For memory reclaim. 115 * For memory reclaim.
116 */ 116 */
117int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, 117int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec);
118 struct zone *zone); 118int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec);
119int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg,
120 struct zone *zone);
121int mem_cgroup_select_victim_node(struct mem_cgroup *memcg); 119int mem_cgroup_select_victim_node(struct mem_cgroup *memcg);
122unsigned long mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, 120unsigned long mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list);
123 int nid, int zid, unsigned int lrumask); 121void mem_cgroup_update_lru_size(struct lruvec *, enum lru_list, int);
124struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
125 struct zone *zone);
126struct zone_reclaim_stat*
127mem_cgroup_get_reclaim_stat_from_page(struct page *page);
128extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg, 122extern void mem_cgroup_print_oom_info(struct mem_cgroup *memcg,
129 struct task_struct *p); 123 struct task_struct *p);
130extern void mem_cgroup_replace_page_cache(struct page *oldpage, 124extern void mem_cgroup_replace_page_cache(struct page *oldpage,
@@ -251,25 +245,8 @@ static inline struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
251 return &zone->lruvec; 245 return &zone->lruvec;
252} 246}
253 247
254static inline struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, 248static inline struct lruvec *mem_cgroup_page_lruvec(struct page *page,
255 struct page *page, 249 struct zone *zone)
256 enum lru_list lru)
257{
258 return &zone->lruvec;
259}
260
261static inline void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru)
262{
263}
264
265static inline void mem_cgroup_lru_del(struct page *page)
266{
267}
268
269static inline struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
270 struct page *page,
271 enum lru_list from,
272 enum lru_list to)
273{ 250{
274 return &zone->lruvec; 251 return &zone->lruvec;
275} 252}
@@ -333,35 +310,27 @@ static inline bool mem_cgroup_disabled(void)
333} 310}
334 311
335static inline int 312static inline int
336mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone) 313mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
337{ 314{
338 return 1; 315 return 1;
339} 316}
340 317
341static inline int 318static inline int
342mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone) 319mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
343{ 320{
344 return 1; 321 return 1;
345} 322}
346 323
347static inline unsigned long 324static inline unsigned long
348mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid, 325mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
349 unsigned int lru_mask)
350{ 326{
351 return 0; 327 return 0;
352} 328}
353 329
354 330static inline void
355static inline struct zone_reclaim_stat* 331mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
356mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg, struct zone *zone) 332 int increment)
357{
358 return NULL;
359}
360
361static inline struct zone_reclaim_stat*
362mem_cgroup_get_reclaim_stat_from_page(struct page *page)
363{ 333{
364 return NULL;
365} 334}
366 335
367static inline void 336static inline void
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 7c727a90d70d..4aa42732e47f 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -225,8 +225,8 @@ static inline void check_highest_zone(enum zone_type k)
225 policy_zone = k; 225 policy_zone = k;
226} 226}
227 227
228int do_migrate_pages(struct mm_struct *mm, 228int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
229 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags); 229 const nodemask_t *to, int flags);
230 230
231 231
232#ifdef CONFIG_TMPFS 232#ifdef CONFIG_TMPFS
@@ -354,9 +354,8 @@ static inline bool mempolicy_nodemask_intersects(struct task_struct *tsk,
354 return false; 354 return false;
355} 355}
356 356
357static inline int do_migrate_pages(struct mm_struct *mm, 357static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
358 const nodemask_t *from_nodes, 358 const nodemask_t *to, int flags)
359 const nodemask_t *to_nodes, int flags)
360{ 359{
361 return 0; 360 return 0;
362} 361}
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index fccc3002f271..91dd3ef63e99 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -7,6 +7,7 @@
7#ifndef MFD_AB8500_H 7#ifndef MFD_AB8500_H
8#define MFD_AB8500_H 8#define MFD_AB8500_H
9 9
10#include <linux/atomic.h>
10#include <linux/mutex.h> 11#include <linux/mutex.h>
11 12
12struct device; 13struct device;
@@ -194,6 +195,14 @@ enum ab8500_version {
194#define AB9540_INT_GPIO52F 123 195#define AB9540_INT_GPIO52F 123
195#define AB9540_INT_GPIO53F 124 196#define AB9540_INT_GPIO53F 124
196#define AB9540_INT_GPIO54F 125 /* not 8505 */ 197#define AB9540_INT_GPIO54F 125 /* not 8505 */
198/* ab8500_irq_regoffset[16] -> IT[Source|Latch|Mask]25 */
199#define AB8505_INT_KEYSTUCK 128
200#define AB8505_INT_IKR 129
201#define AB8505_INT_IKP 130
202#define AB8505_INT_KP 131
203#define AB8505_INT_KEYDEGLITCH 132
204#define AB8505_INT_MODPWRSTATUSF 134
205#define AB8505_INT_MODPWRSTATUSR 135
197 206
198/* 207/*
199 * AB8500_AB9540_NR_IRQS is used when configuring the IRQ numbers for the 208 * AB8500_AB9540_NR_IRQS is used when configuring the IRQ numbers for the
@@ -203,8 +212,8 @@ enum ab8500_version {
203 * which is larger. 212 * which is larger.
204 */ 213 */
205#define AB8500_NR_IRQS 112 214#define AB8500_NR_IRQS 112
206#define AB8505_NR_IRQS 128 215#define AB8505_NR_IRQS 136
207#define AB9540_NR_IRQS 128 216#define AB9540_NR_IRQS 136
208/* This is set to the roof of any AB8500 chip variant IRQ counts */ 217/* This is set to the roof of any AB8500 chip variant IRQ counts */
209#define AB8500_MAX_NR_IRQS AB9540_NR_IRQS 218#define AB8500_MAX_NR_IRQS AB9540_NR_IRQS
210 219
@@ -216,6 +225,7 @@ enum ab8500_version {
216 * @dev: parent device 225 * @dev: parent device
217 * @lock: read/write operations lock 226 * @lock: read/write operations lock
218 * @irq_lock: genirq bus lock 227 * @irq_lock: genirq bus lock
228 * @transfer_ongoing: 0 if no transfer ongoing
219 * @irq: irq line 229 * @irq: irq line
220 * @version: chip version id (e.g. ab8500 or ab9540) 230 * @version: chip version id (e.g. ab8500 or ab9540)
221 * @chip_id: chip revision id 231 * @chip_id: chip revision id
@@ -234,7 +244,7 @@ struct ab8500 {
234 struct device *dev; 244 struct device *dev;
235 struct mutex lock; 245 struct mutex lock;
236 struct mutex irq_lock; 246 struct mutex irq_lock;
237 247 atomic_t transfer_ongoing;
238 int irq_base; 248 int irq_base;
239 int irq; 249 int irq;
240 enum ab8500_version version; 250 enum ab8500_version version;
@@ -280,6 +290,8 @@ extern int __devinit ab8500_init(struct ab8500 *ab8500,
280 enum ab8500_version version); 290 enum ab8500_version version);
281extern int __devexit ab8500_exit(struct ab8500 *ab8500); 291extern int __devexit ab8500_exit(struct ab8500 *ab8500);
282 292
293extern int ab8500_suspend(struct ab8500 *ab8500);
294
283static inline int is_ab8500(struct ab8500 *ab) 295static inline int is_ab8500(struct ab8500 *ab)
284{ 296{
285 return ab->version == AB8500_VERSION_AB8500; 297 return ab->version == AB8500_VERSION_AB8500;
diff --git a/include/linux/mfd/anatop.h b/include/linux/mfd/anatop.h
index 22c1007d3ec5..7f92acf03d9e 100644
--- a/include/linux/mfd/anatop.h
+++ b/include/linux/mfd/anatop.h
@@ -34,7 +34,7 @@ struct anatop {
34 spinlock_t reglock; 34 spinlock_t reglock;
35}; 35};
36 36
37extern u32 anatop_get_bits(struct anatop *, u32, int, int); 37extern u32 anatop_read_reg(struct anatop *, u32);
38extern void anatop_set_bits(struct anatop *, u32, int, int, u32); 38extern void anatop_write_reg(struct anatop *, u32, u32, u32);
39 39
40#endif /* __LINUX_MFD_ANATOP_H */ 40#endif /* __LINUX_MFD_ANATOP_H */
diff --git a/include/linux/mfd/asic3.h b/include/linux/mfd/asic3.h
index ef6faa5cee46..e1148d037e7b 100644
--- a/include/linux/mfd/asic3.h
+++ b/include/linux/mfd/asic3.h
@@ -31,6 +31,8 @@ struct asic3_platform_data {
31 31
32 unsigned int gpio_base; 32 unsigned int gpio_base;
33 33
34 unsigned int clock_rate;
35
34 struct asic3_led *leds; 36 struct asic3_led *leds;
35}; 37};
36 38
diff --git a/include/linux/mfd/da9052/da9052.h b/include/linux/mfd/da9052/da9052.h
index 8313cd9658e3..0507c4c21a7d 100644
--- a/include/linux/mfd/da9052/da9052.h
+++ b/include/linux/mfd/da9052/da9052.h
@@ -33,6 +33,18 @@
33 33
34#include <linux/mfd/da9052/reg.h> 34#include <linux/mfd/da9052/reg.h>
35 35
36/* Common - HWMON Channel Definations */
37#define DA9052_ADC_VDDOUT 0
38#define DA9052_ADC_ICH 1
39#define DA9052_ADC_TBAT 2
40#define DA9052_ADC_VBAT 3
41#define DA9052_ADC_IN4 4
42#define DA9052_ADC_IN5 5
43#define DA9052_ADC_IN6 6
44#define DA9052_ADC_TSI 7
45#define DA9052_ADC_TJUNC 8
46#define DA9052_ADC_VBBAT 9
47
36#define DA9052_IRQ_DCIN 0 48#define DA9052_IRQ_DCIN 0
37#define DA9052_IRQ_VBUS 1 49#define DA9052_IRQ_VBUS 1
38#define DA9052_IRQ_DCINREM 2 50#define DA9052_IRQ_DCINREM 2
@@ -79,6 +91,9 @@ struct da9052 {
79 struct device *dev; 91 struct device *dev;
80 struct regmap *regmap; 92 struct regmap *regmap;
81 93
94 struct mutex auxadc_lock;
95 struct completion done;
96
82 int irq_base; 97 int irq_base;
83 struct regmap_irq_chip_data *irq_data; 98 struct regmap_irq_chip_data *irq_data;
84 u8 chip_id; 99 u8 chip_id;
@@ -86,6 +101,10 @@ struct da9052 {
86 int chip_irq; 101 int chip_irq;
87}; 102};
88 103
104/* ADC API */
105int da9052_adc_manual_read(struct da9052 *da9052, unsigned char channel);
106int da9052_adc_read_temp(struct da9052 *da9052);
107
89/* Device I/O API */ 108/* Device I/O API */
90static inline int da9052_reg_read(struct da9052 *da9052, unsigned char reg) 109static inline int da9052_reg_read(struct da9052 *da9052, unsigned char reg)
91{ 110{
diff --git a/include/linux/mfd/lm3533.h b/include/linux/mfd/lm3533.h
new file mode 100644
index 000000000000..594bc591f256
--- /dev/null
+++ b/include/linux/mfd/lm3533.h
@@ -0,0 +1,104 @@
1/*
2 * lm3533.h -- LM3533 interface
3 *
4 * Copyright (C) 2011-2012 Texas Instruments
5 *
6 * Author: Johan Hovold <jhovold@gmail.com>
7 *
8 * This program is free software; you can redistribute it and/or modify it
9 * under the terms of the GNU General Public License as published by the
10 * Free Software Foundation; either version 2 of the License, or (at your
11 * option) any later version.
12 */
13
14#ifndef __LINUX_MFD_LM3533_H
15#define __LINUX_MFD_LM3533_H
16
17#define LM3533_ATTR_RO(_name) \
18 DEVICE_ATTR(_name, S_IRUGO, show_##_name, NULL)
19#define LM3533_ATTR_RW(_name) \
20 DEVICE_ATTR(_name, S_IRUGO | S_IWUSR , show_##_name, store_##_name)
21
22struct device;
23struct regmap;
24
25struct lm3533 {
26 struct device *dev;
27
28 struct regmap *regmap;
29
30 int gpio_hwen;
31 int irq;
32
33 unsigned have_als:1;
34 unsigned have_backlights:1;
35 unsigned have_leds:1;
36};
37
38struct lm3533_ctrlbank {
39 struct lm3533 *lm3533;
40 struct device *dev;
41 int id;
42};
43
44struct lm3533_als_platform_data {
45 unsigned pwm_mode:1; /* PWM input mode (default analog) */
46 u8 r_select; /* 1 - 127 (ignored in PWM-mode) */
47};
48
49struct lm3533_bl_platform_data {
50 char *name;
51 u16 max_current; /* 5000 - 29800 uA (800 uA step) */
52 u8 default_brightness; /* 0 - 255 */
53 u8 pwm; /* 0 - 0x3f */
54};
55
56struct lm3533_led_platform_data {
57 char *name;
58 const char *default_trigger;
59 u16 max_current; /* 5000 - 29800 uA (800 uA step) */
60 u8 pwm; /* 0 - 0x3f */
61};
62
63enum lm3533_boost_freq {
64 LM3533_BOOST_FREQ_500KHZ,
65 LM3533_BOOST_FREQ_1000KHZ,
66};
67
68enum lm3533_boost_ovp {
69 LM3533_BOOST_OVP_16V,
70 LM3533_BOOST_OVP_24V,
71 LM3533_BOOST_OVP_32V,
72 LM3533_BOOST_OVP_40V,
73};
74
75struct lm3533_platform_data {
76 int gpio_hwen;
77
78 enum lm3533_boost_ovp boost_ovp;
79 enum lm3533_boost_freq boost_freq;
80
81 struct lm3533_als_platform_data *als;
82
83 struct lm3533_bl_platform_data *backlights;
84 int num_backlights;
85
86 struct lm3533_led_platform_data *leds;
87 int num_leds;
88};
89
90extern int lm3533_ctrlbank_enable(struct lm3533_ctrlbank *cb);
91extern int lm3533_ctrlbank_disable(struct lm3533_ctrlbank *cb);
92
93extern int lm3533_ctrlbank_set_brightness(struct lm3533_ctrlbank *cb, u8 val);
94extern int lm3533_ctrlbank_get_brightness(struct lm3533_ctrlbank *cb, u8 *val);
95extern int lm3533_ctrlbank_set_max_current(struct lm3533_ctrlbank *cb,
96 u16 imax);
97extern int lm3533_ctrlbank_set_pwm(struct lm3533_ctrlbank *cb, u8 val);
98extern int lm3533_ctrlbank_get_pwm(struct lm3533_ctrlbank *cb, u8 *val);
99
100extern int lm3533_read(struct lm3533 *lm3533, u8 reg, u8 *val);
101extern int lm3533_write(struct lm3533 *lm3533, u8 reg, u8 val);
102extern int lm3533_update(struct lm3533 *lm3533, u8 reg, u8 val, u8 mask);
103
104#endif /* __LINUX_MFD_LM3533_H */
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
new file mode 100644
index 000000000000..fec5256c3f5d
--- /dev/null
+++ b/include/linux/mfd/lpc_ich.h
@@ -0,0 +1,48 @@
1/*
2 * linux/drivers/mfd/lpc_ich.h
3 *
4 * Copyright (c) 2012 Extreme Engineering Solution, Inc.
5 * Author: Aaron Sierra <asierra@xes-inc.com>
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License 2 as published
9 * by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; see the file COPYING. If not, write to
18 * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
19 */
20#ifndef LPC_ICH_H
21#define LPC_ICH_H
22
23/* Watchdog resources */
24#define ICH_RES_IO_TCO 0
25#define ICH_RES_IO_SMI 1
26#define ICH_RES_MEM_OFF 2
27#define ICH_RES_MEM_GCS 0
28
29/* GPIO resources */
30#define ICH_RES_GPIO 0
31#define ICH_RES_GPE0 1
32
33/* GPIO compatibility */
34#define ICH_I3100_GPIO 0x401
35#define ICH_V5_GPIO 0x501
36#define ICH_V6_GPIO 0x601
37#define ICH_V7_GPIO 0x701
38#define ICH_V9_GPIO 0x801
39#define ICH_V10CORP_GPIO 0xa01
40#define ICH_V10CONS_GPIO 0xa11
41
42struct lpc_ich_info {
43 char name[32];
44 unsigned int iTCO_version;
45 unsigned int gpio_version;
46};
47
48#endif
diff --git a/include/linux/mfd/max77693-private.h b/include/linux/mfd/max77693-private.h
new file mode 100644
index 000000000000..68263c5fa53c
--- /dev/null
+++ b/include/linux/mfd/max77693-private.h
@@ -0,0 +1,227 @@
1/*
2 * max77693-private.h - Voltage regulator driver for the Maxim 77693
3 *
4 * Copyright (C) 2012 Samsung Electronics
5 * SangYoung Son <hello.son@samsung.com>
6 *
7 * This program is not provided / owned by Maxim Integrated Products.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 */
23
24#ifndef __LINUX_MFD_MAX77693_PRIV_H
25#define __LINUX_MFD_MAX77693_PRIV_H
26
27#include <linux/i2c.h>
28
29#define MAX77693_NUM_IRQ_MUIC_REGS 3
30#define MAX77693_REG_INVALID (0xff)
31
32/* Slave addr = 0xCC: PMIC, Charger, Flash LED */
33enum max77693_pmic_reg {
34 MAX77693_LED_REG_IFLASH1 = 0x00,
35 MAX77693_LED_REG_IFLASH2 = 0x01,
36 MAX77693_LED_REG_ITORCH = 0x02,
37 MAX77693_LED_REG_ITORCHTIMER = 0x03,
38 MAX77693_LED_REG_FLASH_TIMER = 0x04,
39 MAX77693_LED_REG_FLASH_EN = 0x05,
40 MAX77693_LED_REG_MAX_FLASH1 = 0x06,
41 MAX77693_LED_REG_MAX_FLASH2 = 0x07,
42 MAX77693_LED_REG_MAX_FLASH3 = 0x08,
43 MAX77693_LED_REG_MAX_FLASH4 = 0x09,
44 MAX77693_LED_REG_VOUT_CNTL = 0x0A,
45 MAX77693_LED_REG_VOUT_FLASH1 = 0x0B,
46 MAX77693_LED_REG_VOUT_FLASH2 = 0x0C,
47 MAX77693_LED_REG_FLASH_INT = 0x0E,
48 MAX77693_LED_REG_FLASH_INT_MASK = 0x0F,
49 MAX77693_LED_REG_FLASH_INT_STATUS = 0x10,
50
51 MAX77693_PMIC_REG_PMIC_ID1 = 0x20,
52 MAX77693_PMIC_REG_PMIC_ID2 = 0x21,
53 MAX77693_PMIC_REG_INTSRC = 0x22,
54 MAX77693_PMIC_REG_INTSRC_MASK = 0x23,
55 MAX77693_PMIC_REG_TOPSYS_INT = 0x24,
56 MAX77693_PMIC_REG_TOPSYS_INT_MASK = 0x26,
57 MAX77693_PMIC_REG_TOPSYS_STAT = 0x28,
58 MAX77693_PMIC_REG_MAINCTRL1 = 0x2A,
59 MAX77693_PMIC_REG_LSCNFG = 0x2B,
60
61 MAX77693_CHG_REG_CHG_INT = 0xB0,
62 MAX77693_CHG_REG_CHG_INT_MASK = 0xB1,
63 MAX77693_CHG_REG_CHG_INT_OK = 0xB2,
64 MAX77693_CHG_REG_CHG_DETAILS_00 = 0xB3,
65 MAX77693_CHG_REG_CHG_DETAILS_01 = 0xB4,
66 MAX77693_CHG_REG_CHG_DETAILS_02 = 0xB5,
67 MAX77693_CHG_REG_CHG_DETAILS_03 = 0xB6,
68 MAX77693_CHG_REG_CHG_CNFG_00 = 0xB7,
69 MAX77693_CHG_REG_CHG_CNFG_01 = 0xB8,
70 MAX77693_CHG_REG_CHG_CNFG_02 = 0xB9,
71 MAX77693_CHG_REG_CHG_CNFG_03 = 0xBA,
72 MAX77693_CHG_REG_CHG_CNFG_04 = 0xBB,
73 MAX77693_CHG_REG_CHG_CNFG_05 = 0xBC,
74 MAX77693_CHG_REG_CHG_CNFG_06 = 0xBD,
75 MAX77693_CHG_REG_CHG_CNFG_07 = 0xBE,
76 MAX77693_CHG_REG_CHG_CNFG_08 = 0xBF,
77 MAX77693_CHG_REG_CHG_CNFG_09 = 0xC0,
78 MAX77693_CHG_REG_CHG_CNFG_10 = 0xC1,
79 MAX77693_CHG_REG_CHG_CNFG_11 = 0xC2,
80 MAX77693_CHG_REG_CHG_CNFG_12 = 0xC3,
81 MAX77693_CHG_REG_CHG_CNFG_13 = 0xC4,
82 MAX77693_CHG_REG_CHG_CNFG_14 = 0xC5,
83 MAX77693_CHG_REG_SAFEOUT_CTRL = 0xC6,
84
85 MAX77693_PMIC_REG_END,
86};
87
88/* Slave addr = 0x4A: MUIC */
89enum max77693_muic_reg {
90 MAX77693_MUIC_REG_ID = 0x00,
91 MAX77693_MUIC_REG_INT1 = 0x01,
92 MAX77693_MUIC_REG_INT2 = 0x02,
93 MAX77693_MUIC_REG_INT3 = 0x03,
94 MAX77693_MUIC_REG_STATUS1 = 0x04,
95 MAX77693_MUIC_REG_STATUS2 = 0x05,
96 MAX77693_MUIC_REG_STATUS3 = 0x06,
97 MAX77693_MUIC_REG_INTMASK1 = 0x07,
98 MAX77693_MUIC_REG_INTMASK2 = 0x08,
99 MAX77693_MUIC_REG_INTMASK3 = 0x09,
100 MAX77693_MUIC_REG_CDETCTRL1 = 0x0A,
101 MAX77693_MUIC_REG_CDETCTRL2 = 0x0B,
102 MAX77693_MUIC_REG_CTRL1 = 0x0C,
103 MAX77693_MUIC_REG_CTRL2 = 0x0D,
104 MAX77693_MUIC_REG_CTRL3 = 0x0E,
105
106 MAX77693_MUIC_REG_END,
107};
108
109/* Slave addr = 0x90: Haptic */
110enum max77693_haptic_reg {
111 MAX77693_HAPTIC_REG_STATUS = 0x00,
112 MAX77693_HAPTIC_REG_CONFIG1 = 0x01,
113 MAX77693_HAPTIC_REG_CONFIG2 = 0x02,
114 MAX77693_HAPTIC_REG_CONFIG_CHNL = 0x03,
115 MAX77693_HAPTIC_REG_CONFG_CYC1 = 0x04,
116 MAX77693_HAPTIC_REG_CONFG_CYC2 = 0x05,
117 MAX77693_HAPTIC_REG_CONFIG_PER1 = 0x06,
118 MAX77693_HAPTIC_REG_CONFIG_PER2 = 0x07,
119 MAX77693_HAPTIC_REG_CONFIG_PER3 = 0x08,
120 MAX77693_HAPTIC_REG_CONFIG_PER4 = 0x09,
121 MAX77693_HAPTIC_REG_CONFIG_DUTY1 = 0x0A,
122 MAX77693_HAPTIC_REG_CONFIG_DUTY2 = 0x0B,
123 MAX77693_HAPTIC_REG_CONFIG_PWM1 = 0x0C,
124 MAX77693_HAPTIC_REG_CONFIG_PWM2 = 0x0D,
125 MAX77693_HAPTIC_REG_CONFIG_PWM3 = 0x0E,
126 MAX77693_HAPTIC_REG_CONFIG_PWM4 = 0x0F,
127 MAX77693_HAPTIC_REG_REV = 0x10,
128
129 MAX77693_HAPTIC_REG_END,
130};
131
132enum max77693_irq_source {
133 LED_INT = 0,
134 TOPSYS_INT,
135 CHG_INT,
136 MUIC_INT1,
137 MUIC_INT2,
138 MUIC_INT3,
139
140 MAX77693_IRQ_GROUP_NR,
141};
142
143enum max77693_irq {
144 /* PMIC - FLASH */
145 MAX77693_LED_IRQ_FLED2_OPEN,
146 MAX77693_LED_IRQ_FLED2_SHORT,
147 MAX77693_LED_IRQ_FLED1_OPEN,
148 MAX77693_LED_IRQ_FLED1_SHORT,
149 MAX77693_LED_IRQ_MAX_FLASH,
150
151 /* PMIC - TOPSYS */
152 MAX77693_TOPSYS_IRQ_T120C_INT,
153 MAX77693_TOPSYS_IRQ_T140C_INT,
154 MAX77693_TOPSYS_IRQ_LOWSYS_INT,
155
156 /* PMIC - Charger */
157 MAX77693_CHG_IRQ_BYP_I,
158 MAX77693_CHG_IRQ_THM_I,
159 MAX77693_CHG_IRQ_BAT_I,
160 MAX77693_CHG_IRQ_CHG_I,
161 MAX77693_CHG_IRQ_CHGIN_I,
162
163 /* MUIC INT1 */
164 MAX77693_MUIC_IRQ_INT1_ADC,
165 MAX77693_MUIC_IRQ_INT1_ADC_LOW,
166 MAX77693_MUIC_IRQ_INT1_ADC_ERR,
167 MAX77693_MUIC_IRQ_INT1_ADC1K,
168
169 /* MUIC INT2 */
170 MAX77693_MUIC_IRQ_INT2_CHGTYP,
171 MAX77693_MUIC_IRQ_INT2_CHGDETREUN,
172 MAX77693_MUIC_IRQ_INT2_DCDTMR,
173 MAX77693_MUIC_IRQ_INT2_DXOVP,
174 MAX77693_MUIC_IRQ_INT2_VBVOLT,
175 MAX77693_MUIC_IRQ_INT2_VIDRM,
176
177 /* MUIC INT3 */
178 MAX77693_MUIC_IRQ_INT3_EOC,
179 MAX77693_MUIC_IRQ_INT3_CGMBC,
180 MAX77693_MUIC_IRQ_INT3_OVP,
181 MAX77693_MUIC_IRQ_INT3_MBCCHG_ERR,
182 MAX77693_MUIC_IRQ_INT3_CHG_ENABLED,
183 MAX77693_MUIC_IRQ_INT3_BAT_DET,
184
185 MAX77693_IRQ_NR,
186};
187
188struct max77693_dev {
189 struct device *dev;
190 struct i2c_client *i2c; /* 0xCC , PMIC, Charger, Flash LED */
191 struct i2c_client *muic; /* 0x4A , MUIC */
192 struct i2c_client *haptic; /* 0x90 , Haptic */
193 struct mutex iolock;
194
195 int type;
196
197 struct regmap *regmap;
198 struct regmap *regmap_muic;
199 struct regmap *regmap_haptic;
200
201 struct irq_domain *irq_domain;
202
203 int irq;
204 int irq_gpio;
205 bool wakeup;
206 struct mutex irqlock;
207 int irq_masks_cur[MAX77693_IRQ_GROUP_NR];
208 int irq_masks_cache[MAX77693_IRQ_GROUP_NR];
209};
210
211enum max77693_types {
212 TYPE_MAX77693,
213};
214
215extern int max77693_read_reg(struct regmap *map, u8 reg, u8 *dest);
216extern int max77693_bulk_read(struct regmap *map, u8 reg, int count,
217 u8 *buf);
218extern int max77693_write_reg(struct regmap *map, u8 reg, u8 value);
219extern int max77693_bulk_write(struct regmap *map, u8 reg, int count,
220 u8 *buf);
221extern int max77693_update_reg(struct regmap *map, u8 reg, u8 val, u8 mask);
222
223extern int max77693_irq_init(struct max77693_dev *max77686);
224extern void max77693_irq_exit(struct max77693_dev *max77686);
225extern int max77693_irq_resume(struct max77693_dev *max77686);
226
227#endif /* __LINUX_MFD_MAX77693_PRIV_H */
diff --git a/include/linux/mfd/max77693.h b/include/linux/mfd/max77693.h
new file mode 100644
index 000000000000..1d28ae90384e
--- /dev/null
+++ b/include/linux/mfd/max77693.h
@@ -0,0 +1,36 @@
1/*
2 * max77693.h - Driver for the Maxim 77693
3 *
4 * Copyright (C) 2012 Samsung Electronics
5 * SangYoung Son <hello.son@samsung.com>
6 *
7 * This program is not provided / owned by Maxim Integrated Products.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 *
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
18 *
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
22 *
23 * This driver is based on max8997.h
24 *
25 * MAX77693 has PMIC, Charger, Flash LED, Haptic, MUIC devices.
26 * The devices share the same I2C bus and included in
27 * this mfd driver.
28 */
29
30#ifndef __LINUX_MFD_MAX77693_H
31#define __LINUX_MFD_MAX77693_H
32
33struct max77693_platform_data {
34 int wakeup;
35};
36#endif /* __LINUX_MFD_MAX77693_H */
diff --git a/include/linux/mfd/sta2x11-mfd.h b/include/linux/mfd/sta2x11-mfd.h
new file mode 100644
index 000000000000..d179227e866f
--- /dev/null
+++ b/include/linux/mfd/sta2x11-mfd.h
@@ -0,0 +1,324 @@
1/*
2 * Copyright (c) 2009-2011 Wind River Systems, Inc.
3 * Copyright (c) 2011 ST Microelectronics (Alessandro Rubini)
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
12 * See the GNU General Public License for more details.
13 *
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software
16 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
17 *
18 * The STMicroelectronics ConneXt (STA2X11) chip has several unrelated
19 * functions in one PCI endpoint function. This driver simply
20 * registers the platform devices in this iomem region and exports a few
21 * functions to access common registers
22 */
23
24#ifndef __STA2X11_MFD_H
25#define __STA2X11_MFD_H
26#include <linux/types.h>
27#include <linux/pci.h>
28
29/*
30 * The MFD PCI block includes the GPIO peripherals and other register blocks.
31 * For GPIO, we have 32*4 bits (I use "gsta" for "gpio sta2x11".)
32 */
33#define GSTA_GPIO_PER_BLOCK 32
34#define GSTA_NR_BLOCKS 4
35#define GSTA_NR_GPIO (GSTA_GPIO_PER_BLOCK * GSTA_NR_BLOCKS)
36
37/* Pinconfig is set by the board definition: altfunc, pull-up, pull-down */
38struct sta2x11_gpio_pdata {
39 unsigned pinconfig[GSTA_NR_GPIO];
40};
41
42/* Macros below lifted from sh_pfc.h, with minor differences */
43#define PINMUX_TYPE_NONE 0
44#define PINMUX_TYPE_FUNCTION 1
45#define PINMUX_TYPE_OUTPUT_LOW 2
46#define PINMUX_TYPE_OUTPUT_HIGH 3
47#define PINMUX_TYPE_INPUT 4
48#define PINMUX_TYPE_INPUT_PULLUP 5
49#define PINMUX_TYPE_INPUT_PULLDOWN 6
50
51/* Give names to GPIO pins, like PXA does, taken from the manual */
52#define STA2X11_GPIO0 0
53#define STA2X11_GPIO1 1
54#define STA2X11_GPIO2 2
55#define STA2X11_GPIO3 3
56#define STA2X11_GPIO4 4
57#define STA2X11_GPIO5 5
58#define STA2X11_GPIO6 6
59#define STA2X11_GPIO7 7
60#define STA2X11_GPIO8_RGBOUT_RED7 8
61#define STA2X11_GPIO9_RGBOUT_RED6 9
62#define STA2X11_GPIO10_RGBOUT_RED5 10
63#define STA2X11_GPIO11_RGBOUT_RED4 11
64#define STA2X11_GPIO12_RGBOUT_RED3 12
65#define STA2X11_GPIO13_RGBOUT_RED2 13
66#define STA2X11_GPIO14_RGBOUT_RED1 14
67#define STA2X11_GPIO15_RGBOUT_RED0 15
68#define STA2X11_GPIO16_RGBOUT_GREEN7 16
69#define STA2X11_GPIO17_RGBOUT_GREEN6 17
70#define STA2X11_GPIO18_RGBOUT_GREEN5 18
71#define STA2X11_GPIO19_RGBOUT_GREEN4 19
72#define STA2X11_GPIO20_RGBOUT_GREEN3 20
73#define STA2X11_GPIO21_RGBOUT_GREEN2 21
74#define STA2X11_GPIO22_RGBOUT_GREEN1 22
75#define STA2X11_GPIO23_RGBOUT_GREEN0 23
76#define STA2X11_GPIO24_RGBOUT_BLUE7 24
77#define STA2X11_GPIO25_RGBOUT_BLUE6 25
78#define STA2X11_GPIO26_RGBOUT_BLUE5 26
79#define STA2X11_GPIO27_RGBOUT_BLUE4 27
80#define STA2X11_GPIO28_RGBOUT_BLUE3 28
81#define STA2X11_GPIO29_RGBOUT_BLUE2 29
82#define STA2X11_GPIO30_RGBOUT_BLUE1 30
83#define STA2X11_GPIO31_RGBOUT_BLUE0 31
84#define STA2X11_GPIO32_RGBOUT_VSYNCH 32
85#define STA2X11_GPIO33_RGBOUT_HSYNCH 33
86#define STA2X11_GPIO34_RGBOUT_DEN 34
87#define STA2X11_GPIO35_ETH_CRS_DV 35
88#define STA2X11_GPIO36_ETH_TXD1 36
89#define STA2X11_GPIO37_ETH_TXD0 37
90#define STA2X11_GPIO38_ETH_TX_EN 38
91#define STA2X11_GPIO39_MDIO 39
92#define STA2X11_GPIO40_ETH_REF_CLK 40
93#define STA2X11_GPIO41_ETH_RXD1 41
94#define STA2X11_GPIO42_ETH_RXD0 42
95#define STA2X11_GPIO43_MDC 43
96#define STA2X11_GPIO44_CAN_TX 44
97#define STA2X11_GPIO45_CAN_RX 45
98#define STA2X11_GPIO46_MLB_DAT 46
99#define STA2X11_GPIO47_MLB_SIG 47
100#define STA2X11_GPIO48_SPI0_CLK 48
101#define STA2X11_GPIO49_SPI0_TXD 49
102#define STA2X11_GPIO50_SPI0_RXD 50
103#define STA2X11_GPIO51_SPI0_FRM 51
104#define STA2X11_GPIO52_SPI1_CLK 52
105#define STA2X11_GPIO53_SPI1_TXD 53
106#define STA2X11_GPIO54_SPI1_RXD 54
107#define STA2X11_GPIO55_SPI1_FRM 55
108#define STA2X11_GPIO56_SPI2_CLK 56
109#define STA2X11_GPIO57_SPI2_TXD 57
110#define STA2X11_GPIO58_SPI2_RXD 58
111#define STA2X11_GPIO59_SPI2_FRM 59
112#define STA2X11_GPIO60_I2C0_SCL 60
113#define STA2X11_GPIO61_I2C0_SDA 61
114#define STA2X11_GPIO62_I2C1_SCL 62
115#define STA2X11_GPIO63_I2C1_SDA 63
116#define STA2X11_GPIO64_I2C2_SCL 64
117#define STA2X11_GPIO65_I2C2_SDA 65
118#define STA2X11_GPIO66_I2C3_SCL 66
119#define STA2X11_GPIO67_I2C3_SDA 67
120#define STA2X11_GPIO68_MSP0_RCK 68
121#define STA2X11_GPIO69_MSP0_RXD 69
122#define STA2X11_GPIO70_MSP0_RFS 70
123#define STA2X11_GPIO71_MSP0_TCK 71
124#define STA2X11_GPIO72_MSP0_TXD 72
125#define STA2X11_GPIO73_MSP0_TFS 73
126#define STA2X11_GPIO74_MSP0_SCK 74
127#define STA2X11_GPIO75_MSP1_CK 75
128#define STA2X11_GPIO76_MSP1_RXD 76
129#define STA2X11_GPIO77_MSP1_FS 77
130#define STA2X11_GPIO78_MSP1_TXD 78
131#define STA2X11_GPIO79_MSP2_CK 79
132#define STA2X11_GPIO80_MSP2_RXD 80
133#define STA2X11_GPIO81_MSP2_FS 81
134#define STA2X11_GPIO82_MSP2_TXD 82
135#define STA2X11_GPIO83_MSP3_CK 83
136#define STA2X11_GPIO84_MSP3_RXD 84
137#define STA2X11_GPIO85_MSP3_FS 85
138#define STA2X11_GPIO86_MSP3_TXD 86
139#define STA2X11_GPIO87_MSP4_CK 87
140#define STA2X11_GPIO88_MSP4_RXD 88
141#define STA2X11_GPIO89_MSP4_FS 89
142#define STA2X11_GPIO90_MSP4_TXD 90
143#define STA2X11_GPIO91_MSP5_CK 91
144#define STA2X11_GPIO92_MSP5_RXD 92
145#define STA2X11_GPIO93_MSP5_FS 93
146#define STA2X11_GPIO94_MSP5_TXD 94
147#define STA2X11_GPIO95_SDIO3_DAT3 95
148#define STA2X11_GPIO96_SDIO3_DAT2 96
149#define STA2X11_GPIO97_SDIO3_DAT1 97
150#define STA2X11_GPIO98_SDIO3_DAT0 98
151#define STA2X11_GPIO99_SDIO3_CLK 99
152#define STA2X11_GPIO100_SDIO3_CMD 100
153#define STA2X11_GPIO101 101
154#define STA2X11_GPIO102 102
155#define STA2X11_GPIO103 103
156#define STA2X11_GPIO104 104
157#define STA2X11_GPIO105_SDIO2_DAT3 105
158#define STA2X11_GPIO106_SDIO2_DAT2 106
159#define STA2X11_GPIO107_SDIO2_DAT1 107
160#define STA2X11_GPIO108_SDIO2_DAT0 108
161#define STA2X11_GPIO109_SDIO2_CLK 109
162#define STA2X11_GPIO110_SDIO2_CMD 110
163#define STA2X11_GPIO111 111
164#define STA2X11_GPIO112 112
165#define STA2X11_GPIO113 113
166#define STA2X11_GPIO114 114
167#define STA2X11_GPIO115_SDIO1_DAT3 115
168#define STA2X11_GPIO116_SDIO1_DAT2 116
169#define STA2X11_GPIO117_SDIO1_DAT1 117
170#define STA2X11_GPIO118_SDIO1_DAT0 118
171#define STA2X11_GPIO119_SDIO1_CLK 119
172#define STA2X11_GPIO120_SDIO1_CMD 120
173#define STA2X11_GPIO121 121
174#define STA2X11_GPIO122 122
175#define STA2X11_GPIO123 123
176#define STA2X11_GPIO124 124
177#define STA2X11_GPIO125_UART2_TXD 125
178#define STA2X11_GPIO126_UART2_RXD 126
179#define STA2X11_GPIO127_UART3_TXD 127
180
181/*
182 * The APB bridge has its own registers, needed by our users as well.
183 * They are accessed with the following read/mask/write function.
184 */
185u32 sta2x11_apbreg_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val);
186
187/* CAN and MLB */
188#define APBREG_BSR 0x00 /* Bridge Status Reg */
189#define APBREG_PAER 0x08 /* Peripherals Address Error Reg */
190#define APBREG_PWAC 0x20 /* Peripheral Write Access Control reg */
191#define APBREG_PRAC 0x40 /* Peripheral Read Access Control reg */
192#define APBREG_PCG 0x60 /* Peripheral Clock Gating Reg */
193#define APBREG_PUR 0x80 /* Peripheral Under Reset Reg */
194#define APBREG_EMU_PCG 0xA0 /* Emulator Peripheral Clock Gating Reg */
195
196#define APBREG_CAN (1 << 1)
197#define APBREG_MLB (1 << 3)
198
199/* SARAC */
200#define APBREG_BSR_SARAC 0x100 /* Bridge Status Reg */
201#define APBREG_PAER_SARAC 0x108 /* Peripherals Address Error Reg */
202#define APBREG_PWAC_SARAC 0x120 /* Peripheral Write Access Control reg */
203#define APBREG_PRAC_SARAC 0x140 /* Peripheral Read Access Control reg */
204#define APBREG_PCG_SARAC 0x160 /* Peripheral Clock Gating Reg */
205#define APBREG_PUR_SARAC 0x180 /* Peripheral Under Reset Reg */
206#define APBREG_EMU_PCG_SARAC 0x1A0 /* Emulator Peripheral Clock Gating Reg */
207
208#define APBREG_SARAC (1 << 2)
209
210/*
211 * The system controller has its own registers. Some of these are accessed
212 * by our users as well, using the following read/mask/write function
213 */
214u32 sta2x11_sctl_mask(struct pci_dev *pdev, u32 reg, u32 mask, u32 val);
215
216#define SCTL_SCCTL 0x00 /* System controller control register */
217#define SCTL_ARMCFG 0x04 /* ARM configuration register */
218#define SCTL_SCPLLCTL 0x08 /* PLL control status register */
219#define SCTL_SCPLLFCTRL 0x0c /* PLL frequency control register */
220#define SCTL_SCRESFRACT 0x10 /* PLL fractional input register */
221#define SCTL_SCRESCTRL1 0x14 /* Peripheral reset control 1 */
222#define SCTL_SCRESXTRL2 0x18 /* Peripheral reset control 2 */
223#define SCTL_SCPEREN0 0x1c /* Peripheral clock enable register 0 */
224#define SCTL_SCPEREN1 0x20 /* Peripheral clock enable register 1 */
225#define SCTL_SCPEREN2 0x24 /* Peripheral clock enable register 2 */
226#define SCTL_SCGRST 0x28 /* Peripheral global reset */
227#define SCTL_SCPCIPMCR1 0x30 /* PCI power management control 1 */
228#define SCTL_SCPCIPMCR2 0x34 /* PCI power management control 2 */
229#define SCTL_SCPCIPMSR1 0x38 /* PCI power management status 1 */
230#define SCTL_SCPCIPMSR2 0x3c /* PCI power management status 2 */
231#define SCTL_SCPCIPMSR3 0x40 /* PCI power management status 3 */
232#define SCTL_SCINTREN 0x44 /* Interrupt enable */
233#define SCTL_SCRISR 0x48 /* RAW interrupt status */
234#define SCTL_SCCLKSTAT0 0x4c /* Peripheral clocks status 0 */
235#define SCTL_SCCLKSTAT1 0x50 /* Peripheral clocks status 1 */
236#define SCTL_SCCLKSTAT2 0x54 /* Peripheral clocks status 2 */
237#define SCTL_SCRSTSTA 0x58 /* Reset status register */
238
239#define SCTL_SCRESCTRL1_USB_PHY_POR (1 << 0)
240#define SCTL_SCRESCTRL1_USB_OTG (1 << 1)
241#define SCTL_SCRESCTRL1_USB_HRST (1 << 2)
242#define SCTL_SCRESCTRL1_USB_PHY_HOST (1 << 3)
243#define SCTL_SCRESCTRL1_SATAII (1 << 4)
244#define SCTL_SCRESCTRL1_VIP (1 << 5)
245#define SCTL_SCRESCTRL1_PER_MMC0 (1 << 6)
246#define SCTL_SCRESCTRL1_PER_MMC1 (1 << 7)
247#define SCTL_SCRESCTRL1_PER_GPIO0 (1 << 8)
248#define SCTL_SCRESCTRL1_PER_GPIO1 (1 << 9)
249#define SCTL_SCRESCTRL1_PER_GPIO2 (1 << 10)
250#define SCTL_SCRESCTRL1_PER_GPIO3 (1 << 11)
251#define SCTL_SCRESCTRL1_PER_MTU0 (1 << 12)
252#define SCTL_SCRESCTRL1_KER_SPI0 (1 << 13)
253#define SCTL_SCRESCTRL1_KER_SPI1 (1 << 14)
254#define SCTL_SCRESCTRL1_KER_SPI2 (1 << 15)
255#define SCTL_SCRESCTRL1_KER_MCI0 (1 << 16)
256#define SCTL_SCRESCTRL1_KER_MCI1 (1 << 17)
257#define SCTL_SCRESCTRL1_PRE_HSI2C0 (1 << 18)
258#define SCTL_SCRESCTRL1_PER_HSI2C1 (1 << 19)
259#define SCTL_SCRESCTRL1_PER_HSI2C2 (1 << 20)
260#define SCTL_SCRESCTRL1_PER_HSI2C3 (1 << 21)
261#define SCTL_SCRESCTRL1_PER_MSP0 (1 << 22)
262#define SCTL_SCRESCTRL1_PER_MSP1 (1 << 23)
263#define SCTL_SCRESCTRL1_PER_MSP2 (1 << 24)
264#define SCTL_SCRESCTRL1_PER_MSP3 (1 << 25)
265#define SCTL_SCRESCTRL1_PER_MSP4 (1 << 26)
266#define SCTL_SCRESCTRL1_PER_MSP5 (1 << 27)
267#define SCTL_SCRESCTRL1_PER_MMC (1 << 28)
268#define SCTL_SCRESCTRL1_KER_MSP0 (1 << 29)
269#define SCTL_SCRESCTRL1_KER_MSP1 (1 << 30)
270#define SCTL_SCRESCTRL1_KER_MSP2 (1 << 31)
271
272#define SCTL_SCPEREN0_UART0 (1 << 0)
273#define SCTL_SCPEREN0_UART1 (1 << 1)
274#define SCTL_SCPEREN0_UART2 (1 << 2)
275#define SCTL_SCPEREN0_UART3 (1 << 3)
276#define SCTL_SCPEREN0_MSP0 (1 << 4)
277#define SCTL_SCPEREN0_MSP1 (1 << 5)
278#define SCTL_SCPEREN0_MSP2 (1 << 6)
279#define SCTL_SCPEREN0_MSP3 (1 << 7)
280#define SCTL_SCPEREN0_MSP4 (1 << 8)
281#define SCTL_SCPEREN0_MSP5 (1 << 9)
282#define SCTL_SCPEREN0_SPI0 (1 << 10)
283#define SCTL_SCPEREN0_SPI1 (1 << 11)
284#define SCTL_SCPEREN0_SPI2 (1 << 12)
285#define SCTL_SCPEREN0_I2C0 (1 << 13)
286#define SCTL_SCPEREN0_I2C1 (1 << 14)
287#define SCTL_SCPEREN0_I2C2 (1 << 15)
288#define SCTL_SCPEREN0_I2C3 (1 << 16)
289#define SCTL_SCPEREN0_SVDO_LVDS (1 << 17)
290#define SCTL_SCPEREN0_USB_HOST (1 << 18)
291#define SCTL_SCPEREN0_USB_OTG (1 << 19)
292#define SCTL_SCPEREN0_MCI0 (1 << 20)
293#define SCTL_SCPEREN0_MCI1 (1 << 21)
294#define SCTL_SCPEREN0_MCI2 (1 << 22)
295#define SCTL_SCPEREN0_MCI3 (1 << 23)
296#define SCTL_SCPEREN0_SATA (1 << 24)
297#define SCTL_SCPEREN0_ETHERNET (1 << 25)
298#define SCTL_SCPEREN0_VIC (1 << 26)
299#define SCTL_SCPEREN0_DMA_AUDIO (1 << 27)
300#define SCTL_SCPEREN0_DMA_SOC (1 << 28)
301#define SCTL_SCPEREN0_RAM (1 << 29)
302#define SCTL_SCPEREN0_VIP (1 << 30)
303#define SCTL_SCPEREN0_ARM (1 << 31)
304
305#define SCTL_SCPEREN1_UART0 (1 << 0)
306#define SCTL_SCPEREN1_UART1 (1 << 1)
307#define SCTL_SCPEREN1_UART2 (1 << 2)
308#define SCTL_SCPEREN1_UART3 (1 << 3)
309#define SCTL_SCPEREN1_MSP0 (1 << 4)
310#define SCTL_SCPEREN1_MSP1 (1 << 5)
311#define SCTL_SCPEREN1_MSP2 (1 << 6)
312#define SCTL_SCPEREN1_MSP3 (1 << 7)
313#define SCTL_SCPEREN1_MSP4 (1 << 8)
314#define SCTL_SCPEREN1_MSP5 (1 << 9)
315#define SCTL_SCPEREN1_SPI0 (1 << 10)
316#define SCTL_SCPEREN1_SPI1 (1 << 11)
317#define SCTL_SCPEREN1_SPI2 (1 << 12)
318#define SCTL_SCPEREN1_I2C0 (1 << 13)
319#define SCTL_SCPEREN1_I2C1 (1 << 14)
320#define SCTL_SCPEREN1_I2C2 (1 << 15)
321#define SCTL_SCPEREN1_I2C3 (1 << 16)
322#define SCTL_SCPEREN1_USB_PHY (1 << 17)
323
324#endif /* __STA2X11_MFD_H */
diff --git a/include/linux/mfd/stmpe.h b/include/linux/mfd/stmpe.h
index 8516fd1eaabc..f8d5b4d5843f 100644
--- a/include/linux/mfd/stmpe.h
+++ b/include/linux/mfd/stmpe.h
@@ -117,7 +117,7 @@ struct matrix_keymap_data;
117 * @no_autorepeat: disable key autorepeat 117 * @no_autorepeat: disable key autorepeat
118 */ 118 */
119struct stmpe_keypad_platform_data { 119struct stmpe_keypad_platform_data {
120 struct matrix_keymap_data *keymap_data; 120 const struct matrix_keymap_data *keymap_data;
121 unsigned int debounce_ms; 121 unsigned int debounce_ms;
122 unsigned int scan_count; 122 unsigned int scan_count;
123 bool no_autorepeat; 123 bool no_autorepeat;
diff --git a/include/linux/mfd/tps65910.h b/include/linux/mfd/tps65910.h
index 1c6c2860d1a6..dd8dc0a6c462 100644
--- a/include/linux/mfd/tps65910.h
+++ b/include/linux/mfd/tps65910.h
@@ -18,6 +18,7 @@
18#define __LINUX_MFD_TPS65910_H 18#define __LINUX_MFD_TPS65910_H
19 19
20#include <linux/gpio.h> 20#include <linux/gpio.h>
21#include <linux/regmap.h>
21 22
22/* TPS chip id list */ 23/* TPS chip id list */
23#define TPS65910 0 24#define TPS65910 0
@@ -783,6 +784,18 @@
783#define TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3 0x4 784#define TPS65910_SLEEP_CONTROL_EXT_INPUT_EN3 0x4
784#define TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP 0x8 785#define TPS65911_SLEEP_CONTROL_EXT_INPUT_SLEEP 0x8
785 786
787/*
788 * Sleep keepon data: Maintains the state in sleep mode
789 * @therm_keepon: Keep on the thermal monitoring in sleep state.
790 * @clkout32k_keepon: Keep on the 32KHz clock output in sleep state.
791 * @i2chs_keepon: Keep on high speed internal clock in sleep state.
792 */
793struct tps65910_sleep_keepon_data {
794 unsigned therm_keepon:1;
795 unsigned clkout32k_keepon:1;
796 unsigned i2chs_keepon:1;
797};
798
786/** 799/**
787 * struct tps65910_board 800 * struct tps65910_board
788 * Board platform data may be used to initialize regulators. 801 * Board platform data may be used to initialize regulators.
@@ -794,6 +807,8 @@ struct tps65910_board {
794 int irq_base; 807 int irq_base;
795 int vmbch_threshold; 808 int vmbch_threshold;
796 int vmbch2_threshold; 809 int vmbch2_threshold;
810 bool en_dev_slp;
811 struct tps65910_sleep_keepon_data *slp_keepon;
797 bool en_gpio_sleep[TPS6591X_MAX_NUM_GPIO]; 812 bool en_gpio_sleep[TPS6591X_MAX_NUM_GPIO];
798 unsigned long regulator_ext_sleep_control[TPS65910_NUM_REGS]; 813 unsigned long regulator_ext_sleep_control[TPS65910_NUM_REGS];
799 struct regulator_init_data *tps65910_pmic_init_data[TPS65910_NUM_REGS]; 814 struct regulator_init_data *tps65910_pmic_init_data[TPS65910_NUM_REGS];
@@ -809,16 +824,14 @@ struct tps65910 {
809 struct regmap *regmap; 824 struct regmap *regmap;
810 struct mutex io_mutex; 825 struct mutex io_mutex;
811 unsigned int id; 826 unsigned int id;
812 int (*read)(struct tps65910 *tps65910, u8 reg, int size, void *dest);
813 int (*write)(struct tps65910 *tps65910, u8 reg, int size, void *src);
814 827
815 /* Client devices */ 828 /* Client devices */
816 struct tps65910_pmic *pmic; 829 struct tps65910_pmic *pmic;
817 struct tps65910_rtc *rtc; 830 struct tps65910_rtc *rtc;
818 struct tps65910_power *power; 831 struct tps65910_power *power;
819 832
820 /* GPIO Handling */ 833 /* Device node parsed board data */
821 struct gpio_chip gpio; 834 struct tps65910_board *of_plat_data;
822 835
823 /* IRQ Handling */ 836 /* IRQ Handling */
824 struct mutex irq_lock; 837 struct mutex irq_lock;
@@ -826,6 +839,7 @@ struct tps65910 {
826 int irq_base; 839 int irq_base;
827 int irq_num; 840 int irq_num;
828 u32 irq_mask; 841 u32 irq_mask;
842 struct irq_domain *domain;
829}; 843};
830 844
831struct tps65910_platform_data { 845struct tps65910_platform_data {
@@ -833,9 +847,6 @@ struct tps65910_platform_data {
833 int irq_base; 847 int irq_base;
834}; 848};
835 849
836int tps65910_set_bits(struct tps65910 *tps65910, u8 reg, u8 mask);
837int tps65910_clear_bits(struct tps65910 *tps65910, u8 reg, u8 mask);
838void tps65910_gpio_init(struct tps65910 *tps65910, int gpio_base);
839int tps65910_irq_init(struct tps65910 *tps65910, int irq, 850int tps65910_irq_init(struct tps65910 *tps65910, int irq,
840 struct tps65910_platform_data *pdata); 851 struct tps65910_platform_data *pdata);
841int tps65910_irq_exit(struct tps65910 *tps65910); 852int tps65910_irq_exit(struct tps65910 *tps65910);
@@ -845,4 +856,28 @@ static inline int tps65910_chip_id(struct tps65910 *tps65910)
845 return tps65910->id; 856 return tps65910->id;
846} 857}
847 858
859static inline int tps65910_reg_read(struct tps65910 *tps65910, u8 reg,
860 unsigned int *val)
861{
862 return regmap_read(tps65910->regmap, reg, val);
863}
864
865static inline int tps65910_reg_write(struct tps65910 *tps65910, u8 reg,
866 unsigned int val)
867{
868 return regmap_write(tps65910->regmap, reg, val);
869}
870
871static inline int tps65910_reg_set_bits(struct tps65910 *tps65910, u8 reg,
872 u8 mask)
873{
874 return regmap_update_bits(tps65910->regmap, reg, mask, mask);
875}
876
877static inline int tps65910_reg_clear_bits(struct tps65910 *tps65910, u8 reg,
878 u8 mask)
879{
880 return regmap_update_bits(tps65910->regmap, reg, mask, 0);
881}
882
848#endif /* __LINUX_MFD_TPS65910_H */ 883#endif /* __LINUX_MFD_TPS65910_H */
diff --git a/include/linux/mfd/twl6040.h b/include/linux/mfd/twl6040.h
index b15b5f03f5c4..6659487c31e7 100644
--- a/include/linux/mfd/twl6040.h
+++ b/include/linux/mfd/twl6040.h
@@ -27,6 +27,7 @@
27 27
28#include <linux/interrupt.h> 28#include <linux/interrupt.h>
29#include <linux/mfd/core.h> 29#include <linux/mfd/core.h>
30#include <linux/regulator/consumer.h>
30 31
31#define TWL6040_REG_ASICID 0x01 32#define TWL6040_REG_ASICID 0x01
32#define TWL6040_REG_ASICREV 0x02 33#define TWL6040_REG_ASICREV 0x02
@@ -203,6 +204,7 @@ struct regmap;
203struct twl6040 { 204struct twl6040 {
204 struct device *dev; 205 struct device *dev;
205 struct regmap *regmap; 206 struct regmap *regmap;
207 struct regulator_bulk_data supplies[2]; /* supplies for vio, v2v1 */
206 struct mutex mutex; 208 struct mutex mutex;
207 struct mutex io_mutex; 209 struct mutex io_mutex;
208 struct mutex irq_mutex; 210 struct mutex irq_mutex;
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index 4b1211859f74..4a3b83a77614 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -17,6 +17,7 @@
17 17
18#include <linux/completion.h> 18#include <linux/completion.h>
19#include <linux/interrupt.h> 19#include <linux/interrupt.h>
20#include <linux/irqdomain.h>
20#include <linux/list.h> 21#include <linux/list.h>
21#include <linux/regmap.h> 22#include <linux/regmap.h>
22 23
@@ -338,6 +339,7 @@
338#define WM831X_FLL_CLK_SRC_WIDTH 2 /* FLL_CLK_SRC - [1:0] */ 339#define WM831X_FLL_CLK_SRC_WIDTH 2 /* FLL_CLK_SRC - [1:0] */
339 340
340struct regulator_dev; 341struct regulator_dev;
342struct irq_domain;
341 343
342#define WM831X_NUM_IRQ_REGS 5 344#define WM831X_NUM_IRQ_REGS 5
343#define WM831X_NUM_GPIO_REGS 16 345#define WM831X_NUM_GPIO_REGS 16
@@ -367,7 +369,7 @@ struct wm831x {
367 369
368 int irq; /* Our chip IRQ */ 370 int irq; /* Our chip IRQ */
369 struct mutex irq_lock; 371 struct mutex irq_lock;
370 int irq_base; 372 struct irq_domain *irq_domain;
371 int irq_masks_cur[WM831X_NUM_IRQ_REGS]; /* Currently active value */ 373 int irq_masks_cur[WM831X_NUM_IRQ_REGS]; /* Currently active value */
372 int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */ 374 int irq_masks_cache[WM831X_NUM_IRQ_REGS]; /* Cached hardware value */
373 375
@@ -382,7 +384,8 @@ struct wm831x {
382 384
383 /* Used by the interrupt controller code to post writes */ 385 /* Used by the interrupt controller code to post writes */
384 int gpio_update[WM831X_NUM_GPIO_REGS]; 386 int gpio_update[WM831X_NUM_GPIO_REGS];
385 bool gpio_level[WM831X_NUM_GPIO_REGS]; 387 bool gpio_level_high[WM831X_NUM_GPIO_REGS];
388 bool gpio_level_low[WM831X_NUM_GPIO_REGS];
386 389
387 struct mutex auxadc_lock; 390 struct mutex auxadc_lock;
388 struct list_head auxadc_pending; 391 struct list_head auxadc_pending;
@@ -417,6 +420,11 @@ int wm831x_irq_init(struct wm831x *wm831x, int irq);
417void wm831x_irq_exit(struct wm831x *wm831x); 420void wm831x_irq_exit(struct wm831x *wm831x);
418void wm831x_auxadc_init(struct wm831x *wm831x); 421void wm831x_auxadc_init(struct wm831x *wm831x);
419 422
423static inline int wm831x_irq(struct wm831x *wm831x, int irq)
424{
425 return irq_create_mapping(wm831x->irq_domain, irq);
426}
427
420extern struct regmap_config wm831x_regmap_config; 428extern struct regmap_config wm831x_regmap_config;
421 429
422#endif 430#endif
diff --git a/include/linux/mfd/wm8350/core.h b/include/linux/mfd/wm8350/core.h
index 98fcc977e82b..9192b6404a73 100644
--- a/include/linux/mfd/wm8350/core.h
+++ b/include/linux/mfd/wm8350/core.h
@@ -602,6 +602,7 @@ extern const u16 wm8352_mode2_defaults[];
602extern const u16 wm8352_mode3_defaults[]; 602extern const u16 wm8352_mode3_defaults[];
603 603
604struct wm8350; 604struct wm8350;
605struct regmap;
605 606
606struct wm8350_hwmon { 607struct wm8350_hwmon {
607 struct platform_device *pdev; 608 struct platform_device *pdev;
@@ -612,13 +613,7 @@ struct wm8350 {
612 struct device *dev; 613 struct device *dev;
613 614
614 /* device IO */ 615 /* device IO */
615 union { 616 struct regmap *regmap;
616 struct i2c_client *i2c_client;
617 struct spi_device *spi_device;
618 };
619 int (*read_dev)(struct wm8350 *wm8350, char reg, int size, void *dest);
620 int (*write_dev)(struct wm8350 *wm8350, char reg, int size,
621 void *src);
622 u16 *reg_cache; 617 u16 *reg_cache;
623 618
624 struct mutex auxadc_mutex; 619 struct mutex auxadc_mutex;
diff --git a/include/linux/mfd/wm8400-private.h b/include/linux/mfd/wm8400-private.h
index 0147b6968510..2de565b94d0c 100644
--- a/include/linux/mfd/wm8400-private.h
+++ b/include/linux/mfd/wm8400-private.h
@@ -24,19 +24,14 @@
24#include <linux/mfd/wm8400.h> 24#include <linux/mfd/wm8400.h>
25#include <linux/mutex.h> 25#include <linux/mutex.h>
26#include <linux/platform_device.h> 26#include <linux/platform_device.h>
27 27#include <linux/regmap.h>
28struct regmap;
29 28
30#define WM8400_REGISTER_COUNT 0x55 29#define WM8400_REGISTER_COUNT 0x55
31 30
32struct wm8400 { 31struct wm8400 {
33 struct device *dev; 32 struct device *dev;
34
35 struct mutex io_lock;
36 struct regmap *regmap; 33 struct regmap *regmap;
37 34
38 u16 reg_cache[WM8400_REGISTER_COUNT];
39
40 struct platform_device regulators[6]; 35 struct platform_device regulators[6];
41}; 36};
42 37
@@ -930,6 +925,11 @@ struct wm8400 {
930 925
931u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg); 926u16 wm8400_reg_read(struct wm8400 *wm8400, u8 reg);
932int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data); 927int wm8400_block_read(struct wm8400 *wm8400, u8 reg, int count, u16 *data);
933int wm8400_set_bits(struct wm8400 *wm8400, u8 reg, u16 mask, u16 val); 928
929static inline int wm8400_set_bits(struct wm8400 *wm8400, u8 reg,
930 u16 mask, u16 val)
931{
932 return regmap_update_bits(wm8400->regmap, reg, mask, val);
933}
934 934
935#endif 935#endif
diff --git a/include/linux/mfd/wm8994/core.h b/include/linux/mfd/wm8994/core.h
index 6695c3ec4518..1f173306bf05 100644
--- a/include/linux/mfd/wm8994/core.h
+++ b/include/linux/mfd/wm8994/core.h
@@ -57,6 +57,7 @@ struct wm8994 {
57 57
58 enum wm8994_type type; 58 enum wm8994_type type;
59 int revision; 59 int revision;
60 int cust_id;
60 61
61 struct device *dev; 62 struct device *dev;
62 struct regmap *regmap; 63 struct regmap *regmap;
diff --git a/include/linux/mfd/wm8994/registers.h b/include/linux/mfd/wm8994/registers.h
index 86e6a032a078..053548961c15 100644
--- a/include/linux/mfd/wm8994/registers.h
+++ b/include/linux/mfd/wm8994/registers.h
@@ -2212,6 +2212,9 @@
2212/* 2212/*
2213 * R256 (0x100) - Chip Revision 2213 * R256 (0x100) - Chip Revision
2214 */ 2214 */
2215#define WM8994_CUST_ID_MASK 0xFF00 /* CUST_ID - [15:8] */
2216#define WM8994_CUST_ID_SHIFT 8 /* CUST_ID - [15:8] */
2217#define WM8994_CUST_ID_WIDTH 8 /* CUST_ID - [15:8] */
2215#define WM8994_CHIP_REV_MASK 0x000F /* CHIP_REV - [3:0] */ 2218#define WM8994_CHIP_REV_MASK 0x000F /* CHIP_REV - [3:0] */
2216#define WM8994_CHIP_REV_SHIFT 0 /* CHIP_REV - [3:0] */ 2219#define WM8994_CHIP_REV_SHIFT 0 /* CHIP_REV - [3:0] */
2217#define WM8994_CHIP_REV_WIDTH 4 /* CHIP_REV - [3:0] */ 2220#define WM8994_CHIP_REV_WIDTH 4 /* CHIP_REV - [3:0] */
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 7d5c37f24c63..ce26716238c3 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -321,6 +321,7 @@ static inline int is_vmalloc_or_module_addr(const void *x)
321static inline void compound_lock(struct page *page) 321static inline void compound_lock(struct page *page)
322{ 322{
323#ifdef CONFIG_TRANSPARENT_HUGEPAGE 323#ifdef CONFIG_TRANSPARENT_HUGEPAGE
324 VM_BUG_ON(PageSlab(page));
324 bit_spin_lock(PG_compound_lock, &page->flags); 325 bit_spin_lock(PG_compound_lock, &page->flags);
325#endif 326#endif
326} 327}
@@ -328,6 +329,7 @@ static inline void compound_lock(struct page *page)
328static inline void compound_unlock(struct page *page) 329static inline void compound_unlock(struct page *page)
329{ 330{
330#ifdef CONFIG_TRANSPARENT_HUGEPAGE 331#ifdef CONFIG_TRANSPARENT_HUGEPAGE
332 VM_BUG_ON(PageSlab(page));
331 bit_spin_unlock(PG_compound_lock, &page->flags); 333 bit_spin_unlock(PG_compound_lock, &page->flags);
332#endif 334#endif
333} 335}
@@ -871,8 +873,6 @@ extern void pagefault_out_of_memory(void);
871extern void show_free_areas(unsigned int flags); 873extern void show_free_areas(unsigned int flags);
872extern bool skip_free_areas_node(unsigned int flags, int nid); 874extern bool skip_free_areas_node(unsigned int flags, int nid);
873 875
874int shmem_lock(struct file *file, int lock, struct user_struct *user);
875struct file *shmem_file_setup(const char *name, loff_t size, unsigned long flags);
876int shmem_zero_setup(struct vm_area_struct *); 876int shmem_zero_setup(struct vm_area_struct *);
877 877
878extern int can_do_mlock(void); 878extern int can_do_mlock(void);
@@ -951,11 +951,9 @@ static inline void unmap_shared_mapping_range(struct address_space *mapping,
951extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new); 951extern void truncate_pagecache(struct inode *inode, loff_t old, loff_t new);
952extern void truncate_setsize(struct inode *inode, loff_t newsize); 952extern void truncate_setsize(struct inode *inode, loff_t newsize);
953extern int vmtruncate(struct inode *inode, loff_t offset); 953extern int vmtruncate(struct inode *inode, loff_t offset);
954extern int vmtruncate_range(struct inode *inode, loff_t offset, loff_t end);
955void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end); 954void truncate_pagecache_range(struct inode *inode, loff_t offset, loff_t end);
956int truncate_inode_page(struct address_space *mapping, struct page *page); 955int truncate_inode_page(struct address_space *mapping, struct page *page);
957int generic_error_remove_page(struct address_space *mapping, struct page *page); 956int generic_error_remove_page(struct address_space *mapping, struct page *page);
958
959int invalidate_inode_page(struct page *page); 957int invalidate_inode_page(struct page *page);
960 958
961#ifdef CONFIG_MMU 959#ifdef CONFIG_MMU
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 227fd3e9a9c9..1397ccf81e91 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -21,22 +21,22 @@ static inline int page_is_file_cache(struct page *page)
21 return !PageSwapBacked(page); 21 return !PageSwapBacked(page);
22} 22}
23 23
24static inline void 24static __always_inline void add_page_to_lru_list(struct page *page,
25add_page_to_lru_list(struct zone *zone, struct page *page, enum lru_list lru) 25 struct lruvec *lruvec, enum lru_list lru)
26{ 26{
27 struct lruvec *lruvec; 27 int nr_pages = hpage_nr_pages(page);
28 28 mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
29 lruvec = mem_cgroup_lru_add_list(zone, page, lru);
30 list_add(&page->lru, &lruvec->lists[lru]); 29 list_add(&page->lru, &lruvec->lists[lru]);
31 __mod_zone_page_state(zone, NR_LRU_BASE + lru, hpage_nr_pages(page)); 30 __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, nr_pages);
32} 31}
33 32
34static inline void 33static __always_inline void del_page_from_lru_list(struct page *page,
35del_page_from_lru_list(struct zone *zone, struct page *page, enum lru_list lru) 34 struct lruvec *lruvec, enum lru_list lru)
36{ 35{
37 mem_cgroup_lru_del_list(page, lru); 36 int nr_pages = hpage_nr_pages(page);
37 mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
38 list_del(&page->lru); 38 list_del(&page->lru);
39 __mod_zone_page_state(zone, NR_LRU_BASE + lru, -hpage_nr_pages(page)); 39 __mod_zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru, -nr_pages);
40} 40}
41 41
42/** 42/**
@@ -61,7 +61,7 @@ static inline enum lru_list page_lru_base_type(struct page *page)
61 * Returns the LRU list a page was on, as an index into the array of LRU 61 * Returns the LRU list a page was on, as an index into the array of LRU
62 * lists; and clears its Unevictable or Active flags, ready for freeing. 62 * lists; and clears its Unevictable or Active flags, ready for freeing.
63 */ 63 */
64static inline enum lru_list page_off_lru(struct page *page) 64static __always_inline enum lru_list page_off_lru(struct page *page)
65{ 65{
66 enum lru_list lru; 66 enum lru_list lru;
67 67
@@ -85,7 +85,7 @@ static inline enum lru_list page_off_lru(struct page *page)
85 * Returns the LRU list a page should be on, as an index 85 * Returns the LRU list a page should be on, as an index
86 * into the array of LRU lists. 86 * into the array of LRU lists.
87 */ 87 */
88static inline enum lru_list page_lru(struct page *page) 88static __always_inline enum lru_list page_lru(struct page *page)
89{ 89{
90 enum lru_list lru; 90 enum lru_list lru;
91 91
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 26574c726121..dad95bdd06d7 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -345,17 +345,6 @@ struct mm_struct {
345 /* Architecture-specific MM context */ 345 /* Architecture-specific MM context */
346 mm_context_t context; 346 mm_context_t context;
347 347
348 /* Swap token stuff */
349 /*
350 * Last value of global fault stamp as seen by this process.
351 * In other words, this value gives an indication of how long
352 * it has been since this task got the token.
353 * Look at mm/thrash.c
354 */
355 unsigned int faultstamp;
356 unsigned int token_priority;
357 unsigned int last_interval;
358
359 unsigned long flags; /* Must use atomic bitops to access the bits */ 348 unsigned long flags; /* Must use atomic bitops to access the bits */
360 349
361 struct core_state *core_state; /* coredumping support */ 350 struct core_state *core_state; /* coredumping support */
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 629b823f8836..d76513b5b263 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -58,6 +58,10 @@ struct mmc_ext_csd {
58 unsigned int generic_cmd6_time; /* Units: 10ms */ 58 unsigned int generic_cmd6_time; /* Units: 10ms */
59 unsigned int power_off_longtime; /* Units: ms */ 59 unsigned int power_off_longtime; /* Units: ms */
60 unsigned int hs_max_dtr; 60 unsigned int hs_max_dtr;
61#define MMC_HIGH_26_MAX_DTR 26000000
62#define MMC_HIGH_52_MAX_DTR 52000000
63#define MMC_HIGH_DDR_MAX_DTR 52000000
64#define MMC_HS200_MAX_DTR 200000000
61 unsigned int sectors; 65 unsigned int sectors;
62 unsigned int card_type; 66 unsigned int card_type;
63 unsigned int hc_erase_size; /* In sectors */ 67 unsigned int hc_erase_size; /* In sectors */
diff --git a/include/linux/mmc/dw_mmc.h b/include/linux/mmc/dw_mmc.h
index 8f66e28f5a0f..7a7ebd367cfd 100644
--- a/include/linux/mmc/dw_mmc.h
+++ b/include/linux/mmc/dw_mmc.h
@@ -125,6 +125,7 @@ struct dw_mci {
125 struct mmc_request *mrq; 125 struct mmc_request *mrq;
126 struct mmc_command *cmd; 126 struct mmc_command *cmd;
127 struct mmc_data *data; 127 struct mmc_data *data;
128 struct workqueue_struct *card_workqueue;
128 129
129 /* DMA interface members*/ 130 /* DMA interface members*/
130 int use_dma; 131 int use_dma;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index cbde4b7e675e..0707d228d7f1 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -297,6 +297,7 @@ struct mmc_host {
297 297
298 unsigned int sdio_irqs; 298 unsigned int sdio_irqs;
299 struct task_struct *sdio_irq_thread; 299 struct task_struct *sdio_irq_thread;
300 bool sdio_irq_pending;
300 atomic_t sdio_irq_thread_abort; 301 atomic_t sdio_irq_thread_abort;
301 302
302 mmc_pm_flag_t pm_flags; /* requested pm features */ 303 mmc_pm_flag_t pm_flags; /* requested pm features */
@@ -352,6 +353,7 @@ extern int mmc_cache_ctrl(struct mmc_host *, u8);
352static inline void mmc_signal_sdio_irq(struct mmc_host *host) 353static inline void mmc_signal_sdio_irq(struct mmc_host *host)
353{ 354{
354 host->ops->enable_sdio_irq(host, 0); 355 host->ops->enable_sdio_irq(host, 0);
356 host->sdio_irq_pending = true;
355 wake_up_process(host->sdio_irq_thread); 357 wake_up_process(host->sdio_irq_thread);
356} 358}
357 359
diff --git a/include/linux/mmc/mmc.h b/include/linux/mmc/mmc.h
index b822a2cb6008..d425cab144d9 100644
--- a/include/linux/mmc/mmc.h
+++ b/include/linux/mmc/mmc.h
@@ -354,66 +354,6 @@ struct _mmc_csd {
354#define EXT_CSD_CARD_TYPE_SDR_1_2V (1<<5) /* Card can run at 200MHz */ 354#define EXT_CSD_CARD_TYPE_SDR_1_2V (1<<5) /* Card can run at 200MHz */
355 /* SDR mode @1.2V I/O */ 355 /* SDR mode @1.2V I/O */
356 356
357#define EXT_CSD_CARD_TYPE_SDR_200 (EXT_CSD_CARD_TYPE_SDR_1_8V | \
358 EXT_CSD_CARD_TYPE_SDR_1_2V)
359
360#define EXT_CSD_CARD_TYPE_SDR_ALL (EXT_CSD_CARD_TYPE_SDR_200 | \
361 EXT_CSD_CARD_TYPE_52 | \
362 EXT_CSD_CARD_TYPE_26)
363
364#define EXT_CSD_CARD_TYPE_SDR_1_2V_ALL (EXT_CSD_CARD_TYPE_SDR_1_2V | \
365 EXT_CSD_CARD_TYPE_52 | \
366 EXT_CSD_CARD_TYPE_26)
367
368#define EXT_CSD_CARD_TYPE_SDR_1_8V_ALL (EXT_CSD_CARD_TYPE_SDR_1_8V | \
369 EXT_CSD_CARD_TYPE_52 | \
370 EXT_CSD_CARD_TYPE_26)
371
372#define EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_8V (EXT_CSD_CARD_TYPE_SDR_1_2V | \
373 EXT_CSD_CARD_TYPE_DDR_1_8V | \
374 EXT_CSD_CARD_TYPE_52 | \
375 EXT_CSD_CARD_TYPE_26)
376
377#define EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_8V (EXT_CSD_CARD_TYPE_SDR_1_8V | \
378 EXT_CSD_CARD_TYPE_DDR_1_8V | \
379 EXT_CSD_CARD_TYPE_52 | \
380 EXT_CSD_CARD_TYPE_26)
381
382#define EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_1_2V (EXT_CSD_CARD_TYPE_SDR_1_2V | \
383 EXT_CSD_CARD_TYPE_DDR_1_2V | \
384 EXT_CSD_CARD_TYPE_52 | \
385 EXT_CSD_CARD_TYPE_26)
386
387#define EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_1_2V (EXT_CSD_CARD_TYPE_SDR_1_8V | \
388 EXT_CSD_CARD_TYPE_DDR_1_2V | \
389 EXT_CSD_CARD_TYPE_52 | \
390 EXT_CSD_CARD_TYPE_26)
391
392#define EXT_CSD_CARD_TYPE_SDR_1_2V_DDR_52 (EXT_CSD_CARD_TYPE_SDR_1_2V | \
393 EXT_CSD_CARD_TYPE_DDR_52 | \
394 EXT_CSD_CARD_TYPE_52 | \
395 EXT_CSD_CARD_TYPE_26)
396
397#define EXT_CSD_CARD_TYPE_SDR_1_8V_DDR_52 (EXT_CSD_CARD_TYPE_SDR_1_8V | \
398 EXT_CSD_CARD_TYPE_DDR_52 | \
399 EXT_CSD_CARD_TYPE_52 | \
400 EXT_CSD_CARD_TYPE_26)
401
402#define EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_8V (EXT_CSD_CARD_TYPE_SDR_200 | \
403 EXT_CSD_CARD_TYPE_DDR_1_8V | \
404 EXT_CSD_CARD_TYPE_52 | \
405 EXT_CSD_CARD_TYPE_26)
406
407#define EXT_CSD_CARD_TYPE_SDR_ALL_DDR_1_2V (EXT_CSD_CARD_TYPE_SDR_200 | \
408 EXT_CSD_CARD_TYPE_DDR_1_2V | \
409 EXT_CSD_CARD_TYPE_52 | \
410 EXT_CSD_CARD_TYPE_26)
411
412#define EXT_CSD_CARD_TYPE_SDR_ALL_DDR_52 (EXT_CSD_CARD_TYPE_SDR_200 | \
413 EXT_CSD_CARD_TYPE_DDR_52 | \
414 EXT_CSD_CARD_TYPE_52 | \
415 EXT_CSD_CARD_TYPE_26)
416
417#define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */ 357#define EXT_CSD_BUS_WIDTH_1 0 /* Card is in 1 bit mode */
418#define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */ 358#define EXT_CSD_BUS_WIDTH_4 1 /* Card is in 4 bit mode */
419#define EXT_CSD_BUS_WIDTH_8 2 /* Card is in 8 bit mode */ 359#define EXT_CSD_BUS_WIDTH_8 2 /* Card is in 8 bit mode */
diff --git a/arch/arm/mach-mxs/include/mach/mmc.h b/include/linux/mmc/mxs-mmc.h
index 211547a05564..7c2ad3a7f2f3 100644
--- a/arch/arm/mach-mxs/include/mach/mmc.h
+++ b/include/linux/mmc/mxs-mmc.h
@@ -6,8 +6,8 @@
6 * published by the Free Software Foundation. 6 * published by the Free Software Foundation.
7 */ 7 */
8 8
9#ifndef __MACH_MXS_MMC_H__ 9#ifndef __LINUX_MMC_MXS_MMC_H__
10#define __MACH_MXS_MMC_H__ 10#define __LINUX_MMC_MXS_MMC_H__
11 11
12struct mxs_mmc_platform_data { 12struct mxs_mmc_platform_data {
13 int wp_gpio; /* write protect pin */ 13 int wp_gpio; /* write protect pin */
@@ -15,4 +15,5 @@ struct mxs_mmc_platform_data {
15#define SLOTF_4_BIT_CAPABLE (1 << 0) 15#define SLOTF_4_BIT_CAPABLE (1 << 0)
16#define SLOTF_8_BIT_CAPABLE (1 << 1) 16#define SLOTF_8_BIT_CAPABLE (1 << 1)
17}; 17};
18#endif /* __MACH_MXS_MMC_H__ */ 18
19#endif /* __LINUX_MMC_MXS_MMC_H__ */
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index c04ecfe03f7f..580bd587d916 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -4,7 +4,7 @@
4#ifdef CONFIG_DEBUG_VM 4#ifdef CONFIG_DEBUG_VM
5#define VM_BUG_ON(cond) BUG_ON(cond) 5#define VM_BUG_ON(cond) BUG_ON(cond)
6#else 6#else
7#define VM_BUG_ON(cond) do { (void)(cond); } while (0) 7#define VM_BUG_ON(cond) BUILD_BUG_ON_INVALID(cond)
8#endif 8#endif
9 9
10#ifdef CONFIG_DEBUG_VIRTUAL 10#ifdef CONFIG_DEBUG_VIRTUAL
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 41aa49b74821..2427706f78b4 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -35,13 +35,39 @@
35 */ 35 */
36#define PAGE_ALLOC_COSTLY_ORDER 3 36#define PAGE_ALLOC_COSTLY_ORDER 3
37 37
38#define MIGRATE_UNMOVABLE 0 38enum {
39#define MIGRATE_RECLAIMABLE 1 39 MIGRATE_UNMOVABLE,
40#define MIGRATE_MOVABLE 2 40 MIGRATE_RECLAIMABLE,
41#define MIGRATE_PCPTYPES 3 /* the number of types on the pcp lists */ 41 MIGRATE_MOVABLE,
42#define MIGRATE_RESERVE 3 42 MIGRATE_PCPTYPES, /* the number of types on the pcp lists */
43#define MIGRATE_ISOLATE 4 /* can't allocate from here */ 43 MIGRATE_RESERVE = MIGRATE_PCPTYPES,
44#define MIGRATE_TYPES 5 44#ifdef CONFIG_CMA
45 /*
46 * MIGRATE_CMA migration type is designed to mimic the way
47 * ZONE_MOVABLE works. Only movable pages can be allocated
48 * from MIGRATE_CMA pageblocks and page allocator never
49 * implicitly change migration type of MIGRATE_CMA pageblock.
50 *
51 * The way to use it is to change migratetype of a range of
52 * pageblocks to MIGRATE_CMA which can be done by
53 * __free_pageblock_cma() function. What is important though
54 * is that a range of pageblocks must be aligned to
55 * MAX_ORDER_NR_PAGES should biggest page be bigger then
56 * a single pageblock.
57 */
58 MIGRATE_CMA,
59#endif
60 MIGRATE_ISOLATE, /* can't allocate from here */
61 MIGRATE_TYPES
62};
63
64#ifdef CONFIG_CMA
65# define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
66# define cma_wmark_pages(zone) zone->min_cma_pages
67#else
68# define is_migrate_cma(migratetype) false
69# define cma_wmark_pages(zone) 0
70#endif
45 71
46#define for_each_migratetype_order(order, type) \ 72#define for_each_migratetype_order(order, type) \
47 for (order = 0; order < MAX_ORDER; order++) \ 73 for (order = 0; order < MAX_ORDER; order++) \
@@ -159,8 +185,25 @@ static inline int is_unevictable_lru(enum lru_list lru)
159 return (lru == LRU_UNEVICTABLE); 185 return (lru == LRU_UNEVICTABLE);
160} 186}
161 187
188struct zone_reclaim_stat {
189 /*
190 * The pageout code in vmscan.c keeps track of how many of the
191 * mem/swap backed and file backed pages are refeferenced.
192 * The higher the rotated/scanned ratio, the more valuable
193 * that cache is.
194 *
195 * The anon LRU stats live in [0], file LRU stats in [1]
196 */
197 unsigned long recent_rotated[2];
198 unsigned long recent_scanned[2];
199};
200
162struct lruvec { 201struct lruvec {
163 struct list_head lists[NR_LRU_LISTS]; 202 struct list_head lists[NR_LRU_LISTS];
203 struct zone_reclaim_stat reclaim_stat;
204#ifdef CONFIG_CGROUP_MEM_RES_CTLR
205 struct zone *zone;
206#endif
164}; 207};
165 208
166/* Mask used at gathering information at once (see memcontrol.c) */ 209/* Mask used at gathering information at once (see memcontrol.c) */
@@ -169,16 +212,12 @@ struct lruvec {
169#define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON) 212#define LRU_ALL_EVICTABLE (LRU_ALL_FILE | LRU_ALL_ANON)
170#define LRU_ALL ((1 << NR_LRU_LISTS) - 1) 213#define LRU_ALL ((1 << NR_LRU_LISTS) - 1)
171 214
172/* Isolate inactive pages */
173#define ISOLATE_INACTIVE ((__force isolate_mode_t)0x1)
174/* Isolate active pages */
175#define ISOLATE_ACTIVE ((__force isolate_mode_t)0x2)
176/* Isolate clean file */ 215/* Isolate clean file */
177#define ISOLATE_CLEAN ((__force isolate_mode_t)0x4) 216#define ISOLATE_CLEAN ((__force isolate_mode_t)0x1)
178/* Isolate unmapped file */ 217/* Isolate unmapped file */
179#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x8) 218#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
180/* Isolate for asynchronous migration */ 219/* Isolate for asynchronous migration */
181#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x10) 220#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)
182 221
183/* LRU Isolation modes. */ 222/* LRU Isolation modes. */
184typedef unsigned __bitwise__ isolate_mode_t; 223typedef unsigned __bitwise__ isolate_mode_t;
@@ -287,19 +326,6 @@ enum zone_type {
287#error ZONES_SHIFT -- too many zones configured adjust calculation 326#error ZONES_SHIFT -- too many zones configured adjust calculation
288#endif 327#endif
289 328
290struct zone_reclaim_stat {
291 /*
292 * The pageout code in vmscan.c keeps track of how many of the
293 * mem/swap backed and file backed pages are refeferenced.
294 * The higher the rotated/scanned ratio, the more valuable
295 * that cache is.
296 *
297 * The anon LRU stats live in [0], file LRU stats in [1]
298 */
299 unsigned long recent_rotated[2];
300 unsigned long recent_scanned[2];
301};
302
303struct zone { 329struct zone {
304 /* Fields commonly accessed by the page allocator */ 330 /* Fields commonly accessed by the page allocator */
305 331
@@ -347,6 +373,13 @@ struct zone {
347 /* see spanned/present_pages for more description */ 373 /* see spanned/present_pages for more description */
348 seqlock_t span_seqlock; 374 seqlock_t span_seqlock;
349#endif 375#endif
376#ifdef CONFIG_CMA
377 /*
378 * CMA needs to increase watermark levels during the allocation
379 * process to make sure that the system is not starved.
380 */
381 unsigned long min_cma_pages;
382#endif
350 struct free_area free_area[MAX_ORDER]; 383 struct free_area free_area[MAX_ORDER];
351 384
352#ifndef CONFIG_SPARSEMEM 385#ifndef CONFIG_SPARSEMEM
@@ -374,8 +407,6 @@ struct zone {
374 spinlock_t lru_lock; 407 spinlock_t lru_lock;
375 struct lruvec lruvec; 408 struct lruvec lruvec;
376 409
377 struct zone_reclaim_stat reclaim_stat;
378
379 unsigned long pages_scanned; /* since last reclaim */ 410 unsigned long pages_scanned; /* since last reclaim */
380 unsigned long flags; /* zone flags, see below */ 411 unsigned long flags; /* zone flags, see below */
381 412
@@ -701,6 +732,17 @@ extern int init_currently_empty_zone(struct zone *zone, unsigned long start_pfn,
701 unsigned long size, 732 unsigned long size,
702 enum memmap_context context); 733 enum memmap_context context);
703 734
735extern void lruvec_init(struct lruvec *lruvec, struct zone *zone);
736
737static inline struct zone *lruvec_zone(struct lruvec *lruvec)
738{
739#ifdef CONFIG_CGROUP_MEM_RES_CTLR
740 return lruvec->zone;
741#else
742 return container_of(lruvec, struct zone, lruvec);
743#endif
744}
745
704#ifdef CONFIG_HAVE_MEMORY_PRESENT 746#ifdef CONFIG_HAVE_MEMORY_PRESENT
705void memory_present(int nid, unsigned long start, unsigned long end); 747void memory_present(int nid, unsigned long start, unsigned long end);
706#else 748#else
diff --git a/include/linux/mv643xx_eth.h b/include/linux/mv643xx_eth.h
index 30b0c4e78f91..51bf8ada6dc0 100644
--- a/include/linux/mv643xx_eth.h
+++ b/include/linux/mv643xx_eth.h
@@ -18,7 +18,6 @@
18struct mv643xx_eth_shared_platform_data { 18struct mv643xx_eth_shared_platform_data {
19 struct mbus_dram_target_info *dram; 19 struct mbus_dram_target_info *dram;
20 struct platform_device *shared_smi; 20 struct platform_device *shared_smi;
21 unsigned int t_clk;
22 /* 21 /*
23 * Max packet size for Tx IP/Layer 4 checksum, when set to 0, default 22 * Max packet size for Tx IP/Layer 4 checksum, when set to 0, default
24 * limit of 9KiB will be used. 23 * limit of 9KiB will be used.
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 0987146b0637..af2d2fa30eee 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -69,6 +69,10 @@
69#define NFS4_CDFC4_FORE_OR_BOTH 0x3 69#define NFS4_CDFC4_FORE_OR_BOTH 0x3
70#define NFS4_CDFC4_BACK_OR_BOTH 0x7 70#define NFS4_CDFC4_BACK_OR_BOTH 0x7
71 71
72#define NFS4_CDFS4_FORE 0x1
73#define NFS4_CDFS4_BACK 0x2
74#define NFS4_CDFS4_BOTH 0x3
75
72#define NFS4_SET_TO_SERVER_TIME 0 76#define NFS4_SET_TO_SERVER_TIME 0
73#define NFS4_SET_TO_CLIENT_TIME 1 77#define NFS4_SET_TO_CLIENT_TIME 1
74 78
@@ -526,6 +530,13 @@ enum lock_type4 {
526#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23) 530#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23)
527#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30) 531#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30)
528#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1) 532#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
533#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
534
535/* MDS threshold bitmap bits */
536#define THRESHOLD_RD (1UL << 0)
537#define THRESHOLD_WR (1UL << 1)
538#define THRESHOLD_RD_IO (1UL << 2)
539#define THRESHOLD_WR_IO (1UL << 3)
529 540
530#define NFSPROC4_NULL 0 541#define NFSPROC4_NULL 0
531#define NFSPROC4_COMPOUND 1 542#define NFSPROC4_COMPOUND 1
@@ -596,6 +607,8 @@ enum {
596 NFSPROC4_CLNT_TEST_STATEID, 607 NFSPROC4_CLNT_TEST_STATEID,
597 NFSPROC4_CLNT_FREE_STATEID, 608 NFSPROC4_CLNT_FREE_STATEID,
598 NFSPROC4_CLNT_GETDEVICELIST, 609 NFSPROC4_CLNT_GETDEVICELIST,
610 NFSPROC4_CLNT_BIND_CONN_TO_SESSION,
611 NFSPROC4_CLNT_DESTROY_CLIENTID,
599}; 612};
600 613
601/* nfs41 types */ 614/* nfs41 types */
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 52a1bdb4ee2b..b23cfc120edb 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -102,6 +102,7 @@ struct nfs_open_context {
102 int error; 102 int error;
103 103
104 struct list_head list; 104 struct list_head list;
105 struct nfs4_threshold *mdsthreshold;
105}; 106};
106 107
107struct nfs_open_dir_context { 108struct nfs_open_dir_context {
@@ -179,8 +180,7 @@ struct nfs_inode {
179 __be32 cookieverf[2]; 180 __be32 cookieverf[2];
180 181
181 unsigned long npages; 182 unsigned long npages;
182 unsigned long ncommit; 183 struct nfs_mds_commit_info commit_info;
183 struct list_head commit_list;
184 184
185 /* Open contexts for shared mmap writes */ 185 /* Open contexts for shared mmap writes */
186 struct list_head open_files; 186 struct list_head open_files;
@@ -201,8 +201,10 @@ struct nfs_inode {
201 201
202 /* pNFS layout information */ 202 /* pNFS layout information */
203 struct pnfs_layout_hdr *layout; 203 struct pnfs_layout_hdr *layout;
204 atomic_t commits_outstanding;
205#endif /* CONFIG_NFS_V4*/ 204#endif /* CONFIG_NFS_V4*/
205 /* how many bytes have been written/read and how many bytes queued up */
206 __u64 write_io;
207 __u64 read_io;
206#ifdef CONFIG_NFS_FSCACHE 208#ifdef CONFIG_NFS_FSCACHE
207 struct fscache_cookie *fscache; 209 struct fscache_cookie *fscache;
208#endif 210#endif
@@ -230,7 +232,6 @@ struct nfs_inode {
230#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */ 232#define NFS_INO_FSCACHE (5) /* inode can be cached by FS-Cache */
231#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */ 233#define NFS_INO_FSCACHE_LOCK (6) /* FS-Cache cookie management lock */
232#define NFS_INO_COMMIT (7) /* inode is committing unstable writes */ 234#define NFS_INO_COMMIT (7) /* inode is committing unstable writes */
233#define NFS_INO_PNFS_COMMIT (8) /* use pnfs code for commit */
234#define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */ 235#define NFS_INO_LAYOUTCOMMIT (9) /* layoutcommit required */
235#define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */ 236#define NFS_INO_LAYOUTCOMMITTING (10) /* layoutcommit inflight */
236 237
@@ -317,11 +318,6 @@ static inline int nfs_server_capable(struct inode *inode, int cap)
317 return NFS_SERVER(inode)->caps & cap; 318 return NFS_SERVER(inode)->caps & cap;
318} 319}
319 320
320static inline int NFS_USE_READDIRPLUS(struct inode *inode)
321{
322 return test_bit(NFS_INO_ADVISE_RDPLUS, &NFS_I(inode)->flags);
323}
324
325static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf) 321static inline void nfs_set_verifier(struct dentry * dentry, unsigned long verf)
326{ 322{
327 dentry->d_time = verf; 323 dentry->d_time = verf;
@@ -552,8 +548,8 @@ extern int nfs_wb_page(struct inode *inode, struct page* page);
552extern int nfs_wb_page_cancel(struct inode *inode, struct page* page); 548extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
553#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4) 549#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
554extern int nfs_commit_inode(struct inode *, int); 550extern int nfs_commit_inode(struct inode *, int);
555extern struct nfs_write_data *nfs_commitdata_alloc(void); 551extern struct nfs_commit_data *nfs_commitdata_alloc(void);
556extern void nfs_commit_free(struct nfs_write_data *wdata); 552extern void nfs_commit_free(struct nfs_commit_data *data);
557#else 553#else
558static inline int 554static inline int
559nfs_commit_inode(struct inode *inode, int how) 555nfs_commit_inode(struct inode *inode, int how)
@@ -569,12 +565,6 @@ nfs_have_writebacks(struct inode *inode)
569} 565}
570 566
571/* 567/*
572 * Allocate nfs_write_data structures
573 */
574extern struct nfs_write_data *nfs_writedata_alloc(unsigned int npages);
575extern void nfs_writedata_free(struct nfs_write_data *);
576
577/*
578 * linux/fs/nfs/read.c 568 * linux/fs/nfs/read.c
579 */ 569 */
580extern int nfs_readpage(struct file *, struct page *); 570extern int nfs_readpage(struct file *, struct page *);
@@ -585,12 +575,6 @@ extern int nfs_readpage_async(struct nfs_open_context *, struct inode *,
585 struct page *); 575 struct page *);
586 576
587/* 577/*
588 * Allocate nfs_read_data structures
589 */
590extern struct nfs_read_data *nfs_readdata_alloc(unsigned int npages);
591extern void nfs_readdata_free(struct nfs_read_data *);
592
593/*
594 * linux/fs/nfs3proc.c 578 * linux/fs/nfs3proc.c
595 */ 579 */
596#ifdef CONFIG_NFS_V3_ACL 580#ifdef CONFIG_NFS_V3_ACL
@@ -654,6 +638,7 @@ nfs_fileid_to_ino_t(u64 fileid)
654#define NFSDBG_FSCACHE 0x0800 638#define NFSDBG_FSCACHE 0x0800
655#define NFSDBG_PNFS 0x1000 639#define NFSDBG_PNFS 0x1000
656#define NFSDBG_PNFS_LD 0x2000 640#define NFSDBG_PNFS_LD 0x2000
641#define NFSDBG_STATE 0x4000
657#define NFSDBG_ALL 0xFFFF 642#define NFSDBG_ALL 0xFFFF
658 643
659#ifdef __KERNEL__ 644#ifdef __KERNEL__
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 7073fc74481c..fbb78fb09bd2 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -17,7 +17,7 @@ struct nfs4_sequence_args;
17struct nfs4_sequence_res; 17struct nfs4_sequence_res;
18struct nfs_server; 18struct nfs_server;
19struct nfs4_minor_version_ops; 19struct nfs4_minor_version_ops;
20struct server_scope; 20struct nfs41_server_scope;
21struct nfs41_impl_id; 21struct nfs41_impl_id;
22 22
23/* 23/*
@@ -35,6 +35,9 @@ struct nfs_client {
35#define NFS_CS_RENEWD 3 /* - renewd started */ 35#define NFS_CS_RENEWD 3 /* - renewd started */
36#define NFS_CS_STOP_RENEW 4 /* no more state to renew */ 36#define NFS_CS_STOP_RENEW 4 /* no more state to renew */
37#define NFS_CS_CHECK_LEASE_TIME 5 /* need to check lease time */ 37#define NFS_CS_CHECK_LEASE_TIME 5 /* need to check lease time */
38 unsigned long cl_flags; /* behavior switches */
39#define NFS_CS_NORESVPORT 0 /* - use ephemeral src port */
40#define NFS_CS_DISCRTRY 1 /* - disconnect on RPC retry */
38 struct sockaddr_storage cl_addr; /* server identifier */ 41 struct sockaddr_storage cl_addr; /* server identifier */
39 size_t cl_addrlen; 42 size_t cl_addrlen;
40 char * cl_hostname; /* hostname of server */ 43 char * cl_hostname; /* hostname of server */
@@ -61,9 +64,6 @@ struct nfs_client {
61 64
62 struct rpc_wait_queue cl_rpcwaitq; 65 struct rpc_wait_queue cl_rpcwaitq;
63 66
64 /* used for the setclientid verifier */
65 struct timespec cl_boot_time;
66
67 /* idmapper */ 67 /* idmapper */
68 struct idmap * cl_idmap; 68 struct idmap * cl_idmap;
69 69
@@ -79,16 +79,17 @@ struct nfs_client {
79 u32 cl_seqid; 79 u32 cl_seqid;
80 /* The flags used for obtaining the clientid during EXCHANGE_ID */ 80 /* The flags used for obtaining the clientid during EXCHANGE_ID */
81 u32 cl_exchange_flags; 81 u32 cl_exchange_flags;
82 struct nfs4_session *cl_session; /* sharred session */ 82 struct nfs4_session *cl_session; /* shared session */
83 struct nfs41_server_owner *cl_serverowner;
84 struct nfs41_server_scope *cl_serverscope;
85 struct nfs41_impl_id *cl_implid;
83#endif /* CONFIG_NFS_V4 */ 86#endif /* CONFIG_NFS_V4 */
84 87
85#ifdef CONFIG_NFS_FSCACHE 88#ifdef CONFIG_NFS_FSCACHE
86 struct fscache_cookie *fscache; /* client index cache cookie */ 89 struct fscache_cookie *fscache; /* client index cache cookie */
87#endif 90#endif
88 91
89 struct server_scope *server_scope; /* from exchange_id */ 92 struct net *cl_net;
90 struct nfs41_impl_id *impl_id; /* from exchange_id */
91 struct net *net;
92}; 93};
93 94
94/* 95/*
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index eac30d6bec17..88d166b555e8 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -27,7 +27,6 @@ enum {
27 PG_CLEAN, 27 PG_CLEAN,
28 PG_NEED_COMMIT, 28 PG_NEED_COMMIT,
29 PG_NEED_RESCHED, 29 PG_NEED_RESCHED,
30 PG_PARTIAL_READ_FAILED,
31 PG_COMMIT_TO_DS, 30 PG_COMMIT_TO_DS,
32}; 31};
33 32
@@ -37,7 +36,6 @@ struct nfs_page {
37 struct page *wb_page; /* page to read in/write out */ 36 struct page *wb_page; /* page to read in/write out */
38 struct nfs_open_context *wb_context; /* File state context info */ 37 struct nfs_open_context *wb_context; /* File state context info */
39 struct nfs_lock_context *wb_lock_context; /* lock context info */ 38 struct nfs_lock_context *wb_lock_context; /* lock context info */
40 atomic_t wb_complete; /* i/os we're waiting for */
41 pgoff_t wb_index; /* Offset >> PAGE_CACHE_SHIFT */ 39 pgoff_t wb_index; /* Offset >> PAGE_CACHE_SHIFT */
42 unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */ 40 unsigned int wb_offset, /* Offset & ~PAGE_CACHE_MASK */
43 wb_pgbase, /* Start of page data */ 41 wb_pgbase, /* Start of page data */
@@ -68,7 +66,9 @@ struct nfs_pageio_descriptor {
68 int pg_ioflags; 66 int pg_ioflags;
69 int pg_error; 67 int pg_error;
70 const struct rpc_call_ops *pg_rpc_callops; 68 const struct rpc_call_ops *pg_rpc_callops;
69 const struct nfs_pgio_completion_ops *pg_completion_ops;
71 struct pnfs_layout_segment *pg_lseg; 70 struct pnfs_layout_segment *pg_lseg;
71 struct nfs_direct_req *pg_dreq;
72}; 72};
73 73
74#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags)) 74#define NFS_WBACK_BUSY(req) (test_bit(PG_BUSY,&(req)->wb_flags))
@@ -84,6 +84,7 @@ extern void nfs_release_request(struct nfs_page *req);
84extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc, 84extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
85 struct inode *inode, 85 struct inode *inode,
86 const struct nfs_pageio_ops *pg_ops, 86 const struct nfs_pageio_ops *pg_ops,
87 const struct nfs_pgio_completion_ops *compl_ops,
87 size_t bsize, 88 size_t bsize,
88 int how); 89 int how);
89extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *, 90extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
@@ -95,26 +96,17 @@ extern bool nfs_generic_pg_test(struct nfs_pageio_descriptor *desc,
95 struct nfs_page *req); 96 struct nfs_page *req);
96extern int nfs_wait_on_request(struct nfs_page *); 97extern int nfs_wait_on_request(struct nfs_page *);
97extern void nfs_unlock_request(struct nfs_page *req); 98extern void nfs_unlock_request(struct nfs_page *req);
99extern void nfs_unlock_and_release_request(struct nfs_page *req);
98 100
99/* 101/*
100 * Lock the page of an asynchronous request without getting a new reference 102 * Lock the page of an asynchronous request
101 */ 103 */
102static inline int 104static inline int
103nfs_lock_request_dontget(struct nfs_page *req)
104{
105 return !test_and_set_bit(PG_BUSY, &req->wb_flags);
106}
107
108static inline int
109nfs_lock_request(struct nfs_page *req) 105nfs_lock_request(struct nfs_page *req)
110{ 106{
111 if (test_and_set_bit(PG_BUSY, &req->wb_flags)) 107 return !test_and_set_bit(PG_BUSY, &req->wb_flags);
112 return 0;
113 kref_get(&req->wb_kref);
114 return 1;
115} 108}
116 109
117
118/** 110/**
119 * nfs_list_add_request - Insert a request into a list 111 * nfs_list_add_request - Insert a request into a list
120 * @req: request 112 * @req: request
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 7ba3551a0414..d1a7bf51c326 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -35,6 +35,15 @@ static inline int nfs_fsid_equal(const struct nfs_fsid *a, const struct nfs_fsid
35 return a->major == b->major && a->minor == b->minor; 35 return a->major == b->major && a->minor == b->minor;
36} 36}
37 37
38struct nfs4_threshold {
39 __u32 bm;
40 __u32 l_type;
41 __u64 rd_sz;
42 __u64 wr_sz;
43 __u64 rd_io_sz;
44 __u64 wr_io_sz;
45};
46
38struct nfs_fattr { 47struct nfs_fattr {
39 unsigned int valid; /* which fields are valid */ 48 unsigned int valid; /* which fields are valid */
40 umode_t mode; 49 umode_t mode;
@@ -67,6 +76,7 @@ struct nfs_fattr {
67 unsigned long gencount; 76 unsigned long gencount;
68 struct nfs4_string *owner_name; 77 struct nfs4_string *owner_name;
69 struct nfs4_string *group_name; 78 struct nfs4_string *group_name;
79 struct nfs4_threshold *mdsthreshold; /* pNFS threshold hints */
70}; 80};
71 81
72#define NFS_ATTR_FATTR_TYPE (1U << 0) 82#define NFS_ATTR_FATTR_TYPE (1U << 0)
@@ -106,14 +116,14 @@ struct nfs_fattr {
106 | NFS_ATTR_FATTR_FILEID \ 116 | NFS_ATTR_FATTR_FILEID \
107 | NFS_ATTR_FATTR_ATIME \ 117 | NFS_ATTR_FATTR_ATIME \
108 | NFS_ATTR_FATTR_MTIME \ 118 | NFS_ATTR_FATTR_MTIME \
109 | NFS_ATTR_FATTR_CTIME) 119 | NFS_ATTR_FATTR_CTIME \
120 | NFS_ATTR_FATTR_CHANGE)
110#define NFS_ATTR_FATTR_V2 (NFS_ATTR_FATTR \ 121#define NFS_ATTR_FATTR_V2 (NFS_ATTR_FATTR \
111 | NFS_ATTR_FATTR_BLOCKS_USED) 122 | NFS_ATTR_FATTR_BLOCKS_USED)
112#define NFS_ATTR_FATTR_V3 (NFS_ATTR_FATTR \ 123#define NFS_ATTR_FATTR_V3 (NFS_ATTR_FATTR \
113 | NFS_ATTR_FATTR_SPACE_USED) 124 | NFS_ATTR_FATTR_SPACE_USED)
114#define NFS_ATTR_FATTR_V4 (NFS_ATTR_FATTR \ 125#define NFS_ATTR_FATTR_V4 (NFS_ATTR_FATTR \
115 | NFS_ATTR_FATTR_SPACE_USED \ 126 | NFS_ATTR_FATTR_SPACE_USED)
116 | NFS_ATTR_FATTR_CHANGE)
117 127
118/* 128/*
119 * Info on the file system 129 * Info on the file system
@@ -338,7 +348,6 @@ struct nfs_openargs {
338 const struct qstr * name; 348 const struct qstr * name;
339 const struct nfs_server *server; /* Needed for ID mapping */ 349 const struct nfs_server *server; /* Needed for ID mapping */
340 const u32 * bitmask; 350 const u32 * bitmask;
341 const u32 * dir_bitmask;
342 __u32 claim; 351 __u32 claim;
343 struct nfs4_sequence_args seq_args; 352 struct nfs4_sequence_args seq_args;
344}; 353};
@@ -349,7 +358,6 @@ struct nfs_openres {
349 struct nfs4_change_info cinfo; 358 struct nfs4_change_info cinfo;
350 __u32 rflags; 359 __u32 rflags;
351 struct nfs_fattr * f_attr; 360 struct nfs_fattr * f_attr;
352 struct nfs_fattr * dir_attr;
353 struct nfs_seqid * seqid; 361 struct nfs_seqid * seqid;
354 const struct nfs_server *server; 362 const struct nfs_server *server;
355 fmode_t delegation_type; 363 fmode_t delegation_type;
@@ -519,12 +527,29 @@ struct nfs_writeres {
519}; 527};
520 528
521/* 529/*
530 * Arguments to the commit call.
531 */
532struct nfs_commitargs {
533 struct nfs_fh *fh;
534 __u64 offset;
535 __u32 count;
536 const u32 *bitmask;
537 struct nfs4_sequence_args seq_args;
538};
539
540struct nfs_commitres {
541 struct nfs_fattr *fattr;
542 struct nfs_writeverf *verf;
543 const struct nfs_server *server;
544 struct nfs4_sequence_res seq_res;
545};
546
547/*
522 * Common arguments to the unlink call 548 * Common arguments to the unlink call
523 */ 549 */
524struct nfs_removeargs { 550struct nfs_removeargs {
525 const struct nfs_fh *fh; 551 const struct nfs_fh *fh;
526 struct qstr name; 552 struct qstr name;
527 const u32 * bitmask;
528 struct nfs4_sequence_args seq_args; 553 struct nfs4_sequence_args seq_args;
529}; 554};
530 555
@@ -543,7 +568,6 @@ struct nfs_renameargs {
543 const struct nfs_fh *new_dir; 568 const struct nfs_fh *new_dir;
544 const struct qstr *old_name; 569 const struct qstr *old_name;
545 const struct qstr *new_name; 570 const struct qstr *new_name;
546 const u32 *bitmask;
547 struct nfs4_sequence_args seq_args; 571 struct nfs4_sequence_args seq_args;
548}; 572};
549 573
@@ -839,7 +863,6 @@ struct nfs4_create_res {
839 struct nfs_fh * fh; 863 struct nfs_fh * fh;
840 struct nfs_fattr * fattr; 864 struct nfs_fattr * fattr;
841 struct nfs4_change_info dir_cinfo; 865 struct nfs4_change_info dir_cinfo;
842 struct nfs_fattr * dir_fattr;
843 struct nfs4_sequence_res seq_res; 866 struct nfs4_sequence_res seq_res;
844}; 867};
845 868
@@ -1061,6 +1084,21 @@ struct nfstime4 {
1061}; 1084};
1062 1085
1063#ifdef CONFIG_NFS_V4_1 1086#ifdef CONFIG_NFS_V4_1
1087
1088struct pnfs_commit_bucket {
1089 struct list_head written;
1090 struct list_head committing;
1091 struct pnfs_layout_segment *wlseg;
1092 struct pnfs_layout_segment *clseg;
1093};
1094
1095struct pnfs_ds_commit_info {
1096 int nwritten;
1097 int ncommitting;
1098 int nbuckets;
1099 struct pnfs_commit_bucket *buckets;
1100};
1101
1064#define NFS4_EXCHANGE_ID_LEN (48) 1102#define NFS4_EXCHANGE_ID_LEN (48)
1065struct nfs41_exchange_id_args { 1103struct nfs41_exchange_id_args {
1066 struct nfs_client *client; 1104 struct nfs_client *client;
@@ -1070,13 +1108,13 @@ struct nfs41_exchange_id_args {
1070 u32 flags; 1108 u32 flags;
1071}; 1109};
1072 1110
1073struct server_owner { 1111struct nfs41_server_owner {
1074 uint64_t minor_id; 1112 uint64_t minor_id;
1075 uint32_t major_id_sz; 1113 uint32_t major_id_sz;
1076 char major_id[NFS4_OPAQUE_LIMIT]; 1114 char major_id[NFS4_OPAQUE_LIMIT];
1077}; 1115};
1078 1116
1079struct server_scope { 1117struct nfs41_server_scope {
1080 uint32_t server_scope_sz; 1118 uint32_t server_scope_sz;
1081 char server_scope[NFS4_OPAQUE_LIMIT]; 1119 char server_scope[NFS4_OPAQUE_LIMIT];
1082}; 1120};
@@ -1087,10 +1125,18 @@ struct nfs41_impl_id {
1087 struct nfstime4 date; 1125 struct nfstime4 date;
1088}; 1126};
1089 1127
1128struct nfs41_bind_conn_to_session_res {
1129 struct nfs4_session *session;
1130 u32 dir;
1131 bool use_conn_in_rdma_mode;
1132};
1133
1090struct nfs41_exchange_id_res { 1134struct nfs41_exchange_id_res {
1091 struct nfs_client *client; 1135 u64 clientid;
1136 u32 seqid;
1092 u32 flags; 1137 u32 flags;
1093 struct server_scope *server_scope; 1138 struct nfs41_server_owner *server_owner;
1139 struct nfs41_server_scope *server_scope;
1094 struct nfs41_impl_id *impl_id; 1140 struct nfs41_impl_id *impl_id;
1095}; 1141};
1096 1142
@@ -1143,35 +1189,114 @@ struct nfs41_free_stateid_res {
1143 struct nfs4_sequence_res seq_res; 1189 struct nfs4_sequence_res seq_res;
1144}; 1190};
1145 1191
1192#else
1193
1194struct pnfs_ds_commit_info {
1195};
1196
1146#endif /* CONFIG_NFS_V4_1 */ 1197#endif /* CONFIG_NFS_V4_1 */
1147 1198
1148struct nfs_page; 1199struct nfs_page;
1149 1200
1150#define NFS_PAGEVEC_SIZE (8U) 1201#define NFS_PAGEVEC_SIZE (8U)
1151 1202
1203struct nfs_page_array {
1204 struct page **pagevec;
1205 unsigned int npages; /* Max length of pagevec */
1206 struct page *page_array[NFS_PAGEVEC_SIZE];
1207};
1208
1152struct nfs_read_data { 1209struct nfs_read_data {
1210 struct nfs_pgio_header *header;
1211 struct list_head list;
1153 struct rpc_task task; 1212 struct rpc_task task;
1154 struct inode *inode;
1155 struct rpc_cred *cred;
1156 struct nfs_fattr fattr; /* fattr storage */ 1213 struct nfs_fattr fattr; /* fattr storage */
1157 struct list_head pages; /* Coalesced read requests */
1158 struct list_head list; /* lists of struct nfs_read_data */
1159 struct nfs_page *req; /* multi ops per nfs_page */
1160 struct page **pagevec;
1161 unsigned int npages; /* Max length of pagevec */
1162 struct nfs_readargs args; 1214 struct nfs_readargs args;
1163 struct nfs_readres res; 1215 struct nfs_readres res;
1164 unsigned long timestamp; /* For lease renewal */ 1216 unsigned long timestamp; /* For lease renewal */
1165 struct pnfs_layout_segment *lseg;
1166 struct nfs_client *ds_clp; /* pNFS data server */
1167 const struct rpc_call_ops *mds_ops;
1168 int (*read_done_cb) (struct rpc_task *task, struct nfs_read_data *data); 1217 int (*read_done_cb) (struct rpc_task *task, struct nfs_read_data *data);
1169 __u64 mds_offset; 1218 __u64 mds_offset;
1219 struct nfs_page_array pages;
1220 struct nfs_client *ds_clp; /* pNFS data server */
1221};
1222
1223/* used as flag bits in nfs_pgio_header */
1224enum {
1225 NFS_IOHDR_ERROR = 0,
1226 NFS_IOHDR_EOF,
1227 NFS_IOHDR_REDO,
1228 NFS_IOHDR_NEED_COMMIT,
1229 NFS_IOHDR_NEED_RESCHED,
1230};
1231
1232struct nfs_pgio_header {
1233 struct inode *inode;
1234 struct rpc_cred *cred;
1235 struct list_head pages;
1236 struct list_head rpc_list;
1237 atomic_t refcnt;
1238 struct nfs_page *req;
1239 struct pnfs_layout_segment *lseg;
1240 loff_t io_start;
1241 const struct rpc_call_ops *mds_ops;
1242 void (*release) (struct nfs_pgio_header *hdr);
1243 const struct nfs_pgio_completion_ops *completion_ops;
1244 struct nfs_direct_req *dreq;
1245 spinlock_t lock;
1246 /* fields protected by lock */
1170 int pnfs_error; 1247 int pnfs_error;
1171 struct page *page_array[NFS_PAGEVEC_SIZE]; 1248 int error; /* merge with pnfs_error */
1249 unsigned long good_bytes; /* boundary of good data */
1250 unsigned long flags;
1251};
1252
1253struct nfs_read_header {
1254 struct nfs_pgio_header header;
1255 struct nfs_read_data rpc_data;
1172}; 1256};
1173 1257
1174struct nfs_write_data { 1258struct nfs_write_data {
1259 struct nfs_pgio_header *header;
1260 struct list_head list;
1261 struct rpc_task task;
1262 struct nfs_fattr fattr;
1263 struct nfs_writeverf verf;
1264 struct nfs_writeargs args; /* argument struct */
1265 struct nfs_writeres res; /* result struct */
1266 unsigned long timestamp; /* For lease renewal */
1267 int (*write_done_cb) (struct rpc_task *task, struct nfs_write_data *data);
1268 __u64 mds_offset; /* Filelayout dense stripe */
1269 struct nfs_page_array pages;
1270 struct nfs_client *ds_clp; /* pNFS data server */
1271};
1272
1273struct nfs_write_header {
1274 struct nfs_pgio_header header;
1275 struct nfs_write_data rpc_data;
1276};
1277
1278struct nfs_mds_commit_info {
1279 atomic_t rpcs_out;
1280 unsigned long ncommit;
1281 struct list_head list;
1282};
1283
1284struct nfs_commit_data;
1285struct nfs_inode;
1286struct nfs_commit_completion_ops {
1287 void (*error_cleanup) (struct nfs_inode *nfsi);
1288 void (*completion) (struct nfs_commit_data *data);
1289};
1290
1291struct nfs_commit_info {
1292 spinlock_t *lock;
1293 struct nfs_mds_commit_info *mds;
1294 struct pnfs_ds_commit_info *ds;
1295 struct nfs_direct_req *dreq; /* O_DIRECT request */
1296 const struct nfs_commit_completion_ops *completion_ops;
1297};
1298
1299struct nfs_commit_data {
1175 struct rpc_task task; 1300 struct rpc_task task;
1176 struct inode *inode; 1301 struct inode *inode;
1177 struct rpc_cred *cred; 1302 struct rpc_cred *cred;
@@ -1179,22 +1304,22 @@ struct nfs_write_data {
1179 struct nfs_writeverf verf; 1304 struct nfs_writeverf verf;
1180 struct list_head pages; /* Coalesced requests we wish to flush */ 1305 struct list_head pages; /* Coalesced requests we wish to flush */
1181 struct list_head list; /* lists of struct nfs_write_data */ 1306 struct list_head list; /* lists of struct nfs_write_data */
1182 struct nfs_page *req; /* multi ops per nfs_page */ 1307 struct nfs_direct_req *dreq; /* O_DIRECT request */
1183 struct page **pagevec; 1308 struct nfs_commitargs args; /* argument struct */
1184 unsigned int npages; /* Max length of pagevec */ 1309 struct nfs_commitres res; /* result struct */
1185 struct nfs_writeargs args; /* argument struct */ 1310 struct nfs_open_context *context;
1186 struct nfs_writeres res; /* result struct */
1187 struct pnfs_layout_segment *lseg; 1311 struct pnfs_layout_segment *lseg;
1188 struct nfs_client *ds_clp; /* pNFS data server */ 1312 struct nfs_client *ds_clp; /* pNFS data server */
1189 int ds_commit_index; 1313 int ds_commit_index;
1190 const struct rpc_call_ops *mds_ops; 1314 const struct rpc_call_ops *mds_ops;
1191 int (*write_done_cb) (struct rpc_task *task, struct nfs_write_data *data); 1315 const struct nfs_commit_completion_ops *completion_ops;
1192#ifdef CONFIG_NFS_V4 1316 int (*commit_done_cb) (struct rpc_task *task, struct nfs_commit_data *data);
1193 unsigned long timestamp; /* For lease renewal */ 1317};
1194#endif 1318
1195 __u64 mds_offset; /* Filelayout dense stripe */ 1319struct nfs_pgio_completion_ops {
1196 int pnfs_error; 1320 void (*error_cleanup)(struct list_head *head);
1197 struct page *page_array[NFS_PAGEVEC_SIZE]; 1321 void (*init_hdr)(struct nfs_pgio_header *hdr);
1322 void (*completion)(struct nfs_pgio_header *hdr);
1198}; 1323};
1199 1324
1200struct nfs_unlinkdata { 1325struct nfs_unlinkdata {
@@ -1234,11 +1359,13 @@ struct nfs_rpc_ops {
1234 1359
1235 int (*getroot) (struct nfs_server *, struct nfs_fh *, 1360 int (*getroot) (struct nfs_server *, struct nfs_fh *,
1236 struct nfs_fsinfo *); 1361 struct nfs_fsinfo *);
1362 struct vfsmount *(*submount) (struct nfs_server *, struct dentry *,
1363 struct nfs_fh *, struct nfs_fattr *);
1237 int (*getattr) (struct nfs_server *, struct nfs_fh *, 1364 int (*getattr) (struct nfs_server *, struct nfs_fh *,
1238 struct nfs_fattr *); 1365 struct nfs_fattr *);
1239 int (*setattr) (struct dentry *, struct nfs_fattr *, 1366 int (*setattr) (struct dentry *, struct nfs_fattr *,
1240 struct iattr *); 1367 struct iattr *);
1241 int (*lookup) (struct rpc_clnt *clnt, struct inode *, struct qstr *, 1368 int (*lookup) (struct inode *, struct qstr *,
1242 struct nfs_fh *, struct nfs_fattr *); 1369 struct nfs_fh *, struct nfs_fattr *);
1243 int (*access) (struct inode *, struct nfs_access_entry *); 1370 int (*access) (struct inode *, struct nfs_access_entry *);
1244 int (*readlink)(struct inode *, struct page *, unsigned int, 1371 int (*readlink)(struct inode *, struct page *, unsigned int,
@@ -1277,8 +1404,9 @@ struct nfs_rpc_ops {
1277 void (*write_setup) (struct nfs_write_data *, struct rpc_message *); 1404 void (*write_setup) (struct nfs_write_data *, struct rpc_message *);
1278 void (*write_rpc_prepare)(struct rpc_task *, struct nfs_write_data *); 1405 void (*write_rpc_prepare)(struct rpc_task *, struct nfs_write_data *);
1279 int (*write_done) (struct rpc_task *, struct nfs_write_data *); 1406 int (*write_done) (struct rpc_task *, struct nfs_write_data *);
1280 void (*commit_setup) (struct nfs_write_data *, struct rpc_message *); 1407 void (*commit_setup) (struct nfs_commit_data *, struct rpc_message *);
1281 int (*commit_done) (struct rpc_task *, struct nfs_write_data *); 1408 void (*commit_rpc_prepare)(struct rpc_task *, struct nfs_commit_data *);
1409 int (*commit_done) (struct rpc_task *, struct nfs_commit_data *);
1282 int (*lock)(struct file *, int, struct file_lock *); 1410 int (*lock)(struct file *, int, struct file_lock *);
1283 int (*lock_check_bounds)(const struct file_lock *); 1411 int (*lock_check_bounds)(const struct file_lock *);
1284 void (*clear_acl_cache)(struct inode *); 1412 void (*clear_acl_cache)(struct inode *);
@@ -1287,9 +1415,9 @@ struct nfs_rpc_ops {
1287 struct nfs_open_context *ctx, 1415 struct nfs_open_context *ctx,
1288 int open_flags, 1416 int open_flags,
1289 struct iattr *iattr); 1417 struct iattr *iattr);
1290 int (*init_client) (struct nfs_client *, const struct rpc_timeout *, 1418 struct nfs_client *
1291 const char *, rpc_authflavor_t, int); 1419 (*init_client) (struct nfs_client *, const struct rpc_timeout *,
1292 int (*secinfo)(struct inode *, const struct qstr *, struct nfs4_secinfo_flavors *); 1420 const char *, rpc_authflavor_t);
1293}; 1421};
1294 1422
1295/* 1423/*
diff --git a/include/linux/of_i2c.h b/include/linux/of_i2c.h
index 0efe8d465f55..1cb775f8e663 100644
--- a/include/linux/of_i2c.h
+++ b/include/linux/of_i2c.h
@@ -20,6 +20,10 @@ extern void of_i2c_register_devices(struct i2c_adapter *adap);
20/* must call put_device() when done with returned i2c_client device */ 20/* must call put_device() when done with returned i2c_client device */
21extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node); 21extern struct i2c_client *of_find_i2c_device_by_node(struct device_node *node);
22 22
23/* must call put_device() when done with returned i2c_adapter device */
24extern struct i2c_adapter *of_find_i2c_adapter_by_node(
25 struct device_node *node);
26
23#else 27#else
24static inline void of_i2c_register_devices(struct i2c_adapter *adap) 28static inline void of_i2c_register_devices(struct i2c_adapter *adap)
25{ 29{
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index f93e21700d3e..bb115deb7612 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -5,7 +5,7 @@
5 5
6struct pci_dev; 6struct pci_dev;
7struct of_irq; 7struct of_irq;
8int of_irq_map_pci(struct pci_dev *pdev, struct of_irq *out_irq); 8int of_irq_map_pci(const struct pci_dev *pdev, struct of_irq *out_irq);
9 9
10struct device_node; 10struct device_node;
11struct device_node *of_pci_find_child_device(struct device_node *parent, 11struct device_node *of_pci_find_child_device(struct device_node *parent,
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 3d7647536b03..e4c29bc72e70 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -43,8 +43,9 @@ enum oom_constraint {
43extern void compare_swap_oom_score_adj(int old_val, int new_val); 43extern void compare_swap_oom_score_adj(int old_val, int new_val);
44extern int test_set_oom_score_adj(int new_val); 44extern int test_set_oom_score_adj(int new_val);
45 45
46extern unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg, 46extern unsigned long oom_badness(struct task_struct *p,
47 const nodemask_t *nodemask, unsigned long totalpages); 47 struct mem_cgroup *memcg, const nodemask_t *nodemask,
48 unsigned long totalpages);
48extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags); 49extern int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
49extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags); 50extern void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_flags);
50 51
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 051c1b1ede4e..3bdcab30ca41 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -3,7 +3,7 @@
3 3
4/* 4/*
5 * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE. 5 * Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
6 * If specified range includes migrate types other than MOVABLE, 6 * If specified range includes migrate types other than MOVABLE or CMA,
7 * this will fail with -EBUSY. 7 * this will fail with -EBUSY.
8 * 8 *
9 * For isolating all pages in the range finally, the caller have to 9 * For isolating all pages in the range finally, the caller have to
@@ -11,27 +11,27 @@
11 * test it. 11 * test it.
12 */ 12 */
13extern int 13extern int
14start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn); 14start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
15 unsigned migratetype);
15 16
16/* 17/*
17 * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE. 18 * Changes MIGRATE_ISOLATE to MIGRATE_MOVABLE.
18 * target range is [start_pfn, end_pfn) 19 * target range is [start_pfn, end_pfn)
19 */ 20 */
20extern int 21extern int
21undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn); 22undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
23 unsigned migratetype);
22 24
23/* 25/*
24 * test all pages in [start_pfn, end_pfn)are isolated or not. 26 * Test all pages in [start_pfn, end_pfn) are isolated or not.
25 */ 27 */
26extern int 28int test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
27test_pages_isolated(unsigned long start_pfn, unsigned long end_pfn);
28 29
29/* 30/*
30 * Internal funcs.Changes pageblock's migrate type. 31 * Internal functions. Changes pageblock's migrate type.
31 * Please use make_pagetype_isolated()/make_pagetype_movable().
32 */ 32 */
33extern int set_migratetype_isolate(struct page *page); 33extern int set_migratetype_isolate(struct page *page);
34extern void unset_migratetype_isolate(struct page *page); 34extern void unset_migratetype_isolate(struct page *page, unsigned migratetype);
35 35
36 36
37#endif 37#endif
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index efa26b4da8d2..7cfad3bbb0cc 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -460,11 +460,11 @@ static inline int fault_in_pages_readable(const char __user *uaddr, int size)
460 */ 460 */
461static inline int fault_in_multipages_writeable(char __user *uaddr, int size) 461static inline int fault_in_multipages_writeable(char __user *uaddr, int size)
462{ 462{
463 int ret; 463 int ret = 0;
464 char __user *end = uaddr + size - 1; 464 char __user *end = uaddr + size - 1;
465 465
466 if (unlikely(size == 0)) 466 if (unlikely(size == 0))
467 return 0; 467 return ret;
468 468
469 /* 469 /*
470 * Writing zeroes into userspace here is OK, because we know that if 470 * Writing zeroes into userspace here is OK, because we know that if
@@ -489,11 +489,11 @@ static inline int fault_in_multipages_readable(const char __user *uaddr,
489 int size) 489 int size)
490{ 490{
491 volatile char c; 491 volatile char c;
492 int ret; 492 int ret = 0;
493 const char __user *end = uaddr + size - 1; 493 const char __user *end = uaddr + size - 1;
494 494
495 if (unlikely(size == 0)) 495 if (unlikely(size == 0))
496 return 0; 496 return ret;
497 497
498 while (uaddr <= end) { 498 while (uaddr <= end) {
499 ret = __get_user(c, uaddr); 499 ret = __get_user(c, uaddr);
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 17b7b5b01b4a..d8c379dba6ad 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -687,7 +687,7 @@ int __must_check pci_bus_add_device(struct pci_dev *dev);
687void pci_read_bridge_bases(struct pci_bus *child); 687void pci_read_bridge_bases(struct pci_bus *child);
688struct resource *pci_find_parent_resource(const struct pci_dev *dev, 688struct resource *pci_find_parent_resource(const struct pci_dev *dev,
689 struct resource *res); 689 struct resource *res);
690u8 pci_swizzle_interrupt_pin(struct pci_dev *dev, u8 pin); 690u8 pci_swizzle_interrupt_pin(const struct pci_dev *dev, u8 pin);
691int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge); 691int pci_get_interrupt_pin(struct pci_dev *dev, struct pci_dev **bridge);
692u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp); 692u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp);
693extern struct pci_dev *pci_dev_get(struct pci_dev *dev); 693extern struct pci_dev *pci_dev_get(struct pci_dev *dev);
@@ -1692,7 +1692,8 @@ extern void pci_release_bus_of_node(struct pci_bus *bus);
1692/* Arch may override this (weak) */ 1692/* Arch may override this (weak) */
1693extern struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus); 1693extern struct device_node * __weak pcibios_get_phb_of_node(struct pci_bus *bus);
1694 1694
1695static inline struct device_node *pci_device_to_OF_node(struct pci_dev *pdev) 1695static inline struct device_node *
1696pci_device_to_OF_node(const struct pci_dev *pdev)
1696{ 1697{
1697 return pdev ? pdev->dev.of_node : NULL; 1698 return pdev ? pdev->dev.of_node : NULL;
1698} 1699}
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 3329965ed63f..ab741b0d0074 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -2506,6 +2506,7 @@
2506#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F 2506#define PCI_DEVICE_ID_INTEL_MRST_SD2 0x084F
2507#define PCI_DEVICE_ID_INTEL_I960 0x0960 2507#define PCI_DEVICE_ID_INTEL_I960 0x0960
2508#define PCI_DEVICE_ID_INTEL_I960RM 0x0962 2508#define PCI_DEVICE_ID_INTEL_I960RM 0x0962
2509#define PCI_DEVICE_ID_INTEL_CENTERTON_ILB 0x0c60
2509#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062 2510#define PCI_DEVICE_ID_INTEL_8257X_SOL 0x1062
2510#define PCI_DEVICE_ID_INTEL_82573E_SOL 0x1085 2511#define PCI_DEVICE_ID_INTEL_82573E_SOL 0x1085
2511#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108F 2512#define PCI_DEVICE_ID_INTEL_82573L_SOL 0x108F
diff --git a/include/linux/res_counter.h b/include/linux/res_counter.h
index fb201896a8b0..7d7fbe2ef782 100644
--- a/include/linux/res_counter.h
+++ b/include/linux/res_counter.h
@@ -119,7 +119,7 @@ int __must_check res_counter_charge_locked(struct res_counter *counter,
119 unsigned long val, bool force); 119 unsigned long val, bool force);
120int __must_check res_counter_charge(struct res_counter *counter, 120int __must_check res_counter_charge(struct res_counter *counter,
121 unsigned long val, struct res_counter **limit_fail_at); 121 unsigned long val, struct res_counter **limit_fail_at);
122int __must_check res_counter_charge_nofail(struct res_counter *counter, 122int res_counter_charge_nofail(struct res_counter *counter,
123 unsigned long val, struct res_counter **limit_fail_at); 123 unsigned long val, struct res_counter **limit_fail_at);
124 124
125/* 125/*
@@ -135,6 +135,9 @@ int __must_check res_counter_charge_nofail(struct res_counter *counter,
135void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val); 135void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val);
136void res_counter_uncharge(struct res_counter *counter, unsigned long val); 136void res_counter_uncharge(struct res_counter *counter, unsigned long val);
137 137
138void res_counter_uncharge_until(struct res_counter *counter,
139 struct res_counter *top,
140 unsigned long val);
138/** 141/**
139 * res_counter_margin - calculate chargeable space of a counter 142 * res_counter_margin - calculate chargeable space of a counter
140 * @cnt: the counter 143 * @cnt: the counter
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index fd07c4542cee..3fce545df394 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -173,8 +173,6 @@ enum ttu_flags {
173}; 173};
174#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK) 174#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
175 175
176bool is_vma_temporary_stack(struct vm_area_struct *vma);
177
178int try_to_unmap(struct page *, enum ttu_flags flags); 176int try_to_unmap(struct page *, enum ttu_flags flags);
179int try_to_unmap_one(struct page *, struct vm_area_struct *, 177int try_to_unmap_one(struct page *, struct vm_area_struct *,
180 unsigned long address, enum ttu_flags flags); 178 unsigned long address, enum ttu_flags flags);
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index fcabfb4873c8..f071b3922c67 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -91,6 +91,9 @@ struct rtc_pll_info {
91#define RTC_PLL_GET _IOR('p', 0x11, struct rtc_pll_info) /* Get PLL correction */ 91#define RTC_PLL_GET _IOR('p', 0x11, struct rtc_pll_info) /* Get PLL correction */
92#define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */ 92#define RTC_PLL_SET _IOW('p', 0x12, struct rtc_pll_info) /* Set PLL correction */
93 93
94#define RTC_VL_READ _IOR('p', 0x13, int) /* Voltage low detector */
95#define RTC_VL_CLR _IO('p', 0x14) /* Clear voltage low information */
96
94/* interrupt flags */ 97/* interrupt flags */
95#define RTC_IRQF 0x80 /* Any of the following is active */ 98#define RTC_IRQF 0x80 /* Any of the following is active */
96#define RTC_PF 0x40 /* Periodic interrupt */ 99#define RTC_PF 0x40 /* Periodic interrupt */
diff --git a/include/linux/rtc/ds1307.h b/include/linux/rtc/ds1307.h
new file mode 100644
index 000000000000..291b1c490367
--- /dev/null
+++ b/include/linux/rtc/ds1307.h
@@ -0,0 +1,22 @@
1/*
2 * ds1307.h - platform_data for the ds1307 (and variants) rtc driver
3 * (C) Copyright 2012 by Wolfram Sang, Pengutronix e.K.
4 * same license as the driver
5 */
6
7#ifndef _LINUX_DS1307_H
8#define _LINUX_DS1307_H
9
10#include <linux/types.h>
11
12#define DS1307_TRICKLE_CHARGER_250_OHM 0x01
13#define DS1307_TRICKLE_CHARGER_2K_OHM 0x02
14#define DS1307_TRICKLE_CHARGER_4K_OHM 0x03
15#define DS1307_TRICKLE_CHARGER_NO_DIODE 0x04
16#define DS1307_TRICKLE_CHARGER_DIODE 0x08
17
18struct ds1307_platform_data {
19 u8 trickle_charger_setup;
20};
21
22#endif /* _LINUX_DS1307_H */
diff --git a/include/linux/spi/orion_spi.h b/include/linux/spi/orion_spi.h
deleted file mode 100644
index b4d9fa6f797c..000000000000
--- a/include/linux/spi/orion_spi.h
+++ /dev/null
@@ -1,17 +0,0 @@
1/*
2 * orion_spi.h
3 *
4 * This file is licensed under the terms of the GNU General Public
5 * License version 2. This program is licensed "as is" without any
6 * warranty of any kind, whether express or implied.
7 */
8
9#ifndef __LINUX_SPI_ORION_SPI_H
10#define __LINUX_SPI_ORION_SPI_H
11
12struct orion_spi_info {
13 u32 tclk; /* no <linux/clk.h> support yet */
14};
15
16
17#endif
diff --git a/include/linux/stmp_device.h b/include/linux/stmp_device.h
new file mode 100644
index 000000000000..6cf7ec9547cf
--- /dev/null
+++ b/include/linux/stmp_device.h
@@ -0,0 +1,20 @@
1/*
2 * basic functions for devices following the "stmp" style register layout
3 *
4 * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#ifndef __STMP_DEVICE_H__
13#define __STMP_DEVICE_H__
14
15#define STMP_OFFSET_REG_SET 0x4
16#define STMP_OFFSET_REG_CLR 0x8
17#define STMP_OFFSET_REG_TOG 0xc
18
19extern int stmp_reset_block(void __iomem *);
20#endif /* __STMP_DEVICE_H__ */
diff --git a/include/linux/swap.h b/include/linux/swap.h
index b1fd5c7925fe..b6661933e252 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -221,8 +221,8 @@ extern unsigned int nr_free_pagecache_pages(void);
221/* linux/mm/swap.c */ 221/* linux/mm/swap.c */
222extern void __lru_cache_add(struct page *, enum lru_list lru); 222extern void __lru_cache_add(struct page *, enum lru_list lru);
223extern void lru_cache_add_lru(struct page *, enum lru_list lru); 223extern void lru_cache_add_lru(struct page *, enum lru_list lru);
224extern void lru_add_page_tail(struct zone* zone, 224extern void lru_add_page_tail(struct page *page, struct page *page_tail,
225 struct page *page, struct page *page_tail); 225 struct lruvec *lruvec);
226extern void activate_page(struct page *); 226extern void activate_page(struct page *);
227extern void mark_page_accessed(struct page *); 227extern void mark_page_accessed(struct page *);
228extern void lru_add_drain(void); 228extern void lru_add_drain(void);
@@ -251,7 +251,7 @@ static inline void lru_cache_add_file(struct page *page)
251/* linux/mm/vmscan.c */ 251/* linux/mm/vmscan.c */
252extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order, 252extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
253 gfp_t gfp_mask, nodemask_t *mask); 253 gfp_t gfp_mask, nodemask_t *mask);
254extern int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file); 254extern int __isolate_lru_page(struct page *page, isolate_mode_t mode);
255extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem, 255extern unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem,
256 gfp_t gfp_mask, bool noswap); 256 gfp_t gfp_mask, bool noswap);
257extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem, 257extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
@@ -351,31 +351,14 @@ extern int swap_type_of(dev_t, sector_t, struct block_device **);
351extern unsigned int count_swap_pages(int, int); 351extern unsigned int count_swap_pages(int, int);
352extern sector_t map_swap_page(struct page *, struct block_device **); 352extern sector_t map_swap_page(struct page *, struct block_device **);
353extern sector_t swapdev_block(int, pgoff_t); 353extern sector_t swapdev_block(int, pgoff_t);
354extern int page_swapcount(struct page *);
354extern int reuse_swap_page(struct page *); 355extern int reuse_swap_page(struct page *);
355extern int try_to_free_swap(struct page *); 356extern int try_to_free_swap(struct page *);
356struct backing_dev_info; 357struct backing_dev_info;
357 358
358/* linux/mm/thrash.c */
359extern struct mm_struct *swap_token_mm;
360extern void grab_swap_token(struct mm_struct *);
361extern void __put_swap_token(struct mm_struct *);
362extern void disable_swap_token(struct mem_cgroup *memcg);
363
364static inline int has_swap_token(struct mm_struct *mm)
365{
366 return (mm == swap_token_mm);
367}
368
369static inline void put_swap_token(struct mm_struct *mm)
370{
371 if (has_swap_token(mm))
372 __put_swap_token(mm);
373}
374
375#ifdef CONFIG_CGROUP_MEM_RES_CTLR 359#ifdef CONFIG_CGROUP_MEM_RES_CTLR
376extern void 360extern void
377mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout); 361mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout);
378extern int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep);
379#else 362#else
380static inline void 363static inline void
381mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout) 364mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent, bool swapout)
@@ -462,6 +445,11 @@ static inline void delete_from_swap_cache(struct page *page)
462{ 445{
463} 446}
464 447
448static inline int page_swapcount(struct page *page)
449{
450 return 0;
451}
452
465#define reuse_swap_page(page) (page_mapcount(page) == 1) 453#define reuse_swap_page(page) (page_mapcount(page) == 1)
466 454
467static inline int try_to_free_swap(struct page *page) 455static inline int try_to_free_swap(struct page *page)
@@ -476,37 +464,11 @@ static inline swp_entry_t get_swap_page(void)
476 return entry; 464 return entry;
477} 465}
478 466
479/* linux/mm/thrash.c */
480static inline void put_swap_token(struct mm_struct *mm)
481{
482}
483
484static inline void grab_swap_token(struct mm_struct *mm)
485{
486}
487
488static inline int has_swap_token(struct mm_struct *mm)
489{
490 return 0;
491}
492
493static inline void disable_swap_token(struct mem_cgroup *memcg)
494{
495}
496
497static inline void 467static inline void
498mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent) 468mem_cgroup_uncharge_swapcache(struct page *page, swp_entry_t ent)
499{ 469{
500} 470}
501 471
502#ifdef CONFIG_CGROUP_MEM_RES_CTLR
503static inline int
504mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
505{
506 return 0;
507}
508#endif
509
510#endif /* CONFIG_SWAP */ 472#endif /* CONFIG_SWAP */
511#endif /* __KERNEL__*/ 473#endif /* __KERNEL__*/
512#endif /* _LINUX_SWAP_H */ 474#endif /* _LINUX_SWAP_H */
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index a2b84f598e2b..6d0a0fcd80e7 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -58,7 +58,6 @@ extern const char *wb_reason_name[];
58 * in a manner such that unspecified fields are set to zero. 58 * in a manner such that unspecified fields are set to zero.
59 */ 59 */
60struct writeback_control { 60struct writeback_control {
61 enum writeback_sync_modes sync_mode;
62 long nr_to_write; /* Write this many pages, and decrement 61 long nr_to_write; /* Write this many pages, and decrement
63 this for each page written */ 62 this for each page written */
64 long pages_skipped; /* Pages which were not written */ 63 long pages_skipped; /* Pages which were not written */
@@ -71,6 +70,8 @@ struct writeback_control {
71 loff_t range_start; 70 loff_t range_start;
72 loff_t range_end; 71 loff_t range_end;
73 72
73 enum writeback_sync_modes sync_mode;
74
74 unsigned for_kupdate:1; /* A kupdate writeback */ 75 unsigned for_kupdate:1; /* A kupdate writeback */
75 unsigned for_background:1; /* A background writeback */ 76 unsigned for_background:1; /* A background writeback */
76 unsigned tagged_writepages:1; /* tag-and-write to avoid livelock */ 77 unsigned tagged_writepages:1; /* tag-and-write to avoid livelock */
@@ -94,6 +95,7 @@ long writeback_inodes_wb(struct bdi_writeback *wb, long nr_pages,
94 enum wb_reason reason); 95 enum wb_reason reason);
95long wb_do_writeback(struct bdi_writeback *wb, int force_wait); 96long wb_do_writeback(struct bdi_writeback *wb, int force_wait);
96void wakeup_flusher_threads(long nr_pages, enum wb_reason reason); 97void wakeup_flusher_threads(long nr_pages, enum wb_reason reason);
98void inode_wait_for_writeback(struct inode *inode);
97 99
98/* writeback.h requires fs.h; it, too, is not included from here. */ 100/* writeback.h requires fs.h; it, too, is not included from here. */
99static inline void wait_on_inode(struct inode *inode) 101static inline void wait_on_inode(struct inode *inode)
@@ -101,12 +103,6 @@ static inline void wait_on_inode(struct inode *inode)
101 might_sleep(); 103 might_sleep();
102 wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE); 104 wait_on_bit(&inode->i_state, __I_NEW, inode_wait, TASK_UNINTERRUPTIBLE);
103} 105}
104static inline void inode_sync_wait(struct inode *inode)
105{
106 might_sleep();
107 wait_on_bit(&inode->i_state, __I_SYNC, inode_wait,
108 TASK_UNINTERRUPTIBLE);
109}
110 106
111 107
112/* 108/*
diff --git a/include/net/sock.h b/include/net/sock.h
index d89f0582b6b6..4a4521699563 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -46,6 +46,7 @@
46#include <linux/list_nulls.h> 46#include <linux/list_nulls.h>
47#include <linux/timer.h> 47#include <linux/timer.h>
48#include <linux/cache.h> 48#include <linux/cache.h>
49#include <linux/bitops.h>
49#include <linux/lockdep.h> 50#include <linux/lockdep.h>
50#include <linux/netdevice.h> 51#include <linux/netdevice.h>
51#include <linux/skbuff.h> /* struct sk_buff */ 52#include <linux/skbuff.h> /* struct sk_buff */
@@ -921,12 +922,23 @@ struct proto {
921#endif 922#endif
922}; 923};
923 924
925/*
926 * Bits in struct cg_proto.flags
927 */
928enum cg_proto_flags {
929 /* Currently active and new sockets should be assigned to cgroups */
930 MEMCG_SOCK_ACTIVE,
931 /* It was ever activated; we must disarm static keys on destruction */
932 MEMCG_SOCK_ACTIVATED,
933};
934
924struct cg_proto { 935struct cg_proto {
925 void (*enter_memory_pressure)(struct sock *sk); 936 void (*enter_memory_pressure)(struct sock *sk);
926 struct res_counter *memory_allocated; /* Current allocated memory. */ 937 struct res_counter *memory_allocated; /* Current allocated memory. */
927 struct percpu_counter *sockets_allocated; /* Current number of sockets. */ 938 struct percpu_counter *sockets_allocated; /* Current number of sockets. */
928 int *memory_pressure; 939 int *memory_pressure;
929 long *sysctl_mem; 940 long *sysctl_mem;
941 unsigned long flags;
930 /* 942 /*
931 * memcg field is used to find which memcg we belong directly 943 * memcg field is used to find which memcg we belong directly
932 * Each memcg struct can hold more than one cg_proto, so container_of 944 * Each memcg struct can hold more than one cg_proto, so container_of
@@ -942,6 +954,16 @@ struct cg_proto {
942extern int proto_register(struct proto *prot, int alloc_slab); 954extern int proto_register(struct proto *prot, int alloc_slab);
943extern void proto_unregister(struct proto *prot); 955extern void proto_unregister(struct proto *prot);
944 956
957static inline bool memcg_proto_active(struct cg_proto *cg_proto)
958{
959 return test_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
960}
961
962static inline bool memcg_proto_activated(struct cg_proto *cg_proto)
963{
964 return test_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags);
965}
966
945#ifdef SOCK_REFCNT_DEBUG 967#ifdef SOCK_REFCNT_DEBUG
946static inline void sk_refcnt_debug_inc(struct sock *sk) 968static inline void sk_refcnt_debug_inc(struct sock *sk)
947{ 969{
diff --git a/include/trace/events/jbd.h b/include/trace/events/jbd.h
index aff64d82d713..da6f2591c25e 100644
--- a/include/trace/events/jbd.h
+++ b/include/trace/events/jbd.h
@@ -36,19 +36,17 @@ DECLARE_EVENT_CLASS(jbd_commit,
36 36
37 TP_STRUCT__entry( 37 TP_STRUCT__entry(
38 __field( dev_t, dev ) 38 __field( dev_t, dev )
39 __field( char, sync_commit )
40 __field( int, transaction ) 39 __field( int, transaction )
41 ), 40 ),
42 41
43 TP_fast_assign( 42 TP_fast_assign(
44 __entry->dev = journal->j_fs_dev->bd_dev; 43 __entry->dev = journal->j_fs_dev->bd_dev;
45 __entry->sync_commit = commit_transaction->t_synchronous_commit;
46 __entry->transaction = commit_transaction->t_tid; 44 __entry->transaction = commit_transaction->t_tid;
47 ), 45 ),
48 46
49 TP_printk("dev %d,%d transaction %d sync %d", 47 TP_printk("dev %d,%d transaction %d",
50 MAJOR(__entry->dev), MINOR(__entry->dev), 48 MAJOR(__entry->dev), MINOR(__entry->dev),
51 __entry->transaction, __entry->sync_commit) 49 __entry->transaction)
52); 50);
53 51
54DEFINE_EVENT(jbd_commit, jbd_start_commit, 52DEFINE_EVENT(jbd_commit, jbd_start_commit,
@@ -87,19 +85,17 @@ TRACE_EVENT(jbd_drop_transaction,
87 85
88 TP_STRUCT__entry( 86 TP_STRUCT__entry(
89 __field( dev_t, dev ) 87 __field( dev_t, dev )
90 __field( char, sync_commit )
91 __field( int, transaction ) 88 __field( int, transaction )
92 ), 89 ),
93 90
94 TP_fast_assign( 91 TP_fast_assign(
95 __entry->dev = journal->j_fs_dev->bd_dev; 92 __entry->dev = journal->j_fs_dev->bd_dev;
96 __entry->sync_commit = commit_transaction->t_synchronous_commit;
97 __entry->transaction = commit_transaction->t_tid; 93 __entry->transaction = commit_transaction->t_tid;
98 ), 94 ),
99 95
100 TP_printk("dev %d,%d transaction %d sync %d", 96 TP_printk("dev %d,%d transaction %d",
101 MAJOR(__entry->dev), MINOR(__entry->dev), 97 MAJOR(__entry->dev), MINOR(__entry->dev),
102 __entry->transaction, __entry->sync_commit) 98 __entry->transaction)
103); 99);
104 100
105TRACE_EVENT(jbd_end_commit, 101TRACE_EVENT(jbd_end_commit,
@@ -109,21 +105,19 @@ TRACE_EVENT(jbd_end_commit,
109 105
110 TP_STRUCT__entry( 106 TP_STRUCT__entry(
111 __field( dev_t, dev ) 107 __field( dev_t, dev )
112 __field( char, sync_commit )
113 __field( int, transaction ) 108 __field( int, transaction )
114 __field( int, head ) 109 __field( int, head )
115 ), 110 ),
116 111
117 TP_fast_assign( 112 TP_fast_assign(
118 __entry->dev = journal->j_fs_dev->bd_dev; 113 __entry->dev = journal->j_fs_dev->bd_dev;
119 __entry->sync_commit = commit_transaction->t_synchronous_commit;
120 __entry->transaction = commit_transaction->t_tid; 114 __entry->transaction = commit_transaction->t_tid;
121 __entry->head = journal->j_tail_sequence; 115 __entry->head = journal->j_tail_sequence;
122 ), 116 ),
123 117
124 TP_printk("dev %d,%d transaction %d sync %d head %d", 118 TP_printk("dev %d,%d transaction %d head %d",
125 MAJOR(__entry->dev), MINOR(__entry->dev), 119 MAJOR(__entry->dev), MINOR(__entry->dev),
126 __entry->transaction, __entry->sync_commit, __entry->head) 120 __entry->transaction, __entry->head)
127); 121);
128 122
129TRACE_EVENT(jbd_do_submit_data, 123TRACE_EVENT(jbd_do_submit_data,
@@ -133,19 +127,17 @@ TRACE_EVENT(jbd_do_submit_data,
133 127
134 TP_STRUCT__entry( 128 TP_STRUCT__entry(
135 __field( dev_t, dev ) 129 __field( dev_t, dev )
136 __field( char, sync_commit )
137 __field( int, transaction ) 130 __field( int, transaction )
138 ), 131 ),
139 132
140 TP_fast_assign( 133 TP_fast_assign(
141 __entry->dev = journal->j_fs_dev->bd_dev; 134 __entry->dev = journal->j_fs_dev->bd_dev;
142 __entry->sync_commit = commit_transaction->t_synchronous_commit;
143 __entry->transaction = commit_transaction->t_tid; 135 __entry->transaction = commit_transaction->t_tid;
144 ), 136 ),
145 137
146 TP_printk("dev %d,%d transaction %d sync %d", 138 TP_printk("dev %d,%d transaction %d",
147 MAJOR(__entry->dev), MINOR(__entry->dev), 139 MAJOR(__entry->dev), MINOR(__entry->dev),
148 __entry->transaction, __entry->sync_commit) 140 __entry->transaction)
149); 141);
150 142
151TRACE_EVENT(jbd_cleanup_journal_tail, 143TRACE_EVENT(jbd_cleanup_journal_tail,
@@ -177,24 +169,23 @@ TRACE_EVENT(jbd_cleanup_journal_tail,
177 __entry->block_nr, __entry->freed) 169 __entry->block_nr, __entry->freed)
178); 170);
179 171
180TRACE_EVENT(jbd_update_superblock_end, 172TRACE_EVENT(journal_write_superblock,
181 TP_PROTO(journal_t *journal, int wait), 173 TP_PROTO(journal_t *journal, int write_op),
182 174
183 TP_ARGS(journal, wait), 175 TP_ARGS(journal, write_op),
184 176
185 TP_STRUCT__entry( 177 TP_STRUCT__entry(
186 __field( dev_t, dev ) 178 __field( dev_t, dev )
187 __field( int, wait ) 179 __field( int, write_op )
188 ), 180 ),
189 181
190 TP_fast_assign( 182 TP_fast_assign(
191 __entry->dev = journal->j_fs_dev->bd_dev; 183 __entry->dev = journal->j_fs_dev->bd_dev;
192 __entry->wait = wait; 184 __entry->write_op = write_op;
193 ), 185 ),
194 186
195 TP_printk("dev %d,%d wait %d", 187 TP_printk("dev %d,%d write_op %x", MAJOR(__entry->dev),
196 MAJOR(__entry->dev), MINOR(__entry->dev), 188 MINOR(__entry->dev), __entry->write_op)
197 __entry->wait)
198); 189);
199 190
200#endif /* _TRACE_JBD_H */ 191#endif /* _TRACE_JBD_H */
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index f64560e204bc..bab3b87e4064 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -13,7 +13,7 @@
13#define RECLAIM_WB_ANON 0x0001u 13#define RECLAIM_WB_ANON 0x0001u
14#define RECLAIM_WB_FILE 0x0002u 14#define RECLAIM_WB_FILE 0x0002u
15#define RECLAIM_WB_MIXED 0x0010u 15#define RECLAIM_WB_MIXED 0x0010u
16#define RECLAIM_WB_SYNC 0x0004u 16#define RECLAIM_WB_SYNC 0x0004u /* Unused, all reclaim async */
17#define RECLAIM_WB_ASYNC 0x0008u 17#define RECLAIM_WB_ASYNC 0x0008u
18 18
19#define show_reclaim_flags(flags) \ 19#define show_reclaim_flags(flags) \
@@ -25,15 +25,15 @@
25 {RECLAIM_WB_ASYNC, "RECLAIM_WB_ASYNC"} \ 25 {RECLAIM_WB_ASYNC, "RECLAIM_WB_ASYNC"} \
26 ) : "RECLAIM_WB_NONE" 26 ) : "RECLAIM_WB_NONE"
27 27
28#define trace_reclaim_flags(page, sync) ( \ 28#define trace_reclaim_flags(page) ( \
29 (page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \ 29 (page_is_file_cache(page) ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
30 (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \ 30 (RECLAIM_WB_ASYNC) \
31 ) 31 )
32 32
33#define trace_shrink_flags(file, sync) ( \ 33#define trace_shrink_flags(file) \
34 (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_MIXED : \ 34 ( \
35 (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON)) | \ 35 (file ? RECLAIM_WB_FILE : RECLAIM_WB_ANON) | \
36 (sync & RECLAIM_MODE_SYNC ? RECLAIM_WB_SYNC : RECLAIM_WB_ASYNC) \ 36 (RECLAIM_WB_ASYNC) \
37 ) 37 )
38 38
39TRACE_EVENT(mm_vmscan_kswapd_sleep, 39TRACE_EVENT(mm_vmscan_kswapd_sleep,
@@ -263,22 +263,16 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
263 unsigned long nr_requested, 263 unsigned long nr_requested,
264 unsigned long nr_scanned, 264 unsigned long nr_scanned,
265 unsigned long nr_taken, 265 unsigned long nr_taken,
266 unsigned long nr_lumpy_taken,
267 unsigned long nr_lumpy_dirty,
268 unsigned long nr_lumpy_failed,
269 isolate_mode_t isolate_mode, 266 isolate_mode_t isolate_mode,
270 int file), 267 int file),
271 268
272 TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode, file), 269 TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file),
273 270
274 TP_STRUCT__entry( 271 TP_STRUCT__entry(
275 __field(int, order) 272 __field(int, order)
276 __field(unsigned long, nr_requested) 273 __field(unsigned long, nr_requested)
277 __field(unsigned long, nr_scanned) 274 __field(unsigned long, nr_scanned)
278 __field(unsigned long, nr_taken) 275 __field(unsigned long, nr_taken)
279 __field(unsigned long, nr_lumpy_taken)
280 __field(unsigned long, nr_lumpy_dirty)
281 __field(unsigned long, nr_lumpy_failed)
282 __field(isolate_mode_t, isolate_mode) 276 __field(isolate_mode_t, isolate_mode)
283 __field(int, file) 277 __field(int, file)
284 ), 278 ),
@@ -288,22 +282,16 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
288 __entry->nr_requested = nr_requested; 282 __entry->nr_requested = nr_requested;
289 __entry->nr_scanned = nr_scanned; 283 __entry->nr_scanned = nr_scanned;
290 __entry->nr_taken = nr_taken; 284 __entry->nr_taken = nr_taken;
291 __entry->nr_lumpy_taken = nr_lumpy_taken;
292 __entry->nr_lumpy_dirty = nr_lumpy_dirty;
293 __entry->nr_lumpy_failed = nr_lumpy_failed;
294 __entry->isolate_mode = isolate_mode; 285 __entry->isolate_mode = isolate_mode;
295 __entry->file = file; 286 __entry->file = file;
296 ), 287 ),
297 288
298 TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu contig_taken=%lu contig_dirty=%lu contig_failed=%lu file=%d", 289 TP_printk("isolate_mode=%d order=%d nr_requested=%lu nr_scanned=%lu nr_taken=%lu file=%d",
299 __entry->isolate_mode, 290 __entry->isolate_mode,
300 __entry->order, 291 __entry->order,
301 __entry->nr_requested, 292 __entry->nr_requested,
302 __entry->nr_scanned, 293 __entry->nr_scanned,
303 __entry->nr_taken, 294 __entry->nr_taken,
304 __entry->nr_lumpy_taken,
305 __entry->nr_lumpy_dirty,
306 __entry->nr_lumpy_failed,
307 __entry->file) 295 __entry->file)
308); 296);
309 297
@@ -313,13 +301,10 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_lru_isolate,
313 unsigned long nr_requested, 301 unsigned long nr_requested,
314 unsigned long nr_scanned, 302 unsigned long nr_scanned,
315 unsigned long nr_taken, 303 unsigned long nr_taken,
316 unsigned long nr_lumpy_taken,
317 unsigned long nr_lumpy_dirty,
318 unsigned long nr_lumpy_failed,
319 isolate_mode_t isolate_mode, 304 isolate_mode_t isolate_mode,
320 int file), 305 int file),
321 306
322 TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode, file) 307 TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file)
323 308
324); 309);
325 310
@@ -329,13 +314,10 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_memcg_isolate,
329 unsigned long nr_requested, 314 unsigned long nr_requested,
330 unsigned long nr_scanned, 315 unsigned long nr_scanned,
331 unsigned long nr_taken, 316 unsigned long nr_taken,
332 unsigned long nr_lumpy_taken,
333 unsigned long nr_lumpy_dirty,
334 unsigned long nr_lumpy_failed,
335 isolate_mode_t isolate_mode, 317 isolate_mode_t isolate_mode,
336 int file), 318 int file),
337 319
338 TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode, file) 320 TP_ARGS(order, nr_requested, nr_scanned, nr_taken, isolate_mode, file)
339 321
340); 322);
341 323
@@ -395,88 +377,6 @@ TRACE_EVENT(mm_vmscan_lru_shrink_inactive,
395 show_reclaim_flags(__entry->reclaim_flags)) 377 show_reclaim_flags(__entry->reclaim_flags))
396); 378);
397 379
398TRACE_EVENT(replace_swap_token,
399 TP_PROTO(struct mm_struct *old_mm,
400 struct mm_struct *new_mm),
401
402 TP_ARGS(old_mm, new_mm),
403
404 TP_STRUCT__entry(
405 __field(struct mm_struct*, old_mm)
406 __field(unsigned int, old_prio)
407 __field(struct mm_struct*, new_mm)
408 __field(unsigned int, new_prio)
409 ),
410
411 TP_fast_assign(
412 __entry->old_mm = old_mm;
413 __entry->old_prio = old_mm ? old_mm->token_priority : 0;
414 __entry->new_mm = new_mm;
415 __entry->new_prio = new_mm->token_priority;
416 ),
417
418 TP_printk("old_token_mm=%p old_prio=%u new_token_mm=%p new_prio=%u",
419 __entry->old_mm, __entry->old_prio,
420 __entry->new_mm, __entry->new_prio)
421);
422
423DECLARE_EVENT_CLASS(put_swap_token_template,
424 TP_PROTO(struct mm_struct *swap_token_mm),
425
426 TP_ARGS(swap_token_mm),
427
428 TP_STRUCT__entry(
429 __field(struct mm_struct*, swap_token_mm)
430 ),
431
432 TP_fast_assign(
433 __entry->swap_token_mm = swap_token_mm;
434 ),
435
436 TP_printk("token_mm=%p", __entry->swap_token_mm)
437);
438
439DEFINE_EVENT(put_swap_token_template, put_swap_token,
440 TP_PROTO(struct mm_struct *swap_token_mm),
441 TP_ARGS(swap_token_mm)
442);
443
444DEFINE_EVENT_CONDITION(put_swap_token_template, disable_swap_token,
445 TP_PROTO(struct mm_struct *swap_token_mm),
446 TP_ARGS(swap_token_mm),
447 TP_CONDITION(swap_token_mm != NULL)
448);
449
450TRACE_EVENT_CONDITION(update_swap_token_priority,
451 TP_PROTO(struct mm_struct *mm,
452 unsigned int old_prio,
453 struct mm_struct *swap_token_mm),
454
455 TP_ARGS(mm, old_prio, swap_token_mm),
456
457 TP_CONDITION(mm->token_priority != old_prio),
458
459 TP_STRUCT__entry(
460 __field(struct mm_struct*, mm)
461 __field(unsigned int, old_prio)
462 __field(unsigned int, new_prio)
463 __field(struct mm_struct*, swap_token_mm)
464 __field(unsigned int, swap_token_prio)
465 ),
466
467 TP_fast_assign(
468 __entry->mm = mm;
469 __entry->old_prio = old_prio;
470 __entry->new_prio = mm->token_priority;
471 __entry->swap_token_mm = swap_token_mm;
472 __entry->swap_token_prio = swap_token_mm ? swap_token_mm->token_priority : 0;
473 ),
474
475 TP_printk("mm=%p old_prio=%u new_prio=%u swap_token_mm=%p token_prio=%u",
476 __entry->mm, __entry->old_prio, __entry->new_prio,
477 __entry->swap_token_mm, __entry->swap_token_prio)
478);
479
480#endif /* _TRACE_VMSCAN_H */ 380#endif /* _TRACE_VMSCAN_H */
481 381
482/* This part must be outside protection */ 382/* This part must be outside protection */
diff --git a/include/trace/events/writeback.h b/include/trace/events/writeback.h
index 7b81887b023f..b453d92c2253 100644
--- a/include/trace/events/writeback.h
+++ b/include/trace/events/writeback.h
@@ -372,6 +372,35 @@ TRACE_EVENT(balance_dirty_pages,
372 ) 372 )
373); 373);
374 374
375TRACE_EVENT(writeback_sb_inodes_requeue,
376
377 TP_PROTO(struct inode *inode),
378 TP_ARGS(inode),
379
380 TP_STRUCT__entry(
381 __array(char, name, 32)
382 __field(unsigned long, ino)
383 __field(unsigned long, state)
384 __field(unsigned long, dirtied_when)
385 ),
386
387 TP_fast_assign(
388 strncpy(__entry->name,
389 dev_name(inode_to_bdi(inode)->dev), 32);
390 __entry->ino = inode->i_ino;
391 __entry->state = inode->i_state;
392 __entry->dirtied_when = inode->dirtied_when;
393 ),
394
395 TP_printk("bdi %s: ino=%lu state=%s dirtied_when=%lu age=%lu",
396 __entry->name,
397 __entry->ino,
398 show_inode_state(__entry->state),
399 __entry->dirtied_when,
400 (jiffies - __entry->dirtied_when) / HZ
401 )
402);
403
375DECLARE_EVENT_CLASS(writeback_congest_waited_template, 404DECLARE_EVENT_CLASS(writeback_congest_waited_template,
376 405
377 TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed), 406 TP_PROTO(unsigned int usec_timeout, unsigned int usec_delayed),
@@ -450,13 +479,6 @@ DECLARE_EVENT_CLASS(writeback_single_inode_template,
450 ) 479 )
451); 480);
452 481
453DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode_requeue,
454 TP_PROTO(struct inode *inode,
455 struct writeback_control *wbc,
456 unsigned long nr_to_write),
457 TP_ARGS(inode, wbc, nr_to_write)
458);
459
460DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode, 482DEFINE_EVENT(writeback_single_inode_template, writeback_single_inode,
461 TP_PROTO(struct inode *inode, 483 TP_PROTO(struct inode *inode,
462 struct writeback_control *wbc, 484 struct writeback_control *wbc,
diff --git a/include/xen/acpi.h b/include/xen/acpi.h
new file mode 100644
index 000000000000..48a9c0171b65
--- /dev/null
+++ b/include/xen/acpi.h
@@ -0,0 +1,58 @@
1/******************************************************************************
2 * acpi.h
3 * acpi file for domain 0 kernel
4 *
5 * Copyright (c) 2011 Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
6 * Copyright (c) 2011 Yu Ke <ke.yu@intel.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
13 *
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
20 *
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
23 *
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
30 * IN THE SOFTWARE.
31 */
32
33#ifndef _XEN_ACPI_H
34#define _XEN_ACPI_H
35
36#include <linux/types.h>
37
38#ifdef CONFIG_XEN_DOM0
39#include <asm/xen/hypervisor.h>
40#include <xen/xen.h>
41#include <linux/acpi.h>
42
43int xen_acpi_notify_hypervisor_state(u8 sleep_state,
44 u32 pm1a_cnt, u32 pm1b_cnd);
45
46static inline void xen_acpi_sleep_register(void)
47{
48 if (xen_initial_domain())
49 acpi_os_set_prepare_sleep(
50 &xen_acpi_notify_hypervisor_state);
51}
52#else
53static inline void xen_acpi_sleep_register(void)
54{
55}
56#endif
57
58#endif /* _XEN_ACPI_H */
diff --git a/include/xen/events.h b/include/xen/events.h
index 0f773708e02c..04399b28e821 100644
--- a/include/xen/events.h
+++ b/include/xen/events.h
@@ -103,6 +103,9 @@ int xen_irq_from_pirq(unsigned pirq);
103/* Return the pirq allocated to the irq. */ 103/* Return the pirq allocated to the irq. */
104int xen_pirq_from_irq(unsigned irq); 104int xen_pirq_from_irq(unsigned irq);
105 105
106/* Return the irq allocated to the gsi */
107int xen_irq_from_gsi(unsigned gsi);
108
106/* Determine whether to ignore this IRQ if it is passed to a guest. */ 109/* Determine whether to ignore this IRQ if it is passed to a guest. */
107int xen_test_irq_shared(int irq); 110int xen_test_irq_shared(int irq);
108 111
diff --git a/include/xen/grant_table.h b/include/xen/grant_table.h
index 15f8a00ff003..11e27c3af3cb 100644
--- a/include/xen/grant_table.h
+++ b/include/xen/grant_table.h
@@ -46,6 +46,8 @@
46 46
47#include <xen/features.h> 47#include <xen/features.h>
48 48
49#define GNTTAB_RESERVED_XENSTORE 1
50
49/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */ 51/* NR_GRANT_FRAMES must be less than or equal to that configured in Xen */
50#define NR_GRANT_FRAMES 4 52#define NR_GRANT_FRAMES 4
51 53
diff --git a/include/xen/xenbus_dev.h b/include/xen/xenbus_dev.h
index ac5f0fe47ed9..bbee8c6a349d 100644
--- a/include/xen/xenbus_dev.h
+++ b/include/xen/xenbus_dev.h
@@ -38,4 +38,7 @@
38#define IOCTL_XENBUS_BACKEND_EVTCHN \ 38#define IOCTL_XENBUS_BACKEND_EVTCHN \
39 _IOC(_IOC_NONE, 'B', 0, 0) 39 _IOC(_IOC_NONE, 'B', 0, 0)
40 40
41#define IOCTL_XENBUS_BACKEND_SETUP \
42 _IOC(_IOC_NONE, 'B', 1, 0)
43
41#endif /* __LINUX_XEN_XENBUS_DEV_H__ */ 44#endif /* __LINUX_XEN_XENBUS_DEV_H__ */
diff --git a/ipc/mqueue.c b/ipc/mqueue.c
index b6a0d46fbad7..a2757d4ab773 100644
--- a/ipc/mqueue.c
+++ b/ipc/mqueue.c
@@ -251,7 +251,7 @@ static void mqueue_evict_inode(struct inode *inode)
251 int i; 251 int i;
252 struct ipc_namespace *ipc_ns; 252 struct ipc_namespace *ipc_ns;
253 253
254 end_writeback(inode); 254 clear_inode(inode);
255 255
256 if (S_ISDIR(inode->i_mode)) 256 if (S_ISDIR(inode->i_mode))
257 return; 257 return;
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index a0c6af34d500..0f3527d6184a 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -5132,7 +5132,7 @@ EXPORT_SYMBOL_GPL(css_depth);
5132 * @root: the css supporsed to be an ancestor of the child. 5132 * @root: the css supporsed to be an ancestor of the child.
5133 * 5133 *
5134 * Returns true if "root" is an ancestor of "child" in its hierarchy. Because 5134 * Returns true if "root" is an ancestor of "child" in its hierarchy. Because
5135 * this function reads css->id, this use rcu_dereference() and rcu_read_lock(). 5135 * this function reads css->id, the caller must hold rcu_read_lock().
5136 * But, considering usual usage, the csses should be valid objects after test. 5136 * But, considering usual usage, the csses should be valid objects after test.
5137 * Assuming that the caller will do some action to the child if this returns 5137 * Assuming that the caller will do some action to the child if this returns
5138 * returns true, the caller must take "child";s reference count. 5138 * returns true, the caller must take "child";s reference count.
@@ -5144,18 +5144,18 @@ bool css_is_ancestor(struct cgroup_subsys_state *child,
5144{ 5144{
5145 struct css_id *child_id; 5145 struct css_id *child_id;
5146 struct css_id *root_id; 5146 struct css_id *root_id;
5147 bool ret = true;
5148 5147
5149 rcu_read_lock();
5150 child_id = rcu_dereference(child->id); 5148 child_id = rcu_dereference(child->id);
5149 if (!child_id)
5150 return false;
5151 root_id = rcu_dereference(root->id); 5151 root_id = rcu_dereference(root->id);
5152 if (!child_id 5152 if (!root_id)
5153 || !root_id 5153 return false;
5154 || (child_id->depth < root_id->depth) 5154 if (child_id->depth < root_id->depth)
5155 || (child_id->stack[root_id->depth] != root_id->id)) 5155 return false;
5156 ret = false; 5156 if (child_id->stack[root_id->depth] != root_id->id)
5157 rcu_read_unlock(); 5157 return false;
5158 return ret; 5158 return true;
5159} 5159}
5160 5160
5161void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css) 5161void free_css_id(struct cgroup_subsys *ss, struct cgroup_subsys_state *css)
diff --git a/kernel/fork.c b/kernel/fork.c
index 47b4e4f379f9..017fb23d5983 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -386,7 +386,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
386 } 386 }
387 charge = 0; 387 charge = 0;
388 if (mpnt->vm_flags & VM_ACCOUNT) { 388 if (mpnt->vm_flags & VM_ACCOUNT) {
389 unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT; 389 unsigned long len;
390 len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
390 if (security_vm_enough_memory_mm(oldmm, len)) /* sic */ 391 if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
391 goto fail_nomem; 392 goto fail_nomem;
392 charge = len; 393 charge = len;
@@ -614,7 +615,6 @@ void mmput(struct mm_struct *mm)
614 list_del(&mm->mmlist); 615 list_del(&mm->mmlist);
615 spin_unlock(&mmlist_lock); 616 spin_unlock(&mmlist_lock);
616 } 617 }
617 put_swap_token(mm);
618 if (mm->binfmt) 618 if (mm->binfmt)
619 module_put(mm->binfmt->module); 619 module_put(mm->binfmt->module);
620 mmdrop(mm); 620 mmdrop(mm);
@@ -831,10 +831,6 @@ struct mm_struct *dup_mm(struct task_struct *tsk)
831 memcpy(mm, oldmm, sizeof(*mm)); 831 memcpy(mm, oldmm, sizeof(*mm));
832 mm_init_cpumask(mm); 832 mm_init_cpumask(mm);
833 833
834 /* Initializing for Swap token stuff */
835 mm->token_priority = 0;
836 mm->last_interval = 0;
837
838#ifdef CONFIG_TRANSPARENT_HUGEPAGE 834#ifdef CONFIG_TRANSPARENT_HUGEPAGE
839 mm->pmd_huge_pte = NULL; 835 mm->pmd_huge_pte = NULL;
840#endif 836#endif
@@ -913,10 +909,6 @@ static int copy_mm(unsigned long clone_flags, struct task_struct *tsk)
913 goto fail_nomem; 909 goto fail_nomem;
914 910
915good_mm: 911good_mm:
916 /* Initializing for Swap token stuff */
917 mm->token_priority = 0;
918 mm->last_interval = 0;
919
920 tsk->mm = mm; 912 tsk->mm = mm;
921 tsk->active_mm = mm; 913 tsk->active_mm = mm;
922 return 0; 914 return 0;
diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
index 079f1d39a8b8..2169feeba529 100644
--- a/kernel/kallsyms.c
+++ b/kernel/kallsyms.c
@@ -343,7 +343,7 @@ int lookup_symbol_attrs(unsigned long addr, unsigned long *size,
343 343
344/* Look up a kernel symbol and return it in a text buffer. */ 344/* Look up a kernel symbol and return it in a text buffer. */
345static int __sprint_symbol(char *buffer, unsigned long address, 345static int __sprint_symbol(char *buffer, unsigned long address,
346 int symbol_offset) 346 int symbol_offset, int add_offset)
347{ 347{
348 char *modname; 348 char *modname;
349 const char *name; 349 const char *name;
@@ -358,13 +358,13 @@ static int __sprint_symbol(char *buffer, unsigned long address,
358 if (name != buffer) 358 if (name != buffer)
359 strcpy(buffer, name); 359 strcpy(buffer, name);
360 len = strlen(buffer); 360 len = strlen(buffer);
361 buffer += len;
362 offset -= symbol_offset; 361 offset -= symbol_offset;
363 362
363 if (add_offset)
364 len += sprintf(buffer + len, "+%#lx/%#lx", offset, size);
365
364 if (modname) 366 if (modname)
365 len += sprintf(buffer, "+%#lx/%#lx [%s]", offset, size, modname); 367 len += sprintf(buffer + len, " [%s]", modname);
366 else
367 len += sprintf(buffer, "+%#lx/%#lx", offset, size);
368 368
369 return len; 369 return len;
370} 370}
@@ -382,12 +382,28 @@ static int __sprint_symbol(char *buffer, unsigned long address,
382 */ 382 */
383int sprint_symbol(char *buffer, unsigned long address) 383int sprint_symbol(char *buffer, unsigned long address)
384{ 384{
385 return __sprint_symbol(buffer, address, 0); 385 return __sprint_symbol(buffer, address, 0, 1);
386} 386}
387
388EXPORT_SYMBOL_GPL(sprint_symbol); 387EXPORT_SYMBOL_GPL(sprint_symbol);
389 388
390/** 389/**
390 * sprint_symbol_no_offset - Look up a kernel symbol and return it in a text buffer
391 * @buffer: buffer to be stored
392 * @address: address to lookup
393 *
394 * This function looks up a kernel symbol with @address and stores its name
395 * and module name to @buffer if possible. If no symbol was found, just saves
396 * its @address as is.
397 *
398 * This function returns the number of bytes stored in @buffer.
399 */
400int sprint_symbol_no_offset(char *buffer, unsigned long address)
401{
402 return __sprint_symbol(buffer, address, 0, 0);
403}
404EXPORT_SYMBOL_GPL(sprint_symbol_no_offset);
405
406/**
391 * sprint_backtrace - Look up a backtrace symbol and return it in a text buffer 407 * sprint_backtrace - Look up a backtrace symbol and return it in a text buffer
392 * @buffer: buffer to be stored 408 * @buffer: buffer to be stored
393 * @address: address to lookup 409 * @address: address to lookup
@@ -403,7 +419,7 @@ EXPORT_SYMBOL_GPL(sprint_symbol);
403 */ 419 */
404int sprint_backtrace(char *buffer, unsigned long address) 420int sprint_backtrace(char *buffer, unsigned long address)
405{ 421{
406 return __sprint_symbol(buffer, address, -1); 422 return __sprint_symbol(buffer, address, -1, 1);
407} 423}
408 424
409/* Look up a kernel symbol and print it to the kernel messages. */ 425/* Look up a kernel symbol and print it to the kernel messages. */
diff --git a/kernel/res_counter.c b/kernel/res_counter.c
index bebe2b170d49..ad581aa2369a 100644
--- a/kernel/res_counter.c
+++ b/kernel/res_counter.c
@@ -94,13 +94,15 @@ void res_counter_uncharge_locked(struct res_counter *counter, unsigned long val)
94 counter->usage -= val; 94 counter->usage -= val;
95} 95}
96 96
97void res_counter_uncharge(struct res_counter *counter, unsigned long val) 97void res_counter_uncharge_until(struct res_counter *counter,
98 struct res_counter *top,
99 unsigned long val)
98{ 100{
99 unsigned long flags; 101 unsigned long flags;
100 struct res_counter *c; 102 struct res_counter *c;
101 103
102 local_irq_save(flags); 104 local_irq_save(flags);
103 for (c = counter; c != NULL; c = c->parent) { 105 for (c = counter; c != top; c = c->parent) {
104 spin_lock(&c->lock); 106 spin_lock(&c->lock);
105 res_counter_uncharge_locked(c, val); 107 res_counter_uncharge_locked(c, val);
106 spin_unlock(&c->lock); 108 spin_unlock(&c->lock);
@@ -108,6 +110,10 @@ void res_counter_uncharge(struct res_counter *counter, unsigned long val)
108 local_irq_restore(flags); 110 local_irq_restore(flags);
109} 111}
110 112
113void res_counter_uncharge(struct res_counter *counter, unsigned long val)
114{
115 res_counter_uncharge_until(counter, NULL, val);
116}
111 117
112static inline unsigned long long * 118static inline unsigned long long *
113res_counter_member(struct res_counter *counter, int member) 119res_counter_member(struct res_counter *counter, int member)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index df30ee08bdd4..e5e1d85b8c7c 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -24,6 +24,7 @@
24#include <linux/sysctl.h> 24#include <linux/sysctl.h>
25 25
26#include <asm/irq_regs.h> 26#include <asm/irq_regs.h>
27#include <linux/kvm_para.h>
27#include <linux/perf_event.h> 28#include <linux/perf_event.h>
28 29
29int watchdog_enabled = 1; 30int watchdog_enabled = 1;
@@ -280,6 +281,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
280 __this_cpu_write(softlockup_touch_sync, false); 281 __this_cpu_write(softlockup_touch_sync, false);
281 sched_clock_tick(); 282 sched_clock_tick();
282 } 283 }
284
285 /* Clear the guest paused flag on watchdog reset */
286 kvm_check_and_clear_guest_paused();
283 __touch_watchdog(); 287 __touch_watchdog();
284 return HRTIMER_RESTART; 288 return HRTIMER_RESTART;
285 } 289 }
@@ -292,6 +296,14 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
292 */ 296 */
293 duration = is_softlockup(touch_ts); 297 duration = is_softlockup(touch_ts);
294 if (unlikely(duration)) { 298 if (unlikely(duration)) {
299 /*
300 * If a virtual machine is stopped by the host it can look to
301 * the watchdog like a soft lockup, check to see if the host
302 * stopped the vm before we issue the warning
303 */
304 if (kvm_check_and_clear_guest_paused())
305 return HRTIMER_RESTART;
306
295 /* only warn once */ 307 /* only warn once */
296 if (__this_cpu_read(soft_watchdog_warn) == true) 308 if (__this_cpu_read(soft_watchdog_warn) == true)
297 return HRTIMER_RESTART; 309 return HRTIMER_RESTART;
diff --git a/lib/Kconfig b/lib/Kconfig
index 98230ac3db29..a9e15403434e 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -19,6 +19,9 @@ config RATIONAL
19config GENERIC_STRNCPY_FROM_USER 19config GENERIC_STRNCPY_FROM_USER
20 bool 20 bool
21 21
22config GENERIC_STRNLEN_USER
23 bool
24
22config GENERIC_FIND_FIRST_BIT 25config GENERIC_FIND_FIRST_BIT
23 bool 26 bool
24 27
@@ -36,6 +39,9 @@ config GENERIC_IO
36 boolean 39 boolean
37 default n 40 default n
38 41
42config STMP_DEVICE
43 bool
44
39config CRC_CCITT 45config CRC_CCITT
40 tristate "CRC-CCITT functions" 46 tristate "CRC-CCITT functions"
41 help 47 help
diff --git a/lib/Makefile b/lib/Makefile
index b98df505f335..8c31a0cb75e9 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -126,6 +126,9 @@ obj-$(CONFIG_CLZ_TAB) += clz_tab.o
126obj-$(CONFIG_DDR) += jedec_ddr_data.o 126obj-$(CONFIG_DDR) += jedec_ddr_data.o
127 127
128obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o 128obj-$(CONFIG_GENERIC_STRNCPY_FROM_USER) += strncpy_from_user.o
129obj-$(CONFIG_GENERIC_STRNLEN_USER) += strnlen_user.o
130
131obj-$(CONFIG_STMP_DEVICE) += stmp_device.o
129 132
130hostprogs-y := gen_crc32table 133hostprogs-y := gen_crc32table
131clean-files := crc32table.h 134clean-files := crc32table.h
diff --git a/lib/bitmap.c b/lib/bitmap.c
index b5a8b6ad2454..06fdfa1aeba7 100644
--- a/lib/bitmap.c
+++ b/lib/bitmap.c
@@ -369,7 +369,8 @@ EXPORT_SYMBOL(bitmap_find_next_zero_area);
369 * @nmaskbits: size of bitmap, in bits 369 * @nmaskbits: size of bitmap, in bits
370 * 370 *
371 * Exactly @nmaskbits bits are displayed. Hex digits are grouped into 371 * Exactly @nmaskbits bits are displayed. Hex digits are grouped into
372 * comma-separated sets of eight digits per set. 372 * comma-separated sets of eight digits per set. Returns the number of
373 * characters which were written to *buf, excluding the trailing \0.
373 */ 374 */
374int bitmap_scnprintf(char *buf, unsigned int buflen, 375int bitmap_scnprintf(char *buf, unsigned int buflen,
375 const unsigned long *maskp, int nmaskbits) 376 const unsigned long *maskp, int nmaskbits)
@@ -517,8 +518,8 @@ EXPORT_SYMBOL(bitmap_parse_user);
517 * 518 *
518 * Helper routine for bitmap_scnlistprintf(). Write decimal number 519 * Helper routine for bitmap_scnlistprintf(). Write decimal number
519 * or range to buf, suppressing output past buf+buflen, with optional 520 * or range to buf, suppressing output past buf+buflen, with optional
520 * comma-prefix. Return len of what would be written to buf, if it 521 * comma-prefix. Return len of what was written to *buf, excluding the
521 * all fit. 522 * trailing \0.
522 */ 523 */
523static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len) 524static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len)
524{ 525{
@@ -544,9 +545,8 @@ static inline int bscnl_emit(char *buf, int buflen, int rbot, int rtop, int len)
544 * the range. Output format is compatible with the format 545 * the range. Output format is compatible with the format
545 * accepted as input by bitmap_parselist(). 546 * accepted as input by bitmap_parselist().
546 * 547 *
547 * The return value is the number of characters which would be 548 * The return value is the number of characters which were written to *buf
548 * generated for the given input, excluding the trailing '\0', as 549 * excluding the trailing '\0', as per ISO C99's scnprintf.
549 * per ISO C99.
550 */ 550 */
551int bitmap_scnlistprintf(char *buf, unsigned int buflen, 551int bitmap_scnlistprintf(char *buf, unsigned int buflen,
552 const unsigned long *maskp, int nmaskbits) 552 const unsigned long *maskp, int nmaskbits)
diff --git a/lib/list_debug.c b/lib/list_debug.c
index 3810b481f940..23a5e031cd8b 100644
--- a/lib/list_debug.c
+++ b/lib/list_debug.c
@@ -31,6 +31,9 @@ void __list_add(struct list_head *new,
31 "list_add corruption. prev->next should be " 31 "list_add corruption. prev->next should be "
32 "next (%p), but was %p. (prev=%p).\n", 32 "next (%p), but was %p. (prev=%p).\n",
33 next, prev->next, prev); 33 next, prev->next, prev);
34 WARN(new == prev || new == next,
35 "list_add double add: new=%p, prev=%p, next=%p.\n",
36 new, prev, next);
34 next->prev = new; 37 next->prev = new;
35 new->next = next; 38 new->next = next;
36 new->prev = prev; 39 new->prev = prev;
diff --git a/lib/radix-tree.c b/lib/radix-tree.c
index 86516f5588e3..d7c878cc006c 100644
--- a/lib/radix-tree.c
+++ b/lib/radix-tree.c
@@ -73,11 +73,24 @@ static unsigned long height_to_maxindex[RADIX_TREE_MAX_PATH + 1] __read_mostly;
73static struct kmem_cache *radix_tree_node_cachep; 73static struct kmem_cache *radix_tree_node_cachep;
74 74
75/* 75/*
76 * The radix tree is variable-height, so an insert operation not only has
77 * to build the branch to its corresponding item, it also has to build the
78 * branch to existing items if the size has to be increased (by
79 * radix_tree_extend).
80 *
81 * The worst case is a zero height tree with just a single item at index 0,
82 * and then inserting an item at index ULONG_MAX. This requires 2 new branches
83 * of RADIX_TREE_MAX_PATH size to be created, with only the root node shared.
84 * Hence:
85 */
86#define RADIX_TREE_PRELOAD_SIZE (RADIX_TREE_MAX_PATH * 2 - 1)
87
88/*
76 * Per-cpu pool of preloaded nodes 89 * Per-cpu pool of preloaded nodes
77 */ 90 */
78struct radix_tree_preload { 91struct radix_tree_preload {
79 int nr; 92 int nr;
80 struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH]; 93 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
81}; 94};
82static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; 95static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
83 96
diff --git a/lib/spinlock_debug.c b/lib/spinlock_debug.c
index 525d160d44f0..d0ec4f3d1593 100644
--- a/lib/spinlock_debug.c
+++ b/lib/spinlock_debug.c
@@ -58,7 +58,7 @@ static void spin_dump(raw_spinlock_t *lock, const char *msg)
58 printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n", 58 printk(KERN_EMERG "BUG: spinlock %s on CPU#%d, %s/%d\n",
59 msg, raw_smp_processor_id(), 59 msg, raw_smp_processor_id(),
60 current->comm, task_pid_nr(current)); 60 current->comm, task_pid_nr(current));
61 printk(KERN_EMERG " lock: %p, .magic: %08x, .owner: %s/%d, " 61 printk(KERN_EMERG " lock: %ps, .magic: %08x, .owner: %s/%d, "
62 ".owner_cpu: %d\n", 62 ".owner_cpu: %d\n",
63 lock, lock->magic, 63 lock, lock->magic,
64 owner ? owner->comm : "<none>", 64 owner ? owner->comm : "<none>",
diff --git a/lib/stmp_device.c b/lib/stmp_device.c
new file mode 100644
index 000000000000..8ac9bcc4289a
--- /dev/null
+++ b/lib/stmp_device.c
@@ -0,0 +1,80 @@
1/*
2 * Copyright (C) 1999 ARM Limited
3 * Copyright (C) 2000 Deep Blue Solutions Ltd
4 * Copyright 2006-2007,2010 Freescale Semiconductor, Inc. All Rights Reserved.
5 * Copyright 2008 Juergen Beisert, kernel@pengutronix.de
6 * Copyright 2009 Ilya Yanok, Emcraft Systems Ltd, yanok@emcraft.com
7 * Copyright (C) 2011 Wolfram Sang, Pengutronix e.K.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
13 */
14
15#include <linux/io.h>
16#include <linux/errno.h>
17#include <linux/delay.h>
18#include <linux/module.h>
19#include <linux/stmp_device.h>
20
21#define STMP_MODULE_CLKGATE (1 << 30)
22#define STMP_MODULE_SFTRST (1 << 31)
23
24/*
25 * Clear the bit and poll it cleared. This is usually called with
26 * a reset address and mask being either SFTRST(bit 31) or CLKGATE
27 * (bit 30).
28 */
29static int stmp_clear_poll_bit(void __iomem *addr, u32 mask)
30{
31 int timeout = 0x400;
32
33 writel(mask, addr + STMP_OFFSET_REG_CLR);
34 udelay(1);
35 while ((readl(addr) & mask) && --timeout)
36 /* nothing */;
37
38 return !timeout;
39}
40
41int stmp_reset_block(void __iomem *reset_addr)
42{
43 int ret;
44 int timeout = 0x400;
45
46 /* clear and poll SFTRST */
47 ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_SFTRST);
48 if (unlikely(ret))
49 goto error;
50
51 /* clear CLKGATE */
52 writel(STMP_MODULE_CLKGATE, reset_addr + STMP_OFFSET_REG_CLR);
53
54 /* set SFTRST to reset the block */
55 writel(STMP_MODULE_SFTRST, reset_addr + STMP_OFFSET_REG_SET);
56 udelay(1);
57
58 /* poll CLKGATE becoming set */
59 while ((!(readl(reset_addr) & STMP_MODULE_CLKGATE)) && --timeout)
60 /* nothing */;
61 if (unlikely(!timeout))
62 goto error;
63
64 /* clear and poll SFTRST */
65 ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_SFTRST);
66 if (unlikely(ret))
67 goto error;
68
69 /* clear and poll CLKGATE */
70 ret = stmp_clear_poll_bit(reset_addr, STMP_MODULE_CLKGATE);
71 if (unlikely(ret))
72 goto error;
73
74 return 0;
75
76error:
77 pr_err("%s(%p): module reset timeout\n", __func__, reset_addr);
78 return -ETIMEDOUT;
79}
80EXPORT_SYMBOL(stmp_reset_block);
diff --git a/lib/string_helpers.c b/lib/string_helpers.c
index dd4ece372699..1cffc223bff5 100644
--- a/lib/string_helpers.c
+++ b/lib/string_helpers.c
@@ -23,15 +23,15 @@
23int string_get_size(u64 size, const enum string_size_units units, 23int string_get_size(u64 size, const enum string_size_units units,
24 char *buf, int len) 24 char *buf, int len)
25{ 25{
26 const char *units_10[] = { "B", "kB", "MB", "GB", "TB", "PB", 26 static const char *units_10[] = { "B", "kB", "MB", "GB", "TB", "PB",
27 "EB", "ZB", "YB", NULL}; 27 "EB", "ZB", "YB", NULL};
28 const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB", 28 static const char *units_2[] = {"B", "KiB", "MiB", "GiB", "TiB", "PiB",
29 "EiB", "ZiB", "YiB", NULL }; 29 "EiB", "ZiB", "YiB", NULL };
30 const char **units_str[] = { 30 static const char **units_str[] = {
31 [STRING_UNITS_10] = units_10, 31 [STRING_UNITS_10] = units_10,
32 [STRING_UNITS_2] = units_2, 32 [STRING_UNITS_2] = units_2,
33 }; 33 };
34 const unsigned int divisor[] = { 34 static const unsigned int divisor[] = {
35 [STRING_UNITS_10] = 1000, 35 [STRING_UNITS_10] = 1000,
36 [STRING_UNITS_2] = 1024, 36 [STRING_UNITS_2] = 1024,
37 }; 37 };
diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
index c4c09b0e96ba..bb2b201d6ad0 100644
--- a/lib/strncpy_from_user.c
+++ b/lib/strncpy_from_user.c
@@ -4,37 +4,7 @@
4#include <linux/errno.h> 4#include <linux/errno.h>
5 5
6#include <asm/byteorder.h> 6#include <asm/byteorder.h>
7 7#include <asm/word-at-a-time.h>
8static inline long find_zero(unsigned long mask)
9{
10 long byte = 0;
11
12#ifdef __BIG_ENDIAN
13#ifdef CONFIG_64BIT
14 if (mask >> 32)
15 mask >>= 32;
16 else
17 byte = 4;
18#endif
19 if (mask >> 16)
20 mask >>= 16;
21 else
22 byte += 2;
23 return (mask >> 8) ? byte : byte + 1;
24#else
25#ifdef CONFIG_64BIT
26 if (!((unsigned int) mask)) {
27 mask >>= 32;
28 byte = 4;
29 }
30#endif
31 if (!(mask & 0xffff)) {
32 mask >>= 16;
33 byte += 2;
34 }
35 return (mask & 0xff) ? byte : byte + 1;
36#endif
37}
38 8
39#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS 9#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
40#define IS_UNALIGNED(src, dst) 0 10#define IS_UNALIGNED(src, dst) 0
@@ -51,8 +21,7 @@ static inline long find_zero(unsigned long mask)
51 */ 21 */
52static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max) 22static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
53{ 23{
54 const unsigned long high_bits = REPEAT_BYTE(0xfe) + 1; 24 const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
55 const unsigned long low_bits = REPEAT_BYTE(0x7f);
56 long res = 0; 25 long res = 0;
57 26
58 /* 27 /*
@@ -66,18 +35,16 @@ static inline long do_strncpy_from_user(char *dst, const char __user *src, long
66 goto byte_at_a_time; 35 goto byte_at_a_time;
67 36
68 while (max >= sizeof(unsigned long)) { 37 while (max >= sizeof(unsigned long)) {
69 unsigned long c, v, rhs; 38 unsigned long c, data;
70 39
71 /* Fall back to byte-at-a-time if we get a page fault */ 40 /* Fall back to byte-at-a-time if we get a page fault */
72 if (unlikely(__get_user(c,(unsigned long __user *)(src+res)))) 41 if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
73 break; 42 break;
74 rhs = c | low_bits;
75 v = (c + high_bits) & ~rhs;
76 *(unsigned long *)(dst+res) = c; 43 *(unsigned long *)(dst+res) = c;
77 if (v) { 44 if (has_zero(c, &data, &constants)) {
78 v = (c & low_bits) + low_bits; 45 data = prep_zero_mask(c, data, &constants);
79 v = ~(v | rhs); 46 data = create_zero_mask(data);
80 return res + find_zero(v); 47 return res + find_zero(data);
81 } 48 }
82 res += sizeof(unsigned long); 49 res += sizeof(unsigned long);
83 max -= sizeof(unsigned long); 50 max -= sizeof(unsigned long);
diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
new file mode 100644
index 000000000000..a28df5206d95
--- /dev/null
+++ b/lib/strnlen_user.c
@@ -0,0 +1,138 @@
1#include <linux/kernel.h>
2#include <linux/export.h>
3#include <linux/uaccess.h>
4
5#include <asm/word-at-a-time.h>
6
7/* Set bits in the first 'n' bytes when loaded from memory */
8#ifdef __LITTLE_ENDIAN
9# define aligned_byte_mask(n) ((1ul << 8*(n))-1)
10#else
11# define aligned_byte_mask(n) (~0xfful << (BITS_PER_LONG - 8 - 8*(n)))
12#endif
13
14/*
15 * Do a strnlen, return length of string *with* final '\0'.
16 * 'count' is the user-supplied count, while 'max' is the
17 * address space maximum.
18 *
19 * Return 0 for exceptions (which includes hitting the address
20 * space maximum), or 'count+1' if hitting the user-supplied
21 * maximum count.
22 *
23 * NOTE! We can sometimes overshoot the user-supplied maximum
24 * if it fits in a aligned 'long'. The caller needs to check
25 * the return value against "> max".
26 */
27static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
28{
29 const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
30 long align, res = 0;
31 unsigned long c;
32
33 /*
34 * Truncate 'max' to the user-specified limit, so that
35 * we only have one limit we need to check in the loop
36 */
37 if (max > count)
38 max = count;
39
40 /*
41 * Do everything aligned. But that means that we
42 * need to also expand the maximum..
43 */
44 align = (sizeof(long) - 1) & (unsigned long)src;
45 src -= align;
46 max += align;
47
48 if (unlikely(__get_user(c,(unsigned long __user *)src)))
49 return 0;
50 c |= aligned_byte_mask(align);
51
52 for (;;) {
53 unsigned long data;
54 if (has_zero(c, &data, &constants)) {
55 data = prep_zero_mask(c, data, &constants);
56 data = create_zero_mask(data);
57 return res + find_zero(data) + 1 - align;
58 }
59 res += sizeof(unsigned long);
60 if (unlikely(max < sizeof(unsigned long)))
61 break;
62 max -= sizeof(unsigned long);
63 if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
64 return 0;
65 }
66 res -= align;
67
68 /*
69 * Uhhuh. We hit 'max'. But was that the user-specified maximum
70 * too? If so, return the marker for "too long".
71 */
72 if (res >= count)
73 return count+1;
74
75 /*
76 * Nope: we hit the address space limit, and we still had more
77 * characters the caller would have wanted. That's 0.
78 */
79 return 0;
80}
81
82/**
83 * strnlen_user: - Get the size of a user string INCLUDING final NUL.
84 * @str: The string to measure.
85 * @count: Maximum count (including NUL character)
86 *
87 * Context: User context only. This function may sleep.
88 *
89 * Get the size of a NUL-terminated string in user space.
90 *
91 * Returns the size of the string INCLUDING the terminating NUL.
92 * If the string is too long, returns 'count+1'.
93 * On exception (or invalid count), returns 0.
94 */
95long strnlen_user(const char __user *str, long count)
96{
97 unsigned long max_addr, src_addr;
98
99 if (unlikely(count <= 0))
100 return 0;
101
102 max_addr = user_addr_max();
103 src_addr = (unsigned long)str;
104 if (likely(src_addr < max_addr)) {
105 unsigned long max = max_addr - src_addr;
106 return do_strnlen_user(str, count, max);
107 }
108 return 0;
109}
110EXPORT_SYMBOL(strnlen_user);
111
112/**
113 * strlen_user: - Get the size of a user string INCLUDING final NUL.
114 * @str: The string to measure.
115 *
116 * Context: User context only. This function may sleep.
117 *
118 * Get the size of a NUL-terminated string in user space.
119 *
120 * Returns the size of the string INCLUDING the terminating NUL.
121 * On exception, returns 0.
122 *
123 * If there is a limit on the length of a valid string, you may wish to
124 * consider using strnlen_user() instead.
125 */
126long strlen_user(const char __user *str)
127{
128 unsigned long max_addr, src_addr;
129
130 max_addr = user_addr_max();
131 src_addr = (unsigned long)str;
132 if (likely(src_addr < max_addr)) {
133 unsigned long max = max_addr - src_addr;
134 return do_strnlen_user(str, ~0ul, max);
135 }
136 return 0;
137}
138EXPORT_SYMBOL(strlen_user);
diff --git a/lib/swiotlb.c b/lib/swiotlb.c
index 414f46ed1dcd..45bc1f83a5ad 100644
--- a/lib/swiotlb.c
+++ b/lib/swiotlb.c
@@ -130,11 +130,9 @@ void swiotlb_print_info(void)
130 pstart = virt_to_phys(io_tlb_start); 130 pstart = virt_to_phys(io_tlb_start);
131 pend = virt_to_phys(io_tlb_end); 131 pend = virt_to_phys(io_tlb_end);
132 132
133 printk(KERN_INFO "Placing %luMB software IO TLB between %p - %p\n", 133 printk(KERN_INFO "software IO TLB [mem %#010llx-%#010llx] (%luMB) mapped at [%p-%p]\n",
134 bytes >> 20, io_tlb_start, io_tlb_end); 134 (unsigned long long)pstart, (unsigned long long)pend - 1,
135 printk(KERN_INFO "software IO TLB at phys %#llx - %#llx\n", 135 bytes >> 20, io_tlb_start, io_tlb_end - 1);
136 (unsigned long long)pstart,
137 (unsigned long long)pend);
138} 136}
139 137
140void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose) 138void __init swiotlb_init_with_tbl(char *tlb, unsigned long nslabs, int verbose)
diff --git a/lib/test-kstrtox.c b/lib/test-kstrtox.c
index d55769d63cb8..bea3f3fa3f02 100644
--- a/lib/test-kstrtox.c
+++ b/lib/test-kstrtox.c
@@ -11,7 +11,7 @@ struct test_fail {
11}; 11};
12 12
13#define DEFINE_TEST_FAIL(test) \ 13#define DEFINE_TEST_FAIL(test) \
14 const struct test_fail test[] __initdata 14 const struct test_fail test[] __initconst
15 15
16#define DECLARE_TEST_OK(type, test_type) \ 16#define DECLARE_TEST_OK(type, test_type) \
17 test_type { \ 17 test_type { \
@@ -21,7 +21,7 @@ struct test_fail {
21 } 21 }
22 22
23#define DEFINE_TEST_OK(type, test) \ 23#define DEFINE_TEST_OK(type, test) \
24 const type test[] __initdata 24 const type test[] __initconst
25 25
26#define TEST_FAIL(fn, type, fmt, test) \ 26#define TEST_FAIL(fn, type, fmt, test) \
27{ \ 27{ \
diff --git a/lib/vsprintf.c b/lib/vsprintf.c
index abbabec9720a..5391299c1e78 100644
--- a/lib/vsprintf.c
+++ b/lib/vsprintf.c
@@ -284,6 +284,7 @@ char *number(char *buf, char *end, unsigned long long num,
284 char locase; 284 char locase;
285 int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10); 285 int need_pfx = ((spec.flags & SPECIAL) && spec.base != 10);
286 int i; 286 int i;
287 bool is_zero = num == 0LL;
287 288
288 /* locase = 0 or 0x20. ORing digits or letters with 'locase' 289 /* locase = 0 or 0x20. ORing digits or letters with 'locase'
289 * produces same digits or (maybe lowercased) letters */ 290 * produces same digits or (maybe lowercased) letters */
@@ -305,8 +306,9 @@ char *number(char *buf, char *end, unsigned long long num,
305 } 306 }
306 } 307 }
307 if (need_pfx) { 308 if (need_pfx) {
308 spec.field_width--;
309 if (spec.base == 16) 309 if (spec.base == 16)
310 spec.field_width -= 2;
311 else if (!is_zero)
310 spec.field_width--; 312 spec.field_width--;
311 } 313 }
312 314
@@ -353,9 +355,11 @@ char *number(char *buf, char *end, unsigned long long num,
353 } 355 }
354 /* "0x" / "0" prefix */ 356 /* "0x" / "0" prefix */
355 if (need_pfx) { 357 if (need_pfx) {
356 if (buf < end) 358 if (spec.base == 16 || !is_zero) {
357 *buf = '0'; 359 if (buf < end)
358 ++buf; 360 *buf = '0';
361 ++buf;
362 }
359 if (spec.base == 16) { 363 if (spec.base == 16) {
360 if (buf < end) 364 if (buf < end)
361 *buf = ('X' | locase); 365 *buf = ('X' | locase);
@@ -436,7 +440,7 @@ char *symbol_string(char *buf, char *end, void *ptr,
436 else if (ext != 'f' && ext != 's') 440 else if (ext != 'f' && ext != 's')
437 sprint_symbol(sym, value); 441 sprint_symbol(sym, value);
438 else 442 else
439 kallsyms_lookup(value, NULL, NULL, NULL, sym); 443 sprint_symbol_no_offset(sym, value);
440 444
441 return string(buf, end, sym, spec); 445 return string(buf, end, sym, spec);
442#else 446#else
diff --git a/mm/Kconfig b/mm/Kconfig
index e338407f1225..b2176374b98e 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -198,7 +198,7 @@ config COMPACTION
198config MIGRATION 198config MIGRATION
199 bool "Page migration" 199 bool "Page migration"
200 def_bool y 200 def_bool y
201 depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION 201 depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA
202 help 202 help
203 Allows the migration of the physical location of pages of processes 203 Allows the migration of the physical location of pages of processes
204 while the virtual addresses are not changed. This is useful in 204 while the virtual addresses are not changed. This is useful in
@@ -349,6 +349,16 @@ choice
349 benefit. 349 benefit.
350endchoice 350endchoice
351 351
352config CROSS_MEMORY_ATTACH
353 bool "Cross Memory Support"
354 depends on MMU
355 default y
356 help
357 Enabling this option adds the system calls process_vm_readv and
358 process_vm_writev which allow a process with the correct privileges
359 to directly read from or write to to another process's address space.
360 See the man page for more details.
361
352# 362#
353# UP and nommu archs use km based percpu allocator 363# UP and nommu archs use km based percpu allocator
354# 364#
diff --git a/mm/Makefile b/mm/Makefile
index 50ec00ef2a0e..a156285ce88d 100644
--- a/mm/Makefile
+++ b/mm/Makefile
@@ -5,15 +5,18 @@
5mmu-y := nommu.o 5mmu-y := nommu.o
6mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \ 6mmu-$(CONFIG_MMU) := fremap.o highmem.o madvise.o memory.o mincore.o \
7 mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \ 7 mlock.o mmap.o mprotect.o mremap.o msync.o rmap.o \
8 vmalloc.o pagewalk.o pgtable-generic.o \ 8 vmalloc.o pagewalk.o pgtable-generic.o
9 process_vm_access.o 9
10ifdef CONFIG_CROSS_MEMORY_ATTACH
11mmu-$(CONFIG_MMU) += process_vm_access.o
12endif
10 13
11obj-y := filemap.o mempool.o oom_kill.o fadvise.o \ 14obj-y := filemap.o mempool.o oom_kill.o fadvise.o \
12 maccess.o page_alloc.o page-writeback.o \ 15 maccess.o page_alloc.o page-writeback.o \
13 readahead.o swap.o truncate.o vmscan.o shmem.o \ 16 readahead.o swap.o truncate.o vmscan.o shmem.o \
14 prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \ 17 prio_tree.o util.o mmzone.o vmstat.o backing-dev.o \
15 page_isolation.o mm_init.o mmu_context.o percpu.o \ 18 page_isolation.o mm_init.o mmu_context.o percpu.o \
16 $(mmu-y) 19 compaction.o $(mmu-y)
17obj-y += init-mm.o 20obj-y += init-mm.o
18 21
19ifdef CONFIG_NO_BOOTMEM 22ifdef CONFIG_NO_BOOTMEM
@@ -25,14 +28,13 @@ endif
25obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o 28obj-$(CONFIG_HAVE_MEMBLOCK) += memblock.o
26 29
27obj-$(CONFIG_BOUNCE) += bounce.o 30obj-$(CONFIG_BOUNCE) += bounce.o
28obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o thrash.o 31obj-$(CONFIG_SWAP) += page_io.o swap_state.o swapfile.o
29obj-$(CONFIG_HAS_DMA) += dmapool.o 32obj-$(CONFIG_HAS_DMA) += dmapool.o
30obj-$(CONFIG_HUGETLBFS) += hugetlb.o 33obj-$(CONFIG_HUGETLBFS) += hugetlb.o
31obj-$(CONFIG_NUMA) += mempolicy.o 34obj-$(CONFIG_NUMA) += mempolicy.o
32obj-$(CONFIG_SPARSEMEM) += sparse.o 35obj-$(CONFIG_SPARSEMEM) += sparse.o
33obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o 36obj-$(CONFIG_SPARSEMEM_VMEMMAP) += sparse-vmemmap.o
34obj-$(CONFIG_SLOB) += slob.o 37obj-$(CONFIG_SLOB) += slob.o
35obj-$(CONFIG_COMPACTION) += compaction.o
36obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o 38obj-$(CONFIG_MMU_NOTIFIER) += mmu_notifier.o
37obj-$(CONFIG_KSM) += ksm.o 39obj-$(CONFIG_KSM) += ksm.o
38obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o 40obj-$(CONFIG_PAGE_POISONING) += debug-pagealloc.o
diff --git a/mm/bootmem.c b/mm/bootmem.c
index 0131170c9d54..ec4fcb7a56c8 100644
--- a/mm/bootmem.c
+++ b/mm/bootmem.c
@@ -77,16 +77,16 @@ unsigned long __init bootmem_bootmap_pages(unsigned long pages)
77 */ 77 */
78static void __init link_bootmem(bootmem_data_t *bdata) 78static void __init link_bootmem(bootmem_data_t *bdata)
79{ 79{
80 struct list_head *iter; 80 bootmem_data_t *ent;
81 81
82 list_for_each(iter, &bdata_list) { 82 list_for_each_entry(ent, &bdata_list, list) {
83 bootmem_data_t *ent; 83 if (bdata->node_min_pfn < ent->node_min_pfn) {
84 84 list_add_tail(&bdata->list, &ent->list);
85 ent = list_entry(iter, bootmem_data_t, list); 85 return;
86 if (bdata->node_min_pfn < ent->node_min_pfn) 86 }
87 break;
88 } 87 }
89 list_add_tail(&bdata->list, iter); 88
89 list_add_tail(&bdata->list, &bdata_list);
90} 90}
91 91
92/* 92/*
@@ -203,7 +203,8 @@ static unsigned long __init free_all_bootmem_core(bootmem_data_t *bdata)
203 } else { 203 } else {
204 unsigned long off = 0; 204 unsigned long off = 0;
205 205
206 while (vec && off < BITS_PER_LONG) { 206 vec >>= start & (BITS_PER_LONG - 1);
207 while (vec) {
207 if (vec & 1) { 208 if (vec & 1) {
208 page = pfn_to_page(start + off); 209 page = pfn_to_page(start + off);
209 __free_pages_bootmem(page, 0); 210 __free_pages_bootmem(page, 0);
@@ -467,7 +468,7 @@ static unsigned long __init align_off(struct bootmem_data *bdata,
467 return ALIGN(base + off, align) - base; 468 return ALIGN(base + off, align) - base;
468} 469}
469 470
470static void * __init alloc_bootmem_core(struct bootmem_data *bdata, 471static void * __init alloc_bootmem_bdata(struct bootmem_data *bdata,
471 unsigned long size, unsigned long align, 472 unsigned long size, unsigned long align,
472 unsigned long goal, unsigned long limit) 473 unsigned long goal, unsigned long limit)
473{ 474{
@@ -588,14 +589,14 @@ static void * __init alloc_arch_preferred_bootmem(bootmem_data_t *bdata,
588 p_bdata = bootmem_arch_preferred_node(bdata, size, align, 589 p_bdata = bootmem_arch_preferred_node(bdata, size, align,
589 goal, limit); 590 goal, limit);
590 if (p_bdata) 591 if (p_bdata)
591 return alloc_bootmem_core(p_bdata, size, align, 592 return alloc_bootmem_bdata(p_bdata, size, align,
592 goal, limit); 593 goal, limit);
593 } 594 }
594#endif 595#endif
595 return NULL; 596 return NULL;
596} 597}
597 598
598static void * __init ___alloc_bootmem_nopanic(unsigned long size, 599static void * __init alloc_bootmem_core(unsigned long size,
599 unsigned long align, 600 unsigned long align,
600 unsigned long goal, 601 unsigned long goal,
601 unsigned long limit) 602 unsigned long limit)
@@ -603,7 +604,6 @@ static void * __init ___alloc_bootmem_nopanic(unsigned long size,
603 bootmem_data_t *bdata; 604 bootmem_data_t *bdata;
604 void *region; 605 void *region;
605 606
606restart:
607 region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit); 607 region = alloc_arch_preferred_bootmem(NULL, size, align, goal, limit);
608 if (region) 608 if (region)
609 return region; 609 return region;
@@ -614,11 +614,25 @@ restart:
614 if (limit && bdata->node_min_pfn >= PFN_DOWN(limit)) 614 if (limit && bdata->node_min_pfn >= PFN_DOWN(limit))
615 break; 615 break;
616 616
617 region = alloc_bootmem_core(bdata, size, align, goal, limit); 617 region = alloc_bootmem_bdata(bdata, size, align, goal, limit);
618 if (region) 618 if (region)
619 return region; 619 return region;
620 } 620 }
621 621
622 return NULL;
623}
624
625static void * __init ___alloc_bootmem_nopanic(unsigned long size,
626 unsigned long align,
627 unsigned long goal,
628 unsigned long limit)
629{
630 void *ptr;
631
632restart:
633 ptr = alloc_bootmem_core(size, align, goal, limit);
634 if (ptr)
635 return ptr;
622 if (goal) { 636 if (goal) {
623 goal = 0; 637 goal = 0;
624 goto restart; 638 goto restart;
@@ -684,21 +698,56 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
684 return ___alloc_bootmem(size, align, goal, limit); 698 return ___alloc_bootmem(size, align, goal, limit);
685} 699}
686 700
687static void * __init ___alloc_bootmem_node(bootmem_data_t *bdata, 701static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
688 unsigned long size, unsigned long align, 702 unsigned long size, unsigned long align,
689 unsigned long goal, unsigned long limit) 703 unsigned long goal, unsigned long limit)
690{ 704{
691 void *ptr; 705 void *ptr;
692 706
693 ptr = alloc_arch_preferred_bootmem(bdata, size, align, goal, limit); 707again:
708 ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size,
709 align, goal, limit);
694 if (ptr) 710 if (ptr)
695 return ptr; 711 return ptr;
696 712
697 ptr = alloc_bootmem_core(bdata, size, align, goal, limit); 713 ptr = alloc_bootmem_bdata(pgdat->bdata, size, align, goal, limit);
698 if (ptr) 714 if (ptr)
699 return ptr; 715 return ptr;
700 716
701 return ___alloc_bootmem(size, align, goal, limit); 717 ptr = alloc_bootmem_core(size, align, goal, limit);
718 if (ptr)
719 return ptr;
720
721 if (goal) {
722 goal = 0;
723 goto again;
724 }
725
726 return NULL;
727}
728
729void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
730 unsigned long align, unsigned long goal)
731{
732 if (WARN_ON_ONCE(slab_is_available()))
733 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
734
735 return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
736}
737
738void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
739 unsigned long align, unsigned long goal,
740 unsigned long limit)
741{
742 void *ptr;
743
744 ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
745 if (ptr)
746 return ptr;
747
748 printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
749 panic("Out of memory");
750 return NULL;
702} 751}
703 752
704/** 753/**
@@ -722,7 +771,7 @@ void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
722 if (WARN_ON_ONCE(slab_is_available())) 771 if (WARN_ON_ONCE(slab_is_available()))
723 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); 772 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
724 773
725 return ___alloc_bootmem_node(pgdat->bdata, size, align, goal, 0); 774 return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
726} 775}
727 776
728void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size, 777void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
@@ -743,7 +792,7 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
743 unsigned long new_goal; 792 unsigned long new_goal;
744 793
745 new_goal = MAX_DMA32_PFN << PAGE_SHIFT; 794 new_goal = MAX_DMA32_PFN << PAGE_SHIFT;
746 ptr = alloc_bootmem_core(pgdat->bdata, size, align, 795 ptr = alloc_bootmem_bdata(pgdat->bdata, size, align,
747 new_goal, 0); 796 new_goal, 0);
748 if (ptr) 797 if (ptr)
749 return ptr; 798 return ptr;
@@ -754,47 +803,6 @@ void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
754 803
755} 804}
756 805
757#ifdef CONFIG_SPARSEMEM
758/**
759 * alloc_bootmem_section - allocate boot memory from a specific section
760 * @size: size of the request in bytes
761 * @section_nr: sparse map section to allocate from
762 *
763 * Return NULL on failure.
764 */
765void * __init alloc_bootmem_section(unsigned long size,
766 unsigned long section_nr)
767{
768 bootmem_data_t *bdata;
769 unsigned long pfn, goal;
770
771 pfn = section_nr_to_pfn(section_nr);
772 goal = pfn << PAGE_SHIFT;
773 bdata = &bootmem_node_data[early_pfn_to_nid(pfn)];
774
775 return alloc_bootmem_core(bdata, size, SMP_CACHE_BYTES, goal, 0);
776}
777#endif
778
779void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
780 unsigned long align, unsigned long goal)
781{
782 void *ptr;
783
784 if (WARN_ON_ONCE(slab_is_available()))
785 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
786
787 ptr = alloc_arch_preferred_bootmem(pgdat->bdata, size, align, goal, 0);
788 if (ptr)
789 return ptr;
790
791 ptr = alloc_bootmem_core(pgdat->bdata, size, align, goal, 0);
792 if (ptr)
793 return ptr;
794
795 return __alloc_bootmem_nopanic(size, align, goal);
796}
797
798#ifndef ARCH_LOW_ADDRESS_LIMIT 806#ifndef ARCH_LOW_ADDRESS_LIMIT
799#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL 807#define ARCH_LOW_ADDRESS_LIMIT 0xffffffffUL
800#endif 808#endif
@@ -839,6 +847,6 @@ void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
839 if (WARN_ON_ONCE(slab_is_available())) 847 if (WARN_ON_ONCE(slab_is_available()))
840 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); 848 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
841 849
842 return ___alloc_bootmem_node(pgdat->bdata, size, align, 850 return ___alloc_bootmem_node(pgdat, size, align,
843 goal, ARCH_LOW_ADDRESS_LIMIT); 851 goal, ARCH_LOW_ADDRESS_LIMIT);
844} 852}
diff --git a/mm/compaction.c b/mm/compaction.c
index 74a8c825ff28..4ac338af5120 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -16,30 +16,11 @@
16#include <linux/sysfs.h> 16#include <linux/sysfs.h>
17#include "internal.h" 17#include "internal.h"
18 18
19#if defined CONFIG_COMPACTION || defined CONFIG_CMA
20
19#define CREATE_TRACE_POINTS 21#define CREATE_TRACE_POINTS
20#include <trace/events/compaction.h> 22#include <trace/events/compaction.h>
21 23
22/*
23 * compact_control is used to track pages being migrated and the free pages
24 * they are being migrated to during memory compaction. The free_pfn starts
25 * at the end of a zone and migrate_pfn begins at the start. Movable pages
26 * are moved to the end of a zone during a compaction run and the run
27 * completes when free_pfn <= migrate_pfn
28 */
29struct compact_control {
30 struct list_head freepages; /* List of free pages to migrate to */
31 struct list_head migratepages; /* List of pages being migrated */
32 unsigned long nr_freepages; /* Number of isolated free pages */
33 unsigned long nr_migratepages; /* Number of pages to migrate */
34 unsigned long free_pfn; /* isolate_freepages search base */
35 unsigned long migrate_pfn; /* isolate_migratepages search base */
36 bool sync; /* Synchronous migration */
37
38 int order; /* order a direct compactor needs */
39 int migratetype; /* MOVABLE, RECLAIMABLE etc */
40 struct zone *zone;
41};
42
43static unsigned long release_freepages(struct list_head *freelist) 24static unsigned long release_freepages(struct list_head *freelist)
44{ 25{
45 struct page *page, *next; 26 struct page *page, *next;
@@ -54,24 +35,35 @@ static unsigned long release_freepages(struct list_head *freelist)
54 return count; 35 return count;
55} 36}
56 37
57/* Isolate free pages onto a private freelist. Must hold zone->lock */ 38static void map_pages(struct list_head *list)
58static unsigned long isolate_freepages_block(struct zone *zone, 39{
59 unsigned long blockpfn, 40 struct page *page;
60 struct list_head *freelist) 41
42 list_for_each_entry(page, list, lru) {
43 arch_alloc_page(page, 0);
44 kernel_map_pages(page, 1, 1);
45 }
46}
47
48static inline bool migrate_async_suitable(int migratetype)
49{
50 return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
51}
52
53/*
54 * Isolate free pages onto a private freelist. Caller must hold zone->lock.
55 * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
56 * pages inside of the pageblock (even though it may still end up isolating
57 * some pages).
58 */
59static unsigned long isolate_freepages_block(unsigned long blockpfn,
60 unsigned long end_pfn,
61 struct list_head *freelist,
62 bool strict)
61{ 63{
62 unsigned long zone_end_pfn, end_pfn;
63 int nr_scanned = 0, total_isolated = 0; 64 int nr_scanned = 0, total_isolated = 0;
64 struct page *cursor; 65 struct page *cursor;
65 66
66 /* Get the last PFN we should scan for free pages at */
67 zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
68 end_pfn = min(blockpfn + pageblock_nr_pages, zone_end_pfn);
69
70 /* Find the first usable PFN in the block to initialse page cursor */
71 for (; blockpfn < end_pfn; blockpfn++) {
72 if (pfn_valid_within(blockpfn))
73 break;
74 }
75 cursor = pfn_to_page(blockpfn); 67 cursor = pfn_to_page(blockpfn);
76 68
77 /* Isolate free pages. This assumes the block is valid */ 69 /* Isolate free pages. This assumes the block is valid */
@@ -79,15 +71,23 @@ static unsigned long isolate_freepages_block(struct zone *zone,
79 int isolated, i; 71 int isolated, i;
80 struct page *page = cursor; 72 struct page *page = cursor;
81 73
82 if (!pfn_valid_within(blockpfn)) 74 if (!pfn_valid_within(blockpfn)) {
75 if (strict)
76 return 0;
83 continue; 77 continue;
78 }
84 nr_scanned++; 79 nr_scanned++;
85 80
86 if (!PageBuddy(page)) 81 if (!PageBuddy(page)) {
82 if (strict)
83 return 0;
87 continue; 84 continue;
85 }
88 86
89 /* Found a free page, break it into order-0 pages */ 87 /* Found a free page, break it into order-0 pages */
90 isolated = split_free_page(page); 88 isolated = split_free_page(page);
89 if (!isolated && strict)
90 return 0;
91 total_isolated += isolated; 91 total_isolated += isolated;
92 for (i = 0; i < isolated; i++) { 92 for (i = 0; i < isolated; i++) {
93 list_add(&page->lru, freelist); 93 list_add(&page->lru, freelist);
@@ -105,114 +105,71 @@ static unsigned long isolate_freepages_block(struct zone *zone,
105 return total_isolated; 105 return total_isolated;
106} 106}
107 107
108/* Returns true if the page is within a block suitable for migration to */ 108/**
109static bool suitable_migration_target(struct page *page) 109 * isolate_freepages_range() - isolate free pages.
110{ 110 * @start_pfn: The first PFN to start isolating.
111 111 * @end_pfn: The one-past-last PFN.
112 int migratetype = get_pageblock_migratetype(page); 112 *
113 113 * Non-free pages, invalid PFNs, or zone boundaries within the
114 /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */ 114 * [start_pfn, end_pfn) range are considered errors, cause function to
115 if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE) 115 * undo its actions and return zero.
116 return false; 116 *
117 117 * Otherwise, function returns one-past-the-last PFN of isolated page
118 /* If the page is a large free page, then allow migration */ 118 * (which may be greater then end_pfn if end fell in a middle of
119 if (PageBuddy(page) && page_order(page) >= pageblock_order) 119 * a free page).
120 return true;
121
122 /* If the block is MIGRATE_MOVABLE, allow migration */
123 if (migratetype == MIGRATE_MOVABLE)
124 return true;
125
126 /* Otherwise skip the block */
127 return false;
128}
129
130/*
131 * Based on information in the current compact_control, find blocks
132 * suitable for isolating free pages from and then isolate them.
133 */ 120 */
134static void isolate_freepages(struct zone *zone, 121unsigned long
135 struct compact_control *cc) 122isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn)
136{ 123{
137 struct page *page; 124 unsigned long isolated, pfn, block_end_pfn, flags;
138 unsigned long high_pfn, low_pfn, pfn; 125 struct zone *zone = NULL;
139 unsigned long flags; 126 LIST_HEAD(freelist);
140 int nr_freepages = cc->nr_freepages;
141 struct list_head *freelist = &cc->freepages;
142
143 /*
144 * Initialise the free scanner. The starting point is where we last
145 * scanned from (or the end of the zone if starting). The low point
146 * is the end of the pageblock the migration scanner is using.
147 */
148 pfn = cc->free_pfn;
149 low_pfn = cc->migrate_pfn + pageblock_nr_pages;
150 127
151 /* 128 if (pfn_valid(start_pfn))
152 * Take care that if the migration scanner is at the end of the zone 129 zone = page_zone(pfn_to_page(start_pfn));
153 * that the free scanner does not accidentally move to the next zone
154 * in the next isolation cycle.
155 */
156 high_pfn = min(low_pfn, pfn);
157 130
158 /* 131 for (pfn = start_pfn; pfn < end_pfn; pfn += isolated) {
159 * Isolate free pages until enough are available to migrate the 132 if (!pfn_valid(pfn) || zone != page_zone(pfn_to_page(pfn)))
160 * pages on cc->migratepages. We stop searching if the migrate 133 break;
161 * and free page scanners meet or enough free pages are isolated.
162 */
163 for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
164 pfn -= pageblock_nr_pages) {
165 unsigned long isolated;
166
167 if (!pfn_valid(pfn))
168 continue;
169 134
170 /* 135 /*
171 * Check for overlapping nodes/zones. It's possible on some 136 * On subsequent iterations ALIGN() is actually not needed,
172 * configurations to have a setup like 137 * but we keep it that we not to complicate the code.
173 * node0 node1 node0
174 * i.e. it's possible that all pages within a zones range of
175 * pages do not belong to a single zone.
176 */ 138 */
177 page = pfn_to_page(pfn); 139 block_end_pfn = ALIGN(pfn + 1, pageblock_nr_pages);
178 if (page_zone(page) != zone) 140 block_end_pfn = min(block_end_pfn, end_pfn);
179 continue;
180 141
181 /* Check the block is suitable for migration */ 142 spin_lock_irqsave(&zone->lock, flags);
182 if (!suitable_migration_target(page)) 143 isolated = isolate_freepages_block(pfn, block_end_pfn,
183 continue; 144 &freelist, true);
145 spin_unlock_irqrestore(&zone->lock, flags);
184 146
185 /* 147 /*
186 * Found a block suitable for isolating free pages from. Now 148 * In strict mode, isolate_freepages_block() returns 0 if
187 * we disabled interrupts, double check things are ok and 149 * there are any holes in the block (ie. invalid PFNs or
188 * isolate the pages. This is to minimise the time IRQs 150 * non-free pages).
189 * are disabled
190 */ 151 */
191 isolated = 0; 152 if (!isolated)
192 spin_lock_irqsave(&zone->lock, flags); 153 break;
193 if (suitable_migration_target(page)) {
194 isolated = isolate_freepages_block(zone, pfn, freelist);
195 nr_freepages += isolated;
196 }
197 spin_unlock_irqrestore(&zone->lock, flags);
198 154
199 /* 155 /*
200 * Record the highest PFN we isolated pages from. When next 156 * If we managed to isolate pages, it is always (1 << n) *
201 * looking for free pages, the search will restart here as 157 * pageblock_nr_pages for some non-negative n. (Max order
202 * page migration may have returned some pages to the allocator 158 * page may span two pageblocks).
203 */ 159 */
204 if (isolated)
205 high_pfn = max(high_pfn, pfn);
206 } 160 }
207 161
208 /* split_free_page does not map the pages */ 162 /* split_free_page does not map the pages */
209 list_for_each_entry(page, freelist, lru) { 163 map_pages(&freelist);
210 arch_alloc_page(page, 0); 164
211 kernel_map_pages(page, 1, 1); 165 if (pfn < end_pfn) {
166 /* Loop terminated early, cleanup. */
167 release_freepages(&freelist);
168 return 0;
212 } 169 }
213 170
214 cc->free_pfn = high_pfn; 171 /* We don't use freelists for anything. */
215 cc->nr_freepages = nr_freepages; 172 return pfn;
216} 173}
217 174
218/* Update the number of anon and file isolated pages in the zone */ 175/* Update the number of anon and file isolated pages in the zone */
@@ -243,37 +200,34 @@ static bool too_many_isolated(struct zone *zone)
243 return isolated > (inactive + active) / 2; 200 return isolated > (inactive + active) / 2;
244} 201}
245 202
246/* possible outcome of isolate_migratepages */ 203/**
247typedef enum { 204 * isolate_migratepages_range() - isolate all migrate-able pages in range.
248 ISOLATE_ABORT, /* Abort compaction now */ 205 * @zone: Zone pages are in.
249 ISOLATE_NONE, /* No pages isolated, continue scanning */ 206 * @cc: Compaction control structure.
250 ISOLATE_SUCCESS, /* Pages isolated, migrate */ 207 * @low_pfn: The first PFN of the range.
251} isolate_migrate_t; 208 * @end_pfn: The one-past-the-last PFN of the range.
252 209 *
253/* 210 * Isolate all pages that can be migrated from the range specified by
254 * Isolate all pages that can be migrated from the block pointed to by 211 * [low_pfn, end_pfn). Returns zero if there is a fatal signal
 255 * the migrate scanner within compact_control. 212 * pending, otherwise PFN of the first page that was not scanned
 213 * (which may be both less, equal to or more than end_pfn).
214 *
215 * Assumes that cc->migratepages is empty and cc->nr_migratepages is
216 * zero.
217 *
 218 * Apart from cc->migratepages and cc->nr_migratepages this function
219 * does not modify any cc's fields, in particular it does not modify
220 * (or read for that matter) cc->migrate_pfn.
256 */ 221 */
257static isolate_migrate_t isolate_migratepages(struct zone *zone, 222unsigned long
258 struct compact_control *cc) 223isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
224 unsigned long low_pfn, unsigned long end_pfn)
259{ 225{
260 unsigned long low_pfn, end_pfn;
261 unsigned long last_pageblock_nr = 0, pageblock_nr; 226 unsigned long last_pageblock_nr = 0, pageblock_nr;
262 unsigned long nr_scanned = 0, nr_isolated = 0; 227 unsigned long nr_scanned = 0, nr_isolated = 0;
263 struct list_head *migratelist = &cc->migratepages; 228 struct list_head *migratelist = &cc->migratepages;
264 isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE; 229 isolate_mode_t mode = 0;
265 230 struct lruvec *lruvec;
266 /* Do not scan outside zone boundaries */
267 low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
268
269 /* Only scan within a pageblock boundary */
270 end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
271
272 /* Do not cross the free scanner or scan within a memory hole */
273 if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
274 cc->migrate_pfn = end_pfn;
275 return ISOLATE_NONE;
276 }
277 231
278 /* 232 /*
279 * Ensure that there are not too many pages isolated from the LRU 233 * Ensure that there are not too many pages isolated from the LRU
@@ -282,13 +236,13 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
282 */ 236 */
283 while (unlikely(too_many_isolated(zone))) { 237 while (unlikely(too_many_isolated(zone))) {
284 /* async migration should just abort */ 238 /* async migration should just abort */
285 if (!cc->sync) 239 if (cc->mode != COMPACT_SYNC)
286 return ISOLATE_ABORT; 240 return 0;
287 241
288 congestion_wait(BLK_RW_ASYNC, HZ/10); 242 congestion_wait(BLK_RW_ASYNC, HZ/10);
289 243
290 if (fatal_signal_pending(current)) 244 if (fatal_signal_pending(current))
291 return ISOLATE_ABORT; 245 return 0;
292 } 246 }
293 247
294 /* Time to isolate some pages for migration */ 248 /* Time to isolate some pages for migration */
@@ -350,8 +304,9 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
350 * satisfies the allocation 304 * satisfies the allocation
351 */ 305 */
352 pageblock_nr = low_pfn >> pageblock_order; 306 pageblock_nr = low_pfn >> pageblock_order;
353 if (!cc->sync && last_pageblock_nr != pageblock_nr && 307 if (cc->mode != COMPACT_SYNC &&
354 get_pageblock_migratetype(page) != MIGRATE_MOVABLE) { 308 last_pageblock_nr != pageblock_nr &&
309 !migrate_async_suitable(get_pageblock_migratetype(page))) {
355 low_pfn += pageblock_nr_pages; 310 low_pfn += pageblock_nr_pages;
356 low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1; 311 low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
357 last_pageblock_nr = pageblock_nr; 312 last_pageblock_nr = pageblock_nr;
@@ -371,17 +326,19 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
371 continue; 326 continue;
372 } 327 }
373 328
374 if (!cc->sync) 329 if (cc->mode != COMPACT_SYNC)
375 mode |= ISOLATE_ASYNC_MIGRATE; 330 mode |= ISOLATE_ASYNC_MIGRATE;
376 331
332 lruvec = mem_cgroup_page_lruvec(page, zone);
333
377 /* Try isolate the page */ 334 /* Try isolate the page */
378 if (__isolate_lru_page(page, mode, 0) != 0) 335 if (__isolate_lru_page(page, mode) != 0)
379 continue; 336 continue;
380 337
381 VM_BUG_ON(PageTransCompound(page)); 338 VM_BUG_ON(PageTransCompound(page));
382 339
383 /* Successfully isolated */ 340 /* Successfully isolated */
384 del_page_from_lru_list(zone, page, page_lru(page)); 341 del_page_from_lru_list(page, lruvec, page_lru(page));
385 list_add(&page->lru, migratelist); 342 list_add(&page->lru, migratelist);
386 cc->nr_migratepages++; 343 cc->nr_migratepages++;
387 nr_isolated++; 344 nr_isolated++;
@@ -396,11 +353,200 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
396 acct_isolated(zone, cc); 353 acct_isolated(zone, cc);
397 354
398 spin_unlock_irq(&zone->lru_lock); 355 spin_unlock_irq(&zone->lru_lock);
399 cc->migrate_pfn = low_pfn;
400 356
401 trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated); 357 trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
402 358
403 return ISOLATE_SUCCESS; 359 return low_pfn;
360}
361
362#endif /* CONFIG_COMPACTION || CONFIG_CMA */
363#ifdef CONFIG_COMPACTION
364/*
365 * Returns true if MIGRATE_UNMOVABLE pageblock was successfully
366 * converted to MIGRATE_MOVABLE type, false otherwise.
367 */
368static bool rescue_unmovable_pageblock(struct page *page)
369{
370 unsigned long pfn, start_pfn, end_pfn;
371 struct page *start_page, *end_page;
372
373 pfn = page_to_pfn(page);
374 start_pfn = pfn & ~(pageblock_nr_pages - 1);
375 end_pfn = start_pfn + pageblock_nr_pages;
376
377 start_page = pfn_to_page(start_pfn);
378 end_page = pfn_to_page(end_pfn);
379
380 /* Do not deal with pageblocks that overlap zones */
381 if (page_zone(start_page) != page_zone(end_page))
382 return false;
383
384 for (page = start_page, pfn = start_pfn; page < end_page; pfn++,
385 page++) {
386 if (!pfn_valid_within(pfn))
387 continue;
388
389 if (PageBuddy(page)) {
390 int order = page_order(page);
391
392 pfn += (1 << order) - 1;
393 page += (1 << order) - 1;
394
395 continue;
396 } else if (page_count(page) == 0 || PageLRU(page))
397 continue;
398
399 return false;
400 }
401
402 set_pageblock_migratetype(page, MIGRATE_MOVABLE);
403 move_freepages_block(page_zone(page), page, MIGRATE_MOVABLE);
404 return true;
405}
406
407enum smt_result {
408 GOOD_AS_MIGRATION_TARGET,
409 FAIL_UNMOVABLE_TARGET,
410 FAIL_BAD_TARGET,
411};
412
413/*
414 * Returns GOOD_AS_MIGRATION_TARGET if the page is within a block
415 * suitable for migration to, FAIL_UNMOVABLE_TARGET if the page
416 * is within a MIGRATE_UNMOVABLE block, FAIL_BAD_TARGET otherwise.
417 */
418static enum smt_result suitable_migration_target(struct page *page,
419 struct compact_control *cc)
420{
421
422 int migratetype = get_pageblock_migratetype(page);
423
424 /* Don't interfere with memory hot-remove or the min_free_kbytes blocks */
425 if (migratetype == MIGRATE_ISOLATE || migratetype == MIGRATE_RESERVE)
426 return FAIL_BAD_TARGET;
427
428 /* If the page is a large free page, then allow migration */
429 if (PageBuddy(page) && page_order(page) >= pageblock_order)
430 return GOOD_AS_MIGRATION_TARGET;
431
432 /* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
433 if (cc->mode != COMPACT_ASYNC_UNMOVABLE &&
434 migrate_async_suitable(migratetype))
435 return GOOD_AS_MIGRATION_TARGET;
436
437 if (cc->mode == COMPACT_ASYNC_MOVABLE &&
438 migratetype == MIGRATE_UNMOVABLE)
439 return FAIL_UNMOVABLE_TARGET;
440
441 if (cc->mode != COMPACT_ASYNC_MOVABLE &&
442 migratetype == MIGRATE_UNMOVABLE &&
443 rescue_unmovable_pageblock(page))
444 return GOOD_AS_MIGRATION_TARGET;
445
446 /* Otherwise skip the block */
447 return FAIL_BAD_TARGET;
448}
449
450/*
451 * Based on information in the current compact_control, find blocks
452 * suitable for isolating free pages from and then isolate them.
453 */
454static void isolate_freepages(struct zone *zone,
455 struct compact_control *cc)
456{
457 struct page *page;
458 unsigned long high_pfn, low_pfn, pfn, zone_end_pfn, end_pfn;
459 unsigned long flags;
460 int nr_freepages = cc->nr_freepages;
461 struct list_head *freelist = &cc->freepages;
462
463 /*
464 * Initialise the free scanner. The starting point is where we last
465 * scanned from (or the end of the zone if starting). The low point
466 * is the end of the pageblock the migration scanner is using.
467 */
468 pfn = cc->free_pfn;
469 low_pfn = cc->migrate_pfn + pageblock_nr_pages;
470
471 /*
472 * Take care that if the migration scanner is at the end of the zone
473 * that the free scanner does not accidentally move to the next zone
474 * in the next isolation cycle.
475 */
476 high_pfn = min(low_pfn, pfn);
477
478 zone_end_pfn = zone->zone_start_pfn + zone->spanned_pages;
479
480 /*
481 * isolate_freepages() may be called more than once during
482 * compact_zone_order() run and we want only the most recent
483 * count.
484 */
485 cc->nr_pageblocks_skipped = 0;
486
487 /*
488 * Isolate free pages until enough are available to migrate the
489 * pages on cc->migratepages. We stop searching if the migrate
490 * and free page scanners meet or enough free pages are isolated.
491 */
492 for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
493 pfn -= pageblock_nr_pages) {
494 unsigned long isolated;
495 enum smt_result ret;
496
497 if (!pfn_valid(pfn))
498 continue;
499
500 /*
501 * Check for overlapping nodes/zones. It's possible on some
502 * configurations to have a setup like
503 * node0 node1 node0
504 * i.e. it's possible that all pages within a zones range of
505 * pages do not belong to a single zone.
506 */
507 page = pfn_to_page(pfn);
508 if (page_zone(page) != zone)
509 continue;
510
511 /* Check the block is suitable for migration */
512 ret = suitable_migration_target(page, cc);
513 if (ret != GOOD_AS_MIGRATION_TARGET) {
514 if (ret == FAIL_UNMOVABLE_TARGET)
515 cc->nr_pageblocks_skipped++;
516 continue;
517 }
518 /*
519 * Found a block suitable for isolating free pages from. Now
520 * we disabled interrupts, double check things are ok and
521 * isolate the pages. This is to minimise the time IRQs
522 * are disabled
523 */
524 isolated = 0;
525 spin_lock_irqsave(&zone->lock, flags);
526 ret = suitable_migration_target(page, cc);
527 if (ret == GOOD_AS_MIGRATION_TARGET) {
528 end_pfn = min(pfn + pageblock_nr_pages, zone_end_pfn);
529 isolated = isolate_freepages_block(pfn, end_pfn,
530 freelist, false);
531 nr_freepages += isolated;
532 } else if (ret == FAIL_UNMOVABLE_TARGET)
533 cc->nr_pageblocks_skipped++;
534 spin_unlock_irqrestore(&zone->lock, flags);
535
536 /*
537 * Record the highest PFN we isolated pages from. When next
538 * looking for free pages, the search will restart here as
539 * page migration may have returned some pages to the allocator
540 */
541 if (isolated)
542 high_pfn = max(high_pfn, pfn);
543 }
544
545 /* split_free_page does not map the pages */
546 map_pages(freelist);
547
548 cc->free_pfn = high_pfn;
549 cc->nr_freepages = nr_freepages;
404} 550}
405 551
406/* 552/*
@@ -449,6 +595,44 @@ static void update_nr_listpages(struct compact_control *cc)
449 cc->nr_freepages = nr_freepages; 595 cc->nr_freepages = nr_freepages;
450} 596}
451 597
598/* possible outcome of isolate_migratepages */
599typedef enum {
600 ISOLATE_ABORT, /* Abort compaction now */
601 ISOLATE_NONE, /* No pages isolated, continue scanning */
602 ISOLATE_SUCCESS, /* Pages isolated, migrate */
603} isolate_migrate_t;
604
605/*
606 * Isolate all pages that can be migrated from the block pointed to by
607 * the migrate scanner within compact_control.
608 */
609static isolate_migrate_t isolate_migratepages(struct zone *zone,
610 struct compact_control *cc)
611{
612 unsigned long low_pfn, end_pfn;
613
614 /* Do not scan outside zone boundaries */
615 low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
616
617 /* Only scan within a pageblock boundary */
618 end_pfn = ALIGN(low_pfn + pageblock_nr_pages, pageblock_nr_pages);
619
620 /* Do not cross the free scanner or scan within a memory hole */
621 if (end_pfn > cc->free_pfn || !pfn_valid(low_pfn)) {
622 cc->migrate_pfn = end_pfn;
623 return ISOLATE_NONE;
624 }
625
626 /* Perform the isolation */
627 low_pfn = isolate_migratepages_range(zone, cc, low_pfn, end_pfn);
628 if (!low_pfn)
629 return ISOLATE_ABORT;
630
631 cc->migrate_pfn = low_pfn;
632
633 return ISOLATE_SUCCESS;
634}
635
452static int compact_finished(struct zone *zone, 636static int compact_finished(struct zone *zone,
453 struct compact_control *cc) 637 struct compact_control *cc)
454{ 638{
@@ -578,8 +762,9 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
578 762
579 nr_migrate = cc->nr_migratepages; 763 nr_migrate = cc->nr_migratepages;
580 err = migrate_pages(&cc->migratepages, compaction_alloc, 764 err = migrate_pages(&cc->migratepages, compaction_alloc,
581 (unsigned long)cc, false, 765 (unsigned long)&cc->freepages, false,
582 cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC); 766 (cc->mode == COMPACT_SYNC) ? MIGRATE_SYNC_LIGHT
767 : MIGRATE_ASYNC);
583 update_nr_listpages(cc); 768 update_nr_listpages(cc);
584 nr_remaining = cc->nr_migratepages; 769 nr_remaining = cc->nr_migratepages;
585 770
@@ -608,7 +793,8 @@ out:
608 793
609static unsigned long compact_zone_order(struct zone *zone, 794static unsigned long compact_zone_order(struct zone *zone,
610 int order, gfp_t gfp_mask, 795 int order, gfp_t gfp_mask,
611 bool sync) 796 enum compact_mode mode,
797 unsigned long *nr_pageblocks_skipped)
612{ 798{
613 struct compact_control cc = { 799 struct compact_control cc = {
614 .nr_freepages = 0, 800 .nr_freepages = 0,
@@ -616,12 +802,17 @@ static unsigned long compact_zone_order(struct zone *zone,
616 .order = order, 802 .order = order,
617 .migratetype = allocflags_to_migratetype(gfp_mask), 803 .migratetype = allocflags_to_migratetype(gfp_mask),
618 .zone = zone, 804 .zone = zone,
619 .sync = sync, 805 .mode = mode,
620 }; 806 };
807 unsigned long rc;
808
621 INIT_LIST_HEAD(&cc.freepages); 809 INIT_LIST_HEAD(&cc.freepages);
622 INIT_LIST_HEAD(&cc.migratepages); 810 INIT_LIST_HEAD(&cc.migratepages);
623 811
624 return compact_zone(zone, &cc); 812 rc = compact_zone(zone, &cc);
813 *nr_pageblocks_skipped = cc.nr_pageblocks_skipped;
814
815 return rc;
625} 816}
626 817
627int sysctl_extfrag_threshold = 500; 818int sysctl_extfrag_threshold = 500;
@@ -646,6 +837,8 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
646 struct zoneref *z; 837 struct zoneref *z;
647 struct zone *zone; 838 struct zone *zone;
648 int rc = COMPACT_SKIPPED; 839 int rc = COMPACT_SKIPPED;
840 unsigned long nr_pageblocks_skipped;
841 enum compact_mode mode;
649 842
650 /* 843 /*
651 * Check whether it is worth even starting compaction. The order check is 844 * Check whether it is worth even starting compaction. The order check is
@@ -662,12 +855,22 @@ unsigned long try_to_compact_pages(struct zonelist *zonelist,
662 nodemask) { 855 nodemask) {
663 int status; 856 int status;
664 857
665 status = compact_zone_order(zone, order, gfp_mask, sync); 858 mode = sync ? COMPACT_SYNC : COMPACT_ASYNC_MOVABLE;
859retry:
860 status = compact_zone_order(zone, order, gfp_mask, mode,
861 &nr_pageblocks_skipped);
666 rc = max(status, rc); 862 rc = max(status, rc);
667 863
668 /* If a normal allocation would succeed, stop compacting */ 864 /* If a normal allocation would succeed, stop compacting */
669 if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0)) 865 if (zone_watermark_ok(zone, order, low_wmark_pages(zone), 0, 0))
670 break; 866 break;
867
868 if (rc == COMPACT_COMPLETE && mode == COMPACT_ASYNC_MOVABLE) {
869 if (nr_pageblocks_skipped) {
870 mode = COMPACT_ASYNC_UNMOVABLE;
871 goto retry;
872 }
873 }
671 } 874 }
672 875
673 return rc; 876 return rc;
@@ -701,7 +904,7 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
701 if (ok && cc->order > zone->compact_order_failed) 904 if (ok && cc->order > zone->compact_order_failed)
702 zone->compact_order_failed = cc->order + 1; 905 zone->compact_order_failed = cc->order + 1;
703 /* Currently async compaction is never deferred. */ 906 /* Currently async compaction is never deferred. */
704 else if (!ok && cc->sync) 907 else if (!ok && cc->mode == COMPACT_SYNC)
705 defer_compaction(zone, cc->order); 908 defer_compaction(zone, cc->order);
706 } 909 }
707 910
@@ -716,7 +919,7 @@ int compact_pgdat(pg_data_t *pgdat, int order)
716{ 919{
717 struct compact_control cc = { 920 struct compact_control cc = {
718 .order = order, 921 .order = order,
719 .sync = false, 922 .mode = COMPACT_ASYNC_MOVABLE,
720 }; 923 };
721 924
722 return __compact_pgdat(pgdat, &cc); 925 return __compact_pgdat(pgdat, &cc);
@@ -726,7 +929,7 @@ static int compact_node(int nid)
726{ 929{
727 struct compact_control cc = { 930 struct compact_control cc = {
728 .order = -1, 931 .order = -1,
729 .sync = true, 932 .mode = COMPACT_SYNC,
730 }; 933 };
731 934
732 return __compact_pgdat(NODE_DATA(nid), &cc); 935 return __compact_pgdat(NODE_DATA(nid), &cc);
@@ -795,3 +998,5 @@ void compaction_unregister_node(struct node *node)
795 return device_remove_file(&node->dev, &dev_attr_compact); 998 return device_remove_file(&node->dev, &dev_attr_compact);
796} 999}
797#endif /* CONFIG_SYSFS && CONFIG_NUMA */ 1000#endif /* CONFIG_SYSFS && CONFIG_NUMA */
1001
1002#endif /* CONFIG_COMPACTION */
diff --git a/mm/filemap.c b/mm/filemap.c
index 79c4b2b0b14e..64b48f934b89 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -29,7 +29,6 @@
29#include <linux/pagevec.h> 29#include <linux/pagevec.h>
30#include <linux/blkdev.h> 30#include <linux/blkdev.h>
31#include <linux/security.h> 31#include <linux/security.h>
32#include <linux/syscalls.h>
33#include <linux/cpuset.h> 32#include <linux/cpuset.h>
34#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */ 33#include <linux/hardirq.h> /* for BUG_ON(!in_atomic()) only */
35#include <linux/memcontrol.h> 34#include <linux/memcontrol.h>
@@ -1478,44 +1477,6 @@ out:
1478} 1477}
1479EXPORT_SYMBOL(generic_file_aio_read); 1478EXPORT_SYMBOL(generic_file_aio_read);
1480 1479
1481static ssize_t
1482do_readahead(struct address_space *mapping, struct file *filp,
1483 pgoff_t index, unsigned long nr)
1484{
1485 if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
1486 return -EINVAL;
1487
1488 force_page_cache_readahead(mapping, filp, index, nr);
1489 return 0;
1490}
1491
1492SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
1493{
1494 ssize_t ret;
1495 struct file *file;
1496
1497 ret = -EBADF;
1498 file = fget(fd);
1499 if (file) {
1500 if (file->f_mode & FMODE_READ) {
1501 struct address_space *mapping = file->f_mapping;
1502 pgoff_t start = offset >> PAGE_CACHE_SHIFT;
1503 pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
1504 unsigned long len = end - start + 1;
1505 ret = do_readahead(mapping, file, start, len);
1506 }
1507 fput(file);
1508 }
1509 return ret;
1510}
1511#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
1512asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
1513{
1514 return SYSC_readahead((int) fd, offset, (size_t) count);
1515}
1516SYSCALL_ALIAS(sys_readahead, SyS_readahead);
1517#endif
1518
1519#ifdef CONFIG_MMU 1480#ifdef CONFIG_MMU
1520/** 1481/**
1521 * page_cache_read - adds requested page to the page cache if not already there 1482 * page_cache_read - adds requested page to the page cache if not already there
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f0e5306eeb55..57c4b9309015 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -636,16 +636,12 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
636 unsigned long haddr, pmd_t *pmd, 636 unsigned long haddr, pmd_t *pmd,
637 struct page *page) 637 struct page *page)
638{ 638{
639 int ret = 0;
640 pgtable_t pgtable; 639 pgtable_t pgtable;
641 640
642 VM_BUG_ON(!PageCompound(page)); 641 VM_BUG_ON(!PageCompound(page));
643 pgtable = pte_alloc_one(mm, haddr); 642 pgtable = pte_alloc_one(mm, haddr);
644 if (unlikely(!pgtable)) { 643 if (unlikely(!pgtable))
645 mem_cgroup_uncharge_page(page);
646 put_page(page);
647 return VM_FAULT_OOM; 644 return VM_FAULT_OOM;
648 }
649 645
650 clear_huge_page(page, haddr, HPAGE_PMD_NR); 646 clear_huge_page(page, haddr, HPAGE_PMD_NR);
651 __SetPageUptodate(page); 647 __SetPageUptodate(page);
@@ -675,7 +671,7 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
675 spin_unlock(&mm->page_table_lock); 671 spin_unlock(&mm->page_table_lock);
676 } 672 }
677 673
678 return ret; 674 return 0;
679} 675}
680 676
681static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp) 677static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
@@ -724,8 +720,14 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
724 put_page(page); 720 put_page(page);
725 goto out; 721 goto out;
726 } 722 }
723 if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd,
724 page))) {
725 mem_cgroup_uncharge_page(page);
726 put_page(page);
727 goto out;
728 }
727 729
728 return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page); 730 return 0;
729 } 731 }
730out: 732out:
731 /* 733 /*
@@ -950,6 +952,8 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
950 count_vm_event(THP_FAULT_FALLBACK); 952 count_vm_event(THP_FAULT_FALLBACK);
951 ret = do_huge_pmd_wp_page_fallback(mm, vma, address, 953 ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
952 pmd, orig_pmd, page, haddr); 954 pmd, orig_pmd, page, haddr);
955 if (ret & VM_FAULT_OOM)
956 split_huge_page(page);
953 put_page(page); 957 put_page(page);
954 goto out; 958 goto out;
955 } 959 }
@@ -957,6 +961,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
957 961
958 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) { 962 if (unlikely(mem_cgroup_newpage_charge(new_page, mm, GFP_KERNEL))) {
959 put_page(new_page); 963 put_page(new_page);
964 split_huge_page(page);
960 put_page(page); 965 put_page(page);
961 ret |= VM_FAULT_OOM; 966 ret |= VM_FAULT_OOM;
962 goto out; 967 goto out;
@@ -968,8 +973,10 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
968 spin_lock(&mm->page_table_lock); 973 spin_lock(&mm->page_table_lock);
969 put_page(page); 974 put_page(page);
970 if (unlikely(!pmd_same(*pmd, orig_pmd))) { 975 if (unlikely(!pmd_same(*pmd, orig_pmd))) {
976 spin_unlock(&mm->page_table_lock);
971 mem_cgroup_uncharge_page(new_page); 977 mem_cgroup_uncharge_page(new_page);
972 put_page(new_page); 978 put_page(new_page);
979 goto out;
973 } else { 980 } else {
974 pmd_t entry; 981 pmd_t entry;
975 VM_BUG_ON(!PageHead(page)); 982 VM_BUG_ON(!PageHead(page));
@@ -1224,10 +1231,13 @@ static void __split_huge_page_refcount(struct page *page)
1224{ 1231{
1225 int i; 1232 int i;
1226 struct zone *zone = page_zone(page); 1233 struct zone *zone = page_zone(page);
1234 struct lruvec *lruvec;
1227 int tail_count = 0; 1235 int tail_count = 0;
1228 1236
1229 /* prevent PageLRU to go away from under us, and freeze lru stats */ 1237 /* prevent PageLRU to go away from under us, and freeze lru stats */
1230 spin_lock_irq(&zone->lru_lock); 1238 spin_lock_irq(&zone->lru_lock);
1239 lruvec = mem_cgroup_page_lruvec(page, zone);
1240
1231 compound_lock(page); 1241 compound_lock(page);
1232 /* complete memcg works before add pages to LRU */ 1242 /* complete memcg works before add pages to LRU */
1233 mem_cgroup_split_huge_fixup(page); 1243 mem_cgroup_split_huge_fixup(page);
@@ -1302,13 +1312,12 @@ static void __split_huge_page_refcount(struct page *page)
1302 BUG_ON(!PageDirty(page_tail)); 1312 BUG_ON(!PageDirty(page_tail));
1303 BUG_ON(!PageSwapBacked(page_tail)); 1313 BUG_ON(!PageSwapBacked(page_tail));
1304 1314
1305 1315 lru_add_page_tail(page, page_tail, lruvec);
1306 lru_add_page_tail(zone, page, page_tail);
1307 } 1316 }
1308 atomic_sub(tail_count, &page->_count); 1317 atomic_sub(tail_count, &page->_count);
1309 BUG_ON(atomic_read(&page->_count) <= 0); 1318 BUG_ON(atomic_read(&page->_count) <= 0);
1310 1319
1311 __dec_zone_page_state(page, NR_ANON_TRANSPARENT_HUGEPAGES); 1320 __mod_zone_page_state(zone, NR_ANON_TRANSPARENT_HUGEPAGES, -1);
1312 __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR); 1321 __mod_zone_page_state(zone, NR_ANON_PAGES, HPAGE_PMD_NR);
1313 1322
1314 ClearPageCompound(page); 1323 ClearPageCompound(page);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index ae8f708e3d75..285a81e87ec8 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -273,8 +273,8 @@ static long region_count(struct list_head *head, long f, long t)
273 273
274 /* Locate each segment we overlap with, and count that overlap. */ 274 /* Locate each segment we overlap with, and count that overlap. */
275 list_for_each_entry(rg, head, link) { 275 list_for_each_entry(rg, head, link) {
276 int seg_from; 276 long seg_from;
277 int seg_to; 277 long seg_to;
278 278
279 if (rg->to <= f) 279 if (rg->to <= f)
280 continue; 280 continue;
@@ -2157,6 +2157,15 @@ static void hugetlb_vm_op_open(struct vm_area_struct *vma)
2157 kref_get(&reservations->refs); 2157 kref_get(&reservations->refs);
2158} 2158}
2159 2159
2160static void resv_map_put(struct vm_area_struct *vma)
2161{
2162 struct resv_map *reservations = vma_resv_map(vma);
2163
2164 if (!reservations)
2165 return;
2166 kref_put(&reservations->refs, resv_map_release);
2167}
2168
2160static void hugetlb_vm_op_close(struct vm_area_struct *vma) 2169static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2161{ 2170{
2162 struct hstate *h = hstate_vma(vma); 2171 struct hstate *h = hstate_vma(vma);
@@ -2173,7 +2182,7 @@ static void hugetlb_vm_op_close(struct vm_area_struct *vma)
2173 reserve = (end - start) - 2182 reserve = (end - start) -
2174 region_count(&reservations->regions, start, end); 2183 region_count(&reservations->regions, start, end);
2175 2184
2176 kref_put(&reservations->refs, resv_map_release); 2185 resv_map_put(vma);
2177 2186
2178 if (reserve) { 2187 if (reserve) {
2179 hugetlb_acct_memory(h, -reserve); 2188 hugetlb_acct_memory(h, -reserve);
@@ -2213,6 +2222,7 @@ static pte_t make_huge_pte(struct vm_area_struct *vma, struct page *page,
2213 } 2222 }
2214 entry = pte_mkyoung(entry); 2223 entry = pte_mkyoung(entry);
2215 entry = pte_mkhuge(entry); 2224 entry = pte_mkhuge(entry);
2225 entry = arch_make_huge_pte(entry, vma, page, writable);
2216 2226
2217 return entry; 2227 return entry;
2218} 2228}
@@ -2990,12 +3000,16 @@ int hugetlb_reserve_pages(struct inode *inode,
2990 set_vma_resv_flags(vma, HPAGE_RESV_OWNER); 3000 set_vma_resv_flags(vma, HPAGE_RESV_OWNER);
2991 } 3001 }
2992 3002
2993 if (chg < 0) 3003 if (chg < 0) {
2994 return chg; 3004 ret = chg;
3005 goto out_err;
3006 }
2995 3007
2996 /* There must be enough pages in the subpool for the mapping */ 3008 /* There must be enough pages in the subpool for the mapping */
2997 if (hugepage_subpool_get_pages(spool, chg)) 3009 if (hugepage_subpool_get_pages(spool, chg)) {
2998 return -ENOSPC; 3010 ret = -ENOSPC;
3011 goto out_err;
3012 }
2999 3013
3000 /* 3014 /*
3001 * Check enough hugepages are available for the reservation. 3015 * Check enough hugepages are available for the reservation.
@@ -3004,7 +3018,7 @@ int hugetlb_reserve_pages(struct inode *inode,
3004 ret = hugetlb_acct_memory(h, chg); 3018 ret = hugetlb_acct_memory(h, chg);
3005 if (ret < 0) { 3019 if (ret < 0) {
3006 hugepage_subpool_put_pages(spool, chg); 3020 hugepage_subpool_put_pages(spool, chg);
3007 return ret; 3021 goto out_err;
3008 } 3022 }
3009 3023
3010 /* 3024 /*
@@ -3021,6 +3035,9 @@ int hugetlb_reserve_pages(struct inode *inode,
3021 if (!vma || vma->vm_flags & VM_MAYSHARE) 3035 if (!vma || vma->vm_flags & VM_MAYSHARE)
3022 region_add(&inode->i_mapping->private_list, from, to); 3036 region_add(&inode->i_mapping->private_list, from, to);
3023 return 0; 3037 return 0;
3038out_err:
3039 resv_map_put(vma);
3040 return ret;
3024} 3041}
3025 3042
3026void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed) 3043void hugetlb_unreserve_pages(struct inode *inode, long offset, long freed)
diff --git a/mm/internal.h b/mm/internal.h
index 2189af491783..4194ab9dc19b 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -94,12 +94,52 @@ extern void putback_lru_page(struct page *page);
94/* 94/*
95 * in mm/page_alloc.c 95 * in mm/page_alloc.c
96 */ 96 */
97extern void set_pageblock_migratetype(struct page *page, int migratetype);
98extern int move_freepages_block(struct zone *zone, struct page *page,
99 int migratetype);
97extern void __free_pages_bootmem(struct page *page, unsigned int order); 100extern void __free_pages_bootmem(struct page *page, unsigned int order);
98extern void prep_compound_page(struct page *page, unsigned long order); 101extern void prep_compound_page(struct page *page, unsigned long order);
99#ifdef CONFIG_MEMORY_FAILURE 102#ifdef CONFIG_MEMORY_FAILURE
100extern bool is_free_buddy_page(struct page *page); 103extern bool is_free_buddy_page(struct page *page);
101#endif 104#endif
102 105
106#if defined CONFIG_COMPACTION || defined CONFIG_CMA
107#include <linux/compaction.h>
108
109/*
110 * in mm/compaction.c
111 */
112/*
113 * compact_control is used to track pages being migrated and the free pages
114 * they are being migrated to during memory compaction. The free_pfn starts
115 * at the end of a zone and migrate_pfn begins at the start. Movable pages
116 * are moved to the end of a zone during a compaction run and the run
117 * completes when free_pfn <= migrate_pfn
118 */
119struct compact_control {
120 struct list_head freepages; /* List of free pages to migrate to */
121 struct list_head migratepages; /* List of pages being migrated */
122 unsigned long nr_freepages; /* Number of isolated free pages */
123 unsigned long nr_migratepages; /* Number of pages to migrate */
124 unsigned long free_pfn; /* isolate_freepages search base */
125 unsigned long migrate_pfn; /* isolate_migratepages search base */
126 enum compact_mode mode; /* Compaction mode */
127
128 int order; /* order a direct compactor needs */
129 int migratetype; /* MOVABLE, RECLAIMABLE etc */
130 struct zone *zone;
131
132 /* Number of UNMOVABLE destination pageblocks skipped during scan */
133 unsigned long nr_pageblocks_skipped;
134};
135
136unsigned long
137isolate_freepages_range(unsigned long start_pfn, unsigned long end_pfn);
138unsigned long
139isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
140 unsigned long low_pfn, unsigned long end_pfn);
141
142#endif
103 143
104/* 144/*
105 * function for dealing with page's order in buddy system. 145 * function for dealing with page's order in buddy system.
@@ -131,7 +171,8 @@ static inline void munlock_vma_pages_all(struct vm_area_struct *vma)
131 * to determine if it's being mapped into a LOCKED vma. 171 * to determine if it's being mapped into a LOCKED vma.
132 * If so, mark page as mlocked. 172 * If so, mark page as mlocked.
133 */ 173 */
134static inline int is_mlocked_vma(struct vm_area_struct *vma, struct page *page) 174static inline int mlocked_vma_newpage(struct vm_area_struct *vma,
175 struct page *page)
135{ 176{
136 VM_BUG_ON(PageLRU(page)); 177 VM_BUG_ON(PageLRU(page));
137 178
@@ -189,7 +230,7 @@ extern unsigned long vma_address(struct page *page,
189 struct vm_area_struct *vma); 230 struct vm_area_struct *vma);
190#endif 231#endif
191#else /* !CONFIG_MMU */ 232#else /* !CONFIG_MMU */
192static inline int is_mlocked_vma(struct vm_area_struct *v, struct page *p) 233static inline int mlocked_vma_newpage(struct vm_area_struct *v, struct page *p)
193{ 234{
194 return 0; 235 return 0;
195} 236}
diff --git a/mm/madvise.c b/mm/madvise.c
index 1ccbba5b6674..deff1b64a08c 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -11,8 +11,10 @@
11#include <linux/mempolicy.h> 11#include <linux/mempolicy.h>
12#include <linux/page-isolation.h> 12#include <linux/page-isolation.h>
13#include <linux/hugetlb.h> 13#include <linux/hugetlb.h>
14#include <linux/falloc.h>
14#include <linux/sched.h> 15#include <linux/sched.h>
15#include <linux/ksm.h> 16#include <linux/ksm.h>
17#include <linux/fs.h>
16 18
17/* 19/*
18 * Any behaviour which results in changes to the vma->vm_flags needs to 20 * Any behaviour which results in changes to the vma->vm_flags needs to
@@ -200,8 +202,7 @@ static long madvise_remove(struct vm_area_struct *vma,
200 struct vm_area_struct **prev, 202 struct vm_area_struct **prev,
201 unsigned long start, unsigned long end) 203 unsigned long start, unsigned long end)
202{ 204{
203 struct address_space *mapping; 205 loff_t offset;
204 loff_t offset, endoff;
205 int error; 206 int error;
206 207
207 *prev = NULL; /* tell sys_madvise we drop mmap_sem */ 208 *prev = NULL; /* tell sys_madvise we drop mmap_sem */
@@ -217,16 +218,14 @@ static long madvise_remove(struct vm_area_struct *vma,
217 if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE)) 218 if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
218 return -EACCES; 219 return -EACCES;
219 220
220 mapping = vma->vm_file->f_mapping;
221
222 offset = (loff_t)(start - vma->vm_start) 221 offset = (loff_t)(start - vma->vm_start)
223 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT); 222 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
224 endoff = (loff_t)(end - vma->vm_start - 1)
225 + ((loff_t)vma->vm_pgoff << PAGE_SHIFT);
226 223
227 /* vmtruncate_range needs to take i_mutex */ 224 /* filesystem's fallocate may need to take i_mutex */
228 up_read(&current->mm->mmap_sem); 225 up_read(&current->mm->mmap_sem);
229 error = vmtruncate_range(mapping->host, offset, endoff); 226 error = do_fallocate(vma->vm_file,
227 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
228 offset, end - start);
230 down_read(&current->mm->mmap_sem); 229 down_read(&current->mm->mmap_sem);
231 return error; 230 return error;
232} 231}
diff --git a/mm/memblock.c b/mm/memblock.c
index a44eab3157f8..952123eba433 100644
--- a/mm/memblock.c
+++ b/mm/memblock.c
@@ -37,6 +37,8 @@ struct memblock memblock __initdata_memblock = {
37 37
38int memblock_debug __initdata_memblock; 38int memblock_debug __initdata_memblock;
39static int memblock_can_resize __initdata_memblock; 39static int memblock_can_resize __initdata_memblock;
40static int memblock_memory_in_slab __initdata_memblock = 0;
41static int memblock_reserved_in_slab __initdata_memblock = 0;
40 42
41/* inline so we don't get a warning when pr_debug is compiled out */ 43/* inline so we don't get a warning when pr_debug is compiled out */
42static inline const char *memblock_type_name(struct memblock_type *type) 44static inline const char *memblock_type_name(struct memblock_type *type)
@@ -187,6 +189,7 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
187 struct memblock_region *new_array, *old_array; 189 struct memblock_region *new_array, *old_array;
188 phys_addr_t old_size, new_size, addr; 190 phys_addr_t old_size, new_size, addr;
189 int use_slab = slab_is_available(); 191 int use_slab = slab_is_available();
192 int *in_slab;
190 193
191 /* We don't allow resizing until we know about the reserved regions 194 /* We don't allow resizing until we know about the reserved regions
192 * of memory that aren't suitable for allocation 195 * of memory that aren't suitable for allocation
@@ -198,6 +201,12 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
198 old_size = type->max * sizeof(struct memblock_region); 201 old_size = type->max * sizeof(struct memblock_region);
199 new_size = old_size << 1; 202 new_size = old_size << 1;
200 203
204 /* Retrieve the slab flag */
205 if (type == &memblock.memory)
206 in_slab = &memblock_memory_in_slab;
207 else
208 in_slab = &memblock_reserved_in_slab;
209
201 /* Try to find some space for it. 210 /* Try to find some space for it.
202 * 211 *
203 * WARNING: We assume that either slab_is_available() and we use it or 212 * WARNING: We assume that either slab_is_available() and we use it or
@@ -212,14 +221,15 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
212 if (use_slab) { 221 if (use_slab) {
213 new_array = kmalloc(new_size, GFP_KERNEL); 222 new_array = kmalloc(new_size, GFP_KERNEL);
214 addr = new_array ? __pa(new_array) : 0; 223 addr = new_array ? __pa(new_array) : 0;
215 } else 224 } else {
216 addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t)); 225 addr = memblock_find_in_range(0, MEMBLOCK_ALLOC_ACCESSIBLE, new_size, sizeof(phys_addr_t));
226 new_array = addr ? __va(addr) : 0;
227 }
217 if (!addr) { 228 if (!addr) {
218 pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n", 229 pr_err("memblock: Failed to double %s array from %ld to %ld entries !\n",
219 memblock_type_name(type), type->max, type->max * 2); 230 memblock_type_name(type), type->max, type->max * 2);
220 return -1; 231 return -1;
221 } 232 }
222 new_array = __va(addr);
223 233
224 memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]", 234 memblock_dbg("memblock: %s array is doubled to %ld at [%#010llx-%#010llx]",
225 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1); 235 memblock_type_name(type), type->max * 2, (u64)addr, (u64)addr + new_size - 1);
@@ -234,22 +244,24 @@ static int __init_memblock memblock_double_array(struct memblock_type *type)
234 type->regions = new_array; 244 type->regions = new_array;
235 type->max <<= 1; 245 type->max <<= 1;
236 246
237 /* If we use SLAB that's it, we are done */ 247 /* Free old array. We needn't free it if the array is the
238 if (use_slab) 248 * static one
239 return 0;
240
241 /* Add the new reserved region now. Should not fail ! */
242 BUG_ON(memblock_reserve(addr, new_size));
243
244 /* If the array wasn't our static init one, then free it. We only do
245 * that before SLAB is available as later on, we don't know whether
246 * to use kfree or free_bootmem_pages(). Shouldn't be a big deal
247 * anyways
248 */ 249 */
249 if (old_array != memblock_memory_init_regions && 250 if (*in_slab)
250 old_array != memblock_reserved_init_regions) 251 kfree(old_array);
252 else if (old_array != memblock_memory_init_regions &&
253 old_array != memblock_reserved_init_regions)
251 memblock_free(__pa(old_array), old_size); 254 memblock_free(__pa(old_array), old_size);
252 255
256 /* Reserve the new array if that comes from the memblock.
257 * Otherwise, we needn't do it
258 */
259 if (!use_slab)
260 BUG_ON(memblock_reserve(addr, new_size));
261
262 /* Update slab flag */
263 *in_slab = use_slab;
264
253 return 0; 265 return 0;
254} 266}
255 267
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index f342778a0c0a..ac35bccadb7b 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -59,7 +59,7 @@
59 59
60struct cgroup_subsys mem_cgroup_subsys __read_mostly; 60struct cgroup_subsys mem_cgroup_subsys __read_mostly;
61#define MEM_CGROUP_RECLAIM_RETRIES 5 61#define MEM_CGROUP_RECLAIM_RETRIES 5
62struct mem_cgroup *root_mem_cgroup __read_mostly; 62static struct mem_cgroup *root_mem_cgroup __read_mostly;
63 63
64#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 64#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
65/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */ 65/* Turned on only when memory cgroup is enabled && really_do_swap_account = 1 */
@@ -73,7 +73,7 @@ static int really_do_swap_account __initdata = 0;
73#endif 73#endif
74 74
75#else 75#else
76#define do_swap_account (0) 76#define do_swap_account 0
77#endif 77#endif
78 78
79 79
@@ -88,18 +88,31 @@ enum mem_cgroup_stat_index {
88 MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */ 88 MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
89 MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */ 89 MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
90 MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */ 90 MEM_CGROUP_STAT_SWAPOUT, /* # of pages, swapped out */
91 MEM_CGROUP_STAT_DATA, /* end of data requires synchronization */
92 MEM_CGROUP_STAT_NSTATS, 91 MEM_CGROUP_STAT_NSTATS,
93}; 92};
94 93
94static const char * const mem_cgroup_stat_names[] = {
95 "cache",
96 "rss",
97 "mapped_file",
98 "swap",
99};
100
95enum mem_cgroup_events_index { 101enum mem_cgroup_events_index {
96 MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */ 102 MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */
97 MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */ 103 MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */
98 MEM_CGROUP_EVENTS_COUNT, /* # of pages paged in/out */
99 MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */ 104 MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */
100 MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */ 105 MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */
101 MEM_CGROUP_EVENTS_NSTATS, 106 MEM_CGROUP_EVENTS_NSTATS,
102}; 107};
108
109static const char * const mem_cgroup_events_names[] = {
110 "pgpgin",
111 "pgpgout",
112 "pgfault",
113 "pgmajfault",
114};
115
103/* 116/*
104 * Per memcg event counter is incremented at every pagein/pageout. With THP, 117 * Per memcg event counter is incremented at every pagein/pageout. With THP,
105 * it will be incremented by the number of pages. This counter is used for 118
@@ -112,13 +125,14 @@ enum mem_cgroup_events_target {
112 MEM_CGROUP_TARGET_NUMAINFO, 125 MEM_CGROUP_TARGET_NUMAINFO,
113 MEM_CGROUP_NTARGETS, 126 MEM_CGROUP_NTARGETS,
114}; 127};
115#define THRESHOLDS_EVENTS_TARGET (128) 128#define THRESHOLDS_EVENTS_TARGET 128
116#define SOFTLIMIT_EVENTS_TARGET (1024) 129#define SOFTLIMIT_EVENTS_TARGET 1024
117#define NUMAINFO_EVENTS_TARGET (1024) 130#define NUMAINFO_EVENTS_TARGET 1024
118 131
119struct mem_cgroup_stat_cpu { 132struct mem_cgroup_stat_cpu {
120 long count[MEM_CGROUP_STAT_NSTATS]; 133 long count[MEM_CGROUP_STAT_NSTATS];
121 unsigned long events[MEM_CGROUP_EVENTS_NSTATS]; 134 unsigned long events[MEM_CGROUP_EVENTS_NSTATS];
135 unsigned long nr_page_events;
122 unsigned long targets[MEM_CGROUP_NTARGETS]; 136 unsigned long targets[MEM_CGROUP_NTARGETS];
123}; 137};
124 138
@@ -138,7 +152,6 @@ struct mem_cgroup_per_zone {
138 152
139 struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1]; 153 struct mem_cgroup_reclaim_iter reclaim_iter[DEF_PRIORITY + 1];
140 154
141 struct zone_reclaim_stat reclaim_stat;
142 struct rb_node tree_node; /* RB tree node */ 155 struct rb_node tree_node; /* RB tree node */
143 unsigned long long usage_in_excess;/* Set to the value by which */ 156 unsigned long long usage_in_excess;/* Set to the value by which */
144 /* the soft limit is exceeded*/ 157 /* the soft limit is exceeded*/
@@ -182,7 +195,7 @@ struct mem_cgroup_threshold {
182 195
183/* For threshold */ 196/* For threshold */
184struct mem_cgroup_threshold_ary { 197struct mem_cgroup_threshold_ary {
185 /* An array index points to threshold just below usage. */ 198 /* An array index points to threshold just below or equal to usage. */
186 int current_threshold; 199 int current_threshold;
187 /* Size of entries[] */ 200 /* Size of entries[] */
188 unsigned int size; 201 unsigned int size;
@@ -245,8 +258,8 @@ struct mem_cgroup {
245 */ 258 */
246 struct rcu_head rcu_freeing; 259 struct rcu_head rcu_freeing;
247 /* 260 /*
248 * But when using vfree(), that cannot be done at 261 * We also need some space for a worker in deferred freeing.
249 * interrupt time, so we must then queue the work. 262 * By the time we call it, rcu_freeing is no longer in use.
250 */ 263 */
251 struct work_struct work_freeing; 264 struct work_struct work_freeing;
252 }; 265 };
@@ -305,7 +318,7 @@ struct mem_cgroup {
305 /* 318 /*
306 * percpu counter. 319 * percpu counter.
307 */ 320 */
308 struct mem_cgroup_stat_cpu *stat; 321 struct mem_cgroup_stat_cpu __percpu *stat;
309 /* 322 /*
310 * used when a cpu is offlined or other synchronizations 323 * used when a cpu is offlined or other synchronizations
311 * See mem_cgroup_read_stat(). 324 * See mem_cgroup_read_stat().
@@ -360,8 +373,8 @@ static bool move_file(void)
360 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft 373 * Maximum loops in mem_cgroup_hierarchical_reclaim(), used for soft
361 * limit reclaim to prevent infinite loops, if they ever occur. 374 * limit reclaim to prevent infinite loops, if they ever occur.
362 */ 375 */
363#define MEM_CGROUP_MAX_RECLAIM_LOOPS (100) 376#define MEM_CGROUP_MAX_RECLAIM_LOOPS 100
364#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS (2) 377#define MEM_CGROUP_MAX_SOFT_LIMIT_RECLAIM_LOOPS 2
365 378
366enum charge_type { 379enum charge_type {
367 MEM_CGROUP_CHARGE_TYPE_CACHE = 0, 380 MEM_CGROUP_CHARGE_TYPE_CACHE = 0,
@@ -377,8 +390,8 @@ enum charge_type {
377#define _MEM (0) 390#define _MEM (0)
378#define _MEMSWAP (1) 391#define _MEMSWAP (1)
379#define _OOM_TYPE (2) 392#define _OOM_TYPE (2)
380#define MEMFILE_PRIVATE(x, val) (((x) << 16) | (val)) 393#define MEMFILE_PRIVATE(x, val) ((x) << 16 | (val))
381#define MEMFILE_TYPE(val) (((val) >> 16) & 0xffff) 394#define MEMFILE_TYPE(val) ((val) >> 16 & 0xffff)
382#define MEMFILE_ATTR(val) ((val) & 0xffff) 395#define MEMFILE_ATTR(val) ((val) & 0xffff)
383/* Used for OOM nofiier */ 396/* Used for OOM nofiier */
384#define OOM_CONTROL (0) 397#define OOM_CONTROL (0)
@@ -404,6 +417,7 @@ void sock_update_memcg(struct sock *sk)
404{ 417{
405 if (mem_cgroup_sockets_enabled) { 418 if (mem_cgroup_sockets_enabled) {
406 struct mem_cgroup *memcg; 419 struct mem_cgroup *memcg;
420 struct cg_proto *cg_proto;
407 421
408 BUG_ON(!sk->sk_prot->proto_cgroup); 422 BUG_ON(!sk->sk_prot->proto_cgroup);
409 423
@@ -423,9 +437,10 @@ void sock_update_memcg(struct sock *sk)
423 437
424 rcu_read_lock(); 438 rcu_read_lock();
425 memcg = mem_cgroup_from_task(current); 439 memcg = mem_cgroup_from_task(current);
426 if (!mem_cgroup_is_root(memcg)) { 440 cg_proto = sk->sk_prot->proto_cgroup(memcg);
441 if (!mem_cgroup_is_root(memcg) && memcg_proto_active(cg_proto)) {
427 mem_cgroup_get(memcg); 442 mem_cgroup_get(memcg);
428 sk->sk_cgrp = sk->sk_prot->proto_cgroup(memcg); 443 sk->sk_cgrp = cg_proto;
429 } 444 }
430 rcu_read_unlock(); 445 rcu_read_unlock();
431 } 446 }
@@ -454,6 +469,19 @@ EXPORT_SYMBOL(tcp_proto_cgroup);
454#endif /* CONFIG_INET */ 469#endif /* CONFIG_INET */
455#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */ 470#endif /* CONFIG_CGROUP_MEM_RES_CTLR_KMEM */
456 471
472#if defined(CONFIG_INET) && defined(CONFIG_CGROUP_MEM_RES_CTLR_KMEM)
473static void disarm_sock_keys(struct mem_cgroup *memcg)
474{
475 if (!memcg_proto_activated(&memcg->tcp_mem.cg_proto))
476 return;
477 static_key_slow_dec(&memcg_socket_limit_enabled);
478}
479#else
480static void disarm_sock_keys(struct mem_cgroup *memcg)
481{
482}
483#endif
484
457static void drain_all_stock_async(struct mem_cgroup *memcg); 485static void drain_all_stock_async(struct mem_cgroup *memcg);
458 486
459static struct mem_cgroup_per_zone * 487static struct mem_cgroup_per_zone *
@@ -718,12 +746,21 @@ static void mem_cgroup_charge_statistics(struct mem_cgroup *memcg,
718 nr_pages = -nr_pages; /* for event */ 746 nr_pages = -nr_pages; /* for event */
719 } 747 }
720 748
721 __this_cpu_add(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT], nr_pages); 749 __this_cpu_add(memcg->stat->nr_page_events, nr_pages);
722 750
723 preempt_enable(); 751 preempt_enable();
724} 752}
725 753
726unsigned long 754unsigned long
755mem_cgroup_get_lru_size(struct lruvec *lruvec, enum lru_list lru)
756{
757 struct mem_cgroup_per_zone *mz;
758
759 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
760 return mz->lru_size[lru];
761}
762
763static unsigned long
727mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid, 764mem_cgroup_zone_nr_lru_pages(struct mem_cgroup *memcg, int nid, int zid,
728 unsigned int lru_mask) 765 unsigned int lru_mask)
729{ 766{
@@ -770,7 +807,7 @@ static bool mem_cgroup_event_ratelimit(struct mem_cgroup *memcg,
770{ 807{
771 unsigned long val, next; 808 unsigned long val, next;
772 809
773 val = __this_cpu_read(memcg->stat->events[MEM_CGROUP_EVENTS_COUNT]); 810 val = __this_cpu_read(memcg->stat->nr_page_events);
774 next = __this_cpu_read(memcg->stat->targets[target]); 811 next = __this_cpu_read(memcg->stat->targets[target]);
775 /* from time_after() in jiffies.h */ 812 /* from time_after() in jiffies.h */
776 if ((long)next - (long)val < 0) { 813 if ((long)next - (long)val < 0) {
@@ -1013,7 +1050,7 @@ EXPORT_SYMBOL(mem_cgroup_count_vm_event);
1013/** 1050/**
1014 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg 1051 * mem_cgroup_zone_lruvec - get the lru list vector for a zone and memcg
1015 * @zone: zone of the wanted lruvec 1052 * @zone: zone of the wanted lruvec
1016 * @mem: memcg of the wanted lruvec 1053 * @memcg: memcg of the wanted lruvec
1017 * 1054 *
1018 * Returns the lru list vector holding pages for the given @zone and 1055 * Returns the lru list vector holding pages for the given @zone and
1019 * @mem. This can be the global zone lruvec, if the memory controller 1056 * @mem. This can be the global zone lruvec, if the memory controller
@@ -1046,19 +1083,11 @@ struct lruvec *mem_cgroup_zone_lruvec(struct zone *zone,
1046 */ 1083 */
1047 1084
1048/** 1085/**
1049 * mem_cgroup_lru_add_list - account for adding an lru page and return lruvec 1086 * mem_cgroup_page_lruvec - return lruvec for adding an lru page
1050 * @zone: zone of the page
1051 * @page: the page 1087 * @page: the page
1052 * @lru: current lru 1088 * @zone: zone of the page
1053 *
1054 * This function accounts for @page being added to @lru, and returns
1055 * the lruvec for the given @zone and the memcg @page is charged to.
1056 *
1057 * The callsite is then responsible for physically linking the page to
1058 * the returned lruvec->lists[@lru].
1059 */ 1089 */
1060struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page, 1090struct lruvec *mem_cgroup_page_lruvec(struct page *page, struct zone *zone)
1061 enum lru_list lru)
1062{ 1091{
1063 struct mem_cgroup_per_zone *mz; 1092 struct mem_cgroup_per_zone *mz;
1064 struct mem_cgroup *memcg; 1093 struct mem_cgroup *memcg;
@@ -1071,7 +1100,7 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
1071 memcg = pc->mem_cgroup; 1100 memcg = pc->mem_cgroup;
1072 1101
1073 /* 1102 /*
1074 * Surreptitiously switch any uncharged page to root: 1103 * Surreptitiously switch any uncharged offlist page to root:
1075 * an uncharged page off lru does nothing to secure 1104 * an uncharged page off lru does nothing to secure
1076 * its former mem_cgroup from sudden removal. 1105 * its former mem_cgroup from sudden removal.
1077 * 1106 *
@@ -1079,85 +1108,60 @@ struct lruvec *mem_cgroup_lru_add_list(struct zone *zone, struct page *page,
1079 * under page_cgroup lock: between them, they make all uses 1108 * under page_cgroup lock: between them, they make all uses
1080 * of pc->mem_cgroup safe. 1109 * of pc->mem_cgroup safe.
1081 */ 1110 */
1082 if (!PageCgroupUsed(pc) && memcg != root_mem_cgroup) 1111 if (!PageLRU(page) && !PageCgroupUsed(pc) && memcg != root_mem_cgroup)
1083 pc->mem_cgroup = memcg = root_mem_cgroup; 1112 pc->mem_cgroup = memcg = root_mem_cgroup;
1084 1113
1085 mz = page_cgroup_zoneinfo(memcg, page); 1114 mz = page_cgroup_zoneinfo(memcg, page);
1086 /* compound_order() is stabilized through lru_lock */
1087 mz->lru_size[lru] += 1 << compound_order(page);
1088 return &mz->lruvec; 1115 return &mz->lruvec;
1089} 1116}
1090 1117
1091/** 1118/**
1092 * mem_cgroup_lru_del_list - account for removing an lru page 1119 * mem_cgroup_update_lru_size - account for adding or removing an lru page
1093 * @page: the page 1120 * @lruvec: mem_cgroup per zone lru vector
1094 * @lru: target lru 1121 * @lru: index of lru list the page is sitting on
1095 * 1122 * @nr_pages: positive when adding or negative when removing
1096 * This function accounts for @page being removed from @lru.
1097 * 1123 *
1098 * The callsite is then responsible for physically unlinking 1124 * This function must be called when a page is added to or removed from an
1099 * @page->lru. 1125 * lru list.
1100 */ 1126 */
1101void mem_cgroup_lru_del_list(struct page *page, enum lru_list lru) 1127void mem_cgroup_update_lru_size(struct lruvec *lruvec, enum lru_list lru,
1128 int nr_pages)
1102{ 1129{
1103 struct mem_cgroup_per_zone *mz; 1130 struct mem_cgroup_per_zone *mz;
1104 struct mem_cgroup *memcg; 1131 unsigned long *lru_size;
1105 struct page_cgroup *pc;
1106 1132
1107 if (mem_cgroup_disabled()) 1133 if (mem_cgroup_disabled())
1108 return; 1134 return;
1109 1135
1110 pc = lookup_page_cgroup(page); 1136 mz = container_of(lruvec, struct mem_cgroup_per_zone, lruvec);
1111 memcg = pc->mem_cgroup; 1137 lru_size = mz->lru_size + lru;
1112 VM_BUG_ON(!memcg); 1138 *lru_size += nr_pages;
1113 mz = page_cgroup_zoneinfo(memcg, page); 1139 VM_BUG_ON((long)(*lru_size) < 0);
1114 /* huge page split is done under lru_lock. so, we have no races. */
1115 VM_BUG_ON(mz->lru_size[lru] < (1 << compound_order(page)));
1116 mz->lru_size[lru] -= 1 << compound_order(page);
1117}
1118
1119void mem_cgroup_lru_del(struct page *page)
1120{
1121 mem_cgroup_lru_del_list(page, page_lru(page));
1122}
1123
1124/**
1125 * mem_cgroup_lru_move_lists - account for moving a page between lrus
1126 * @zone: zone of the page
1127 * @page: the page
1128 * @from: current lru
1129 * @to: target lru
1130 *
1131 * This function accounts for @page being moved between the lrus @from
1132 * and @to, and returns the lruvec for the given @zone and the memcg
1133 * @page is charged to.
1134 *
1135 * The callsite is then responsible for physically relinking
1136 * @page->lru to the returned lruvec->lists[@to].
1137 */
1138struct lruvec *mem_cgroup_lru_move_lists(struct zone *zone,
1139 struct page *page,
1140 enum lru_list from,
1141 enum lru_list to)
1142{
1143 /* XXX: Optimize this, especially for @from == @to */
1144 mem_cgroup_lru_del_list(page, from);
1145 return mem_cgroup_lru_add_list(zone, page, to);
1146} 1140}
1147 1141
1148/* 1142/*
1149 * Checks whether given mem is same or in the root_mem_cgroup's 1143 * Checks whether given mem is same or in the root_mem_cgroup's
1150 * hierarchy subtree 1144 * hierarchy subtree
1151 */ 1145 */
1146bool __mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1147 struct mem_cgroup *memcg)
1148{
1149 if (root_memcg == memcg)
1150 return true;
1151 if (!root_memcg->use_hierarchy)
1152 return false;
1153 return css_is_ancestor(&memcg->css, &root_memcg->css);
1154}
1155
1152static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg, 1156static bool mem_cgroup_same_or_subtree(const struct mem_cgroup *root_memcg,
1153 struct mem_cgroup *memcg) 1157 struct mem_cgroup *memcg)
1154{ 1158{
1155 if (root_memcg != memcg) { 1159 bool ret;
1156 return (root_memcg->use_hierarchy &&
1157 css_is_ancestor(&memcg->css, &root_memcg->css));
1158 }
1159 1160
1160 return true; 1161 rcu_read_lock();
1162 ret = __mem_cgroup_same_or_subtree(root_memcg, memcg);
1163 rcu_read_unlock();
1164 return ret;
1161} 1165}
1162 1166
1163int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg) 1167int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
@@ -1195,19 +1199,15 @@ int task_in_mem_cgroup(struct task_struct *task, const struct mem_cgroup *memcg)
1195 return ret; 1199 return ret;
1196} 1200}
1197 1201
1198int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone) 1202int mem_cgroup_inactive_anon_is_low(struct lruvec *lruvec)
1199{ 1203{
1200 unsigned long inactive_ratio; 1204 unsigned long inactive_ratio;
1201 int nid = zone_to_nid(zone);
1202 int zid = zone_idx(zone);
1203 unsigned long inactive; 1205 unsigned long inactive;
1204 unsigned long active; 1206 unsigned long active;
1205 unsigned long gb; 1207 unsigned long gb;
1206 1208
1207 inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid, 1209 inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_ANON);
1208 BIT(LRU_INACTIVE_ANON)); 1210 active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_ANON);
1209 active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
1210 BIT(LRU_ACTIVE_ANON));
1211 1211
1212 gb = (inactive + active) >> (30 - PAGE_SHIFT); 1212 gb = (inactive + active) >> (30 - PAGE_SHIFT);
1213 if (gb) 1213 if (gb)
@@ -1218,49 +1218,17 @@ int mem_cgroup_inactive_anon_is_low(struct mem_cgroup *memcg, struct zone *zone)
1218 return inactive * inactive_ratio < active; 1218 return inactive * inactive_ratio < active;
1219} 1219}
1220 1220
1221int mem_cgroup_inactive_file_is_low(struct mem_cgroup *memcg, struct zone *zone) 1221int mem_cgroup_inactive_file_is_low(struct lruvec *lruvec)
1222{ 1222{
1223 unsigned long active; 1223 unsigned long active;
1224 unsigned long inactive; 1224 unsigned long inactive;
1225 int zid = zone_idx(zone);
1226 int nid = zone_to_nid(zone);
1227 1225
1228 inactive = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid, 1226 inactive = mem_cgroup_get_lru_size(lruvec, LRU_INACTIVE_FILE);
1229 BIT(LRU_INACTIVE_FILE)); 1227 active = mem_cgroup_get_lru_size(lruvec, LRU_ACTIVE_FILE);
1230 active = mem_cgroup_zone_nr_lru_pages(memcg, nid, zid,
1231 BIT(LRU_ACTIVE_FILE));
1232 1228
1233 return (active > inactive); 1229 return (active > inactive);
1234} 1230}
1235 1231
1236struct zone_reclaim_stat *mem_cgroup_get_reclaim_stat(struct mem_cgroup *memcg,
1237 struct zone *zone)
1238{
1239 int nid = zone_to_nid(zone);
1240 int zid = zone_idx(zone);
1241 struct mem_cgroup_per_zone *mz = mem_cgroup_zoneinfo(memcg, nid, zid);
1242
1243 return &mz->reclaim_stat;
1244}
1245
1246struct zone_reclaim_stat *
1247mem_cgroup_get_reclaim_stat_from_page(struct page *page)
1248{
1249 struct page_cgroup *pc;
1250 struct mem_cgroup_per_zone *mz;
1251
1252 if (mem_cgroup_disabled())
1253 return NULL;
1254
1255 pc = lookup_page_cgroup(page);
1256 if (!PageCgroupUsed(pc))
1257 return NULL;
1258 /* Ensure pc->mem_cgroup is visible after reading PCG_USED. */
1259 smp_rmb();
1260 mz = page_cgroup_zoneinfo(pc->mem_cgroup, page);
1261 return &mz->reclaim_stat;
1262}
1263
1264#define mem_cgroup_from_res_counter(counter, member) \ 1232#define mem_cgroup_from_res_counter(counter, member) \
1265 container_of(counter, struct mem_cgroup, member) 1233 container_of(counter, struct mem_cgroup, member)
1266 1234
@@ -1634,7 +1602,7 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1634 * unused nodes. But scan_nodes is lazily updated and may not cotain 1602 * unused nodes. But scan_nodes is lazily updated and may not cotain
1635 * enough new information. We need to do double check. 1603 * enough new information. We need to do double check.
1636 */ 1604 */
1637bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap) 1605static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1638{ 1606{
1639 int nid; 1607 int nid;
1640 1608
@@ -1669,7 +1637,7 @@ int mem_cgroup_select_victim_node(struct mem_cgroup *memcg)
1669 return 0; 1637 return 0;
1670} 1638}
1671 1639
1672bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap) 1640static bool mem_cgroup_reclaimable(struct mem_cgroup *memcg, bool noswap)
1673{ 1641{
1674 return test_mem_cgroup_node_reclaimable(memcg, 0, noswap); 1642 return test_mem_cgroup_node_reclaimable(memcg, 0, noswap);
1675} 1643}
@@ -1843,7 +1811,8 @@ static void memcg_oom_recover(struct mem_cgroup *memcg)
1843/* 1811/*
1844 * try to call OOM killer. returns false if we should exit memory-reclaim loop. 1812 * try to call OOM killer. returns false if we should exit memory-reclaim loop.
1845 */ 1813 */
1846bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask, int order) 1814static bool mem_cgroup_handle_oom(struct mem_cgroup *memcg, gfp_t mask,
1815 int order)
1847{ 1816{
1848 struct oom_wait_info owait; 1817 struct oom_wait_info owait;
1849 bool locked, need_to_kill; 1818 bool locked, need_to_kill;
@@ -1992,7 +1961,7 @@ struct memcg_stock_pcp {
1992 unsigned int nr_pages; 1961 unsigned int nr_pages;
1993 struct work_struct work; 1962 struct work_struct work;
1994 unsigned long flags; 1963 unsigned long flags;
1995#define FLUSHING_CACHED_CHARGE (0) 1964#define FLUSHING_CACHED_CHARGE 0
1996}; 1965};
1997static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock); 1966static DEFINE_PER_CPU(struct memcg_stock_pcp, memcg_stock);
1998static DEFINE_MUTEX(percpu_charge_mutex); 1967static DEFINE_MUTEX(percpu_charge_mutex);
@@ -2139,7 +2108,7 @@ static void mem_cgroup_drain_pcp_counter(struct mem_cgroup *memcg, int cpu)
2139 int i; 2108 int i;
2140 2109
2141 spin_lock(&memcg->pcp_counter_lock); 2110 spin_lock(&memcg->pcp_counter_lock);
2142 for (i = 0; i < MEM_CGROUP_STAT_DATA; i++) { 2111 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
2143 long x = per_cpu(memcg->stat->count[i], cpu); 2112 long x = per_cpu(memcg->stat->count[i], cpu);
2144 2113
2145 per_cpu(memcg->stat->count[i], cpu) = 0; 2114 per_cpu(memcg->stat->count[i], cpu) = 0;
@@ -2427,6 +2396,24 @@ static void __mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
2427} 2396}
2428 2397
2429/* 2398/*
2399 * Cancel chrages in this cgroup....doesn't propagate to parent cgroup.
2400 * This is useful when moving usage to parent cgroup.
2401 */
2402static void __mem_cgroup_cancel_local_charge(struct mem_cgroup *memcg,
2403 unsigned int nr_pages)
2404{
2405 unsigned long bytes = nr_pages * PAGE_SIZE;
2406
2407 if (mem_cgroup_is_root(memcg))
2408 return;
2409
2410 res_counter_uncharge_until(&memcg->res, memcg->res.parent, bytes);
2411 if (do_swap_account)
2412 res_counter_uncharge_until(&memcg->memsw,
2413 memcg->memsw.parent, bytes);
2414}
2415
2416/*
2430 * A helper function to get mem_cgroup from ID. must be called under 2417 * A helper function to get mem_cgroup from ID. must be called under
2431 * rcu_read_lock(). The caller must check css_is_removed() or some if 2418 * rcu_read_lock(). The caller must check css_is_removed() or some if
2432 * it's concern. (dropping refcnt from swap can be called against removed 2419 * it's concern. (dropping refcnt from swap can be called against removed
@@ -2481,6 +2468,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
2481{ 2468{
2482 struct page_cgroup *pc = lookup_page_cgroup(page); 2469 struct page_cgroup *pc = lookup_page_cgroup(page);
2483 struct zone *uninitialized_var(zone); 2470 struct zone *uninitialized_var(zone);
2471 struct lruvec *lruvec;
2484 bool was_on_lru = false; 2472 bool was_on_lru = false;
2485 bool anon; 2473 bool anon;
2486 2474
@@ -2503,8 +2491,9 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
2503 zone = page_zone(page); 2491 zone = page_zone(page);
2504 spin_lock_irq(&zone->lru_lock); 2492 spin_lock_irq(&zone->lru_lock);
2505 if (PageLRU(page)) { 2493 if (PageLRU(page)) {
2494 lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
2506 ClearPageLRU(page); 2495 ClearPageLRU(page);
2507 del_page_from_lru_list(zone, page, page_lru(page)); 2496 del_page_from_lru_list(page, lruvec, page_lru(page));
2508 was_on_lru = true; 2497 was_on_lru = true;
2509 } 2498 }
2510 } 2499 }
@@ -2522,9 +2511,10 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
2522 2511
2523 if (lrucare) { 2512 if (lrucare) {
2524 if (was_on_lru) { 2513 if (was_on_lru) {
2514 lruvec = mem_cgroup_zone_lruvec(zone, pc->mem_cgroup);
2525 VM_BUG_ON(PageLRU(page)); 2515 VM_BUG_ON(PageLRU(page));
2526 SetPageLRU(page); 2516 SetPageLRU(page);
2527 add_page_to_lru_list(zone, page, page_lru(page)); 2517 add_page_to_lru_list(page, lruvec, page_lru(page));
2528 } 2518 }
2529 spin_unlock_irq(&zone->lru_lock); 2519 spin_unlock_irq(&zone->lru_lock);
2530 } 2520 }
@@ -2547,7 +2537,7 @@ static void __mem_cgroup_commit_charge(struct mem_cgroup *memcg,
2547 2537
2548#ifdef CONFIG_TRANSPARENT_HUGEPAGE 2538#ifdef CONFIG_TRANSPARENT_HUGEPAGE
2549 2539
2550#define PCGF_NOCOPY_AT_SPLIT ((1 << PCG_LOCK) | (1 << PCG_MIGRATION)) 2540#define PCGF_NOCOPY_AT_SPLIT (1 << PCG_LOCK | 1 << PCG_MIGRATION)
2551/* 2541/*
2552 * Because tail pages are not marked as "used", set it. We're under 2542 * Because tail pages are not marked as "used", set it. We're under
2553 * zone->lru_lock, 'splitting on pmd' and compound_lock. 2543 * zone->lru_lock, 'splitting on pmd' and compound_lock.
@@ -2578,23 +2568,19 @@ void mem_cgroup_split_huge_fixup(struct page *head)
2578 * @pc: page_cgroup of the page. 2568 * @pc: page_cgroup of the page.
2579 * @from: mem_cgroup which the page is moved from. 2569 * @from: mem_cgroup which the page is moved from.
2580 * @to: mem_cgroup which the page is moved to. @from != @to. 2570 * @to: mem_cgroup which the page is moved to. @from != @to.
2581 * @uncharge: whether we should call uncharge and css_put against @from.
2582 * 2571 *
2583 * The caller must confirm following. 2572 * The caller must confirm following.
2584 * - page is not on LRU (isolate_page() is useful.) 2573 * - page is not on LRU (isolate_page() is useful.)
2585 * - compound_lock is held when nr_pages > 1 2574 * - compound_lock is held when nr_pages > 1
2586 * 2575 *
2587 * This function doesn't do "charge" nor css_get to new cgroup. It should be 2576 * This function doesn't do "charge" to new cgroup and doesn't do "uncharge"
2588 * done by a caller(__mem_cgroup_try_charge would be useful). If @uncharge is 2577 * from old cgroup.
2589 * true, this function does "uncharge" from old cgroup, but it doesn't if
2590 * @uncharge is false, so a caller should do "uncharge".
2591 */ 2578 */
2592static int mem_cgroup_move_account(struct page *page, 2579static int mem_cgroup_move_account(struct page *page,
2593 unsigned int nr_pages, 2580 unsigned int nr_pages,
2594 struct page_cgroup *pc, 2581 struct page_cgroup *pc,
2595 struct mem_cgroup *from, 2582 struct mem_cgroup *from,
2596 struct mem_cgroup *to, 2583 struct mem_cgroup *to)
2597 bool uncharge)
2598{ 2584{
2599 unsigned long flags; 2585 unsigned long flags;
2600 int ret; 2586 int ret;
@@ -2628,9 +2614,6 @@ static int mem_cgroup_move_account(struct page *page,
2628 preempt_enable(); 2614 preempt_enable();
2629 } 2615 }
2630 mem_cgroup_charge_statistics(from, anon, -nr_pages); 2616 mem_cgroup_charge_statistics(from, anon, -nr_pages);
2631 if (uncharge)
2632 /* This is not "cancel", but cancel_charge does all we need. */
2633 __mem_cgroup_cancel_charge(from, nr_pages);
2634 2617
2635 /* caller should have done css_get */ 2618 /* caller should have done css_get */
2636 pc->mem_cgroup = to; 2619 pc->mem_cgroup = to;
@@ -2664,15 +2647,13 @@ static int mem_cgroup_move_parent(struct page *page,
2664 struct mem_cgroup *child, 2647 struct mem_cgroup *child,
2665 gfp_t gfp_mask) 2648 gfp_t gfp_mask)
2666{ 2649{
2667 struct cgroup *cg = child->css.cgroup;
2668 struct cgroup *pcg = cg->parent;
2669 struct mem_cgroup *parent; 2650 struct mem_cgroup *parent;
2670 unsigned int nr_pages; 2651 unsigned int nr_pages;
2671 unsigned long uninitialized_var(flags); 2652 unsigned long uninitialized_var(flags);
2672 int ret; 2653 int ret;
2673 2654
2674 /* Is ROOT ? */ 2655 /* Is ROOT ? */
2675 if (!pcg) 2656 if (mem_cgroup_is_root(child))
2676 return -EINVAL; 2657 return -EINVAL;
2677 2658
2678 ret = -EBUSY; 2659 ret = -EBUSY;
@@ -2683,21 +2664,23 @@ static int mem_cgroup_move_parent(struct page *page,
2683 2664
2684 nr_pages = hpage_nr_pages(page); 2665 nr_pages = hpage_nr_pages(page);
2685 2666
2686 parent = mem_cgroup_from_cont(pcg); 2667 parent = parent_mem_cgroup(child);
2687 ret = __mem_cgroup_try_charge(NULL, gfp_mask, nr_pages, &parent, false); 2668 /*
2688 if (ret) 2669 * If no parent, move charges to root cgroup.
2689 goto put_back; 2670 */
2671 if (!parent)
2672 parent = root_mem_cgroup;
2690 2673
2691 if (nr_pages > 1) 2674 if (nr_pages > 1)
2692 flags = compound_lock_irqsave(page); 2675 flags = compound_lock_irqsave(page);
2693 2676
2694 ret = mem_cgroup_move_account(page, nr_pages, pc, child, parent, true); 2677 ret = mem_cgroup_move_account(page, nr_pages,
2695 if (ret) 2678 pc, child, parent);
2696 __mem_cgroup_cancel_charge(parent, nr_pages); 2679 if (!ret)
2680 __mem_cgroup_cancel_local_charge(child, nr_pages);
2697 2681
2698 if (nr_pages > 1) 2682 if (nr_pages > 1)
2699 compound_unlock_irqrestore(page, flags); 2683 compound_unlock_irqrestore(page, flags);
2700put_back:
2701 putback_lru_page(page); 2684 putback_lru_page(page);
2702put: 2685put:
2703 put_page(page); 2686 put_page(page);
@@ -2845,24 +2828,7 @@ __mem_cgroup_commit_charge_swapin(struct page *page, struct mem_cgroup *memcg,
2845 */ 2828 */
2846 if (do_swap_account && PageSwapCache(page)) { 2829 if (do_swap_account && PageSwapCache(page)) {
2847 swp_entry_t ent = {.val = page_private(page)}; 2830 swp_entry_t ent = {.val = page_private(page)};
2848 struct mem_cgroup *swap_memcg; 2831 mem_cgroup_uncharge_swap(ent);
2849 unsigned short id;
2850
2851 id = swap_cgroup_record(ent, 0);
2852 rcu_read_lock();
2853 swap_memcg = mem_cgroup_lookup(id);
2854 if (swap_memcg) {
2855 /*
2856 * This recorded memcg can be obsolete one. So, avoid
2857 * calling css_tryget
2858 */
2859 if (!mem_cgroup_is_root(swap_memcg))
2860 res_counter_uncharge(&swap_memcg->memsw,
2861 PAGE_SIZE);
2862 mem_cgroup_swap_statistics(swap_memcg, false);
2863 mem_cgroup_put(swap_memcg);
2864 }
2865 rcu_read_unlock();
2866 } 2832 }
2867 /* 2833 /*
2868 * At swapin, we may charge account against cgroup which has no tasks. 2834 * At swapin, we may charge account against cgroup which has no tasks.
@@ -3155,7 +3121,6 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
3155 * @entry: swap entry to be moved 3121 * @entry: swap entry to be moved
3156 * @from: mem_cgroup which the entry is moved from 3122 * @from: mem_cgroup which the entry is moved from
3157 * @to: mem_cgroup which the entry is moved to 3123 * @to: mem_cgroup which the entry is moved to
3158 * @need_fixup: whether we should fixup res_counters and refcounts.
3159 * 3124 *
3160 * It succeeds only when the swap_cgroup's record for this entry is the same 3125 * It succeeds only when the swap_cgroup's record for this entry is the same
3161 * as the mem_cgroup's id of @from. 3126 * as the mem_cgroup's id of @from.
@@ -3166,7 +3131,7 @@ void mem_cgroup_uncharge_swap(swp_entry_t ent)
3166 * both res and memsw, and called css_get(). 3131 * both res and memsw, and called css_get().
3167 */ 3132 */
3168static int mem_cgroup_move_swap_account(swp_entry_t entry, 3133static int mem_cgroup_move_swap_account(swp_entry_t entry,
3169 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup) 3134 struct mem_cgroup *from, struct mem_cgroup *to)
3170{ 3135{
3171 unsigned short old_id, new_id; 3136 unsigned short old_id, new_id;
3172 3137
@@ -3185,24 +3150,13 @@ static int mem_cgroup_move_swap_account(swp_entry_t entry,
3185 * swap-in, the refcount of @to might be decreased to 0. 3150 * swap-in, the refcount of @to might be decreased to 0.
3186 */ 3151 */
3187 mem_cgroup_get(to); 3152 mem_cgroup_get(to);
3188 if (need_fixup) {
3189 if (!mem_cgroup_is_root(from))
3190 res_counter_uncharge(&from->memsw, PAGE_SIZE);
3191 mem_cgroup_put(from);
3192 /*
3193 * we charged both to->res and to->memsw, so we should
3194 * uncharge to->res.
3195 */
3196 if (!mem_cgroup_is_root(to))
3197 res_counter_uncharge(&to->res, PAGE_SIZE);
3198 }
3199 return 0; 3153 return 0;
3200 } 3154 }
3201 return -EINVAL; 3155 return -EINVAL;
3202} 3156}
3203#else 3157#else
3204static inline int mem_cgroup_move_swap_account(swp_entry_t entry, 3158static inline int mem_cgroup_move_swap_account(swp_entry_t entry,
3205 struct mem_cgroup *from, struct mem_cgroup *to, bool need_fixup) 3159 struct mem_cgroup *from, struct mem_cgroup *to)
3206{ 3160{
3207 return -EINVAL; 3161 return -EINVAL;
3208} 3162}
@@ -3363,7 +3317,7 @@ void mem_cgroup_end_migration(struct mem_cgroup *memcg,
3363void mem_cgroup_replace_page_cache(struct page *oldpage, 3317void mem_cgroup_replace_page_cache(struct page *oldpage,
3364 struct page *newpage) 3318 struct page *newpage)
3365{ 3319{
3366 struct mem_cgroup *memcg; 3320 struct mem_cgroup *memcg = NULL;
3367 struct page_cgroup *pc; 3321 struct page_cgroup *pc;
3368 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE; 3322 enum charge_type type = MEM_CGROUP_CHARGE_TYPE_CACHE;
3369 3323
@@ -3373,11 +3327,20 @@ void mem_cgroup_replace_page_cache(struct page *oldpage,
3373 pc = lookup_page_cgroup(oldpage); 3327 pc = lookup_page_cgroup(oldpage);
3374 /* fix accounting on old pages */ 3328 /* fix accounting on old pages */
3375 lock_page_cgroup(pc); 3329 lock_page_cgroup(pc);
3376 memcg = pc->mem_cgroup; 3330 if (PageCgroupUsed(pc)) {
3377 mem_cgroup_charge_statistics(memcg, false, -1); 3331 memcg = pc->mem_cgroup;
3378 ClearPageCgroupUsed(pc); 3332 mem_cgroup_charge_statistics(memcg, false, -1);
3333 ClearPageCgroupUsed(pc);
3334 }
3379 unlock_page_cgroup(pc); 3335 unlock_page_cgroup(pc);
3380 3336
3337 /*
3338 * When called from shmem_replace_page(), in some cases the
3339 * oldpage has already been charged, and in some cases not.
3340 */
3341 if (!memcg)
3342 return;
3343
3381 if (PageSwapBacked(oldpage)) 3344 if (PageSwapBacked(oldpage))
3382 type = MEM_CGROUP_CHARGE_TYPE_SHMEM; 3345 type = MEM_CGROUP_CHARGE_TYPE_SHMEM;
3383 3346
@@ -3793,7 +3756,7 @@ try_to_free:
3793 goto move_account; 3756 goto move_account;
3794} 3757}
3795 3758
3796int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event) 3759static int mem_cgroup_force_empty_write(struct cgroup *cont, unsigned int event)
3797{ 3760{
3798 return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true); 3761 return mem_cgroup_force_empty(mem_cgroup_from_cont(cont), true);
3799} 3762}
@@ -4051,103 +4014,13 @@ static int mem_cgroup_move_charge_write(struct cgroup *cgrp,
4051} 4014}
4052#endif 4015#endif
4053 4016
4054
4055/* For read statistics */
4056enum {
4057 MCS_CACHE,
4058 MCS_RSS,
4059 MCS_FILE_MAPPED,
4060 MCS_PGPGIN,
4061 MCS_PGPGOUT,
4062 MCS_SWAP,
4063 MCS_PGFAULT,
4064 MCS_PGMAJFAULT,
4065 MCS_INACTIVE_ANON,
4066 MCS_ACTIVE_ANON,
4067 MCS_INACTIVE_FILE,
4068 MCS_ACTIVE_FILE,
4069 MCS_UNEVICTABLE,
4070 NR_MCS_STAT,
4071};
4072
4073struct mcs_total_stat {
4074 s64 stat[NR_MCS_STAT];
4075};
4076
4077struct {
4078 char *local_name;
4079 char *total_name;
4080} memcg_stat_strings[NR_MCS_STAT] = {
4081 {"cache", "total_cache"},
4082 {"rss", "total_rss"},
4083 {"mapped_file", "total_mapped_file"},
4084 {"pgpgin", "total_pgpgin"},
4085 {"pgpgout", "total_pgpgout"},
4086 {"swap", "total_swap"},
4087 {"pgfault", "total_pgfault"},
4088 {"pgmajfault", "total_pgmajfault"},
4089 {"inactive_anon", "total_inactive_anon"},
4090 {"active_anon", "total_active_anon"},
4091 {"inactive_file", "total_inactive_file"},
4092 {"active_file", "total_active_file"},
4093 {"unevictable", "total_unevictable"}
4094};
4095
4096
4097static void
4098mem_cgroup_get_local_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
4099{
4100 s64 val;
4101
4102 /* per cpu stat */
4103 val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_CACHE);
4104 s->stat[MCS_CACHE] += val * PAGE_SIZE;
4105 val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_RSS);
4106 s->stat[MCS_RSS] += val * PAGE_SIZE;
4107 val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_FILE_MAPPED);
4108 s->stat[MCS_FILE_MAPPED] += val * PAGE_SIZE;
4109 val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGIN);
4110 s->stat[MCS_PGPGIN] += val;
4111 val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGPGOUT);
4112 s->stat[MCS_PGPGOUT] += val;
4113 if (do_swap_account) {
4114 val = mem_cgroup_read_stat(memcg, MEM_CGROUP_STAT_SWAPOUT);
4115 s->stat[MCS_SWAP] += val * PAGE_SIZE;
4116 }
4117 val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGFAULT);
4118 s->stat[MCS_PGFAULT] += val;
4119 val = mem_cgroup_read_events(memcg, MEM_CGROUP_EVENTS_PGMAJFAULT);
4120 s->stat[MCS_PGMAJFAULT] += val;
4121
4122 /* per zone stat */
4123 val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_ANON));
4124 s->stat[MCS_INACTIVE_ANON] += val * PAGE_SIZE;
4125 val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_ANON));
4126 s->stat[MCS_ACTIVE_ANON] += val * PAGE_SIZE;
4127 val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_INACTIVE_FILE));
4128 s->stat[MCS_INACTIVE_FILE] += val * PAGE_SIZE;
4129 val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_ACTIVE_FILE));
4130 s->stat[MCS_ACTIVE_FILE] += val * PAGE_SIZE;
4131 val = mem_cgroup_nr_lru_pages(memcg, BIT(LRU_UNEVICTABLE));
4132 s->stat[MCS_UNEVICTABLE] += val * PAGE_SIZE;
4133}
4134
4135static void
4136mem_cgroup_get_total_stat(struct mem_cgroup *memcg, struct mcs_total_stat *s)
4137{
4138 struct mem_cgroup *iter;
4139
4140 for_each_mem_cgroup_tree(iter, memcg)
4141 mem_cgroup_get_local_stat(iter, s);
4142}
4143
4144#ifdef CONFIG_NUMA 4017#ifdef CONFIG_NUMA
4145static int mem_control_numa_stat_show(struct seq_file *m, void *arg) 4018static int mem_control_numa_stat_show(struct cgroup *cont, struct cftype *cft,
4019 struct seq_file *m)
4146{ 4020{
4147 int nid; 4021 int nid;
4148 unsigned long total_nr, file_nr, anon_nr, unevictable_nr; 4022 unsigned long total_nr, file_nr, anon_nr, unevictable_nr;
4149 unsigned long node_nr; 4023 unsigned long node_nr;
4150 struct cgroup *cont = m->private;
4151 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 4024 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
4152 4025
4153 total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL); 4026 total_nr = mem_cgroup_nr_lru_pages(memcg, LRU_ALL);
@@ -4188,64 +4061,100 @@ static int mem_control_numa_stat_show(struct seq_file *m, void *arg)
4188} 4061}
4189#endif /* CONFIG_NUMA */ 4062#endif /* CONFIG_NUMA */
4190 4063
4064static const char * const mem_cgroup_lru_names[] = {
4065 "inactive_anon",
4066 "active_anon",
4067 "inactive_file",
4068 "active_file",
4069 "unevictable",
4070};
4071
4072static inline void mem_cgroup_lru_names_not_uptodate(void)
4073{
4074 BUILD_BUG_ON(ARRAY_SIZE(mem_cgroup_lru_names) != NR_LRU_LISTS);
4075}
4076
4191static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft, 4077static int mem_control_stat_show(struct cgroup *cont, struct cftype *cft,
4192 struct cgroup_map_cb *cb) 4078 struct seq_file *m)
4193{ 4079{
4194 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont); 4080 struct mem_cgroup *memcg = mem_cgroup_from_cont(cont);
4195 struct mcs_total_stat mystat; 4081 struct mem_cgroup *mi;
4196 int i; 4082 unsigned int i;
4197
4198 memset(&mystat, 0, sizeof(mystat));
4199 mem_cgroup_get_local_stat(memcg, &mystat);
4200 4083
4201 4084 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
4202 for (i = 0; i < NR_MCS_STAT; i++) { 4085 if (i == MEM_CGROUP_STAT_SWAPOUT && !do_swap_account)
4203 if (i == MCS_SWAP && !do_swap_account)
4204 continue; 4086 continue;
4205 cb->fill(cb, memcg_stat_strings[i].local_name, mystat.stat[i]); 4087 seq_printf(m, "%s %ld\n", mem_cgroup_stat_names[i],
4088 mem_cgroup_read_stat(memcg, i) * PAGE_SIZE);
4206 } 4089 }
4207 4090
4091 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++)
4092 seq_printf(m, "%s %lu\n", mem_cgroup_events_names[i],
4093 mem_cgroup_read_events(memcg, i));
4094
4095 for (i = 0; i < NR_LRU_LISTS; i++)
4096 seq_printf(m, "%s %lu\n", mem_cgroup_lru_names[i],
4097 mem_cgroup_nr_lru_pages(memcg, BIT(i)) * PAGE_SIZE);
4098
4208 /* Hierarchical information */ 4099 /* Hierarchical information */
4209 { 4100 {
4210 unsigned long long limit, memsw_limit; 4101 unsigned long long limit, memsw_limit;
4211 memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit); 4102 memcg_get_hierarchical_limit(memcg, &limit, &memsw_limit);
4212 cb->fill(cb, "hierarchical_memory_limit", limit); 4103 seq_printf(m, "hierarchical_memory_limit %llu\n", limit);
4213 if (do_swap_account) 4104 if (do_swap_account)
4214 cb->fill(cb, "hierarchical_memsw_limit", memsw_limit); 4105 seq_printf(m, "hierarchical_memsw_limit %llu\n",
4106 memsw_limit);
4215 } 4107 }
4216 4108
4217 memset(&mystat, 0, sizeof(mystat)); 4109 for (i = 0; i < MEM_CGROUP_STAT_NSTATS; i++) {
4218 mem_cgroup_get_total_stat(memcg, &mystat); 4110 long long val = 0;
4219 for (i = 0; i < NR_MCS_STAT; i++) { 4111
4220 if (i == MCS_SWAP && !do_swap_account) 4112 if (i == MEM_CGROUP_STAT_SWAPOUT && !do_swap_account)
4221 continue; 4113 continue;
4222 cb->fill(cb, memcg_stat_strings[i].total_name, mystat.stat[i]); 4114 for_each_mem_cgroup_tree(mi, memcg)
4115 val += mem_cgroup_read_stat(mi, i) * PAGE_SIZE;
4116 seq_printf(m, "total_%s %lld\n", mem_cgroup_stat_names[i], val);
4117 }
4118
4119 for (i = 0; i < MEM_CGROUP_EVENTS_NSTATS; i++) {
4120 unsigned long long val = 0;
4121
4122 for_each_mem_cgroup_tree(mi, memcg)
4123 val += mem_cgroup_read_events(mi, i);
4124 seq_printf(m, "total_%s %llu\n",
4125 mem_cgroup_events_names[i], val);
4126 }
4127
4128 for (i = 0; i < NR_LRU_LISTS; i++) {
4129 unsigned long long val = 0;
4130
4131 for_each_mem_cgroup_tree(mi, memcg)
4132 val += mem_cgroup_nr_lru_pages(mi, BIT(i)) * PAGE_SIZE;
4133 seq_printf(m, "total_%s %llu\n", mem_cgroup_lru_names[i], val);
4223 } 4134 }
4224 4135
4225#ifdef CONFIG_DEBUG_VM 4136#ifdef CONFIG_DEBUG_VM
4226 { 4137 {
4227 int nid, zid; 4138 int nid, zid;
4228 struct mem_cgroup_per_zone *mz; 4139 struct mem_cgroup_per_zone *mz;
4140 struct zone_reclaim_stat *rstat;
4229 unsigned long recent_rotated[2] = {0, 0}; 4141 unsigned long recent_rotated[2] = {0, 0};
4230 unsigned long recent_scanned[2] = {0, 0}; 4142 unsigned long recent_scanned[2] = {0, 0};
4231 4143
4232 for_each_online_node(nid) 4144 for_each_online_node(nid)
4233 for (zid = 0; zid < MAX_NR_ZONES; zid++) { 4145 for (zid = 0; zid < MAX_NR_ZONES; zid++) {
4234 mz = mem_cgroup_zoneinfo(memcg, nid, zid); 4146 mz = mem_cgroup_zoneinfo(memcg, nid, zid);
4147 rstat = &mz->lruvec.reclaim_stat;
4235 4148
4236 recent_rotated[0] += 4149 recent_rotated[0] += rstat->recent_rotated[0];
4237 mz->reclaim_stat.recent_rotated[0]; 4150 recent_rotated[1] += rstat->recent_rotated[1];
4238 recent_rotated[1] += 4151 recent_scanned[0] += rstat->recent_scanned[0];
4239 mz->reclaim_stat.recent_rotated[1]; 4152 recent_scanned[1] += rstat->recent_scanned[1];
4240 recent_scanned[0] +=
4241 mz->reclaim_stat.recent_scanned[0];
4242 recent_scanned[1] +=
4243 mz->reclaim_stat.recent_scanned[1];
4244 } 4153 }
4245 cb->fill(cb, "recent_rotated_anon", recent_rotated[0]); 4154 seq_printf(m, "recent_rotated_anon %lu\n", recent_rotated[0]);
4246 cb->fill(cb, "recent_rotated_file", recent_rotated[1]); 4155 seq_printf(m, "recent_rotated_file %lu\n", recent_rotated[1]);
4247 cb->fill(cb, "recent_scanned_anon", recent_scanned[0]); 4156 seq_printf(m, "recent_scanned_anon %lu\n", recent_scanned[0]);
4248 cb->fill(cb, "recent_scanned_file", recent_scanned[1]); 4157 seq_printf(m, "recent_scanned_file %lu\n", recent_scanned[1]);
4249 } 4158 }
4250#endif 4159#endif
4251 4160
@@ -4307,7 +4216,7 @@ static void __mem_cgroup_threshold(struct mem_cgroup *memcg, bool swap)
4307 usage = mem_cgroup_usage(memcg, swap); 4216 usage = mem_cgroup_usage(memcg, swap);
4308 4217
4309 /* 4218 /*
4310 * current_threshold points to threshold just below usage. 4219 * current_threshold points to threshold just below or equal to usage.
4311 * If it's not true, a threshold was crossed after last 4220 * If it's not true, a threshold was crossed after last
4312 * call of __mem_cgroup_threshold(). 4221 * call of __mem_cgroup_threshold().
4313 */ 4222 */
@@ -4433,14 +4342,15 @@ static int mem_cgroup_usage_register_event(struct cgroup *cgrp,
4433 /* Find current threshold */ 4342 /* Find current threshold */
4434 new->current_threshold = -1; 4343 new->current_threshold = -1;
4435 for (i = 0; i < size; i++) { 4344 for (i = 0; i < size; i++) {
4436 if (new->entries[i].threshold < usage) { 4345 if (new->entries[i].threshold <= usage) {
4437 /* 4346 /*
4438 * new->current_threshold will not be used until 4347 * new->current_threshold will not be used until
4439 * rcu_assign_pointer(), so it's safe to increment 4348 * rcu_assign_pointer(), so it's safe to increment
4440 * it here. 4349 * it here.
4441 */ 4350 */
4442 ++new->current_threshold; 4351 ++new->current_threshold;
4443 } 4352 } else
4353 break;
4444 } 4354 }
4445 4355
4446 /* Free old spare buffer and save old primary buffer as spare */ 4356 /* Free old spare buffer and save old primary buffer as spare */
@@ -4509,7 +4419,7 @@ static void mem_cgroup_usage_unregister_event(struct cgroup *cgrp,
4509 continue; 4419 continue;
4510 4420
4511 new->entries[j] = thresholds->primary->entries[i]; 4421 new->entries[j] = thresholds->primary->entries[i];
4512 if (new->entries[j].threshold < usage) { 4422 if (new->entries[j].threshold <= usage) {
4513 /* 4423 /*
4514 * new->current_threshold will not be used 4424 * new->current_threshold will not be used
4515 * until rcu_assign_pointer(), so it's safe to increment 4425 * until rcu_assign_pointer(), so it's safe to increment
@@ -4623,22 +4533,6 @@ static int mem_cgroup_oom_control_write(struct cgroup *cgrp,
4623 return 0; 4533 return 0;
4624} 4534}
4625 4535
4626#ifdef CONFIG_NUMA
4627static const struct file_operations mem_control_numa_stat_file_operations = {
4628 .read = seq_read,
4629 .llseek = seq_lseek,
4630 .release = single_release,
4631};
4632
4633static int mem_control_numa_stat_open(struct inode *unused, struct file *file)
4634{
4635 struct cgroup *cont = file->f_dentry->d_parent->d_fsdata;
4636
4637 file->f_op = &mem_control_numa_stat_file_operations;
4638 return single_open(file, mem_control_numa_stat_show, cont);
4639}
4640#endif /* CONFIG_NUMA */
4641
4642#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM 4536#ifdef CONFIG_CGROUP_MEM_RES_CTLR_KMEM
4643static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss) 4537static int memcg_init_kmem(struct mem_cgroup *memcg, struct cgroup_subsys *ss)
4644{ 4538{
@@ -4694,7 +4588,7 @@ static struct cftype mem_cgroup_files[] = {
4694 }, 4588 },
4695 { 4589 {
4696 .name = "stat", 4590 .name = "stat",
4697 .read_map = mem_control_stat_show, 4591 .read_seq_string = mem_control_stat_show,
4698 }, 4592 },
4699 { 4593 {
4700 .name = "force_empty", 4594 .name = "force_empty",
@@ -4726,8 +4620,7 @@ static struct cftype mem_cgroup_files[] = {
4726#ifdef CONFIG_NUMA 4620#ifdef CONFIG_NUMA
4727 { 4621 {
4728 .name = "numa_stat", 4622 .name = "numa_stat",
4729 .open = mem_control_numa_stat_open, 4623 .read_seq_string = mem_control_numa_stat_show,
4730 .mode = S_IRUGO,
4731 }, 4624 },
4732#endif 4625#endif
4733#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP 4626#ifdef CONFIG_CGROUP_MEM_RES_CTLR_SWAP
@@ -4764,7 +4657,6 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4764{ 4657{
4765 struct mem_cgroup_per_node *pn; 4658 struct mem_cgroup_per_node *pn;
4766 struct mem_cgroup_per_zone *mz; 4659 struct mem_cgroup_per_zone *mz;
4767 enum lru_list lru;
4768 int zone, tmp = node; 4660 int zone, tmp = node;
4769 /* 4661 /*
4770 * This routine is called against possible nodes. 4662 * This routine is called against possible nodes.
@@ -4782,8 +4674,7 @@ static int alloc_mem_cgroup_per_zone_info(struct mem_cgroup *memcg, int node)
4782 4674
4783 for (zone = 0; zone < MAX_NR_ZONES; zone++) { 4675 for (zone = 0; zone < MAX_NR_ZONES; zone++) {
4784 mz = &pn->zoneinfo[zone]; 4676 mz = &pn->zoneinfo[zone];
4785 for_each_lru(lru) 4677 lruvec_init(&mz->lruvec, &NODE_DATA(node)->node_zones[zone]);
4786 INIT_LIST_HEAD(&mz->lruvec.lists[lru]);
4787 mz->usage_in_excess = 0; 4678 mz->usage_in_excess = 0;
4788 mz->on_tree = false; 4679 mz->on_tree = false;
4789 mz->memcg = memcg; 4680 mz->memcg = memcg;
@@ -4826,23 +4717,40 @@ out_free:
4826} 4717}
4827 4718
4828/* 4719/*
4829 * Helpers for freeing a vzalloc()ed mem_cgroup by RCU, 4720 * Helpers for freeing a kmalloc()ed/vzalloc()ed mem_cgroup by RCU,
4830 * but in process context. The work_freeing structure is overlaid 4721 * but in process context. The work_freeing structure is overlaid
4831 * on the rcu_freeing structure, which itself is overlaid on memsw. 4722 * on the rcu_freeing structure, which itself is overlaid on memsw.
4832 */ 4723 */
4833static void vfree_work(struct work_struct *work) 4724static void free_work(struct work_struct *work)
4834{ 4725{
4835 struct mem_cgroup *memcg; 4726 struct mem_cgroup *memcg;
4727 int size = sizeof(struct mem_cgroup);
4836 4728
4837 memcg = container_of(work, struct mem_cgroup, work_freeing); 4729 memcg = container_of(work, struct mem_cgroup, work_freeing);
4838 vfree(memcg); 4730 /*
4731 * We need to make sure that (at least for now), the jump label
4732 * destruction code runs outside of the cgroup lock. This is because
4733 * get_online_cpus(), which is called from the static_branch update,
4734 * can't be called inside the cgroup_lock. cpusets are the ones
4735 * enforcing this dependency, so if they ever change, we might as well.
4736 *
4737 * schedule_work() will guarantee this happens. Be careful if you need
4738 * to move this code around, and make sure it is outside
4739 * the cgroup_lock.
4740 */
4741 disarm_sock_keys(memcg);
4742 if (size < PAGE_SIZE)
4743 kfree(memcg);
4744 else
4745 vfree(memcg);
4839} 4746}
4840static void vfree_rcu(struct rcu_head *rcu_head) 4747
4748static void free_rcu(struct rcu_head *rcu_head)
4841{ 4749{
4842 struct mem_cgroup *memcg; 4750 struct mem_cgroup *memcg;
4843 4751
4844 memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing); 4752 memcg = container_of(rcu_head, struct mem_cgroup, rcu_freeing);
4845 INIT_WORK(&memcg->work_freeing, vfree_work); 4753 INIT_WORK(&memcg->work_freeing, free_work);
4846 schedule_work(&memcg->work_freeing); 4754 schedule_work(&memcg->work_freeing);
4847} 4755}
4848 4756
@@ -4868,10 +4776,7 @@ static void __mem_cgroup_free(struct mem_cgroup *memcg)
4868 free_mem_cgroup_per_zone_info(memcg, node); 4776 free_mem_cgroup_per_zone_info(memcg, node);
4869 4777
4870 free_percpu(memcg->stat); 4778 free_percpu(memcg->stat);
4871 if (sizeof(struct mem_cgroup) < PAGE_SIZE) 4779 call_rcu(&memcg->rcu_freeing, free_rcu);
4872 kfree_rcu(memcg, rcu_freeing);
4873 else
4874 call_rcu(&memcg->rcu_freeing, vfree_rcu);
4875} 4780}
4876 4781
4877static void mem_cgroup_get(struct mem_cgroup *memcg) 4782static void mem_cgroup_get(struct mem_cgroup *memcg)
@@ -5135,7 +5040,7 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5135 return NULL; 5040 return NULL;
5136 if (PageAnon(page)) { 5041 if (PageAnon(page)) {
5137 /* we don't move shared anon */ 5042 /* we don't move shared anon */
5138 if (!move_anon() || page_mapcount(page) > 2) 5043 if (!move_anon())
5139 return NULL; 5044 return NULL;
5140 } else if (!move_file()) 5045 } else if (!move_file())
5141 /* we ignore mapcount for file pages */ 5046 /* we ignore mapcount for file pages */
@@ -5146,32 +5051,37 @@ static struct page *mc_handle_present_pte(struct vm_area_struct *vma,
5146 return page; 5051 return page;
5147} 5052}
5148 5053
5054#ifdef CONFIG_SWAP
5149static struct page *mc_handle_swap_pte(struct vm_area_struct *vma, 5055static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5150 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5056 unsigned long addr, pte_t ptent, swp_entry_t *entry)
5151{ 5057{
5152 int usage_count;
5153 struct page *page = NULL; 5058 struct page *page = NULL;
5154 swp_entry_t ent = pte_to_swp_entry(ptent); 5059 swp_entry_t ent = pte_to_swp_entry(ptent);
5155 5060
5156 if (!move_anon() || non_swap_entry(ent)) 5061 if (!move_anon() || non_swap_entry(ent))
5157 return NULL; 5062 return NULL;
5158 usage_count = mem_cgroup_count_swap_user(ent, &page); 5063 /*
5159 if (usage_count > 1) { /* we don't move shared anon */ 5064 * Because lookup_swap_cache() updates some statistics counter,
5160 if (page) 5065 * we call find_get_page() with swapper_space directly.
5161 put_page(page); 5066 */
5162 return NULL; 5067 page = find_get_page(&swapper_space, ent.val);
5163 }
5164 if (do_swap_account) 5068 if (do_swap_account)
5165 entry->val = ent.val; 5069 entry->val = ent.val;
5166 5070
5167 return page; 5071 return page;
5168} 5072}
5073#else
5074static struct page *mc_handle_swap_pte(struct vm_area_struct *vma,
5075 unsigned long addr, pte_t ptent, swp_entry_t *entry)
5076{
5077 return NULL;
5078}
5079#endif
5169 5080
5170static struct page *mc_handle_file_pte(struct vm_area_struct *vma, 5081static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5171 unsigned long addr, pte_t ptent, swp_entry_t *entry) 5082 unsigned long addr, pte_t ptent, swp_entry_t *entry)
5172{ 5083{
5173 struct page *page = NULL; 5084 struct page *page = NULL;
5174 struct inode *inode;
5175 struct address_space *mapping; 5085 struct address_space *mapping;
5176 pgoff_t pgoff; 5086 pgoff_t pgoff;
5177 5087
@@ -5180,7 +5090,6 @@ static struct page *mc_handle_file_pte(struct vm_area_struct *vma,
5180 if (!move_file()) 5090 if (!move_file())
5181 return NULL; 5091 return NULL;
5182 5092
5183 inode = vma->vm_file->f_path.dentry->d_inode;
5184 mapping = vma->vm_file->f_mapping; 5093 mapping = vma->vm_file->f_mapping;
5185 if (pte_none(ptent)) 5094 if (pte_none(ptent))
5186 pgoff = linear_page_index(vma, addr); 5095 pgoff = linear_page_index(vma, addr);
@@ -5479,8 +5388,7 @@ static int mem_cgroup_move_charge_pte_range(pmd_t *pmd,
5479 if (!isolate_lru_page(page)) { 5388 if (!isolate_lru_page(page)) {
5480 pc = lookup_page_cgroup(page); 5389 pc = lookup_page_cgroup(page);
5481 if (!mem_cgroup_move_account(page, HPAGE_PMD_NR, 5390 if (!mem_cgroup_move_account(page, HPAGE_PMD_NR,
5482 pc, mc.from, mc.to, 5391 pc, mc.from, mc.to)) {
5483 false)) {
5484 mc.precharge -= HPAGE_PMD_NR; 5392 mc.precharge -= HPAGE_PMD_NR;
5485 mc.moved_charge += HPAGE_PMD_NR; 5393 mc.moved_charge += HPAGE_PMD_NR;
5486 } 5394 }
@@ -5510,7 +5418,7 @@ retry:
5510 goto put; 5418 goto put;
5511 pc = lookup_page_cgroup(page); 5419 pc = lookup_page_cgroup(page);
5512 if (!mem_cgroup_move_account(page, 1, pc, 5420 if (!mem_cgroup_move_account(page, 1, pc,
5513 mc.from, mc.to, false)) { 5421 mc.from, mc.to)) {
5514 mc.precharge--; 5422 mc.precharge--;
5515 /* we uncharge from mc.from later. */ 5423 /* we uncharge from mc.from later. */
5516 mc.moved_charge++; 5424 mc.moved_charge++;
@@ -5521,8 +5429,7 @@ put: /* get_mctgt_type() gets the page */
5521 break; 5429 break;
5522 case MC_TARGET_SWAP: 5430 case MC_TARGET_SWAP:
5523 ent = target.ent; 5431 ent = target.ent;
5524 if (!mem_cgroup_move_swap_account(ent, 5432 if (!mem_cgroup_move_swap_account(ent, mc.from, mc.to)) {
5525 mc.from, mc.to, false)) {
5526 mc.precharge--; 5433 mc.precharge--;
5527 /* we fixup refcnts and charges later. */ 5434 /* we fixup refcnts and charges later. */
5528 mc.moved_swap++; 5435 mc.moved_swap++;
@@ -5598,7 +5505,6 @@ static void mem_cgroup_move_task(struct cgroup *cont,
5598 if (mm) { 5505 if (mm) {
5599 if (mc.to) 5506 if (mc.to)
5600 mem_cgroup_move_charge(mm); 5507 mem_cgroup_move_charge(mm);
5601 put_swap_token(mm);
5602 mmput(mm); 5508 mmput(mm);
5603 } 5509 }
5604 if (mc.to) 5510 if (mc.to)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 97cc2733551a..ab1e7145e290 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1388,23 +1388,23 @@ static int get_any_page(struct page *p, unsigned long pfn, int flags)
1388 */ 1388 */
1389 if (!get_page_unless_zero(compound_head(p))) { 1389 if (!get_page_unless_zero(compound_head(p))) {
1390 if (PageHuge(p)) { 1390 if (PageHuge(p)) {
1391 pr_info("get_any_page: %#lx free huge page\n", pfn); 1391 pr_info("%s: %#lx free huge page\n", __func__, pfn);
1392 ret = dequeue_hwpoisoned_huge_page(compound_head(p)); 1392 ret = dequeue_hwpoisoned_huge_page(compound_head(p));
1393 } else if (is_free_buddy_page(p)) { 1393 } else if (is_free_buddy_page(p)) {
1394 pr_info("get_any_page: %#lx free buddy page\n", pfn); 1394 pr_info("%s: %#lx free buddy page\n", __func__, pfn);
1395 /* Set hwpoison bit while page is still isolated */ 1395 /* Set hwpoison bit while page is still isolated */
1396 SetPageHWPoison(p); 1396 SetPageHWPoison(p);
1397 ret = 0; 1397 ret = 0;
1398 } else { 1398 } else {
1399 pr_info("get_any_page: %#lx: unknown zero refcount page type %lx\n", 1399 pr_info("%s: %#lx: unknown zero refcount page type %lx\n",
1400 pfn, p->flags); 1400 __func__, pfn, p->flags);
1401 ret = -EIO; 1401 ret = -EIO;
1402 } 1402 }
1403 } else { 1403 } else {
1404 /* Not a free page */ 1404 /* Not a free page */
1405 ret = 1; 1405 ret = 1;
1406 } 1406 }
1407 unset_migratetype_isolate(p); 1407 unset_migratetype_isolate(p, MIGRATE_MOVABLE);
1408 unlock_memory_hotplug(); 1408 unlock_memory_hotplug();
1409 return ret; 1409 return ret;
1410} 1410}
diff --git a/mm/memory.c b/mm/memory.c
index e40f6759ba98..1b7dc662bf9f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2908,7 +2908,6 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2908 delayacct_set_flag(DELAYACCT_PF_SWAPIN); 2908 delayacct_set_flag(DELAYACCT_PF_SWAPIN);
2909 page = lookup_swap_cache(entry); 2909 page = lookup_swap_cache(entry);
2910 if (!page) { 2910 if (!page) {
2911 grab_swap_token(mm); /* Contend for token _before_ read-in */
2912 page = swapin_readahead(entry, 2911 page = swapin_readahead(entry,
2913 GFP_HIGHUSER_MOVABLE, vma, address); 2912 GFP_HIGHUSER_MOVABLE, vma, address);
2914 if (!page) { 2913 if (!page) {
@@ -2938,6 +2937,7 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
2938 } 2937 }
2939 2938
2940 locked = lock_page_or_retry(page, mm, flags); 2939 locked = lock_page_or_retry(page, mm, flags);
2940
2941 delayacct_clear_flag(DELAYACCT_PF_SWAPIN); 2941 delayacct_clear_flag(DELAYACCT_PF_SWAPIN);
2942 if (!locked) { 2942 if (!locked) {
2943 ret |= VM_FAULT_RETRY; 2943 ret |= VM_FAULT_RETRY;
@@ -3486,6 +3486,7 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3486 if (unlikely(is_vm_hugetlb_page(vma))) 3486 if (unlikely(is_vm_hugetlb_page(vma)))
3487 return hugetlb_fault(mm, vma, address, flags); 3487 return hugetlb_fault(mm, vma, address, flags);
3488 3488
3489retry:
3489 pgd = pgd_offset(mm, address); 3490 pgd = pgd_offset(mm, address);
3490 pud = pud_alloc(mm, pgd, address); 3491 pud = pud_alloc(mm, pgd, address);
3491 if (!pud) 3492 if (!pud)
@@ -3499,13 +3500,24 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
3499 pmd, flags); 3500 pmd, flags);
3500 } else { 3501 } else {
3501 pmd_t orig_pmd = *pmd; 3502 pmd_t orig_pmd = *pmd;
3503 int ret;
3504
3502 barrier(); 3505 barrier();
3503 if (pmd_trans_huge(orig_pmd)) { 3506 if (pmd_trans_huge(orig_pmd)) {
3504 if (flags & FAULT_FLAG_WRITE && 3507 if (flags & FAULT_FLAG_WRITE &&
3505 !pmd_write(orig_pmd) && 3508 !pmd_write(orig_pmd) &&
3506 !pmd_trans_splitting(orig_pmd)) 3509 !pmd_trans_splitting(orig_pmd)) {
3507 return do_huge_pmd_wp_page(mm, vma, address, 3510 ret = do_huge_pmd_wp_page(mm, vma, address, pmd,
3508 pmd, orig_pmd); 3511 orig_pmd);
3512 /*
3513 * If COW results in an oom, the huge pmd will
3514 * have been split, so retry the fault on the
3515 * pte for a smaller charge.
3516 */
3517 if (unlikely(ret & VM_FAULT_OOM))
3518 goto retry;
3519 return ret;
3520 }
3509 return 0; 3521 return 0;
3510 } 3522 }
3511 } 3523 }
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index 6629fafd6ce4..0d7e3ec8e0f3 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -74,8 +74,7 @@ static struct resource *register_memory_resource(u64 start, u64 size)
74 res->end = start + size - 1; 74 res->end = start + size - 1;
75 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY; 75 res->flags = IORESOURCE_MEM | IORESOURCE_BUSY;
76 if (request_resource(&iomem_resource, res) < 0) { 76 if (request_resource(&iomem_resource, res) < 0) {
77 printk("System RAM resource %llx - %llx cannot be added\n", 77 printk("System RAM resource %pR cannot be added\n", res);
78 (unsigned long long)res->start, (unsigned long long)res->end);
79 kfree(res); 78 kfree(res);
80 res = NULL; 79 res = NULL;
81 } 80 }
@@ -502,8 +501,10 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages)
502 online_pages_range); 501 online_pages_range);
503 if (ret) { 502 if (ret) {
504 mutex_unlock(&zonelists_mutex); 503 mutex_unlock(&zonelists_mutex);
505 printk(KERN_DEBUG "online_pages %lx at %lx failed\n", 504 printk(KERN_DEBUG "online_pages [mem %#010llx-%#010llx] failed\n",
506 nr_pages, pfn); 505 (unsigned long long) pfn << PAGE_SHIFT,
506 (((unsigned long long) pfn + nr_pages)
507 << PAGE_SHIFT) - 1);
507 memory_notify(MEM_CANCEL_ONLINE, &arg); 508 memory_notify(MEM_CANCEL_ONLINE, &arg);
508 unlock_memory_hotplug(); 509 unlock_memory_hotplug();
509 return ret; 510 return ret;
@@ -891,7 +892,7 @@ static int __ref offline_pages(unsigned long start_pfn,
891 nr_pages = end_pfn - start_pfn; 892 nr_pages = end_pfn - start_pfn;
892 893
893 /* set above range as isolated */ 894 /* set above range as isolated */
894 ret = start_isolate_page_range(start_pfn, end_pfn); 895 ret = start_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
895 if (ret) 896 if (ret)
896 goto out; 897 goto out;
897 898
@@ -956,7 +957,7 @@ repeat:
956 We cannot do rollback at this point. */ 957 We cannot do rollback at this point. */
957 offline_isolated_pages(start_pfn, end_pfn); 958 offline_isolated_pages(start_pfn, end_pfn);
958 /* reset pagetype flags and makes migrate type to be MOVABLE */ 959 /* reset pagetype flags and makes migrate type to be MOVABLE */
959 undo_isolate_page_range(start_pfn, end_pfn); 960 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
960 /* removal success */ 961 /* removal success */
961 zone->present_pages -= offlined_pages; 962 zone->present_pages -= offlined_pages;
962 zone->zone_pgdat->node_present_pages -= offlined_pages; 963 zone->zone_pgdat->node_present_pages -= offlined_pages;
@@ -977,11 +978,12 @@ repeat:
977 return 0; 978 return 0;
978 979
979failed_removal: 980failed_removal:
980 printk(KERN_INFO "memory offlining %lx to %lx failed\n", 981 printk(KERN_INFO "memory offlining [mem %#010llx-%#010llx] failed\n",
981 start_pfn, end_pfn); 982 (unsigned long long) start_pfn << PAGE_SHIFT,
983 ((unsigned long long) end_pfn << PAGE_SHIFT) - 1);
982 memory_notify(MEM_CANCEL_OFFLINE, &arg); 984 memory_notify(MEM_CANCEL_OFFLINE, &arg);
983 /* pushback to free area */ 985 /* pushback to free area */
984 undo_isolate_page_range(start_pfn, end_pfn); 986 undo_isolate_page_range(start_pfn, end_pfn, MIGRATE_MOVABLE);
985 987
986out: 988out:
987 unlock_memory_hotplug(); 989 unlock_memory_hotplug();
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 88f9422b92e7..f15c1b24ca18 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -390,7 +390,7 @@ static void mpol_rebind_policy(struct mempolicy *pol, const nodemask_t *newmask,
390{ 390{
391 if (!pol) 391 if (!pol)
392 return; 392 return;
393 if (!mpol_store_user_nodemask(pol) && step == 0 && 393 if (!mpol_store_user_nodemask(pol) && step == MPOL_REBIND_ONCE &&
394 nodes_equal(pol->w.cpuset_mems_allowed, *newmask)) 394 nodes_equal(pol->w.cpuset_mems_allowed, *newmask))
395 return; 395 return;
396 396
@@ -950,8 +950,8 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
950 * 950 *
951 * Returns the number of page that could not be moved. 951 * Returns the number of page that could not be moved.
952 */ 952 */
953int do_migrate_pages(struct mm_struct *mm, 953int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
954 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags) 954 const nodemask_t *to, int flags)
955{ 955{
956 int busy = 0; 956 int busy = 0;
957 int err; 957 int err;
@@ -963,7 +963,7 @@ int do_migrate_pages(struct mm_struct *mm,
963 963
964 down_read(&mm->mmap_sem); 964 down_read(&mm->mmap_sem);
965 965
966 err = migrate_vmas(mm, from_nodes, to_nodes, flags); 966 err = migrate_vmas(mm, from, to, flags);
967 if (err) 967 if (err)
968 goto out; 968 goto out;
969 969
@@ -998,14 +998,34 @@ int do_migrate_pages(struct mm_struct *mm,
998 * moved to an empty node, then there is nothing left worth migrating. 998 * moved to an empty node, then there is nothing left worth migrating.
999 */ 999 */
1000 1000
1001 tmp = *from_nodes; 1001 tmp = *from;
1002 while (!nodes_empty(tmp)) { 1002 while (!nodes_empty(tmp)) {
1003 int s,d; 1003 int s,d;
1004 int source = -1; 1004 int source = -1;
1005 int dest = 0; 1005 int dest = 0;
1006 1006
1007 for_each_node_mask(s, tmp) { 1007 for_each_node_mask(s, tmp) {
1008 d = node_remap(s, *from_nodes, *to_nodes); 1008
1009 /*
1010 * do_migrate_pages() tries to maintain the relative
1011 * node relationship of the pages established between
1012 * threads and memory areas.
1013 *
1014 * However if the number of source nodes is not equal to
1015 * the number of destination nodes we can not preserve
1016 * this node relative relationship. In that case, skip
1017 * copying memory from a node that is in the destination
1018 * mask.
1019 *
1020 * Example: [2,3,4] -> [3,4,5] moves everything.
1021 * [0-7] - > [3,4,5] moves only 0,1,2,6,7.
1022 */
1023
1024 if ((nodes_weight(*from) != nodes_weight(*to)) &&
1025 (node_isset(s, *to)))
1026 continue;
1027
1028 d = node_remap(s, *from, *to);
1009 if (s == d) 1029 if (s == d)
1010 continue; 1030 continue;
1011 1031
@@ -1065,8 +1085,8 @@ static void migrate_page_add(struct page *page, struct list_head *pagelist,
1065{ 1085{
1066} 1086}
1067 1087
1068int do_migrate_pages(struct mm_struct *mm, 1088int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
1069 const nodemask_t *from_nodes, const nodemask_t *to_nodes, int flags) 1089 const nodemask_t *to, int flags)
1070{ 1090{
1071 return -ENOSYS; 1091 return -ENOSYS;
1072} 1092}
diff --git a/mm/mmap.c b/mm/mmap.c
index e8dcfc7de866..4a9c2a391e28 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1639,33 +1639,34 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
1639{ 1639{
1640 struct vm_area_struct *vma = NULL; 1640 struct vm_area_struct *vma = NULL;
1641 1641
1642 if (mm) { 1642 if (WARN_ON_ONCE(!mm)) /* Remove this in linux-3.6 */
1643 /* Check the cache first. */ 1643 return NULL;
1644 /* (Cache hit rate is typically around 35%.) */ 1644
1645 vma = mm->mmap_cache; 1645 /* Check the cache first. */
1646 if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) { 1646 /* (Cache hit rate is typically around 35%.) */
1647 struct rb_node * rb_node; 1647 vma = mm->mmap_cache;
1648 1648 if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) {
1649 rb_node = mm->mm_rb.rb_node; 1649 struct rb_node *rb_node;
1650 vma = NULL; 1650
1651 1651 rb_node = mm->mm_rb.rb_node;
1652 while (rb_node) { 1652 vma = NULL;
1653 struct vm_area_struct * vma_tmp; 1653
1654 1654 while (rb_node) {
1655 vma_tmp = rb_entry(rb_node, 1655 struct vm_area_struct *vma_tmp;
1656 struct vm_area_struct, vm_rb); 1656
1657 1657 vma_tmp = rb_entry(rb_node,
1658 if (vma_tmp->vm_end > addr) { 1658 struct vm_area_struct, vm_rb);
1659 vma = vma_tmp; 1659
1660 if (vma_tmp->vm_start <= addr) 1660 if (vma_tmp->vm_end > addr) {
1661 break; 1661 vma = vma_tmp;
1662 rb_node = rb_node->rb_left; 1662 if (vma_tmp->vm_start <= addr)
1663 } else 1663 break;
1664 rb_node = rb_node->rb_right; 1664 rb_node = rb_node->rb_left;
1665 } 1665 } else
1666 if (vma) 1666 rb_node = rb_node->rb_right;
1667 mm->mmap_cache = vma;
1668 } 1667 }
1668 if (vma)
1669 mm->mmap_cache = vma;
1669 } 1670 }
1670 return vma; 1671 return vma;
1671} 1672}
diff --git a/mm/mmzone.c b/mm/mmzone.c
index 7cf7b7ddc7c5..6830eab5bf09 100644
--- a/mm/mmzone.c
+++ b/mm/mmzone.c
@@ -86,3 +86,17 @@ int memmap_valid_within(unsigned long pfn,
86 return 1; 86 return 1;
87} 87}
88#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */ 88#endif /* CONFIG_ARCH_HAS_HOLES_MEMORYMODEL */
89
90void lruvec_init(struct lruvec *lruvec, struct zone *zone)
91{
92 enum lru_list lru;
93
94 memset(lruvec, 0, sizeof(struct lruvec));
95
96 for_each_lru(lru)
97 INIT_LIST_HEAD(&lruvec->lists[lru]);
98
99#ifdef CONFIG_CGROUP_MEM_RES_CTLR
100 lruvec->zone = zone;
101#endif
102}
diff --git a/mm/nobootmem.c b/mm/nobootmem.c
index 1983fb1c7026..d23415c001bc 100644
--- a/mm/nobootmem.c
+++ b/mm/nobootmem.c
@@ -274,86 +274,85 @@ void * __init __alloc_bootmem(unsigned long size, unsigned long align,
274 return ___alloc_bootmem(size, align, goal, limit); 274 return ___alloc_bootmem(size, align, goal, limit);
275} 275}
276 276
277/** 277static void * __init ___alloc_bootmem_node_nopanic(pg_data_t *pgdat,
278 * __alloc_bootmem_node - allocate boot memory from a specific node 278 unsigned long size,
279 * @pgdat: node to allocate from 279 unsigned long align,
280 * @size: size of the request in bytes 280 unsigned long goal,
281 * @align: alignment of the region 281 unsigned long limit)
282 * @goal: preferred starting address of the region
283 *
284 * The goal is dropped if it can not be satisfied and the allocation will
285 * fall back to memory below @goal.
286 *
287 * Allocation may fall back to any node in the system if the specified node
288 * can not hold the requested memory.
289 *
290 * The function panics if the request can not be satisfied.
291 */
292void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
293 unsigned long align, unsigned long goal)
294{ 282{
295 void *ptr; 283 void *ptr;
296 284
297 if (WARN_ON_ONCE(slab_is_available()))
298 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
299
300again: 285again:
301 ptr = __alloc_memory_core_early(pgdat->node_id, size, align, 286 ptr = __alloc_memory_core_early(pgdat->node_id, size, align,
302 goal, -1ULL); 287 goal, limit);
303 if (ptr) 288 if (ptr)
304 return ptr; 289 return ptr;
305 290
306 ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align, 291 ptr = __alloc_memory_core_early(MAX_NUMNODES, size, align,
307 goal, -1ULL); 292 goal, limit);
308 if (!ptr && goal) { 293 if (ptr)
294 return ptr;
295
296 if (goal) {
309 goal = 0; 297 goal = 0;
310 goto again; 298 goto again;
311 } 299 }
312 return ptr; 300
301 return NULL;
313} 302}
314 303
315void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size, 304void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size,
316 unsigned long align, unsigned long goal) 305 unsigned long align, unsigned long goal)
317{ 306{
318 return __alloc_bootmem_node(pgdat, size, align, goal); 307 if (WARN_ON_ONCE(slab_is_available()))
308 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
309
310 return ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, 0);
319} 311}
320 312
321#ifdef CONFIG_SPARSEMEM 313void * __init ___alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
322/** 314 unsigned long align, unsigned long goal,
323 * alloc_bootmem_section - allocate boot memory from a specific section 315 unsigned long limit)
324 * @size: size of the request in bytes
325 * @section_nr: sparse map section to allocate from
326 *
327 * Return NULL on failure.
328 */
329void * __init alloc_bootmem_section(unsigned long size,
330 unsigned long section_nr)
331{ 316{
332 unsigned long pfn, goal, limit; 317 void *ptr;
333 318
334 pfn = section_nr_to_pfn(section_nr); 319 ptr = ___alloc_bootmem_node_nopanic(pgdat, size, align, goal, limit);
335 goal = pfn << PAGE_SHIFT; 320 if (ptr)
336 limit = section_nr_to_pfn(section_nr + 1) << PAGE_SHIFT; 321 return ptr;
337 322
338 return __alloc_memory_core_early(early_pfn_to_nid(pfn), size, 323 printk(KERN_ALERT "bootmem alloc of %lu bytes failed!\n", size);
339 SMP_CACHE_BYTES, goal, limit); 324 panic("Out of memory");
325 return NULL;
340} 326}
341#endif
342 327
343void * __init __alloc_bootmem_node_nopanic(pg_data_t *pgdat, unsigned long size, 328/**
329 * __alloc_bootmem_node - allocate boot memory from a specific node
330 * @pgdat: node to allocate from
331 * @size: size of the request in bytes
332 * @align: alignment of the region
333 * @goal: preferred starting address of the region
334 *
335 * The goal is dropped if it can not be satisfied and the allocation will
336 * fall back to memory below @goal.
337 *
338 * Allocation may fall back to any node in the system if the specified node
339 * can not hold the requested memory.
340 *
341 * The function panics if the request can not be satisfied.
342 */
343void * __init __alloc_bootmem_node(pg_data_t *pgdat, unsigned long size,
344 unsigned long align, unsigned long goal) 344 unsigned long align, unsigned long goal)
345{ 345{
346 void *ptr;
347
348 if (WARN_ON_ONCE(slab_is_available())) 346 if (WARN_ON_ONCE(slab_is_available()))
349 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); 347 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
350 348
351 ptr = __alloc_memory_core_early(pgdat->node_id, size, align, 349 return ___alloc_bootmem_node(pgdat, size, align, goal, 0);
352 goal, -1ULL); 350}
353 if (ptr)
354 return ptr;
355 351
356 return __alloc_bootmem_nopanic(size, align, goal); 352void * __init __alloc_bootmem_node_high(pg_data_t *pgdat, unsigned long size,
353 unsigned long align, unsigned long goal)
354{
355 return __alloc_bootmem_node(pgdat, size, align, goal);
357} 356}
358 357
359#ifndef ARCH_LOW_ADDRESS_LIMIT 358#ifndef ARCH_LOW_ADDRESS_LIMIT
@@ -397,16 +396,9 @@ void * __init __alloc_bootmem_low(unsigned long size, unsigned long align,
397void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size, 396void * __init __alloc_bootmem_low_node(pg_data_t *pgdat, unsigned long size,
398 unsigned long align, unsigned long goal) 397 unsigned long align, unsigned long goal)
399{ 398{
400 void *ptr;
401
402 if (WARN_ON_ONCE(slab_is_available())) 399 if (WARN_ON_ONCE(slab_is_available()))
403 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id); 400 return kzalloc_node(size, GFP_NOWAIT, pgdat->node_id);
404 401
405 ptr = __alloc_memory_core_early(pgdat->node_id, size, align, 402 return ___alloc_bootmem_node(pgdat, size, align, goal,
406 goal, ARCH_LOW_ADDRESS_LIMIT); 403 ARCH_LOW_ADDRESS_LIMIT);
407 if (ptr)
408 return ptr;
409
410 return __alloc_memory_core_early(MAX_NUMNODES, size, align,
411 goal, ARCH_LOW_ADDRESS_LIMIT);
412} 404}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 9f09a1fde9f9..ed0e19677360 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -180,10 +180,10 @@ static bool oom_unkillable_task(struct task_struct *p,
180 * predictable as possible. The goal is to return the highest value for the 180 * predictable as possible. The goal is to return the highest value for the
181 * task consuming the most memory to avoid subsequent oom failures. 181 * task consuming the most memory to avoid subsequent oom failures.
182 */ 182 */
183unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg, 183unsigned long oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
184 const nodemask_t *nodemask, unsigned long totalpages) 184 const nodemask_t *nodemask, unsigned long totalpages)
185{ 185{
186 long points; 186 unsigned long points;
187 187
188 if (oom_unkillable_task(p, memcg, nodemask)) 188 if (oom_unkillable_task(p, memcg, nodemask))
189 return 0; 189 return 0;
@@ -198,21 +198,11 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
198 } 198 }
199 199
200 /* 200 /*
201 * The memory controller may have a limit of 0 bytes, so avoid a divide
202 * by zero, if necessary.
203 */
204 if (!totalpages)
205 totalpages = 1;
206
207 /*
208 * The baseline for the badness score is the proportion of RAM that each 201 * The baseline for the badness score is the proportion of RAM that each
209 * task's rss, pagetable and swap space use. 202 * task's rss, pagetable and swap space use.
210 */ 203 */
211 points = get_mm_rss(p->mm) + p->mm->nr_ptes; 204 points = get_mm_rss(p->mm) + p->mm->nr_ptes +
212 points += get_mm_counter(p->mm, MM_SWAPENTS); 205 get_mm_counter(p->mm, MM_SWAPENTS);
213
214 points *= 1000;
215 points /= totalpages;
216 task_unlock(p); 206 task_unlock(p);
217 207
218 /* 208 /*
@@ -220,23 +210,20 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *memcg,
220 * implementation used by LSMs. 210 * implementation used by LSMs.
221 */ 211 */
222 if (has_capability_noaudit(p, CAP_SYS_ADMIN)) 212 if (has_capability_noaudit(p, CAP_SYS_ADMIN))
223 points -= 30; 213 points -= 30 * totalpages / 1000;
224 214
225 /* 215 /*
226 * /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may 216 * /proc/pid/oom_score_adj ranges from -1000 to +1000 such that it may
227 * either completely disable oom killing or always prefer a certain 217 * either completely disable oom killing or always prefer a certain
228 * task. 218 * task.
229 */ 219 */
230 points += p->signal->oom_score_adj; 220 points += p->signal->oom_score_adj * totalpages / 1000;
231 221
232 /* 222 /*
233 * Never return 0 for an eligible task that may be killed since it's 223 * Never return 0 for an eligible task regardless of the root bonus and
234 * possible that no single user task uses more than 0.1% of memory and 224 * oom_score_adj (oom_score_adj can't be OOM_SCORE_ADJ_MIN here).
235 * no single admin tasks uses more than 3.0%.
236 */ 225 */
237 if (points <= 0) 226 return points ? points : 1;
238 return 1;
239 return (points < 1000) ? points : 1000;
240} 227}
241 228
242/* 229/*
@@ -314,7 +301,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
314{ 301{
315 struct task_struct *g, *p; 302 struct task_struct *g, *p;
316 struct task_struct *chosen = NULL; 303 struct task_struct *chosen = NULL;
317 *ppoints = 0; 304 unsigned long chosen_points = 0;
318 305
319 do_each_thread(g, p) { 306 do_each_thread(g, p) {
320 unsigned int points; 307 unsigned int points;
@@ -354,7 +341,7 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
354 */ 341 */
355 if (p == current) { 342 if (p == current) {
356 chosen = p; 343 chosen = p;
357 *ppoints = 1000; 344 chosen_points = ULONG_MAX;
358 } else if (!force_kill) { 345 } else if (!force_kill) {
359 /* 346 /*
360 * If this task is not being ptraced on exit, 347 * If this task is not being ptraced on exit,
@@ -367,12 +354,13 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
367 } 354 }
368 355
369 points = oom_badness(p, memcg, nodemask, totalpages); 356 points = oom_badness(p, memcg, nodemask, totalpages);
370 if (points > *ppoints) { 357 if (points > chosen_points) {
371 chosen = p; 358 chosen = p;
372 *ppoints = points; 359 chosen_points = points;
373 } 360 }
374 } while_each_thread(g, p); 361 } while_each_thread(g, p);
375 362
363 *ppoints = chosen_points * 1000 / totalpages;
376 return chosen; 364 return chosen;
377} 365}
378 366
@@ -572,7 +560,7 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask,
572 } 560 }
573 561
574 check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL); 562 check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, order, NULL);
575 limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT; 563 limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT ? : 1;
576 read_lock(&tasklist_lock); 564 read_lock(&tasklist_lock);
577 p = select_bad_process(&points, limit, memcg, NULL, false); 565 p = select_bad_process(&points, limit, memcg, NULL, false);
578 if (p && PTR_ERR(p) != -1UL) 566 if (p && PTR_ERR(p) != -1UL)
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index 26adea8ca2e7..93d8d2f7108c 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -204,7 +204,7 @@ static unsigned long highmem_dirtyable_memory(unsigned long total)
204 * Returns the global number of pages potentially available for dirty 204 * Returns the global number of pages potentially available for dirty
205 * page cache. This is the base value for the global dirty limits. 205 * page cache. This is the base value for the global dirty limits.
206 */ 206 */
207unsigned long global_dirtyable_memory(void) 207static unsigned long global_dirtyable_memory(void)
208{ 208{
209 unsigned long x; 209 unsigned long x;
210 210
@@ -1568,6 +1568,7 @@ void writeback_set_ratelimit(void)
1568 unsigned long background_thresh; 1568 unsigned long background_thresh;
1569 unsigned long dirty_thresh; 1569 unsigned long dirty_thresh;
1570 global_dirty_limits(&background_thresh, &dirty_thresh); 1570 global_dirty_limits(&background_thresh, &dirty_thresh);
1571 global_dirty_limit = dirty_thresh;
1571 ratelimit_pages = dirty_thresh / (num_online_cpus() * 32); 1572 ratelimit_pages = dirty_thresh / (num_online_cpus() * 32);
1572 if (ratelimit_pages < 16) 1573 if (ratelimit_pages < 16)
1573 ratelimit_pages = 16; 1574 ratelimit_pages = 16;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 1851df600438..6092f331b32e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -57,6 +57,7 @@
57#include <linux/ftrace_event.h> 57#include <linux/ftrace_event.h>
58#include <linux/memcontrol.h> 58#include <linux/memcontrol.h>
59#include <linux/prefetch.h> 59#include <linux/prefetch.h>
60#include <linux/migrate.h>
60#include <linux/page-debug-flags.h> 61#include <linux/page-debug-flags.h>
61 62
62#include <asm/tlbflush.h> 63#include <asm/tlbflush.h>
@@ -218,7 +219,7 @@ EXPORT_SYMBOL(nr_online_nodes);
218 219
219int page_group_by_mobility_disabled __read_mostly; 220int page_group_by_mobility_disabled __read_mostly;
220 221
221static void set_pageblock_migratetype(struct page *page, int migratetype) 222void set_pageblock_migratetype(struct page *page, int migratetype)
222{ 223{
223 224
224 if (unlikely(page_group_by_mobility_disabled)) 225 if (unlikely(page_group_by_mobility_disabled))
@@ -513,10 +514,10 @@ static inline int page_is_buddy(struct page *page, struct page *buddy,
513 * free pages of length of (1 << order) and marked with _mapcount -2. Page's 514 * free pages of length of (1 << order) and marked with _mapcount -2. Page's
514 * order is recorded in page_private(page) field. 515 * order is recorded in page_private(page) field.
515 * So when we are allocating or freeing one, we can derive the state of the 516 * So when we are allocating or freeing one, we can derive the state of the
516 * other. That is, if we allocate a small block, and both were 517 * other. That is, if we allocate a small block, and both were
517 * free, the remainder of the region must be split into blocks. 518 * free, the remainder of the region must be split into blocks.
518 * If a block is freed, and its buddy is also free, then this 519 * If a block is freed, and its buddy is also free, then this
519 * triggers coalescing into a block of larger size. 520 * triggers coalescing into a block of larger size.
520 * 521 *
521 * -- wli 522 * -- wli
522 */ 523 */
@@ -749,6 +750,24 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
749 __free_pages(page, order); 750 __free_pages(page, order);
750} 751}
751 752
753#ifdef CONFIG_CMA
754/* Free whole pageblock and set it's migration type to MIGRATE_CMA. */
755void __init init_cma_reserved_pageblock(struct page *page)
756{
757 unsigned i = pageblock_nr_pages;
758 struct page *p = page;
759
760 do {
761 __ClearPageReserved(p);
762 set_page_count(p, 0);
763 } while (++p, --i);
764
765 set_page_refcounted(page);
766 set_pageblock_migratetype(page, MIGRATE_CMA);
767 __free_pages(page, pageblock_order);
768 totalram_pages += pageblock_nr_pages;
769}
770#endif
752 771
753/* 772/*
754 * The order of subdivision here is critical for the IO subsystem. 773 * The order of subdivision here is critical for the IO subsystem.
@@ -874,11 +893,17 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
874 * This array describes the order lists are fallen back to when 893 * This array describes the order lists are fallen back to when
875 * the free lists for the desirable migrate type are depleted 894 * the free lists for the desirable migrate type are depleted
876 */ 895 */
877static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = { 896static int fallbacks[MIGRATE_TYPES][4] = {
878 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, 897 [MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
879 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE }, 898 [MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
880 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE }, 899#ifdef CONFIG_CMA
881 [MIGRATE_RESERVE] = { MIGRATE_RESERVE, MIGRATE_RESERVE, MIGRATE_RESERVE }, /* Never used */ 900 [MIGRATE_MOVABLE] = { MIGRATE_CMA, MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
901 [MIGRATE_CMA] = { MIGRATE_RESERVE }, /* Never used */
902#else
903 [MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
904#endif
905 [MIGRATE_RESERVE] = { MIGRATE_RESERVE }, /* Never used */
906 [MIGRATE_ISOLATE] = { MIGRATE_RESERVE }, /* Never used */
882}; 907};
883 908
884/* 909/*
@@ -929,8 +954,8 @@ static int move_freepages(struct zone *zone,
929 return pages_moved; 954 return pages_moved;
930} 955}
931 956
932static int move_freepages_block(struct zone *zone, struct page *page, 957int move_freepages_block(struct zone *zone, struct page *page,
933 int migratetype) 958 int migratetype)
934{ 959{
935 unsigned long start_pfn, end_pfn; 960 unsigned long start_pfn, end_pfn;
936 struct page *start_page, *end_page; 961 struct page *start_page, *end_page;
@@ -973,12 +998,12 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
973 /* Find the largest possible block of pages in the other list */ 998 /* Find the largest possible block of pages in the other list */
974 for (current_order = MAX_ORDER-1; current_order >= order; 999 for (current_order = MAX_ORDER-1; current_order >= order;
975 --current_order) { 1000 --current_order) {
976 for (i = 0; i < MIGRATE_TYPES - 1; i++) { 1001 for (i = 0;; i++) {
977 migratetype = fallbacks[start_migratetype][i]; 1002 migratetype = fallbacks[start_migratetype][i];
978 1003
979 /* MIGRATE_RESERVE handled later if necessary */ 1004 /* MIGRATE_RESERVE handled later if necessary */
980 if (migratetype == MIGRATE_RESERVE) 1005 if (migratetype == MIGRATE_RESERVE)
981 continue; 1006 break;
982 1007
983 area = &(zone->free_area[current_order]); 1008 area = &(zone->free_area[current_order]);
984 if (list_empty(&area->free_list[migratetype])) 1009 if (list_empty(&area->free_list[migratetype]))
@@ -993,11 +1018,18 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
993 * pages to the preferred allocation list. If falling 1018 * pages to the preferred allocation list. If falling
994 * back for a reclaimable kernel allocation, be more 1019 * back for a reclaimable kernel allocation, be more
995 * aggressive about taking ownership of free pages 1020 * aggressive about taking ownership of free pages
1021 *
1022 * On the other hand, never change migration
1023 * type of MIGRATE_CMA pageblocks nor move CMA
1024 * pages on different free lists. We don't
1025 * want unmovable pages to be allocated from
1026 * MIGRATE_CMA areas.
996 */ 1027 */
997 if (unlikely(current_order >= (pageblock_order >> 1)) || 1028 if (!is_migrate_cma(migratetype) &&
998 start_migratetype == MIGRATE_RECLAIMABLE || 1029 (unlikely(current_order >= pageblock_order / 2) ||
999 page_group_by_mobility_disabled) { 1030 start_migratetype == MIGRATE_RECLAIMABLE ||
1000 unsigned long pages; 1031 page_group_by_mobility_disabled)) {
1032 int pages;
1001 pages = move_freepages_block(zone, page, 1033 pages = move_freepages_block(zone, page,
1002 start_migratetype); 1034 start_migratetype);
1003 1035
@@ -1015,11 +1047,14 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
1015 rmv_page_order(page); 1047 rmv_page_order(page);
1016 1048
1017 /* Take ownership for orders >= pageblock_order */ 1049 /* Take ownership for orders >= pageblock_order */
1018 if (current_order >= pageblock_order) 1050 if (current_order >= pageblock_order &&
1051 !is_migrate_cma(migratetype))
1019 change_pageblock_range(page, current_order, 1052 change_pageblock_range(page, current_order,
1020 start_migratetype); 1053 start_migratetype);
1021 1054
1022 expand(zone, page, order, current_order, area, migratetype); 1055 expand(zone, page, order, current_order, area,
1056 is_migrate_cma(migratetype)
1057 ? migratetype : start_migratetype);
1023 1058
1024 trace_mm_page_alloc_extfrag(page, order, current_order, 1059 trace_mm_page_alloc_extfrag(page, order, current_order,
1025 start_migratetype, migratetype); 1060 start_migratetype, migratetype);
@@ -1061,17 +1096,17 @@ retry_reserve:
1061 return page; 1096 return page;
1062} 1097}
1063 1098
1064/* 1099/*
1065 * Obtain a specified number of elements from the buddy allocator, all under 1100 * Obtain a specified number of elements from the buddy allocator, all under
1066 * a single hold of the lock, for efficiency. Add them to the supplied list. 1101 * a single hold of the lock, for efficiency. Add them to the supplied list.
1067 * Returns the number of new pages which were placed at *list. 1102 * Returns the number of new pages which were placed at *list.
1068 */ 1103 */
1069static int rmqueue_bulk(struct zone *zone, unsigned int order, 1104static int rmqueue_bulk(struct zone *zone, unsigned int order,
1070 unsigned long count, struct list_head *list, 1105 unsigned long count, struct list_head *list,
1071 int migratetype, int cold) 1106 int migratetype, int cold)
1072{ 1107{
1073 int i; 1108 int mt = migratetype, i;
1074 1109
1075 spin_lock(&zone->lock); 1110 spin_lock(&zone->lock);
1076 for (i = 0; i < count; ++i) { 1111 for (i = 0; i < count; ++i) {
1077 struct page *page = __rmqueue(zone, order, migratetype); 1112 struct page *page = __rmqueue(zone, order, migratetype);
@@ -1091,7 +1126,12 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
1091 list_add(&page->lru, list); 1126 list_add(&page->lru, list);
1092 else 1127 else
1093 list_add_tail(&page->lru, list); 1128 list_add_tail(&page->lru, list);
1094 set_page_private(page, migratetype); 1129 if (IS_ENABLED(CONFIG_CMA)) {
1130 mt = get_pageblock_migratetype(page);
1131 if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
1132 mt = migratetype;
1133 }
1134 set_page_private(page, mt);
1095 list = &page->lru; 1135 list = &page->lru;
1096 } 1136 }
1097 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order)); 1137 __mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
@@ -1371,8 +1411,12 @@ int split_free_page(struct page *page)
1371 1411
1372 if (order >= pageblock_order - 1) { 1412 if (order >= pageblock_order - 1) {
1373 struct page *endpage = page + (1 << order) - 1; 1413 struct page *endpage = page + (1 << order) - 1;
1374 for (; page < endpage; page += pageblock_nr_pages) 1414 for (; page < endpage; page += pageblock_nr_pages) {
1375 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 1415 int mt = get_pageblock_migratetype(page);
1416 if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
1417 set_pageblock_migratetype(page,
1418 MIGRATE_MOVABLE);
1419 }
1376 } 1420 }
1377 1421
1378 return 1 << order; 1422 return 1 << order;
@@ -2086,16 +2130,13 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
2086} 2130}
2087#endif /* CONFIG_COMPACTION */ 2131#endif /* CONFIG_COMPACTION */
2088 2132
2089/* The really slow allocator path where we enter direct reclaim */ 2133/* Perform direct synchronous page reclaim */
2090static inline struct page * 2134static int
2091__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order, 2135__perform_reclaim(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist,
2092 struct zonelist *zonelist, enum zone_type high_zoneidx, 2136 nodemask_t *nodemask)
2093 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2094 int migratetype, unsigned long *did_some_progress)
2095{ 2137{
2096 struct page *page = NULL;
2097 struct reclaim_state reclaim_state; 2138 struct reclaim_state reclaim_state;
2098 bool drained = false; 2139 int progress;
2099 2140
2100 cond_resched(); 2141 cond_resched();
2101 2142
@@ -2106,7 +2147,7 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
2106 reclaim_state.reclaimed_slab = 0; 2147 reclaim_state.reclaimed_slab = 0;
2107 current->reclaim_state = &reclaim_state; 2148 current->reclaim_state = &reclaim_state;
2108 2149
2109 *did_some_progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask); 2150 progress = try_to_free_pages(zonelist, order, gfp_mask, nodemask);
2110 2151
2111 current->reclaim_state = NULL; 2152 current->reclaim_state = NULL;
2112 lockdep_clear_current_reclaim_state(); 2153 lockdep_clear_current_reclaim_state();
@@ -2114,6 +2155,21 @@ __alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
2114 2155
2115 cond_resched(); 2156 cond_resched();
2116 2157
2158 return progress;
2159}
2160
2161/* The really slow allocator path where we enter direct reclaim */
2162static inline struct page *
2163__alloc_pages_direct_reclaim(gfp_t gfp_mask, unsigned int order,
2164 struct zonelist *zonelist, enum zone_type high_zoneidx,
2165 nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
2166 int migratetype, unsigned long *did_some_progress)
2167{
2168 struct page *page = NULL;
2169 bool drained = false;
2170
2171 *did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
2172 nodemask);
2117 if (unlikely(!(*did_some_progress))) 2173 if (unlikely(!(*did_some_progress)))
2118 return NULL; 2174 return NULL;
2119 2175
@@ -4244,25 +4300,24 @@ static inline void setup_usemap(struct pglist_data *pgdat,
4244 4300
4245#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE 4301#ifdef CONFIG_HUGETLB_PAGE_SIZE_VARIABLE
4246 4302
4247/* Return a sensible default order for the pageblock size. */
4248static inline int pageblock_default_order(void)
4249{
4250 if (HPAGE_SHIFT > PAGE_SHIFT)
4251 return HUGETLB_PAGE_ORDER;
4252
4253 return MAX_ORDER-1;
4254}
4255
4256/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */ 4303/* Initialise the number of pages represented by NR_PAGEBLOCK_BITS */
4257static inline void __init set_pageblock_order(unsigned int order) 4304static inline void __init set_pageblock_order(void)
4258{ 4305{
4306 unsigned int order;
4307
4259 /* Check that pageblock_nr_pages has not already been setup */ 4308 /* Check that pageblock_nr_pages has not already been setup */
4260 if (pageblock_order) 4309 if (pageblock_order)
4261 return; 4310 return;
4262 4311
4312 if (HPAGE_SHIFT > PAGE_SHIFT)
4313 order = HUGETLB_PAGE_ORDER;
4314 else
4315 order = MAX_ORDER - 1;
4316
4263 /* 4317 /*
4264 * Assume the largest contiguous order of interest is a huge page. 4318 * Assume the largest contiguous order of interest is a huge page.
4265 * This value may be variable depending on boot parameters on IA64 4319 * This value may be variable depending on boot parameters on IA64 and
4320 * powerpc.
4266 */ 4321 */
4267 pageblock_order = order; 4322 pageblock_order = order;
4268} 4323}
@@ -4270,15 +4325,13 @@ static inline void __init set_pageblock_order(unsigned int order)
4270 4325
4271/* 4326/*
4272 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order() 4327 * When CONFIG_HUGETLB_PAGE_SIZE_VARIABLE is not set, set_pageblock_order()
4273 * and pageblock_default_order() are unused as pageblock_order is set 4328 * is unused as pageblock_order is set at compile-time. See
4274 * at compile-time. See include/linux/pageblock-flags.h for the values of 4329 * include/linux/pageblock-flags.h for the values of pageblock_order based on
4275 * pageblock_order based on the kernel config 4330 * the kernel config
4276 */ 4331 */
4277static inline int pageblock_default_order(unsigned int order) 4332static inline void set_pageblock_order(void)
4278{ 4333{
4279 return MAX_ORDER-1;
4280} 4334}
4281#define set_pageblock_order(x) do {} while (0)
4282 4335
4283#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */ 4336#endif /* CONFIG_HUGETLB_PAGE_SIZE_VARIABLE */
4284 4337
@@ -4301,11 +4354,10 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
4301 init_waitqueue_head(&pgdat->kswapd_wait); 4354 init_waitqueue_head(&pgdat->kswapd_wait);
4302 pgdat->kswapd_max_order = 0; 4355 pgdat->kswapd_max_order = 0;
4303 pgdat_page_cgroup_init(pgdat); 4356 pgdat_page_cgroup_init(pgdat);
4304 4357
4305 for (j = 0; j < MAX_NR_ZONES; j++) { 4358 for (j = 0; j < MAX_NR_ZONES; j++) {
4306 struct zone *zone = pgdat->node_zones + j; 4359 struct zone *zone = pgdat->node_zones + j;
4307 unsigned long size, realsize, memmap_pages; 4360 unsigned long size, realsize, memmap_pages;
4308 enum lru_list lru;
4309 4361
4310 size = zone_spanned_pages_in_node(nid, j, zones_size); 4362 size = zone_spanned_pages_in_node(nid, j, zones_size);
4311 realsize = size - zone_absent_pages_in_node(nid, j, 4363 realsize = size - zone_absent_pages_in_node(nid, j,
@@ -4355,18 +4407,13 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
4355 zone->zone_pgdat = pgdat; 4407 zone->zone_pgdat = pgdat;
4356 4408
4357 zone_pcp_init(zone); 4409 zone_pcp_init(zone);
4358 for_each_lru(lru) 4410 lruvec_init(&zone->lruvec, zone);
4359 INIT_LIST_HEAD(&zone->lruvec.lists[lru]);
4360 zone->reclaim_stat.recent_rotated[0] = 0;
4361 zone->reclaim_stat.recent_rotated[1] = 0;
4362 zone->reclaim_stat.recent_scanned[0] = 0;
4363 zone->reclaim_stat.recent_scanned[1] = 0;
4364 zap_zone_vm_stats(zone); 4411 zap_zone_vm_stats(zone);
4365 zone->flags = 0; 4412 zone->flags = 0;
4366 if (!size) 4413 if (!size)
4367 continue; 4414 continue;
4368 4415
4369 set_pageblock_order(pageblock_default_order()); 4416 set_pageblock_order();
4370 setup_usemap(pgdat, zone, size); 4417 setup_usemap(pgdat, zone, size);
4371 ret = init_currently_empty_zone(zone, zone_start_pfn, 4418 ret = init_currently_empty_zone(zone, zone_start_pfn,
4372 size, MEMMAP_EARLY); 4419 size, MEMMAP_EARLY);
@@ -4759,7 +4806,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4759 find_zone_movable_pfns_for_nodes(); 4806 find_zone_movable_pfns_for_nodes();
4760 4807
4761 /* Print out the zone ranges */ 4808 /* Print out the zone ranges */
4762 printk("Zone PFN ranges:\n"); 4809 printk("Zone ranges:\n");
4763 for (i = 0; i < MAX_NR_ZONES; i++) { 4810 for (i = 0; i < MAX_NR_ZONES; i++) {
4764 if (i == ZONE_MOVABLE) 4811 if (i == ZONE_MOVABLE)
4765 continue; 4812 continue;
@@ -4768,22 +4815,25 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
4768 arch_zone_highest_possible_pfn[i]) 4815 arch_zone_highest_possible_pfn[i])
4769 printk(KERN_CONT "empty\n"); 4816 printk(KERN_CONT "empty\n");
4770 else 4817 else
4771 printk(KERN_CONT "%0#10lx -> %0#10lx\n", 4818 printk(KERN_CONT "[mem %0#10lx-%0#10lx]\n",
4772 arch_zone_lowest_possible_pfn[i], 4819 arch_zone_lowest_possible_pfn[i] << PAGE_SHIFT,
4773 arch_zone_highest_possible_pfn[i]); 4820 (arch_zone_highest_possible_pfn[i]
4821 << PAGE_SHIFT) - 1);
4774 } 4822 }
4775 4823
4776 /* Print out the PFNs ZONE_MOVABLE begins at in each node */ 4824 /* Print out the PFNs ZONE_MOVABLE begins at in each node */
4777 printk("Movable zone start PFN for each node\n"); 4825 printk("Movable zone start for each node\n");
4778 for (i = 0; i < MAX_NUMNODES; i++) { 4826 for (i = 0; i < MAX_NUMNODES; i++) {
4779 if (zone_movable_pfn[i]) 4827 if (zone_movable_pfn[i])
4780 printk(" Node %d: %lu\n", i, zone_movable_pfn[i]); 4828 printk(" Node %d: %#010lx\n", i,
4829 zone_movable_pfn[i] << PAGE_SHIFT);
4781 } 4830 }
4782 4831
4783 /* Print out the early_node_map[] */ 4832 /* Print out the early_node_map[] */
4784 printk("Early memory PFN ranges\n"); 4833 printk("Early memory node ranges\n");
4785 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) 4834 for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid)
4786 printk(" %3d: %0#10lx -> %0#10lx\n", nid, start_pfn, end_pfn); 4835 printk(" node %3d: [mem %#010lx-%#010lx]\n", nid,
4836 start_pfn << PAGE_SHIFT, (end_pfn << PAGE_SHIFT) - 1);
4787 4837
4788 /* Initialise every node */ 4838 /* Initialise every node */
4789 mminit_verify_pageflags_layout(); 4839 mminit_verify_pageflags_layout();
@@ -4976,14 +5026,7 @@ static void setup_per_zone_lowmem_reserve(void)
4976 calculate_totalreserve_pages(); 5026 calculate_totalreserve_pages();
4977} 5027}
4978 5028
4979/** 5029static void __setup_per_zone_wmarks(void)
4980 * setup_per_zone_wmarks - called when min_free_kbytes changes
4981 * or when memory is hot-{added|removed}
4982 *
4983 * Ensures that the watermark[min,low,high] values for each zone are set
4984 * correctly with respect to min_free_kbytes.
4985 */
4986void setup_per_zone_wmarks(void)
4987{ 5030{
4988 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10); 5031 unsigned long pages_min = min_free_kbytes >> (PAGE_SHIFT - 10);
4989 unsigned long lowmem_pages = 0; 5032 unsigned long lowmem_pages = 0;
@@ -5030,6 +5073,11 @@ void setup_per_zone_wmarks(void)
5030 5073
5031 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2); 5074 zone->watermark[WMARK_LOW] = min_wmark_pages(zone) + (tmp >> 2);
5032 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1); 5075 zone->watermark[WMARK_HIGH] = min_wmark_pages(zone) + (tmp >> 1);
5076
5077 zone->watermark[WMARK_MIN] += cma_wmark_pages(zone);
5078 zone->watermark[WMARK_LOW] += cma_wmark_pages(zone);
5079 zone->watermark[WMARK_HIGH] += cma_wmark_pages(zone);
5080
5033 setup_zone_migrate_reserve(zone); 5081 setup_zone_migrate_reserve(zone);
5034 spin_unlock_irqrestore(&zone->lock, flags); 5082 spin_unlock_irqrestore(&zone->lock, flags);
5035 } 5083 }
@@ -5038,6 +5086,20 @@ void setup_per_zone_wmarks(void)
5038 calculate_totalreserve_pages(); 5086 calculate_totalreserve_pages();
5039} 5087}
5040 5088
5089/**
5090 * setup_per_zone_wmarks - called when min_free_kbytes changes
5091 * or when memory is hot-{added|removed}
5092 *
5093 * Ensures that the watermark[min,low,high] values for each zone are set
5094 * correctly with respect to min_free_kbytes.
5095 */
5096void setup_per_zone_wmarks(void)
5097{
5098 mutex_lock(&zonelists_mutex);
5099 __setup_per_zone_wmarks();
5100 mutex_unlock(&zonelists_mutex);
5101}
5102
5041/* 5103/*
5042 * The inactive anon list should be small enough that the VM never has to 5104 * The inactive anon list should be small enough that the VM never has to
5043 * do too much work, but large enough that each inactive page has a chance 5105 * do too much work, but large enough that each inactive page has a chance
@@ -5415,14 +5477,16 @@ static int
5415__count_immobile_pages(struct zone *zone, struct page *page, int count) 5477__count_immobile_pages(struct zone *zone, struct page *page, int count)
5416{ 5478{
5417 unsigned long pfn, iter, found; 5479 unsigned long pfn, iter, found;
5480 int mt;
5481
5418 /* 5482 /*
5419 * For avoiding noise data, lru_add_drain_all() should be called 5483 * For avoiding noise data, lru_add_drain_all() should be called
5420 * If ZONE_MOVABLE, the zone never contains immobile pages 5484 * If ZONE_MOVABLE, the zone never contains immobile pages
5421 */ 5485 */
5422 if (zone_idx(zone) == ZONE_MOVABLE) 5486 if (zone_idx(zone) == ZONE_MOVABLE)
5423 return true; 5487 return true;
5424 5488 mt = get_pageblock_migratetype(page);
5425 if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE) 5489 if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
5426 return true; 5490 return true;
5427 5491
5428 pfn = page_to_pfn(page); 5492 pfn = page_to_pfn(page);
@@ -5539,7 +5603,7 @@ out:
5539 return ret; 5603 return ret;
5540} 5604}
5541 5605
5542void unset_migratetype_isolate(struct page *page) 5606void unset_migratetype_isolate(struct page *page, unsigned migratetype)
5543{ 5607{
5544 struct zone *zone; 5608 struct zone *zone;
5545 unsigned long flags; 5609 unsigned long flags;
@@ -5547,12 +5611,259 @@ void unset_migratetype_isolate(struct page *page)
5547 spin_lock_irqsave(&zone->lock, flags); 5611 spin_lock_irqsave(&zone->lock, flags);
5548 if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE) 5612 if (get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
5549 goto out; 5613 goto out;
5550 set_pageblock_migratetype(page, MIGRATE_MOVABLE); 5614 set_pageblock_migratetype(page, migratetype);
5551 move_freepages_block(zone, page, MIGRATE_MOVABLE); 5615 move_freepages_block(zone, page, migratetype);
5552out: 5616out:
5553 spin_unlock_irqrestore(&zone->lock, flags); 5617 spin_unlock_irqrestore(&zone->lock, flags);
5554} 5618}
5555 5619
5620#ifdef CONFIG_CMA
5621
5622static unsigned long pfn_max_align_down(unsigned long pfn)
5623{
5624 return pfn & ~(max_t(unsigned long, MAX_ORDER_NR_PAGES,
5625 pageblock_nr_pages) - 1);
5626}
5627
5628static unsigned long pfn_max_align_up(unsigned long pfn)
5629{
5630 return ALIGN(pfn, max_t(unsigned long, MAX_ORDER_NR_PAGES,
5631 pageblock_nr_pages));
5632}
5633
5634static struct page *
5635__alloc_contig_migrate_alloc(struct page *page, unsigned long private,
5636 int **resultp)
5637{
5638 return alloc_page(GFP_HIGHUSER_MOVABLE);
5639}
5640
5641/* [start, end) must belong to a single zone. */
5642static int __alloc_contig_migrate_range(unsigned long start, unsigned long end)
5643{
5644 /* This function is based on compact_zone() from compaction.c. */
5645
5646 unsigned long pfn = start;
5647 unsigned int tries = 0;
5648 int ret = 0;
5649
5650 struct compact_control cc = {
5651 .nr_migratepages = 0,
5652 .order = -1,
5653 .zone = page_zone(pfn_to_page(start)),
5654 .mode = COMPACT_SYNC,
5655 };
5656 INIT_LIST_HEAD(&cc.migratepages);
5657
5658 migrate_prep_local();
5659
5660 while (pfn < end || !list_empty(&cc.migratepages)) {
5661 if (fatal_signal_pending(current)) {
5662 ret = -EINTR;
5663 break;
5664 }
5665
5666 if (list_empty(&cc.migratepages)) {
5667 cc.nr_migratepages = 0;
5668 pfn = isolate_migratepages_range(cc.zone, &cc,
5669 pfn, end);
5670 if (!pfn) {
5671 ret = -EINTR;
5672 break;
5673 }
5674 tries = 0;
5675 } else if (++tries == 5) {
5676 ret = ret < 0 ? ret : -EBUSY;
5677 break;
5678 }
5679
5680 ret = migrate_pages(&cc.migratepages,
5681 __alloc_contig_migrate_alloc,
5682 0, false, MIGRATE_SYNC);
5683 }
5684
5685 putback_lru_pages(&cc.migratepages);
5686 return ret > 0 ? 0 : ret;
5687}
5688
5689/*
5690 * Update zone's cma pages counter used for watermark level calculation.
5691 */
5692static inline void __update_cma_watermarks(struct zone *zone, int count)
5693{
5694 unsigned long flags;
5695 spin_lock_irqsave(&zone->lock, flags);
5696 zone->min_cma_pages += count;
5697 spin_unlock_irqrestore(&zone->lock, flags);
5698 setup_per_zone_wmarks();
5699}
5700
5701/*
5702 * Trigger memory pressure bump to reclaim some pages in order to be able to
5703 * allocate 'count' pages in single page units. Does similar work as
5704 *__alloc_pages_slowpath() function.
5705 */
5706static int __reclaim_pages(struct zone *zone, gfp_t gfp_mask, int count)
5707{
5708 enum zone_type high_zoneidx = gfp_zone(gfp_mask);
5709 struct zonelist *zonelist = node_zonelist(0, gfp_mask);
5710 int did_some_progress = 0;
5711 int order = 1;
5712
5713 /*
5714 * Increase level of watermarks to force kswapd do his job
5715 * to stabilise at new watermark level.
5716 */
5717 __update_cma_watermarks(zone, count);
5718
5719 /* Obey watermarks as if the page was being allocated */
5720 while (!zone_watermark_ok(zone, 0, low_wmark_pages(zone), 0, 0)) {
5721 wake_all_kswapd(order, zonelist, high_zoneidx, zone_idx(zone));
5722
5723 did_some_progress = __perform_reclaim(gfp_mask, order, zonelist,
5724 NULL);
5725 if (!did_some_progress) {
5726 /* Exhausted what can be done so it's blamo time */
5727 out_of_memory(zonelist, gfp_mask, order, NULL, false);
5728 }
5729 }
5730
5731 /* Restore original watermark levels. */
5732 __update_cma_watermarks(zone, -count);
5733
5734 return count;
5735}
5736
5737/**
5738 * alloc_contig_range() -- tries to allocate given range of pages
5739 * @start: start PFN to allocate
5740 * @end: one-past-the-last PFN to allocate
5741 * @migratetype: migratetype of the underlaying pageblocks (either
5742 * #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
5743 * in range must have the same migratetype and it must
5744 * be either of the two.
5745 *
5746 * The PFN range does not have to be pageblock or MAX_ORDER_NR_PAGES
5747 * aligned, however it's the caller's responsibility to guarantee that
5748 * we are the only thread that changes migrate type of pageblocks the
5749 * pages fall in.
5750 *
5751 * The PFN range must belong to a single zone.
5752 *
5753 * Returns zero on success or negative error code. On success all
5754 * pages which PFN is in [start, end) are allocated for the caller and
5755 * need to be freed with free_contig_range().
5756 */
5757int alloc_contig_range(unsigned long start, unsigned long end,
5758 unsigned migratetype)
5759{
5760 struct zone *zone = page_zone(pfn_to_page(start));
5761 unsigned long outer_start, outer_end;
5762 int ret = 0, order;
5763
5764 /*
5765 * What we do here is we mark all pageblocks in range as
5766 * MIGRATE_ISOLATE. Because pageblock and max order pages may
5767 * have different sizes, and due to the way page allocator
5768 * work, we align the range to biggest of the two pages so
5769 * that page allocator won't try to merge buddies from
5770 * different pageblocks and change MIGRATE_ISOLATE to some
5771 * other migration type.
5772 *
5773 * Once the pageblocks are marked as MIGRATE_ISOLATE, we
5774 * migrate the pages from an unaligned range (ie. pages that
5775 * we are interested in). This will put all the pages in
5776 * range back to page allocator as MIGRATE_ISOLATE.
5777 *
5778 * When this is done, we take the pages in range from page
5779 * allocator removing them from the buddy system. This way
5780 * page allocator will never consider using them.
5781 *
5782 * This lets us mark the pageblocks back as
5783 * MIGRATE_CMA/MIGRATE_MOVABLE so that free pages in the
5784 * aligned range but not in the unaligned, original range are
5785 * put back to page allocator so that buddy can use them.
5786 */
5787
5788 ret = start_isolate_page_range(pfn_max_align_down(start),
5789 pfn_max_align_up(end), migratetype);
5790 if (ret)
5791 goto done;
5792
5793 ret = __alloc_contig_migrate_range(start, end);
5794 if (ret)
5795 goto done;
5796
5797 /*
5798 * Pages from [start, end) are within a MAX_ORDER_NR_PAGES
5799 * aligned blocks that are marked as MIGRATE_ISOLATE. What's
5800 * more, all pages in [start, end) are free in page allocator.
5801 * What we are going to do is to allocate all pages from
5802 * [start, end) (that is remove them from page allocator).
5803 *
5804 * The only problem is that pages at the beginning and at the
5805 * end of interesting range may be not aligned with pages that
5806 * page allocator holds, ie. they can be part of higher order
5807 * pages. Because of this, we reserve the bigger range and
5808 * once this is done free the pages we are not interested in.
5809 *
5810 * We don't have to hold zone->lock here because the pages are
5811 * isolated thus they won't get removed from buddy.
5812 */
5813
5814 lru_add_drain_all();
5815 drain_all_pages();
5816
5817 order = 0;
5818 outer_start = start;
5819 while (!PageBuddy(pfn_to_page(outer_start))) {
5820 if (++order >= MAX_ORDER) {
5821 ret = -EBUSY;
5822 goto done;
5823 }
5824 outer_start &= ~0UL << order;
5825 }
5826
5827 /* Make sure the range is really isolated. */
5828 if (test_pages_isolated(outer_start, end)) {
5829 pr_warn("alloc_contig_range test_pages_isolated(%lx, %lx) failed\n",
5830 outer_start, end);
5831 ret = -EBUSY;
5832 goto done;
5833 }
5834
5835 /*
5836 * Reclaim enough pages to make sure that contiguous allocation
5837 * will not starve the system.
5838 */
5839 __reclaim_pages(zone, GFP_HIGHUSER_MOVABLE, end-start);
5840
5841 /* Grab isolated pages from freelists. */
5842 outer_end = isolate_freepages_range(outer_start, end);
5843 if (!outer_end) {
5844 ret = -EBUSY;
5845 goto done;
5846 }
5847
5848 /* Free head and tail (if any) */
5849 if (start != outer_start)
5850 free_contig_range(outer_start, start - outer_start);
5851 if (end != outer_end)
5852 free_contig_range(end, outer_end - end);
5853
5854done:
5855 undo_isolate_page_range(pfn_max_align_down(start),
5856 pfn_max_align_up(end), migratetype);
5857 return ret;
5858}
5859
5860void free_contig_range(unsigned long pfn, unsigned nr_pages)
5861{
5862 for (; nr_pages--; ++pfn)
5863 __free_page(pfn_to_page(pfn));
5864}
5865#endif
5866
5556#ifdef CONFIG_MEMORY_HOTREMOVE 5867#ifdef CONFIG_MEMORY_HOTREMOVE
5557/* 5868/*
5558 * All pages in the range must be isolated before calling this. 5869 * All pages in the range must be isolated before calling this.
@@ -5621,7 +5932,7 @@ bool is_free_buddy_page(struct page *page)
5621} 5932}
5622#endif 5933#endif
5623 5934
5624static struct trace_print_flags pageflag_names[] = { 5935static const struct trace_print_flags pageflag_names[] = {
5625 {1UL << PG_locked, "locked" }, 5936 {1UL << PG_locked, "locked" },
5626 {1UL << PG_error, "error" }, 5937 {1UL << PG_error, "error" },
5627 {1UL << PG_referenced, "referenced" }, 5938 {1UL << PG_referenced, "referenced" },
@@ -5656,7 +5967,9 @@ static struct trace_print_flags pageflag_names[] = {
5656#ifdef CONFIG_MEMORY_FAILURE 5967#ifdef CONFIG_MEMORY_FAILURE
5657 {1UL << PG_hwpoison, "hwpoison" }, 5968 {1UL << PG_hwpoison, "hwpoison" },
5658#endif 5969#endif
5659 {-1UL, NULL }, 5970#ifdef CONFIG_TRANSPARENT_HUGEPAGE
5971 {1UL << PG_compound_lock, "compound_lock" },
5972#endif
5660}; 5973};
5661 5974
5662static void dump_page_flags(unsigned long flags) 5975static void dump_page_flags(unsigned long flags)
@@ -5665,12 +5978,14 @@ static void dump_page_flags(unsigned long flags)
5665 unsigned long mask; 5978 unsigned long mask;
5666 int i; 5979 int i;
5667 5980
5981 BUILD_BUG_ON(ARRAY_SIZE(pageflag_names) != __NR_PAGEFLAGS);
5982
5668 printk(KERN_ALERT "page flags: %#lx(", flags); 5983 printk(KERN_ALERT "page flags: %#lx(", flags);
5669 5984
5670 /* remove zone id */ 5985 /* remove zone id */
5671 flags &= (1UL << NR_PAGEFLAGS) - 1; 5986 flags &= (1UL << NR_PAGEFLAGS) - 1;
5672 5987
5673 for (i = 0; pageflag_names[i].name && flags; i++) { 5988 for (i = 0; i < ARRAY_SIZE(pageflag_names) && flags; i++) {
5674 5989
5675 mask = pageflag_names[i].mask; 5990 mask = pageflag_names[i].mask;
5676 if ((flags & mask) != mask) 5991 if ((flags & mask) != mask)
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 4ae42bb40892..c9f04774f2b8 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -24,6 +24,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
24 * to be MIGRATE_ISOLATE. 24 * to be MIGRATE_ISOLATE.
25 * @start_pfn: The lower PFN of the range to be isolated. 25 * @start_pfn: The lower PFN of the range to be isolated.
26 * @end_pfn: The upper PFN of the range to be isolated. 26 * @end_pfn: The upper PFN of the range to be isolated.
27 * @migratetype: migrate type to set in error recovery.
27 * 28 *
28 * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in 29 * Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
29 * the range will never be allocated. Any free pages and pages freed in the 30 * the range will never be allocated. Any free pages and pages freed in the
@@ -32,8 +33,8 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
32 * start_pfn/end_pfn must be aligned to pageblock_order. 33 * start_pfn/end_pfn must be aligned to pageblock_order.
33 * Returns 0 on success and -EBUSY if any part of range cannot be isolated. 34 * Returns 0 on success and -EBUSY if any part of range cannot be isolated.
34 */ 35 */
35int 36int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
36start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn) 37 unsigned migratetype)
37{ 38{
38 unsigned long pfn; 39 unsigned long pfn;
39 unsigned long undo_pfn; 40 unsigned long undo_pfn;
@@ -56,7 +57,7 @@ undo:
56 for (pfn = start_pfn; 57 for (pfn = start_pfn;
57 pfn < undo_pfn; 58 pfn < undo_pfn;
58 pfn += pageblock_nr_pages) 59 pfn += pageblock_nr_pages)
59 unset_migratetype_isolate(pfn_to_page(pfn)); 60 unset_migratetype_isolate(pfn_to_page(pfn), migratetype);
60 61
61 return -EBUSY; 62 return -EBUSY;
62} 63}
@@ -64,8 +65,8 @@ undo:
64/* 65/*
65 * Make isolated pages available again. 66 * Make isolated pages available again.
66 */ 67 */
67int 68int undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
68undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn) 69 unsigned migratetype)
69{ 70{
70 unsigned long pfn; 71 unsigned long pfn;
71 struct page *page; 72 struct page *page;
@@ -77,7 +78,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
77 page = __first_valid_page(pfn, pageblock_nr_pages); 78 page = __first_valid_page(pfn, pageblock_nr_pages);
78 if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE) 79 if (!page || get_pageblock_migratetype(page) != MIGRATE_ISOLATE)
79 continue; 80 continue;
80 unset_migratetype_isolate(page); 81 unset_migratetype_isolate(page, migratetype);
81 } 82 }
82 return 0; 83 return 0;
83} 84}
@@ -86,7 +87,7 @@ undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn)
86 * all pages in [start_pfn...end_pfn) must be in the same zone. 87 * all pages in [start_pfn...end_pfn) must be in the same zone.
87 * zone->lock must be held before call this. 88 * zone->lock must be held before call this.
88 * 89 *
89 * Returns 1 if all pages in the range is isolated. 90 * Returns 1 if all pages in the range are isolated.
90 */ 91 */
91static int 92static int
92__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn) 93__test_page_isolated_in_pageblock(unsigned long pfn, unsigned long end_pfn)
diff --git a/mm/pgtable-generic.c b/mm/pgtable-generic.c
index 5a74fea182f1..74c0ddaa6fa0 100644
--- a/mm/pgtable-generic.c
+++ b/mm/pgtable-generic.c
@@ -109,8 +109,8 @@ pmd_t pmdp_clear_flush(struct vm_area_struct *vma, unsigned long address,
109 109
110#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH 110#ifndef __HAVE_ARCH_PMDP_SPLITTING_FLUSH
111#ifdef CONFIG_TRANSPARENT_HUGEPAGE 111#ifdef CONFIG_TRANSPARENT_HUGEPAGE
112pmd_t pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address, 112void pmdp_splitting_flush(struct vm_area_struct *vma, unsigned long address,
113 pmd_t *pmdp) 113 pmd_t *pmdp)
114{ 114{
115 pmd_t pmd = pmd_mksplitting(*pmdp); 115 pmd_t pmd = pmd_mksplitting(*pmdp);
116 VM_BUG_ON(address & ~HPAGE_PMD_MASK); 116 VM_BUG_ON(address & ~HPAGE_PMD_MASK);
diff --git a/mm/readahead.c b/mm/readahead.c
index cbcbb02f3e28..ea8f8fa21649 100644
--- a/mm/readahead.c
+++ b/mm/readahead.c
@@ -17,6 +17,8 @@
17#include <linux/task_io_accounting_ops.h> 17#include <linux/task_io_accounting_ops.h>
18#include <linux/pagevec.h> 18#include <linux/pagevec.h>
19#include <linux/pagemap.h> 19#include <linux/pagemap.h>
20#include <linux/syscalls.h>
21#include <linux/file.h>
20 22
21/* 23/*
22 * Initialise a struct file's readahead state. Assumes that the caller has 24 * Initialise a struct file's readahead state. Assumes that the caller has
@@ -562,3 +564,41 @@ page_cache_async_readahead(struct address_space *mapping,
562 ondemand_readahead(mapping, ra, filp, true, offset, req_size); 564 ondemand_readahead(mapping, ra, filp, true, offset, req_size);
563} 565}
564EXPORT_SYMBOL_GPL(page_cache_async_readahead); 566EXPORT_SYMBOL_GPL(page_cache_async_readahead);
567
568static ssize_t
569do_readahead(struct address_space *mapping, struct file *filp,
570 pgoff_t index, unsigned long nr)
571{
572 if (!mapping || !mapping->a_ops || !mapping->a_ops->readpage)
573 return -EINVAL;
574
575 force_page_cache_readahead(mapping, filp, index, nr);
576 return 0;
577}
578
579SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
580{
581 ssize_t ret;
582 struct file *file;
583
584 ret = -EBADF;
585 file = fget(fd);
586 if (file) {
587 if (file->f_mode & FMODE_READ) {
588 struct address_space *mapping = file->f_mapping;
589 pgoff_t start = offset >> PAGE_CACHE_SHIFT;
590 pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
591 unsigned long len = end - start + 1;
592 ret = do_readahead(mapping, file, start, len);
593 }
594 fput(file);
595 }
596 return ret;
597}
598#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
599asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
600{
601 return SYSC_readahead((int) fd, offset, (size_t) count);
602}
603SYSCALL_ALIAS(sys_readahead, SyS_readahead);
604#endif
diff --git a/mm/rmap.c b/mm/rmap.c
index 5b5ad584ffb7..0f3b7cda2a24 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -755,12 +755,6 @@ int page_referenced_one(struct page *page, struct vm_area_struct *vma,
755 pte_unmap_unlock(pte, ptl); 755 pte_unmap_unlock(pte, ptl);
756 } 756 }
757 757
758 /* Pretend the page is referenced if the task has the
759 swap token and is in the middle of a page fault. */
760 if (mm != current->mm && has_swap_token(mm) &&
761 rwsem_is_locked(&mm->mmap_sem))
762 referenced++;
763
764 (*mapcount)--; 758 (*mapcount)--;
765 759
766 if (referenced) 760 if (referenced)
diff --git a/mm/shmem.c b/mm/shmem.c
index d7b433a1ef5e..d576b84d913c 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -53,6 +53,7 @@ static struct vfsmount *shm_mnt;
53#include <linux/blkdev.h> 53#include <linux/blkdev.h>
54#include <linux/pagevec.h> 54#include <linux/pagevec.h>
55#include <linux/percpu_counter.h> 55#include <linux/percpu_counter.h>
56#include <linux/falloc.h>
56#include <linux/splice.h> 57#include <linux/splice.h>
57#include <linux/security.h> 58#include <linux/security.h>
58#include <linux/swapops.h> 59#include <linux/swapops.h>
@@ -83,12 +84,25 @@ struct shmem_xattr {
83 char value[0]; 84 char value[0];
84}; 85};
85 86
87/*
88 * shmem_fallocate and shmem_writepage communicate via inode->i_private
89 * (with i_mutex making sure that it has only one user at a time):
90 * we would prefer not to enlarge the shmem inode just for that.
91 */
92struct shmem_falloc {
93 pgoff_t start; /* start of range currently being fallocated */
94 pgoff_t next; /* the next page offset to be fallocated */
95 pgoff_t nr_falloced; /* how many new pages have been fallocated */
96 pgoff_t nr_unswapped; /* how often writepage refused to swap out */
97};
98
86/* Flag allocation requirements to shmem_getpage */ 99/* Flag allocation requirements to shmem_getpage */
87enum sgp_type { 100enum sgp_type {
88 SGP_READ, /* don't exceed i_size, don't allocate page */ 101 SGP_READ, /* don't exceed i_size, don't allocate page */
89 SGP_CACHE, /* don't exceed i_size, may allocate page */ 102 SGP_CACHE, /* don't exceed i_size, may allocate page */
90 SGP_DIRTY, /* like SGP_CACHE, but set new page dirty */ 103 SGP_DIRTY, /* like SGP_CACHE, but set new page dirty */
91 SGP_WRITE, /* may exceed i_size, may allocate page */ 104 SGP_WRITE, /* may exceed i_size, may allocate !Uptodate page */
105 SGP_FALLOC, /* like SGP_WRITE, but make existing page Uptodate */
92}; 106};
93 107
94#ifdef CONFIG_TMPFS 108#ifdef CONFIG_TMPFS
@@ -103,6 +117,9 @@ static unsigned long shmem_default_max_inodes(void)
103} 117}
104#endif 118#endif
105 119
120static bool shmem_should_replace_page(struct page *page, gfp_t gfp);
121static int shmem_replace_page(struct page **pagep, gfp_t gfp,
122 struct shmem_inode_info *info, pgoff_t index);
106static int shmem_getpage_gfp(struct inode *inode, pgoff_t index, 123static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
107 struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type); 124 struct page **pagep, enum sgp_type sgp, gfp_t gfp, int *fault_type);
108 125
@@ -423,27 +440,31 @@ void shmem_unlock_mapping(struct address_space *mapping)
423 440
424/* 441/*
425 * Remove range of pages and swap entries from radix tree, and free them. 442 * Remove range of pages and swap entries from radix tree, and free them.
443 * If !unfalloc, truncate or punch hole; if unfalloc, undo failed fallocate.
426 */ 444 */
427void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend) 445static void shmem_undo_range(struct inode *inode, loff_t lstart, loff_t lend,
446 bool unfalloc)
428{ 447{
429 struct address_space *mapping = inode->i_mapping; 448 struct address_space *mapping = inode->i_mapping;
430 struct shmem_inode_info *info = SHMEM_I(inode); 449 struct shmem_inode_info *info = SHMEM_I(inode);
431 pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; 450 pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
432 unsigned partial = lstart & (PAGE_CACHE_SIZE - 1); 451 pgoff_t end = (lend + 1) >> PAGE_CACHE_SHIFT;
433 pgoff_t end = (lend >> PAGE_CACHE_SHIFT); 452 unsigned int partial_start = lstart & (PAGE_CACHE_SIZE - 1);
453 unsigned int partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);
434 struct pagevec pvec; 454 struct pagevec pvec;
435 pgoff_t indices[PAGEVEC_SIZE]; 455 pgoff_t indices[PAGEVEC_SIZE];
436 long nr_swaps_freed = 0; 456 long nr_swaps_freed = 0;
437 pgoff_t index; 457 pgoff_t index;
438 int i; 458 int i;
439 459
440 BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1)); 460 if (lend == -1)
461 end = -1; /* unsigned, so actually very big */
441 462
442 pagevec_init(&pvec, 0); 463 pagevec_init(&pvec, 0);
443 index = start; 464 index = start;
444 while (index <= end) { 465 while (index < end) {
445 pvec.nr = shmem_find_get_pages_and_swap(mapping, index, 466 pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
446 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1, 467 min(end - index, (pgoff_t)PAGEVEC_SIZE),
447 pvec.pages, indices); 468 pvec.pages, indices);
448 if (!pvec.nr) 469 if (!pvec.nr)
449 break; 470 break;
@@ -452,10 +473,12 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
452 struct page *page = pvec.pages[i]; 473 struct page *page = pvec.pages[i];
453 474
454 index = indices[i]; 475 index = indices[i];
455 if (index > end) 476 if (index >= end)
456 break; 477 break;
457 478
458 if (radix_tree_exceptional_entry(page)) { 479 if (radix_tree_exceptional_entry(page)) {
480 if (unfalloc)
481 continue;
459 nr_swaps_freed += !shmem_free_swap(mapping, 482 nr_swaps_freed += !shmem_free_swap(mapping,
460 index, page); 483 index, page);
461 continue; 484 continue;
@@ -463,9 +486,11 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
463 486
464 if (!trylock_page(page)) 487 if (!trylock_page(page))
465 continue; 488 continue;
466 if (page->mapping == mapping) { 489 if (!unfalloc || !PageUptodate(page)) {
467 VM_BUG_ON(PageWriteback(page)); 490 if (page->mapping == mapping) {
468 truncate_inode_page(mapping, page); 491 VM_BUG_ON(PageWriteback(page));
492 truncate_inode_page(mapping, page);
493 }
469 } 494 }
470 unlock_page(page); 495 unlock_page(page);
471 } 496 }
@@ -476,30 +501,47 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
476 index++; 501 index++;
477 } 502 }
478 503
479 if (partial) { 504 if (partial_start) {
480 struct page *page = NULL; 505 struct page *page = NULL;
481 shmem_getpage(inode, start - 1, &page, SGP_READ, NULL); 506 shmem_getpage(inode, start - 1, &page, SGP_READ, NULL);
482 if (page) { 507 if (page) {
483 zero_user_segment(page, partial, PAGE_CACHE_SIZE); 508 unsigned int top = PAGE_CACHE_SIZE;
509 if (start > end) {
510 top = partial_end;
511 partial_end = 0;
512 }
513 zero_user_segment(page, partial_start, top);
514 set_page_dirty(page);
515 unlock_page(page);
516 page_cache_release(page);
517 }
518 }
519 if (partial_end) {
520 struct page *page = NULL;
521 shmem_getpage(inode, end, &page, SGP_READ, NULL);
522 if (page) {
523 zero_user_segment(page, 0, partial_end);
484 set_page_dirty(page); 524 set_page_dirty(page);
485 unlock_page(page); 525 unlock_page(page);
486 page_cache_release(page); 526 page_cache_release(page);
487 } 527 }
488 } 528 }
529 if (start >= end)
530 return;
489 531
490 index = start; 532 index = start;
491 for ( ; ; ) { 533 for ( ; ; ) {
492 cond_resched(); 534 cond_resched();
493 pvec.nr = shmem_find_get_pages_and_swap(mapping, index, 535 pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
494 min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1, 536 min(end - index, (pgoff_t)PAGEVEC_SIZE),
495 pvec.pages, indices); 537 pvec.pages, indices);
496 if (!pvec.nr) { 538 if (!pvec.nr) {
497 if (index == start) 539 if (index == start || unfalloc)
498 break; 540 break;
499 index = start; 541 index = start;
500 continue; 542 continue;
501 } 543 }
502 if (index == start && indices[0] > end) { 544 if ((index == start || unfalloc) && indices[0] >= end) {
503 shmem_deswap_pagevec(&pvec); 545 shmem_deswap_pagevec(&pvec);
504 pagevec_release(&pvec); 546 pagevec_release(&pvec);
505 break; 547 break;
@@ -509,19 +551,23 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
509 struct page *page = pvec.pages[i]; 551 struct page *page = pvec.pages[i];
510 552
511 index = indices[i]; 553 index = indices[i];
512 if (index > end) 554 if (index >= end)
513 break; 555 break;
514 556
515 if (radix_tree_exceptional_entry(page)) { 557 if (radix_tree_exceptional_entry(page)) {
558 if (unfalloc)
559 continue;
516 nr_swaps_freed += !shmem_free_swap(mapping, 560 nr_swaps_freed += !shmem_free_swap(mapping,
517 index, page); 561 index, page);
518 continue; 562 continue;
519 } 563 }
520 564
521 lock_page(page); 565 lock_page(page);
522 if (page->mapping == mapping) { 566 if (!unfalloc || !PageUptodate(page)) {
523 VM_BUG_ON(PageWriteback(page)); 567 if (page->mapping == mapping) {
524 truncate_inode_page(mapping, page); 568 VM_BUG_ON(PageWriteback(page));
569 truncate_inode_page(mapping, page);
570 }
525 } 571 }
526 unlock_page(page); 572 unlock_page(page);
527 } 573 }
@@ -535,7 +581,11 @@ void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
535 info->swapped -= nr_swaps_freed; 581 info->swapped -= nr_swaps_freed;
536 shmem_recalc_inode(inode); 582 shmem_recalc_inode(inode);
537 spin_unlock(&info->lock); 583 spin_unlock(&info->lock);
584}
538 585
586void shmem_truncate_range(struct inode *inode, loff_t lstart, loff_t lend)
587{
588 shmem_undo_range(inode, lstart, lend, false);
539 inode->i_ctime = inode->i_mtime = CURRENT_TIME; 589 inode->i_ctime = inode->i_mtime = CURRENT_TIME;
540} 590}
541EXPORT_SYMBOL_GPL(shmem_truncate_range); 591EXPORT_SYMBOL_GPL(shmem_truncate_range);
@@ -597,19 +647,20 @@ static void shmem_evict_inode(struct inode *inode)
597 } 647 }
598 BUG_ON(inode->i_blocks); 648 BUG_ON(inode->i_blocks);
599 shmem_free_inode(inode->i_sb); 649 shmem_free_inode(inode->i_sb);
600 end_writeback(inode); 650 clear_inode(inode);
601} 651}
602 652
603/* 653/*
604 * If swap found in inode, free it and move page from swapcache to filecache. 654 * If swap found in inode, free it and move page from swapcache to filecache.
605 */ 655 */
606static int shmem_unuse_inode(struct shmem_inode_info *info, 656static int shmem_unuse_inode(struct shmem_inode_info *info,
607 swp_entry_t swap, struct page *page) 657 swp_entry_t swap, struct page **pagep)
608{ 658{
609 struct address_space *mapping = info->vfs_inode.i_mapping; 659 struct address_space *mapping = info->vfs_inode.i_mapping;
610 void *radswap; 660 void *radswap;
611 pgoff_t index; 661 pgoff_t index;
612 int error; 662 gfp_t gfp;
663 int error = 0;
613 664
614 radswap = swp_to_radix_entry(swap); 665 radswap = swp_to_radix_entry(swap);
615 index = radix_tree_locate_item(&mapping->page_tree, radswap); 666 index = radix_tree_locate_item(&mapping->page_tree, radswap);
@@ -625,22 +676,37 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
625 if (shmem_swaplist.next != &info->swaplist) 676 if (shmem_swaplist.next != &info->swaplist)
626 list_move_tail(&shmem_swaplist, &info->swaplist); 677 list_move_tail(&shmem_swaplist, &info->swaplist);
627 678
679 gfp = mapping_gfp_mask(mapping);
680 if (shmem_should_replace_page(*pagep, gfp)) {
681 mutex_unlock(&shmem_swaplist_mutex);
682 error = shmem_replace_page(pagep, gfp, info, index);
683 mutex_lock(&shmem_swaplist_mutex);
684 /*
685 * We needed to drop mutex to make that restrictive page
686 * allocation; but the inode might already be freed by now,
687 * and we cannot refer to inode or mapping or info to check.
688 * However, we do hold page lock on the PageSwapCache page,
689 * so can check if that still has our reference remaining.
690 */
691 if (!page_swapcount(*pagep))
692 error = -ENOENT;
693 }
694
628 /* 695 /*
629 * We rely on shmem_swaplist_mutex, not only to protect the swaplist, 696 * We rely on shmem_swaplist_mutex, not only to protect the swaplist,
630 * but also to hold up shmem_evict_inode(): so inode cannot be freed 697 * but also to hold up shmem_evict_inode(): so inode cannot be freed
631 * beneath us (pagelock doesn't help until the page is in pagecache). 698 * beneath us (pagelock doesn't help until the page is in pagecache).
632 */ 699 */
633 error = shmem_add_to_page_cache(page, mapping, index, 700 if (!error)
701 error = shmem_add_to_page_cache(*pagep, mapping, index,
634 GFP_NOWAIT, radswap); 702 GFP_NOWAIT, radswap);
635 /* which does mem_cgroup_uncharge_cache_page on error */
636
637 if (error != -ENOMEM) { 703 if (error != -ENOMEM) {
638 /* 704 /*
639 * Truncation and eviction use free_swap_and_cache(), which 705 * Truncation and eviction use free_swap_and_cache(), which
640 * only does trylock page: if we raced, best clean up here. 706 * only does trylock page: if we raced, best clean up here.
641 */ 707 */
642 delete_from_swap_cache(page); 708 delete_from_swap_cache(*pagep);
643 set_page_dirty(page); 709 set_page_dirty(*pagep);
644 if (!error) { 710 if (!error) {
645 spin_lock(&info->lock); 711 spin_lock(&info->lock);
646 info->swapped--; 712 info->swapped--;
@@ -660,7 +726,14 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
660 struct list_head *this, *next; 726 struct list_head *this, *next;
661 struct shmem_inode_info *info; 727 struct shmem_inode_info *info;
662 int found = 0; 728 int found = 0;
663 int error; 729 int error = 0;
730
731 /*
732 * There's a faint possibility that swap page was replaced before
733 * caller locked it: it will come back later with the right page.
734 */
735 if (unlikely(!PageSwapCache(page)))
736 goto out;
664 737
665 /* 738 /*
666 * Charge page using GFP_KERNEL while we can wait, before taking 739 * Charge page using GFP_KERNEL while we can wait, before taking
@@ -676,7 +749,7 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
676 list_for_each_safe(this, next, &shmem_swaplist) { 749 list_for_each_safe(this, next, &shmem_swaplist) {
677 info = list_entry(this, struct shmem_inode_info, swaplist); 750 info = list_entry(this, struct shmem_inode_info, swaplist);
678 if (info->swapped) 751 if (info->swapped)
679 found = shmem_unuse_inode(info, swap, page); 752 found = shmem_unuse_inode(info, swap, &page);
680 else 753 else
681 list_del_init(&info->swaplist); 754 list_del_init(&info->swaplist);
682 cond_resched(); 755 cond_resched();
@@ -685,8 +758,6 @@ int shmem_unuse(swp_entry_t swap, struct page *page)
685 } 758 }
686 mutex_unlock(&shmem_swaplist_mutex); 759 mutex_unlock(&shmem_swaplist_mutex);
687 760
688 if (!found)
689 mem_cgroup_uncharge_cache_page(page);
690 if (found < 0) 761 if (found < 0)
691 error = found; 762 error = found;
692out: 763out:
@@ -727,6 +798,38 @@ static int shmem_writepage(struct page *page, struct writeback_control *wbc)
727 WARN_ON_ONCE(1); /* Still happens? Tell us about it! */ 798 WARN_ON_ONCE(1); /* Still happens? Tell us about it! */
728 goto redirty; 799 goto redirty;
729 } 800 }
801
802 /*
803 * This is somewhat ridiculous, but without plumbing a SWAP_MAP_FALLOC
804 * value into swapfile.c, the only way we can correctly account for a
805 * fallocated page arriving here is now to initialize it and write it.
806 *
807 * That's okay for a page already fallocated earlier, but if we have
808 * not yet completed the fallocation, then (a) we want to keep track
809 * of this page in case we have to undo it, and (b) it may not be a
810 * good idea to continue anyway, once we're pushing into swap. So
811 * reactivate the page, and let shmem_fallocate() quit when too many.
812 */
813 if (!PageUptodate(page)) {
814 if (inode->i_private) {
815 struct shmem_falloc *shmem_falloc;
816 spin_lock(&inode->i_lock);
817 shmem_falloc = inode->i_private;
818 if (shmem_falloc &&
819 index >= shmem_falloc->start &&
820 index < shmem_falloc->next)
821 shmem_falloc->nr_unswapped++;
822 else
823 shmem_falloc = NULL;
824 spin_unlock(&inode->i_lock);
825 if (shmem_falloc)
826 goto redirty;
827 }
828 clear_highpage(page);
829 flush_dcache_page(page);
830 SetPageUptodate(page);
831 }
832
730 swap = get_swap_page(); 833 swap = get_swap_page();
731 if (!swap.val) 834 if (!swap.val)
732 goto redirty; 835 goto redirty;
@@ -856,6 +959,84 @@ static inline struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
856#endif 959#endif
857 960
858/* 961/*
962 * When a page is moved from swapcache to shmem filecache (either by the
963 * usual swapin of shmem_getpage_gfp(), or by the less common swapoff of
964 * shmem_unuse_inode()), it may have been read in earlier from swap, in
965 * ignorance of the mapping it belongs to. If that mapping has special
966 * constraints (like the gma500 GEM driver, which requires RAM below 4GB),
967 * we may need to copy to a suitable page before moving to filecache.
968 *
969 * In a future release, this may well be extended to respect cpuset and
970 * NUMA mempolicy, and applied also to anonymous pages in do_swap_page();
971 * but for now it is a simple matter of zone.
972 */
973static bool shmem_should_replace_page(struct page *page, gfp_t gfp)
974{
975 return page_zonenum(page) > gfp_zone(gfp);
976}
977
978static int shmem_replace_page(struct page **pagep, gfp_t gfp,
979 struct shmem_inode_info *info, pgoff_t index)
980{
981 struct page *oldpage, *newpage;
982 struct address_space *swap_mapping;
983 pgoff_t swap_index;
984 int error;
985
986 oldpage = *pagep;
987 swap_index = page_private(oldpage);
988 swap_mapping = page_mapping(oldpage);
989
990 /*
991 * We have arrived here because our zones are constrained, so don't
992 * limit chance of success by further cpuset and node constraints.
993 */
994 gfp &= ~GFP_CONSTRAINT_MASK;
995 newpage = shmem_alloc_page(gfp, info, index);
996 if (!newpage)
997 return -ENOMEM;
998 VM_BUG_ON(shmem_should_replace_page(newpage, gfp));
999
1000 *pagep = newpage;
1001 page_cache_get(newpage);
1002 copy_highpage(newpage, oldpage);
1003
1004 VM_BUG_ON(!PageLocked(oldpage));
1005 __set_page_locked(newpage);
1006 VM_BUG_ON(!PageUptodate(oldpage));
1007 SetPageUptodate(newpage);
1008 VM_BUG_ON(!PageSwapBacked(oldpage));
1009 SetPageSwapBacked(newpage);
1010 VM_BUG_ON(!swap_index);
1011 set_page_private(newpage, swap_index);
1012 VM_BUG_ON(!PageSwapCache(oldpage));
1013 SetPageSwapCache(newpage);
1014
1015 /*
1016 * Our caller will very soon move newpage out of swapcache, but it's
1017 * a nice clean interface for us to replace oldpage by newpage there.
1018 */
1019 spin_lock_irq(&swap_mapping->tree_lock);
1020 error = shmem_radix_tree_replace(swap_mapping, swap_index, oldpage,
1021 newpage);
1022 __inc_zone_page_state(newpage, NR_FILE_PAGES);
1023 __dec_zone_page_state(oldpage, NR_FILE_PAGES);
1024 spin_unlock_irq(&swap_mapping->tree_lock);
1025 BUG_ON(error);
1026
1027 mem_cgroup_replace_page_cache(oldpage, newpage);
1028 lru_cache_add_anon(newpage);
1029
1030 ClearPageSwapCache(oldpage);
1031 set_page_private(oldpage, 0);
1032
1033 unlock_page(oldpage);
1034 page_cache_release(oldpage);
1035 page_cache_release(oldpage);
1036 return 0;
1037}
1038
1039/*
859 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate 1040 * shmem_getpage_gfp - find page in cache, or get from swap, or allocate
860 * 1041 *
861 * If we allocate a new one we do not mark it dirty. That's up to the 1042 * If we allocate a new one we do not mark it dirty. That's up to the
@@ -872,6 +1053,7 @@ static int shmem_getpage_gfp(struct inode *inode, pgoff_t index,
872 swp_entry_t swap; 1053 swp_entry_t swap;
873 int error; 1054 int error;
874 int once = 0; 1055 int once = 0;
1056 int alloced = 0;
875 1057
876 if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT)) 1058 if (index > (MAX_LFS_FILESIZE >> PAGE_CACHE_SHIFT))
877 return -EFBIG; 1059 return -EFBIG;
@@ -883,19 +1065,21 @@ repeat:
883 page = NULL; 1065 page = NULL;
884 } 1066 }
885 1067
886 if (sgp != SGP_WRITE && 1068 if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
887 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { 1069 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
888 error = -EINVAL; 1070 error = -EINVAL;
889 goto failed; 1071 goto failed;
890 } 1072 }
891 1073
1074 /* fallocated page? */
1075 if (page && !PageUptodate(page)) {
1076 if (sgp != SGP_READ)
1077 goto clear;
1078 unlock_page(page);
1079 page_cache_release(page);
1080 page = NULL;
1081 }
892 if (page || (sgp == SGP_READ && !swap.val)) { 1082 if (page || (sgp == SGP_READ && !swap.val)) {
893 /*
894 * Once we can get the page lock, it must be uptodate:
895 * if there were an error in reading back from swap,
896 * the page would not be inserted into the filecache.
897 */
898 BUG_ON(page && !PageUptodate(page));
899 *pagep = page; 1083 *pagep = page;
900 return 0; 1084 return 0;
901 } 1085 }
@@ -923,19 +1107,20 @@ repeat:
923 1107
924 /* We have to do this with page locked to prevent races */ 1108 /* We have to do this with page locked to prevent races */
925 lock_page(page); 1109 lock_page(page);
1110 if (!PageSwapCache(page) || page->mapping) {
1111 error = -EEXIST; /* try again */
1112 goto failed;
1113 }
926 if (!PageUptodate(page)) { 1114 if (!PageUptodate(page)) {
927 error = -EIO; 1115 error = -EIO;
928 goto failed; 1116 goto failed;
929 } 1117 }
930 wait_on_page_writeback(page); 1118 wait_on_page_writeback(page);
931 1119
932 /* Someone may have already done it for us */ 1120 if (shmem_should_replace_page(page, gfp)) {
933 if (page->mapping) { 1121 error = shmem_replace_page(&page, gfp, info, index);
934 if (page->mapping == mapping && 1122 if (error)
935 page->index == index) 1123 goto failed;
936 goto done;
937 error = -EEXIST;
938 goto failed;
939 } 1124 }
940 1125
941 error = mem_cgroup_cache_charge(page, current->mm, 1126 error = mem_cgroup_cache_charge(page, current->mm,
@@ -991,19 +1176,36 @@ repeat:
991 inode->i_blocks += BLOCKS_PER_PAGE; 1176 inode->i_blocks += BLOCKS_PER_PAGE;
992 shmem_recalc_inode(inode); 1177 shmem_recalc_inode(inode);
993 spin_unlock(&info->lock); 1178 spin_unlock(&info->lock);
1179 alloced = true;
994 1180
995 clear_highpage(page); 1181 /*
996 flush_dcache_page(page); 1182 * Let SGP_FALLOC use the SGP_WRITE optimization on a new page.
997 SetPageUptodate(page); 1183 */
1184 if (sgp == SGP_FALLOC)
1185 sgp = SGP_WRITE;
1186clear:
1187 /*
1188 * Let SGP_WRITE caller clear ends if write does not fill page;
1189 * but SGP_FALLOC on a page fallocated earlier must initialize
1190 * it now, lest undo on failure cancel our earlier guarantee.
1191 */
1192 if (sgp != SGP_WRITE) {
1193 clear_highpage(page);
1194 flush_dcache_page(page);
1195 SetPageUptodate(page);
1196 }
998 if (sgp == SGP_DIRTY) 1197 if (sgp == SGP_DIRTY)
999 set_page_dirty(page); 1198 set_page_dirty(page);
1000 } 1199 }
1001done: 1200
1002 /* Perhaps the file has been truncated since we checked */ 1201 /* Perhaps the file has been truncated since we checked */
1003 if (sgp != SGP_WRITE && 1202 if (sgp != SGP_WRITE && sgp != SGP_FALLOC &&
1004 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) { 1203 ((loff_t)index << PAGE_CACHE_SHIFT) >= i_size_read(inode)) {
1005 error = -EINVAL; 1204 error = -EINVAL;
1006 goto trunc; 1205 if (alloced)
1206 goto trunc;
1207 else
1208 goto failed;
1007 } 1209 }
1008 *pagep = page; 1210 *pagep = page;
1009 return 0; 1211 return 0;
@@ -1012,6 +1214,7 @@ done:
1012 * Error recovery. 1214 * Error recovery.
1013 */ 1215 */
1014trunc: 1216trunc:
1217 info = SHMEM_I(inode);
1015 ClearPageDirty(page); 1218 ClearPageDirty(page);
1016 delete_from_page_cache(page); 1219 delete_from_page_cache(page);
1017 spin_lock(&info->lock); 1220 spin_lock(&info->lock);
@@ -1019,6 +1222,7 @@ trunc:
1019 inode->i_blocks -= BLOCKS_PER_PAGE; 1222 inode->i_blocks -= BLOCKS_PER_PAGE;
1020 spin_unlock(&info->lock); 1223 spin_unlock(&info->lock);
1021decused: 1224decused:
1225 sbinfo = SHMEM_SB(inode->i_sb);
1022 if (sbinfo->max_blocks) 1226 if (sbinfo->max_blocks)
1023 percpu_counter_add(&sbinfo->used_blocks, -1); 1227 percpu_counter_add(&sbinfo->used_blocks, -1);
1024unacct: 1228unacct:
@@ -1204,6 +1408,14 @@ shmem_write_end(struct file *file, struct address_space *mapping,
1204 if (pos + copied > inode->i_size) 1408 if (pos + copied > inode->i_size)
1205 i_size_write(inode, pos + copied); 1409 i_size_write(inode, pos + copied);
1206 1410
1411 if (!PageUptodate(page)) {
1412 if (copied < PAGE_CACHE_SIZE) {
1413 unsigned from = pos & (PAGE_CACHE_SIZE - 1);
1414 zero_user_segments(page, 0, from,
1415 from + copied, PAGE_CACHE_SIZE);
1416 }
1417 SetPageUptodate(page);
1418 }
1207 set_page_dirty(page); 1419 set_page_dirty(page);
1208 unlock_page(page); 1420 unlock_page(page);
1209 page_cache_release(page); 1421 page_cache_release(page);
@@ -1462,6 +1674,199 @@ static ssize_t shmem_file_splice_read(struct file *in, loff_t *ppos,
1462 return error; 1674 return error;
1463} 1675}
1464 1676
1677/*
1678 * llseek SEEK_DATA or SEEK_HOLE through the radix_tree.
1679 */
1680static pgoff_t shmem_seek_hole_data(struct address_space *mapping,
1681 pgoff_t index, pgoff_t end, int origin)
1682{
1683 struct page *page;
1684 struct pagevec pvec;
1685 pgoff_t indices[PAGEVEC_SIZE];
1686 bool done = false;
1687 int i;
1688
1689 pagevec_init(&pvec, 0);
1690 pvec.nr = 1; /* start small: we may be there already */
1691 while (!done) {
1692 pvec.nr = shmem_find_get_pages_and_swap(mapping, index,
1693 pvec.nr, pvec.pages, indices);
1694 if (!pvec.nr) {
1695 if (origin == SEEK_DATA)
1696 index = end;
1697 break;
1698 }
1699 for (i = 0; i < pvec.nr; i++, index++) {
1700 if (index < indices[i]) {
1701 if (origin == SEEK_HOLE) {
1702 done = true;
1703 break;
1704 }
1705 index = indices[i];
1706 }
1707 page = pvec.pages[i];
1708 if (page && !radix_tree_exceptional_entry(page)) {
1709 if (!PageUptodate(page))
1710 page = NULL;
1711 }
1712 if (index >= end ||
1713 (page && origin == SEEK_DATA) ||
1714 (!page && origin == SEEK_HOLE)) {
1715 done = true;
1716 break;
1717 }
1718 }
1719 shmem_deswap_pagevec(&pvec);
1720 pagevec_release(&pvec);
1721 pvec.nr = PAGEVEC_SIZE;
1722 cond_resched();
1723 }
1724 return index;
1725}
1726
1727static loff_t shmem_file_llseek(struct file *file, loff_t offset, int origin)
1728{
1729 struct address_space *mapping;
1730 struct inode *inode;
1731 pgoff_t start, end;
1732 loff_t new_offset;
1733
1734 if (origin != SEEK_DATA && origin != SEEK_HOLE)
1735 return generic_file_llseek_size(file, offset, origin,
1736 MAX_LFS_FILESIZE);
1737 mapping = file->f_mapping;
1738 inode = mapping->host;
1739 mutex_lock(&inode->i_mutex);
1740 /* We're holding i_mutex so we can access i_size directly */
1741
1742 if (offset < 0)
1743 offset = -EINVAL;
1744 else if (offset >= inode->i_size)
1745 offset = -ENXIO;
1746 else {
1747 start = offset >> PAGE_CACHE_SHIFT;
1748 end = (inode->i_size + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1749 new_offset = shmem_seek_hole_data(mapping, start, end, origin);
1750 new_offset <<= PAGE_CACHE_SHIFT;
1751 if (new_offset > offset) {
1752 if (new_offset < inode->i_size)
1753 offset = new_offset;
1754 else if (origin == SEEK_DATA)
1755 offset = -ENXIO;
1756 else
1757 offset = inode->i_size;
1758 }
1759 }
1760
1761 if (offset >= 0 && offset != file->f_pos) {
1762 file->f_pos = offset;
1763 file->f_version = 0;
1764 }
1765 mutex_unlock(&inode->i_mutex);
1766 return offset;
1767}
1768
1769static long shmem_fallocate(struct file *file, int mode, loff_t offset,
1770 loff_t len)
1771{
1772 struct inode *inode = file->f_path.dentry->d_inode;
1773 struct shmem_sb_info *sbinfo = SHMEM_SB(inode->i_sb);
1774 struct shmem_falloc shmem_falloc;
1775 pgoff_t start, index, end;
1776 int error;
1777
1778 mutex_lock(&inode->i_mutex);
1779
1780 if (mode & FALLOC_FL_PUNCH_HOLE) {
1781 struct address_space *mapping = file->f_mapping;
1782 loff_t unmap_start = round_up(offset, PAGE_SIZE);
1783 loff_t unmap_end = round_down(offset + len, PAGE_SIZE) - 1;
1784
1785 if ((u64)unmap_end > (u64)unmap_start)
1786 unmap_mapping_range(mapping, unmap_start,
1787 1 + unmap_end - unmap_start, 0);
1788 shmem_truncate_range(inode, offset, offset + len - 1);
1789 /* No need to unmap again: hole-punching leaves COWed pages */
1790 error = 0;
1791 goto out;
1792 }
1793
1794 /* We need to check rlimit even when FALLOC_FL_KEEP_SIZE */
1795 error = inode_newsize_ok(inode, offset + len);
1796 if (error)
1797 goto out;
1798
1799 start = offset >> PAGE_CACHE_SHIFT;
1800 end = (offset + len + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
1801 /* Try to avoid a swapstorm if len is impossible to satisfy */
1802 if (sbinfo->max_blocks && end - start > sbinfo->max_blocks) {
1803 error = -ENOSPC;
1804 goto out;
1805 }
1806
1807 shmem_falloc.start = start;
1808 shmem_falloc.next = start;
1809 shmem_falloc.nr_falloced = 0;
1810 shmem_falloc.nr_unswapped = 0;
1811 spin_lock(&inode->i_lock);
1812 inode->i_private = &shmem_falloc;
1813 spin_unlock(&inode->i_lock);
1814
1815 for (index = start; index < end; index++) {
1816 struct page *page;
1817
1818 /*
1819 * Good, the fallocate(2) manpage permits EINTR: we may have
1820 * been interrupted because we are using up too much memory.
1821 */
1822 if (signal_pending(current))
1823 error = -EINTR;
1824 else if (shmem_falloc.nr_unswapped > shmem_falloc.nr_falloced)
1825 error = -ENOMEM;
1826 else
1827 error = shmem_getpage(inode, index, &page, SGP_FALLOC,
1828 NULL);
1829 if (error) {
1830 /* Remove the !PageUptodate pages we added */
1831 shmem_undo_range(inode,
1832 (loff_t)start << PAGE_CACHE_SHIFT,
1833 (loff_t)index << PAGE_CACHE_SHIFT, true);
1834 goto undone;
1835 }
1836
1837 /*
1838 * Inform shmem_writepage() how far we have reached.
1839 * No need for lock or barrier: we have the page lock.
1840 */
1841 shmem_falloc.next++;
1842 if (!PageUptodate(page))
1843 shmem_falloc.nr_falloced++;
1844
1845 /*
1846 * If !PageUptodate, leave it that way so that freeable pages
1847 * can be recognized if we need to rollback on error later.
1848 * But set_page_dirty so that memory pressure will swap rather
1849 * than free the pages we are allocating (and SGP_CACHE pages
1850 * might still be clean: we now need to mark those dirty too).
1851 */
1852 set_page_dirty(page);
1853 unlock_page(page);
1854 page_cache_release(page);
1855 cond_resched();
1856 }
1857
1858 if (!(mode & FALLOC_FL_KEEP_SIZE) && offset + len > inode->i_size)
1859 i_size_write(inode, offset + len);
1860 inode->i_ctime = CURRENT_TIME;
1861undone:
1862 spin_lock(&inode->i_lock);
1863 inode->i_private = NULL;
1864 spin_unlock(&inode->i_lock);
1865out:
1866 mutex_unlock(&inode->i_mutex);
1867 return error;
1868}
1869
1465static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf) 1870static int shmem_statfs(struct dentry *dentry, struct kstatfs *buf)
1466{ 1871{
1467 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb); 1872 struct shmem_sb_info *sbinfo = SHMEM_SB(dentry->d_sb);
@@ -1665,6 +2070,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
1665 kaddr = kmap_atomic(page); 2070 kaddr = kmap_atomic(page);
1666 memcpy(kaddr, symname, len); 2071 memcpy(kaddr, symname, len);
1667 kunmap_atomic(kaddr); 2072 kunmap_atomic(kaddr);
2073 SetPageUptodate(page);
1668 set_page_dirty(page); 2074 set_page_dirty(page);
1669 unlock_page(page); 2075 unlock_page(page);
1670 page_cache_release(page); 2076 page_cache_release(page);
@@ -2270,6 +2676,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
2270 } 2676 }
2271 } 2677 }
2272 sb->s_export_op = &shmem_export_ops; 2678 sb->s_export_op = &shmem_export_ops;
2679 sb->s_flags |= MS_NOSEC;
2273#else 2680#else
2274 sb->s_flags |= MS_NOUSER; 2681 sb->s_flags |= MS_NOUSER;
2275#endif 2682#endif
@@ -2364,7 +2771,7 @@ static const struct address_space_operations shmem_aops = {
2364static const struct file_operations shmem_file_operations = { 2771static const struct file_operations shmem_file_operations = {
2365 .mmap = shmem_mmap, 2772 .mmap = shmem_mmap,
2366#ifdef CONFIG_TMPFS 2773#ifdef CONFIG_TMPFS
2367 .llseek = generic_file_llseek, 2774 .llseek = shmem_file_llseek,
2368 .read = do_sync_read, 2775 .read = do_sync_read,
2369 .write = do_sync_write, 2776 .write = do_sync_write,
2370 .aio_read = shmem_file_aio_read, 2777 .aio_read = shmem_file_aio_read,
@@ -2372,12 +2779,12 @@ static const struct file_operations shmem_file_operations = {
2372 .fsync = noop_fsync, 2779 .fsync = noop_fsync,
2373 .splice_read = shmem_file_splice_read, 2780 .splice_read = shmem_file_splice_read,
2374 .splice_write = generic_file_splice_write, 2781 .splice_write = generic_file_splice_write,
2782 .fallocate = shmem_fallocate,
2375#endif 2783#endif
2376}; 2784};
2377 2785
2378static const struct inode_operations shmem_inode_operations = { 2786static const struct inode_operations shmem_inode_operations = {
2379 .setattr = shmem_setattr, 2787 .setattr = shmem_setattr,
2380 .truncate_range = shmem_truncate_range,
2381#ifdef CONFIG_TMPFS_XATTR 2788#ifdef CONFIG_TMPFS_XATTR
2382 .setxattr = shmem_setxattr, 2789 .setxattr = shmem_setxattr,
2383 .getxattr = shmem_getxattr, 2790 .getxattr = shmem_getxattr,
diff --git a/mm/sparse.c b/mm/sparse.c
index a8bc7d364deb..6a4bf9160e85 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -273,10 +273,10 @@ static unsigned long *__kmalloc_section_usemap(void)
273#ifdef CONFIG_MEMORY_HOTREMOVE 273#ifdef CONFIG_MEMORY_HOTREMOVE
274static unsigned long * __init 274static unsigned long * __init
275sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat, 275sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
276 unsigned long count) 276 unsigned long size)
277{ 277{
278 unsigned long section_nr; 278 pg_data_t *host_pgdat;
279 279 unsigned long goal;
280 /* 280 /*
281 * A page may contain usemaps for other sections preventing the 281 * A page may contain usemaps for other sections preventing the
282 * page being freed and making a section unremovable while 282 * page being freed and making a section unremovable while
@@ -287,8 +287,10 @@ sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
287 * from the same section as the pgdat where possible to avoid 287 * from the same section as the pgdat where possible to avoid
288 * this problem. 288 * this problem.
289 */ 289 */
290 section_nr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT); 290 goal = __pa(pgdat) & PAGE_SECTION_MASK;
291 return alloc_bootmem_section(usemap_size() * count, section_nr); 291 host_pgdat = NODE_DATA(early_pfn_to_nid(goal >> PAGE_SHIFT));
292 return __alloc_bootmem_node_nopanic(host_pgdat, size,
293 SMP_CACHE_BYTES, goal);
292} 294}
293 295
294static void __init check_usemap_section_nr(int nid, unsigned long *usemap) 296static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -332,9 +334,9 @@ static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
332#else 334#else
333static unsigned long * __init 335static unsigned long * __init
334sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat, 336sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
335 unsigned long count) 337 unsigned long size)
336{ 338{
337 return NULL; 339 return alloc_bootmem_node_nopanic(pgdat, size);
338} 340}
339 341
340static void __init check_usemap_section_nr(int nid, unsigned long *usemap) 342static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
@@ -352,13 +354,10 @@ static void __init sparse_early_usemaps_alloc_node(unsigned long**usemap_map,
352 int size = usemap_size(); 354 int size = usemap_size();
353 355
354 usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid), 356 usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
355 usemap_count); 357 size * usemap_count);
356 if (!usemap) { 358 if (!usemap) {
357 usemap = alloc_bootmem_node(NODE_DATA(nodeid), size * usemap_count); 359 printk(KERN_WARNING "%s: allocation failed\n", __func__);
358 if (!usemap) { 360 return;
359 printk(KERN_WARNING "%s: allocation failed\n", __func__);
360 return;
361 }
362 } 361 }
363 362
364 for (pnum = pnum_begin; pnum < pnum_end; pnum++) { 363 for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
diff --git a/mm/swap.c b/mm/swap.c
index 5c13f1338972..4e7e2ec67078 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -47,13 +47,15 @@ static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs);
47static void __page_cache_release(struct page *page) 47static void __page_cache_release(struct page *page)
48{ 48{
49 if (PageLRU(page)) { 49 if (PageLRU(page)) {
50 unsigned long flags;
51 struct zone *zone = page_zone(page); 50 struct zone *zone = page_zone(page);
51 struct lruvec *lruvec;
52 unsigned long flags;
52 53
53 spin_lock_irqsave(&zone->lru_lock, flags); 54 spin_lock_irqsave(&zone->lru_lock, flags);
55 lruvec = mem_cgroup_page_lruvec(page, zone);
54 VM_BUG_ON(!PageLRU(page)); 56 VM_BUG_ON(!PageLRU(page));
55 __ClearPageLRU(page); 57 __ClearPageLRU(page);
56 del_page_from_lru_list(zone, page, page_off_lru(page)); 58 del_page_from_lru_list(page, lruvec, page_off_lru(page));
57 spin_unlock_irqrestore(&zone->lru_lock, flags); 59 spin_unlock_irqrestore(&zone->lru_lock, flags);
58 } 60 }
59} 61}
@@ -82,6 +84,25 @@ static void put_compound_page(struct page *page)
82 if (likely(page != page_head && 84 if (likely(page != page_head &&
83 get_page_unless_zero(page_head))) { 85 get_page_unless_zero(page_head))) {
84 unsigned long flags; 86 unsigned long flags;
87
88 /*
89 * THP can not break up slab pages so avoid taking
90 * compound_lock(). Slab performs non-atomic bit ops
91 * on page->flags for better performance. In particular
92 * slab_unlock() in slub used to be a hot path. It is
93 * still hot on arches that do not support
94 * this_cpu_cmpxchg_double().
95 */
96 if (PageSlab(page_head)) {
97 if (PageTail(page)) {
98 if (put_page_testzero(page_head))
99 VM_BUG_ON(1);
100
101 atomic_dec(&page->_mapcount);
102 goto skip_lock_tail;
103 } else
104 goto skip_lock;
105 }
85 /* 106 /*
86 * page_head wasn't a dangling pointer but it 107 * page_head wasn't a dangling pointer but it
87 * may not be a head page anymore by the time 108 * may not be a head page anymore by the time
@@ -92,10 +113,10 @@ static void put_compound_page(struct page *page)
92 if (unlikely(!PageTail(page))) { 113 if (unlikely(!PageTail(page))) {
93 /* __split_huge_page_refcount run before us */ 114 /* __split_huge_page_refcount run before us */
94 compound_unlock_irqrestore(page_head, flags); 115 compound_unlock_irqrestore(page_head, flags);
95 VM_BUG_ON(PageHead(page_head)); 116skip_lock:
96 if (put_page_testzero(page_head)) 117 if (put_page_testzero(page_head))
97 __put_single_page(page_head); 118 __put_single_page(page_head);
98 out_put_single: 119out_put_single:
99 if (put_page_testzero(page)) 120 if (put_page_testzero(page))
100 __put_single_page(page); 121 __put_single_page(page);
101 return; 122 return;
@@ -115,6 +136,8 @@ static void put_compound_page(struct page *page)
115 VM_BUG_ON(atomic_read(&page_head->_count) <= 0); 136 VM_BUG_ON(atomic_read(&page_head->_count) <= 0);
116 VM_BUG_ON(atomic_read(&page->_count) != 0); 137 VM_BUG_ON(atomic_read(&page->_count) != 0);
117 compound_unlock_irqrestore(page_head, flags); 138 compound_unlock_irqrestore(page_head, flags);
139
140skip_lock_tail:
118 if (put_page_testzero(page_head)) { 141 if (put_page_testzero(page_head)) {
119 if (PageHead(page_head)) 142 if (PageHead(page_head))
120 __put_compound_page(page_head); 143 __put_compound_page(page_head);
@@ -162,6 +185,18 @@ bool __get_page_tail(struct page *page)
162 struct page *page_head = compound_trans_head(page); 185 struct page *page_head = compound_trans_head(page);
163 186
164 if (likely(page != page_head && get_page_unless_zero(page_head))) { 187 if (likely(page != page_head && get_page_unless_zero(page_head))) {
188
189 /* Ref to put_compound_page() comment. */
190 if (PageSlab(page_head)) {
191 if (likely(PageTail(page))) {
192 __get_page_tail_foll(page, false);
193 return true;
194 } else {
195 put_page(page_head);
196 return false;
197 }
198 }
199
165 /* 200 /*
166 * page_head wasn't a dangling pointer but it 201 * page_head wasn't a dangling pointer but it
167 * may not be a head page anymore by the time 202 * may not be a head page anymore by the time
@@ -202,11 +237,12 @@ void put_pages_list(struct list_head *pages)
202EXPORT_SYMBOL(put_pages_list); 237EXPORT_SYMBOL(put_pages_list);
203 238
204static void pagevec_lru_move_fn(struct pagevec *pvec, 239static void pagevec_lru_move_fn(struct pagevec *pvec,
205 void (*move_fn)(struct page *page, void *arg), 240 void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg),
206 void *arg) 241 void *arg)
207{ 242{
208 int i; 243 int i;
209 struct zone *zone = NULL; 244 struct zone *zone = NULL;
245 struct lruvec *lruvec;
210 unsigned long flags = 0; 246 unsigned long flags = 0;
211 247
212 for (i = 0; i < pagevec_count(pvec); i++) { 248 for (i = 0; i < pagevec_count(pvec); i++) {
@@ -220,7 +256,8 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
220 spin_lock_irqsave(&zone->lru_lock, flags); 256 spin_lock_irqsave(&zone->lru_lock, flags);
221 } 257 }
222 258
223 (*move_fn)(page, arg); 259 lruvec = mem_cgroup_page_lruvec(page, zone);
260 (*move_fn)(page, lruvec, arg);
224 } 261 }
225 if (zone) 262 if (zone)
226 spin_unlock_irqrestore(&zone->lru_lock, flags); 263 spin_unlock_irqrestore(&zone->lru_lock, flags);
@@ -228,16 +265,13 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
228 pagevec_reinit(pvec); 265 pagevec_reinit(pvec);
229} 266}
230 267
231static void pagevec_move_tail_fn(struct page *page, void *arg) 268static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec,
269 void *arg)
232{ 270{
233 int *pgmoved = arg; 271 int *pgmoved = arg;
234 272
235 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { 273 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
236 enum lru_list lru = page_lru_base_type(page); 274 enum lru_list lru = page_lru_base_type(page);
237 struct lruvec *lruvec;
238
239 lruvec = mem_cgroup_lru_move_lists(page_zone(page),
240 page, lru, lru);
241 list_move_tail(&page->lru, &lruvec->lists[lru]); 275 list_move_tail(&page->lru, &lruvec->lists[lru]);
242 (*pgmoved)++; 276 (*pgmoved)++;
243 } 277 }
@@ -276,41 +310,30 @@ void rotate_reclaimable_page(struct page *page)
276 } 310 }
277} 311}
278 312
279static void update_page_reclaim_stat(struct zone *zone, struct page *page, 313static void update_page_reclaim_stat(struct lruvec *lruvec,
280 int file, int rotated) 314 int file, int rotated)
281{ 315{
282 struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat; 316 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
283 struct zone_reclaim_stat *memcg_reclaim_stat;
284
285 memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page);
286 317
287 reclaim_stat->recent_scanned[file]++; 318 reclaim_stat->recent_scanned[file]++;
288 if (rotated) 319 if (rotated)
289 reclaim_stat->recent_rotated[file]++; 320 reclaim_stat->recent_rotated[file]++;
290
291 if (!memcg_reclaim_stat)
292 return;
293
294 memcg_reclaim_stat->recent_scanned[file]++;
295 if (rotated)
296 memcg_reclaim_stat->recent_rotated[file]++;
297} 321}
298 322
299static void __activate_page(struct page *page, void *arg) 323static void __activate_page(struct page *page, struct lruvec *lruvec,
324 void *arg)
300{ 325{
301 struct zone *zone = page_zone(page);
302
303 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { 326 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) {
304 int file = page_is_file_cache(page); 327 int file = page_is_file_cache(page);
305 int lru = page_lru_base_type(page); 328 int lru = page_lru_base_type(page);
306 del_page_from_lru_list(zone, page, lru);
307 329
330 del_page_from_lru_list(page, lruvec, lru);
308 SetPageActive(page); 331 SetPageActive(page);
309 lru += LRU_ACTIVE; 332 lru += LRU_ACTIVE;
310 add_page_to_lru_list(zone, page, lru); 333 add_page_to_lru_list(page, lruvec, lru);
311 __count_vm_event(PGACTIVATE);
312 334
313 update_page_reclaim_stat(zone, page, file, 1); 335 __count_vm_event(PGACTIVATE);
336 update_page_reclaim_stat(lruvec, file, 1);
314 } 337 }
315} 338}
316 339
@@ -347,7 +370,7 @@ void activate_page(struct page *page)
347 struct zone *zone = page_zone(page); 370 struct zone *zone = page_zone(page);
348 371
349 spin_lock_irq(&zone->lru_lock); 372 spin_lock_irq(&zone->lru_lock);
350 __activate_page(page, NULL); 373 __activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
351 spin_unlock_irq(&zone->lru_lock); 374 spin_unlock_irq(&zone->lru_lock);
352} 375}
353#endif 376#endif
@@ -414,11 +437,13 @@ void lru_cache_add_lru(struct page *page, enum lru_list lru)
414void add_page_to_unevictable_list(struct page *page) 437void add_page_to_unevictable_list(struct page *page)
415{ 438{
416 struct zone *zone = page_zone(page); 439 struct zone *zone = page_zone(page);
440 struct lruvec *lruvec;
417 441
418 spin_lock_irq(&zone->lru_lock); 442 spin_lock_irq(&zone->lru_lock);
443 lruvec = mem_cgroup_page_lruvec(page, zone);
419 SetPageUnevictable(page); 444 SetPageUnevictable(page);
420 SetPageLRU(page); 445 SetPageLRU(page);
421 add_page_to_lru_list(zone, page, LRU_UNEVICTABLE); 446 add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
422 spin_unlock_irq(&zone->lru_lock); 447 spin_unlock_irq(&zone->lru_lock);
423} 448}
424 449
@@ -443,11 +468,11 @@ void add_page_to_unevictable_list(struct page *page)
443 * be write it out by flusher threads as this is much more effective 468 * be write it out by flusher threads as this is much more effective
444 * than the single-page writeout from reclaim. 469 * than the single-page writeout from reclaim.
445 */ 470 */
446static void lru_deactivate_fn(struct page *page, void *arg) 471static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec,
472 void *arg)
447{ 473{
448 int lru, file; 474 int lru, file;
449 bool active; 475 bool active;
450 struct zone *zone = page_zone(page);
451 476
452 if (!PageLRU(page)) 477 if (!PageLRU(page))
453 return; 478 return;
@@ -460,13 +485,13 @@ static void lru_deactivate_fn(struct page *page, void *arg)
460 return; 485 return;
461 486
462 active = PageActive(page); 487 active = PageActive(page);
463
464 file = page_is_file_cache(page); 488 file = page_is_file_cache(page);
465 lru = page_lru_base_type(page); 489 lru = page_lru_base_type(page);
466 del_page_from_lru_list(zone, page, lru + active); 490
491 del_page_from_lru_list(page, lruvec, lru + active);
467 ClearPageActive(page); 492 ClearPageActive(page);
468 ClearPageReferenced(page); 493 ClearPageReferenced(page);
469 add_page_to_lru_list(zone, page, lru); 494 add_page_to_lru_list(page, lruvec, lru);
470 495
471 if (PageWriteback(page) || PageDirty(page)) { 496 if (PageWriteback(page) || PageDirty(page)) {
472 /* 497 /*
@@ -476,19 +501,17 @@ static void lru_deactivate_fn(struct page *page, void *arg)
476 */ 501 */
477 SetPageReclaim(page); 502 SetPageReclaim(page);
478 } else { 503 } else {
479 struct lruvec *lruvec;
480 /* 504 /*
481 * The page's writeback ends up during pagevec 505 * The page's writeback ends up during pagevec
482 * We moves tha page into tail of inactive. 506 * We moves tha page into tail of inactive.
483 */ 507 */
484 lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru);
485 list_move_tail(&page->lru, &lruvec->lists[lru]); 508 list_move_tail(&page->lru, &lruvec->lists[lru]);
486 __count_vm_event(PGROTATED); 509 __count_vm_event(PGROTATED);
487 } 510 }
488 511
489 if (active) 512 if (active)
490 __count_vm_event(PGDEACTIVATE); 513 __count_vm_event(PGDEACTIVATE);
491 update_page_reclaim_stat(zone, page, file, 0); 514 update_page_reclaim_stat(lruvec, file, 0);
492} 515}
493 516
494/* 517/*
@@ -588,6 +611,7 @@ void release_pages(struct page **pages, int nr, int cold)
588 int i; 611 int i;
589 LIST_HEAD(pages_to_free); 612 LIST_HEAD(pages_to_free);
590 struct zone *zone = NULL; 613 struct zone *zone = NULL;
614 struct lruvec *lruvec;
591 unsigned long uninitialized_var(flags); 615 unsigned long uninitialized_var(flags);
592 616
593 for (i = 0; i < nr; i++) { 617 for (i = 0; i < nr; i++) {
@@ -615,9 +639,11 @@ void release_pages(struct page **pages, int nr, int cold)
615 zone = pagezone; 639 zone = pagezone;
616 spin_lock_irqsave(&zone->lru_lock, flags); 640 spin_lock_irqsave(&zone->lru_lock, flags);
617 } 641 }
642
643 lruvec = mem_cgroup_page_lruvec(page, zone);
618 VM_BUG_ON(!PageLRU(page)); 644 VM_BUG_ON(!PageLRU(page));
619 __ClearPageLRU(page); 645 __ClearPageLRU(page);
620 del_page_from_lru_list(zone, page, page_off_lru(page)); 646 del_page_from_lru_list(page, lruvec, page_off_lru(page));
621 } 647 }
622 648
623 list_add(&page->lru, &pages_to_free); 649 list_add(&page->lru, &pages_to_free);
@@ -649,8 +675,8 @@ EXPORT_SYMBOL(__pagevec_release);
649 675
650#ifdef CONFIG_TRANSPARENT_HUGEPAGE 676#ifdef CONFIG_TRANSPARENT_HUGEPAGE
651/* used by __split_huge_page_refcount() */ 677/* used by __split_huge_page_refcount() */
652void lru_add_page_tail(struct zone* zone, 678void lru_add_page_tail(struct page *page, struct page *page_tail,
653 struct page *page, struct page *page_tail) 679 struct lruvec *lruvec)
654{ 680{
655 int uninitialized_var(active); 681 int uninitialized_var(active);
656 enum lru_list lru; 682 enum lru_list lru;
@@ -659,7 +685,8 @@ void lru_add_page_tail(struct zone* zone,
659 VM_BUG_ON(!PageHead(page)); 685 VM_BUG_ON(!PageHead(page));
660 VM_BUG_ON(PageCompound(page_tail)); 686 VM_BUG_ON(PageCompound(page_tail));
661 VM_BUG_ON(PageLRU(page_tail)); 687 VM_BUG_ON(PageLRU(page_tail));
662 VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock)); 688 VM_BUG_ON(NR_CPUS != 1 &&
689 !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
663 690
664 SetPageLRU(page_tail); 691 SetPageLRU(page_tail);
665 692
@@ -688,20 +715,20 @@ void lru_add_page_tail(struct zone* zone,
688 * Use the standard add function to put page_tail on the list, 715 * Use the standard add function to put page_tail on the list,
689 * but then correct its position so they all end up in order. 716 * but then correct its position so they all end up in order.
690 */ 717 */
691 add_page_to_lru_list(zone, page_tail, lru); 718 add_page_to_lru_list(page_tail, lruvec, lru);
692 list_head = page_tail->lru.prev; 719 list_head = page_tail->lru.prev;
693 list_move_tail(&page_tail->lru, list_head); 720 list_move_tail(&page_tail->lru, list_head);
694 } 721 }
695 722
696 if (!PageUnevictable(page)) 723 if (!PageUnevictable(page))
697 update_page_reclaim_stat(zone, page_tail, file, active); 724 update_page_reclaim_stat(lruvec, file, active);
698} 725}
699#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ 726#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
700 727
701static void __pagevec_lru_add_fn(struct page *page, void *arg) 728static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec,
729 void *arg)
702{ 730{
703 enum lru_list lru = (enum lru_list)arg; 731 enum lru_list lru = (enum lru_list)arg;
704 struct zone *zone = page_zone(page);
705 int file = is_file_lru(lru); 732 int file = is_file_lru(lru);
706 int active = is_active_lru(lru); 733 int active = is_active_lru(lru);
707 734
@@ -712,8 +739,8 @@ static void __pagevec_lru_add_fn(struct page *page, void *arg)
712 SetPageLRU(page); 739 SetPageLRU(page);
713 if (active) 740 if (active)
714 SetPageActive(page); 741 SetPageActive(page);
715 add_page_to_lru_list(zone, page, lru); 742 add_page_to_lru_list(page, lruvec, lru);
716 update_page_reclaim_stat(zone, page, file, active); 743 update_page_reclaim_stat(lruvec, file, active);
717} 744}
718 745
719/* 746/*
diff --git a/mm/swapfile.c b/mm/swapfile.c
index fafc26d1b1dc..457b10baef59 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -601,7 +601,7 @@ void swapcache_free(swp_entry_t entry, struct page *page)
601 * This does not give an exact answer when swap count is continued, 601 * This does not give an exact answer when swap count is continued,
602 * but does include the high COUNT_CONTINUED flag to allow for that. 602 * but does include the high COUNT_CONTINUED flag to allow for that.
603 */ 603 */
604static inline int page_swapcount(struct page *page) 604int page_swapcount(struct page *page)
605{ 605{
606 int count = 0; 606 int count = 0;
607 struct swap_info_struct *p; 607 struct swap_info_struct *p;
@@ -717,37 +717,6 @@ int free_swap_and_cache(swp_entry_t entry)
717 return p != NULL; 717 return p != NULL;
718} 718}
719 719
720#ifdef CONFIG_CGROUP_MEM_RES_CTLR
721/**
722 * mem_cgroup_count_swap_user - count the user of a swap entry
723 * @ent: the swap entry to be checked
724 * @pagep: the pointer for the swap cache page of the entry to be stored
725 *
726 * Returns the number of the user of the swap entry. The number is valid only
727 * for swaps of anonymous pages.
728 * If the entry is found on swap cache, the page is stored to pagep with
729 * refcount of it being incremented.
730 */
731int mem_cgroup_count_swap_user(swp_entry_t ent, struct page **pagep)
732{
733 struct page *page;
734 struct swap_info_struct *p;
735 int count = 0;
736
737 page = find_get_page(&swapper_space, ent.val);
738 if (page)
739 count += page_mapcount(page);
740 p = swap_info_get(ent);
741 if (p) {
742 count += swap_count(p->swap_map[swp_offset(ent)]);
743 spin_unlock(&swap_lock);
744 }
745
746 *pagep = page;
747 return count;
748}
749#endif
750
751#ifdef CONFIG_HIBERNATION 720#ifdef CONFIG_HIBERNATION
752/* 721/*
753 * Find the swap type that corresponds to given device (if any). 722 * Find the swap type that corresponds to given device (if any).
diff --git a/mm/thrash.c b/mm/thrash.c
deleted file mode 100644
index 57ad495dbd54..000000000000
--- a/mm/thrash.c
+++ /dev/null
@@ -1,155 +0,0 @@
1/*
2 * mm/thrash.c
3 *
4 * Copyright (C) 2004, Red Hat, Inc.
5 * Copyright (C) 2004, Rik van Riel <riel@redhat.com>
6 * Released under the GPL, see the file COPYING for details.
7 *
8 * Simple token based thrashing protection, using the algorithm
9 * described in: http://www.cse.ohio-state.edu/hpcs/WWW/HTML/publications/abs05-1.html
10 *
11 * Sep 2006, Ashwin Chaugule <ashwin.chaugule@celunite.com>
12 * Improved algorithm to pass token:
13 * Each task has a priority which is incremented if it contended
14 * for the token in an interval less than its previous attempt.
15 * If the token is acquired, that task's priority is boosted to prevent
16 * the token from bouncing around too often and to let the task make
17 * some progress in its execution.
18 */
19
20#include <linux/jiffies.h>
21#include <linux/mm.h>
22#include <linux/sched.h>
23#include <linux/swap.h>
24#include <linux/memcontrol.h>
25
26#include <trace/events/vmscan.h>
27
28#define TOKEN_AGING_INTERVAL (0xFF)
29
30static DEFINE_SPINLOCK(swap_token_lock);
31struct mm_struct *swap_token_mm;
32static struct mem_cgroup *swap_token_memcg;
33
34#ifdef CONFIG_CGROUP_MEM_RES_CTLR
35static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
36{
37 struct mem_cgroup *memcg;
38
39 memcg = try_get_mem_cgroup_from_mm(mm);
40 if (memcg)
41 css_put(mem_cgroup_css(memcg));
42
43 return memcg;
44}
45#else
46static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
47{
48 return NULL;
49}
50#endif
51
52void grab_swap_token(struct mm_struct *mm)
53{
54 int current_interval;
55 unsigned int old_prio = mm->token_priority;
56 static unsigned int global_faults;
57 static unsigned int last_aging;
58
59 global_faults++;
60
61 current_interval = global_faults - mm->faultstamp;
62
63 if (!spin_trylock(&swap_token_lock))
64 return;
65
66 /* First come first served */
67 if (!swap_token_mm)
68 goto replace_token;
69
70 /*
71 * Usually, we don't need priority aging because long interval faults
72 * makes priority decrease quickly. But there is one exception. If the
73 * token owner task is sleeping, it never make long interval faults.
74 * Thus, we need a priority aging mechanism instead. The requirements
75 * of priority aging are
76 * 1) An aging interval is reasonable enough long. Too short aging
77 * interval makes quick swap token lost and decrease performance.
78 * 2) The swap token owner task have to get priority aging even if
79 * it's under sleep.
80 */
81 if ((global_faults - last_aging) > TOKEN_AGING_INTERVAL) {
82 swap_token_mm->token_priority /= 2;
83 last_aging = global_faults;
84 }
85
86 if (mm == swap_token_mm) {
87 mm->token_priority += 2;
88 goto update_priority;
89 }
90
91 if (current_interval < mm->last_interval)
92 mm->token_priority++;
93 else {
94 if (likely(mm->token_priority > 0))
95 mm->token_priority--;
96 }
97
98 /* Check if we deserve the token */
99 if (mm->token_priority > swap_token_mm->token_priority)
100 goto replace_token;
101
102update_priority:
103 trace_update_swap_token_priority(mm, old_prio, swap_token_mm);
104
105out:
106 mm->faultstamp = global_faults;
107 mm->last_interval = current_interval;
108 spin_unlock(&swap_token_lock);
109 return;
110
111replace_token:
112 mm->token_priority += 2;
113 trace_replace_swap_token(swap_token_mm, mm);
114 swap_token_mm = mm;
115 swap_token_memcg = swap_token_memcg_from_mm(mm);
116 last_aging = global_faults;
117 goto out;
118}
119
120/* Called on process exit. */
121void __put_swap_token(struct mm_struct *mm)
122{
123 spin_lock(&swap_token_lock);
124 if (likely(mm == swap_token_mm)) {
125 trace_put_swap_token(swap_token_mm);
126 swap_token_mm = NULL;
127 swap_token_memcg = NULL;
128 }
129 spin_unlock(&swap_token_lock);
130}
131
132static bool match_memcg(struct mem_cgroup *a, struct mem_cgroup *b)
133{
134 if (!a)
135 return true;
136 if (!b)
137 return true;
138 if (a == b)
139 return true;
140 return false;
141}
142
143void disable_swap_token(struct mem_cgroup *memcg)
144{
145 /* memcg reclaim don't disable unrelated mm token. */
146 if (match_memcg(memcg, swap_token_memcg)) {
147 spin_lock(&swap_token_lock);
148 if (match_memcg(memcg, swap_token_memcg)) {
149 trace_disable_swap_token(swap_token_mm);
150 swap_token_mm = NULL;
151 swap_token_memcg = NULL;
152 }
153 spin_unlock(&swap_token_lock);
154 }
155}
diff --git a/mm/truncate.c b/mm/truncate.c
index 61a183b89df6..75801acdaac7 100644
--- a/mm/truncate.c
+++ b/mm/truncate.c
@@ -602,31 +602,6 @@ int vmtruncate(struct inode *inode, loff_t newsize)
602} 602}
603EXPORT_SYMBOL(vmtruncate); 603EXPORT_SYMBOL(vmtruncate);
604 604
605int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
606{
607 struct address_space *mapping = inode->i_mapping;
608 loff_t holebegin = round_up(lstart, PAGE_SIZE);
609 loff_t holelen = 1 + lend - holebegin;
610
611 /*
612 * If the underlying filesystem is not going to provide
613 * a way to truncate a range of blocks (punch a hole) -
614 * we should return failure right now.
615 */
616 if (!inode->i_op->truncate_range)
617 return -ENOSYS;
618
619 mutex_lock(&inode->i_mutex);
620 inode_dio_wait(inode);
621 unmap_mapping_range(mapping, holebegin, holelen, 1);
622 inode->i_op->truncate_range(inode, lstart, lend);
623 /* unmap again to remove racily COWed private pages */
624 unmap_mapping_range(mapping, holebegin, holelen, 1);
625 mutex_unlock(&inode->i_mutex);
626
627 return 0;
628}
629
630/** 605/**
631 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched 606 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
632 * @inode: inode 607 * @inode: inode
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 94dff883b449..2aad49981b57 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1185,9 +1185,10 @@ void __init vmalloc_init(void)
1185 /* Import existing vmlist entries. */ 1185 /* Import existing vmlist entries. */
1186 for (tmp = vmlist; tmp; tmp = tmp->next) { 1186 for (tmp = vmlist; tmp; tmp = tmp->next) {
1187 va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT); 1187 va = kzalloc(sizeof(struct vmap_area), GFP_NOWAIT);
1188 va->flags = tmp->flags | VM_VM_AREA; 1188 va->flags = VM_VM_AREA;
1189 va->va_start = (unsigned long)tmp->addr; 1189 va->va_start = (unsigned long)tmp->addr;
1190 va->va_end = va->va_start + tmp->size; 1190 va->va_end = va->va_start + tmp->size;
1191 va->vm = tmp;
1191 __insert_vmap_area(va); 1192 __insert_vmap_area(va);
1192 } 1193 }
1193 1194
@@ -2375,8 +2376,8 @@ struct vm_struct **pcpu_get_vm_areas(const unsigned long *offsets,
2375 return NULL; 2376 return NULL;
2376 } 2377 }
2377 2378
2378 vms = kzalloc(sizeof(vms[0]) * nr_vms, GFP_KERNEL); 2379 vms = kcalloc(nr_vms, sizeof(vms[0]), GFP_KERNEL);
2379 vas = kzalloc(sizeof(vas[0]) * nr_vms, GFP_KERNEL); 2380 vas = kcalloc(nr_vms, sizeof(vas[0]), GFP_KERNEL);
2380 if (!vas || !vms) 2381 if (!vas || !vms)
2381 goto err_free2; 2382 goto err_free2;
2382 2383
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 33dc256033b5..eeb3bc9d1d36 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -53,24 +53,6 @@
53#define CREATE_TRACE_POINTS 53#define CREATE_TRACE_POINTS
54#include <trace/events/vmscan.h> 54#include <trace/events/vmscan.h>
55 55
56/*
57 * reclaim_mode determines how the inactive list is shrunk
58 * RECLAIM_MODE_SINGLE: Reclaim only order-0 pages
59 * RECLAIM_MODE_ASYNC: Do not block
60 * RECLAIM_MODE_SYNC: Allow blocking e.g. call wait_on_page_writeback
61 * RECLAIM_MODE_LUMPYRECLAIM: For high-order allocations, take a reference
62 * page from the LRU and reclaim all pages within a
63 * naturally aligned range
64 * RECLAIM_MODE_COMPACTION: For high-order allocations, reclaim a number of
65 * order-0 pages and then compact the zone
66 */
67typedef unsigned __bitwise__ reclaim_mode_t;
68#define RECLAIM_MODE_SINGLE ((__force reclaim_mode_t)0x01u)
69#define RECLAIM_MODE_ASYNC ((__force reclaim_mode_t)0x02u)
70#define RECLAIM_MODE_SYNC ((__force reclaim_mode_t)0x04u)
71#define RECLAIM_MODE_LUMPYRECLAIM ((__force reclaim_mode_t)0x08u)
72#define RECLAIM_MODE_COMPACTION ((__force reclaim_mode_t)0x10u)
73
74struct scan_control { 56struct scan_control {
75 /* Incremented by the number of inactive pages that were scanned */ 57 /* Incremented by the number of inactive pages that were scanned */
76 unsigned long nr_scanned; 58 unsigned long nr_scanned;
@@ -96,11 +78,8 @@ struct scan_control {
96 78
97 int order; 79 int order;
98 80
99 /* 81 /* Scan (total_size >> priority) pages at once */
100 * Intend to reclaim enough continuous memory rather than reclaim 82 int priority;
101 * enough amount of memory. i.e, mode for high order allocation.
102 */
103 reclaim_mode_t reclaim_mode;
104 83
105 /* 84 /*
106 * The memory cgroup that hit its limit and as a result is the 85 * The memory cgroup that hit its limit and as a result is the
@@ -115,11 +94,6 @@ struct scan_control {
115 nodemask_t *nodemask; 94 nodemask_t *nodemask;
116}; 95};
117 96
118struct mem_cgroup_zone {
119 struct mem_cgroup *mem_cgroup;
120 struct zone *zone;
121};
122
123#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru)) 97#define lru_to_page(_head) (list_entry((_head)->prev, struct page, lru))
124 98
125#ifdef ARCH_HAS_PREFETCH 99#ifdef ARCH_HAS_PREFETCH
@@ -164,44 +138,21 @@ static bool global_reclaim(struct scan_control *sc)
164{ 138{
165 return !sc->target_mem_cgroup; 139 return !sc->target_mem_cgroup;
166} 140}
167
168static bool scanning_global_lru(struct mem_cgroup_zone *mz)
169{
170 return !mz->mem_cgroup;
171}
172#else 141#else
173static bool global_reclaim(struct scan_control *sc) 142static bool global_reclaim(struct scan_control *sc)
174{ 143{
175 return true; 144 return true;
176} 145}
177
178static bool scanning_global_lru(struct mem_cgroup_zone *mz)
179{
180 return true;
181}
182#endif 146#endif
183 147
184static struct zone_reclaim_stat *get_reclaim_stat(struct mem_cgroup_zone *mz) 148static unsigned long get_lru_size(struct lruvec *lruvec, enum lru_list lru)
185{
186 if (!scanning_global_lru(mz))
187 return mem_cgroup_get_reclaim_stat(mz->mem_cgroup, mz->zone);
188
189 return &mz->zone->reclaim_stat;
190}
191
192static unsigned long zone_nr_lru_pages(struct mem_cgroup_zone *mz,
193 enum lru_list lru)
194{ 149{
195 if (!scanning_global_lru(mz)) 150 if (!mem_cgroup_disabled())
196 return mem_cgroup_zone_nr_lru_pages(mz->mem_cgroup, 151 return mem_cgroup_get_lru_size(lruvec, lru);
197 zone_to_nid(mz->zone),
198 zone_idx(mz->zone),
199 BIT(lru));
200 152
201 return zone_page_state(mz->zone, NR_LRU_BASE + lru); 153 return zone_page_state(lruvec_zone(lruvec), NR_LRU_BASE + lru);
202} 154}
203 155
204
205/* 156/*
206 * Add a shrinker callback to be called from the vm 157 * Add a shrinker callback to be called from the vm
207 */ 158 */
@@ -364,39 +315,6 @@ out:
364 return ret; 315 return ret;
365} 316}
366 317
367static void set_reclaim_mode(int priority, struct scan_control *sc,
368 bool sync)
369{
370 reclaim_mode_t syncmode = sync ? RECLAIM_MODE_SYNC : RECLAIM_MODE_ASYNC;
371
372 /*
373 * Initially assume we are entering either lumpy reclaim or
374 * reclaim/compaction.Depending on the order, we will either set the
375 * sync mode or just reclaim order-0 pages later.
376 */
377 if (COMPACTION_BUILD)
378 sc->reclaim_mode = RECLAIM_MODE_COMPACTION;
379 else
380 sc->reclaim_mode = RECLAIM_MODE_LUMPYRECLAIM;
381
382 /*
383 * Avoid using lumpy reclaim or reclaim/compaction if possible by
384 * restricting when its set to either costly allocations or when
385 * under memory pressure
386 */
387 if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
388 sc->reclaim_mode |= syncmode;
389 else if (sc->order && priority < DEF_PRIORITY - 2)
390 sc->reclaim_mode |= syncmode;
391 else
392 sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
393}
394
395static void reset_reclaim_mode(struct scan_control *sc)
396{
397 sc->reclaim_mode = RECLAIM_MODE_SINGLE | RECLAIM_MODE_ASYNC;
398}
399
400static inline int is_page_cache_freeable(struct page *page) 318static inline int is_page_cache_freeable(struct page *page)
401{ 319{
402 /* 320 /*
@@ -416,10 +334,6 @@ static int may_write_to_queue(struct backing_dev_info *bdi,
416 return 1; 334 return 1;
417 if (bdi == current->backing_dev_info) 335 if (bdi == current->backing_dev_info)
418 return 1; 336 return 1;
419
420 /* lumpy reclaim for hugepage often need a lot of write */
421 if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
422 return 1;
423 return 0; 337 return 0;
424} 338}
425 339
@@ -523,8 +437,7 @@ static pageout_t pageout(struct page *page, struct address_space *mapping,
523 /* synchronous write or broken a_ops? */ 437 /* synchronous write or broken a_ops? */
524 ClearPageReclaim(page); 438 ClearPageReclaim(page);
525 } 439 }
526 trace_mm_vmscan_writepage(page, 440 trace_mm_vmscan_writepage(page, trace_reclaim_flags(page));
527 trace_reclaim_flags(page, sc->reclaim_mode));
528 inc_zone_page_state(page, NR_VMSCAN_WRITE); 441 inc_zone_page_state(page, NR_VMSCAN_WRITE);
529 return PAGE_SUCCESS; 442 return PAGE_SUCCESS;
530 } 443 }
@@ -701,19 +614,15 @@ enum page_references {
701}; 614};
702 615
703static enum page_references page_check_references(struct page *page, 616static enum page_references page_check_references(struct page *page,
704 struct mem_cgroup_zone *mz,
705 struct scan_control *sc) 617 struct scan_control *sc)
706{ 618{
707 int referenced_ptes, referenced_page; 619 int referenced_ptes, referenced_page;
708 unsigned long vm_flags; 620 unsigned long vm_flags;
709 621
710 referenced_ptes = page_referenced(page, 1, mz->mem_cgroup, &vm_flags); 622 referenced_ptes = page_referenced(page, 1, sc->target_mem_cgroup,
623 &vm_flags);
711 referenced_page = TestClearPageReferenced(page); 624 referenced_page = TestClearPageReferenced(page);
712 625
713 /* Lumpy reclaim - ignore references */
714 if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
715 return PAGEREF_RECLAIM;
716
717 /* 626 /*
718 * Mlock lost the isolation race with us. Let try_to_unmap() 627 * Mlock lost the isolation race with us. Let try_to_unmap()
719 * move the page to the unevictable list. 628 * move the page to the unevictable list.
@@ -722,7 +631,7 @@ static enum page_references page_check_references(struct page *page,
722 return PAGEREF_RECLAIM; 631 return PAGEREF_RECLAIM;
723 632
724 if (referenced_ptes) { 633 if (referenced_ptes) {
725 if (PageAnon(page)) 634 if (PageSwapBacked(page))
726 return PAGEREF_ACTIVATE; 635 return PAGEREF_ACTIVATE;
727 /* 636 /*
728 * All mapped pages start out with page table 637 * All mapped pages start out with page table
@@ -763,9 +672,8 @@ static enum page_references page_check_references(struct page *page,
763 * shrink_page_list() returns the number of reclaimed pages 672 * shrink_page_list() returns the number of reclaimed pages
764 */ 673 */
765static unsigned long shrink_page_list(struct list_head *page_list, 674static unsigned long shrink_page_list(struct list_head *page_list,
766 struct mem_cgroup_zone *mz, 675 struct zone *zone,
767 struct scan_control *sc, 676 struct scan_control *sc,
768 int priority,
769 unsigned long *ret_nr_dirty, 677 unsigned long *ret_nr_dirty,
770 unsigned long *ret_nr_writeback) 678 unsigned long *ret_nr_writeback)
771{ 679{
@@ -794,7 +702,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
794 goto keep; 702 goto keep;
795 703
796 VM_BUG_ON(PageActive(page)); 704 VM_BUG_ON(PageActive(page));
797 VM_BUG_ON(page_zone(page) != mz->zone); 705 VM_BUG_ON(page_zone(page) != zone);
798 706
799 sc->nr_scanned++; 707 sc->nr_scanned++;
800 708
@@ -813,22 +721,11 @@ static unsigned long shrink_page_list(struct list_head *page_list,
813 721
814 if (PageWriteback(page)) { 722 if (PageWriteback(page)) {
815 nr_writeback++; 723 nr_writeback++;
816 /* 724 unlock_page(page);
817 * Synchronous reclaim cannot queue pages for 725 goto keep;
818 * writeback due to the possibility of stack overflow
819 * but if it encounters a page under writeback, wait
820 * for the IO to complete.
821 */
822 if ((sc->reclaim_mode & RECLAIM_MODE_SYNC) &&
823 may_enter_fs)
824 wait_on_page_writeback(page);
825 else {
826 unlock_page(page);
827 goto keep_lumpy;
828 }
829 } 726 }
830 727
831 references = page_check_references(page, mz, sc); 728 references = page_check_references(page, sc);
832 switch (references) { 729 switch (references) {
833 case PAGEREF_ACTIVATE: 730 case PAGEREF_ACTIVATE:
834 goto activate_locked; 731 goto activate_locked;
@@ -879,7 +776,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
879 * unless under significant pressure. 776 * unless under significant pressure.
880 */ 777 */
881 if (page_is_file_cache(page) && 778 if (page_is_file_cache(page) &&
882 (!current_is_kswapd() || priority >= DEF_PRIORITY - 2)) { 779 (!current_is_kswapd() ||
780 sc->priority >= DEF_PRIORITY - 2)) {
883 /* 781 /*
884 * Immediately reclaim when written back. 782 * Immediately reclaim when written back.
885 * Similar in principal to deactivate_page() 783 * Similar in principal to deactivate_page()
@@ -908,7 +806,7 @@ static unsigned long shrink_page_list(struct list_head *page_list,
908 goto activate_locked; 806 goto activate_locked;
909 case PAGE_SUCCESS: 807 case PAGE_SUCCESS:
910 if (PageWriteback(page)) 808 if (PageWriteback(page))
911 goto keep_lumpy; 809 goto keep;
912 if (PageDirty(page)) 810 if (PageDirty(page))
913 goto keep; 811 goto keep;
914 812
@@ -994,7 +892,6 @@ cull_mlocked:
994 try_to_free_swap(page); 892 try_to_free_swap(page);
995 unlock_page(page); 893 unlock_page(page);
996 putback_lru_page(page); 894 putback_lru_page(page);
997 reset_reclaim_mode(sc);
998 continue; 895 continue;
999 896
1000activate_locked: 897activate_locked:
@@ -1007,8 +904,6 @@ activate_locked:
1007keep_locked: 904keep_locked:
1008 unlock_page(page); 905 unlock_page(page);
1009keep: 906keep:
1010 reset_reclaim_mode(sc);
1011keep_lumpy:
1012 list_add(&page->lru, &ret_pages); 907 list_add(&page->lru, &ret_pages);
1013 VM_BUG_ON(PageLRU(page) || PageUnevictable(page)); 908 VM_BUG_ON(PageLRU(page) || PageUnevictable(page));
1014 } 909 }
@@ -1020,7 +915,7 @@ keep_lumpy:
1020 * will encounter the same problem 915 * will encounter the same problem
1021 */ 916 */
1022 if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc)) 917 if (nr_dirty && nr_dirty == nr_congested && global_reclaim(sc))
1023 zone_set_flag(mz->zone, ZONE_CONGESTED); 918 zone_set_flag(zone, ZONE_CONGESTED);
1024 919
1025 free_hot_cold_page_list(&free_pages, 1); 920 free_hot_cold_page_list(&free_pages, 1);
1026 921
@@ -1041,34 +936,15 @@ keep_lumpy:
1041 * 936 *
1042 * returns 0 on success, -ve errno on failure. 937 * returns 0 on success, -ve errno on failure.
1043 */ 938 */
1044int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file) 939int __isolate_lru_page(struct page *page, isolate_mode_t mode)
1045{ 940{
1046 bool all_lru_mode;
1047 int ret = -EINVAL; 941 int ret = -EINVAL;
1048 942
1049 /* Only take pages on the LRU. */ 943 /* Only take pages on the LRU. */
1050 if (!PageLRU(page)) 944 if (!PageLRU(page))
1051 return ret; 945 return ret;
1052 946
1053 all_lru_mode = (mode & (ISOLATE_ACTIVE|ISOLATE_INACTIVE)) == 947 /* Do not give back unevictable pages for compaction */
1054 (ISOLATE_ACTIVE|ISOLATE_INACTIVE);
1055
1056 /*
1057 * When checking the active state, we need to be sure we are
1058 * dealing with comparible boolean values. Take the logical not
1059 * of each.
1060 */
1061 if (!all_lru_mode && !PageActive(page) != !(mode & ISOLATE_ACTIVE))
1062 return ret;
1063
1064 if (!all_lru_mode && !!page_is_file_cache(page) != file)
1065 return ret;
1066
1067 /*
1068 * When this function is being called for lumpy reclaim, we
1069 * initially look into all LRU pages, active, inactive and
1070 * unevictable; only give shrink_page_list evictable pages.
1071 */
1072 if (PageUnevictable(page)) 948 if (PageUnevictable(page))
1073 return ret; 949 return ret;
1074 950
@@ -1135,54 +1011,39 @@ int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
1135 * Appropriate locks must be held before calling this function. 1011 * Appropriate locks must be held before calling this function.
1136 * 1012 *
1137 * @nr_to_scan: The number of pages to look through on the list. 1013 * @nr_to_scan: The number of pages to look through on the list.
1138 * @mz: The mem_cgroup_zone to pull pages from. 1014 * @lruvec: The LRU vector to pull pages from.
1139 * @dst: The temp list to put pages on to. 1015 * @dst: The temp list to put pages on to.
1140 * @nr_scanned: The number of pages that were scanned. 1016 * @nr_scanned: The number of pages that were scanned.
1141 * @sc: The scan_control struct for this reclaim session 1017 * @sc: The scan_control struct for this reclaim session
1142 * @mode: One of the LRU isolation modes 1018 * @mode: One of the LRU isolation modes
1143 * @active: True [1] if isolating active pages 1019 * @lru: LRU list id for isolating
1144 * @file: True [1] if isolating file [!anon] pages
1145 * 1020 *
1146 * returns how many pages were moved onto *@dst. 1021 * returns how many pages were moved onto *@dst.
1147 */ 1022 */
1148static unsigned long isolate_lru_pages(unsigned long nr_to_scan, 1023static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1149 struct mem_cgroup_zone *mz, struct list_head *dst, 1024 struct lruvec *lruvec, struct list_head *dst,
1150 unsigned long *nr_scanned, struct scan_control *sc, 1025 unsigned long *nr_scanned, struct scan_control *sc,
1151 isolate_mode_t mode, int active, int file) 1026 isolate_mode_t mode, enum lru_list lru)
1152{ 1027{
1153 struct lruvec *lruvec; 1028 struct list_head *src = &lruvec->lists[lru];
1154 struct list_head *src;
1155 unsigned long nr_taken = 0; 1029 unsigned long nr_taken = 0;
1156 unsigned long nr_lumpy_taken = 0;
1157 unsigned long nr_lumpy_dirty = 0;
1158 unsigned long nr_lumpy_failed = 0;
1159 unsigned long scan; 1030 unsigned long scan;
1160 int lru = LRU_BASE;
1161
1162 lruvec = mem_cgroup_zone_lruvec(mz->zone, mz->mem_cgroup);
1163 if (active)
1164 lru += LRU_ACTIVE;
1165 if (file)
1166 lru += LRU_FILE;
1167 src = &lruvec->lists[lru];
1168 1031
1169 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) { 1032 for (scan = 0; scan < nr_to_scan && !list_empty(src); scan++) {
1170 struct page *page; 1033 struct page *page;
1171 unsigned long pfn; 1034 int nr_pages;
1172 unsigned long end_pfn;
1173 unsigned long page_pfn;
1174 int zone_id;
1175 1035
1176 page = lru_to_page(src); 1036 page = lru_to_page(src);
1177 prefetchw_prev_lru_page(page, src, flags); 1037 prefetchw_prev_lru_page(page, src, flags);
1178 1038
1179 VM_BUG_ON(!PageLRU(page)); 1039 VM_BUG_ON(!PageLRU(page));
1180 1040
1181 switch (__isolate_lru_page(page, mode, file)) { 1041 switch (__isolate_lru_page(page, mode)) {
1182 case 0: 1042 case 0:
1183 mem_cgroup_lru_del(page); 1043 nr_pages = hpage_nr_pages(page);
1044 mem_cgroup_update_lru_size(lruvec, lru, -nr_pages);
1184 list_move(&page->lru, dst); 1045 list_move(&page->lru, dst);
1185 nr_taken += hpage_nr_pages(page); 1046 nr_taken += nr_pages;
1186 break; 1047 break;
1187 1048
1188 case -EBUSY: 1049 case -EBUSY:
@@ -1193,93 +1054,11 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
1193 default: 1054 default:
1194 BUG(); 1055 BUG();
1195 } 1056 }
1196
1197 if (!sc->order || !(sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM))
1198 continue;
1199
1200 /*
1201 * Attempt to take all pages in the order aligned region
1202 * surrounding the tag page. Only take those pages of
1203 * the same active state as that tag page. We may safely
1204 * round the target page pfn down to the requested order
1205 * as the mem_map is guaranteed valid out to MAX_ORDER,
1206 * where that page is in a different zone we will detect
1207 * it from its zone id and abort this block scan.
1208 */
1209 zone_id = page_zone_id(page);
1210 page_pfn = page_to_pfn(page);
1211 pfn = page_pfn & ~((1 << sc->order) - 1);
1212 end_pfn = pfn + (1 << sc->order);
1213 for (; pfn < end_pfn; pfn++) {
1214 struct page *cursor_page;
1215
1216 /* The target page is in the block, ignore it. */
1217 if (unlikely(pfn == page_pfn))
1218 continue;
1219
1220 /* Avoid holes within the zone. */
1221 if (unlikely(!pfn_valid_within(pfn)))
1222 break;
1223
1224 cursor_page = pfn_to_page(pfn);
1225
1226 /* Check that we have not crossed a zone boundary. */
1227 if (unlikely(page_zone_id(cursor_page) != zone_id))
1228 break;
1229
1230 /*
1231 * If we don't have enough swap space, reclaiming of
1232 * anon page which don't already have a swap slot is
1233 * pointless.
1234 */
1235 if (nr_swap_pages <= 0 && PageSwapBacked(cursor_page) &&
1236 !PageSwapCache(cursor_page))
1237 break;
1238
1239 if (__isolate_lru_page(cursor_page, mode, file) == 0) {
1240 unsigned int isolated_pages;
1241
1242 mem_cgroup_lru_del(cursor_page);
1243 list_move(&cursor_page->lru, dst);
1244 isolated_pages = hpage_nr_pages(cursor_page);
1245 nr_taken += isolated_pages;
1246 nr_lumpy_taken += isolated_pages;
1247 if (PageDirty(cursor_page))
1248 nr_lumpy_dirty += isolated_pages;
1249 scan++;
1250 pfn += isolated_pages - 1;
1251 } else {
1252 /*
1253 * Check if the page is freed already.
1254 *
1255 * We can't use page_count() as that
1256 * requires compound_head and we don't
1257 * have a pin on the page here. If a
1258 * page is tail, we may or may not
1259 * have isolated the head, so assume
1260 * it's not free, it'd be tricky to
1261 * track the head status without a
1262 * page pin.
1263 */
1264 if (!PageTail(cursor_page) &&
1265 !atomic_read(&cursor_page->_count))
1266 continue;
1267 break;
1268 }
1269 }
1270
1271 /* If we break out of the loop above, lumpy reclaim failed */
1272 if (pfn < end_pfn)
1273 nr_lumpy_failed++;
1274 } 1057 }
1275 1058
1276 *nr_scanned = scan; 1059 *nr_scanned = scan;
1277 1060 trace_mm_vmscan_lru_isolate(sc->order, nr_to_scan, scan,
1278 trace_mm_vmscan_lru_isolate(sc->order, 1061 nr_taken, mode, is_file_lru(lru));
1279 nr_to_scan, scan,
1280 nr_taken,
1281 nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed,
1282 mode, file);
1283 return nr_taken; 1062 return nr_taken;
1284} 1063}
1285 1064
@@ -1316,15 +1095,16 @@ int isolate_lru_page(struct page *page)
1316 1095
1317 if (PageLRU(page)) { 1096 if (PageLRU(page)) {
1318 struct zone *zone = page_zone(page); 1097 struct zone *zone = page_zone(page);
1098 struct lruvec *lruvec;
1319 1099
1320 spin_lock_irq(&zone->lru_lock); 1100 spin_lock_irq(&zone->lru_lock);
1101 lruvec = mem_cgroup_page_lruvec(page, zone);
1321 if (PageLRU(page)) { 1102 if (PageLRU(page)) {
1322 int lru = page_lru(page); 1103 int lru = page_lru(page);
1323 ret = 0;
1324 get_page(page); 1104 get_page(page);
1325 ClearPageLRU(page); 1105 ClearPageLRU(page);
1326 1106 del_page_from_lru_list(page, lruvec, lru);
1327 del_page_from_lru_list(zone, page, lru); 1107 ret = 0;
1328 } 1108 }
1329 spin_unlock_irq(&zone->lru_lock); 1109 spin_unlock_irq(&zone->lru_lock);
1330 } 1110 }
@@ -1357,11 +1137,10 @@ static int too_many_isolated(struct zone *zone, int file,
1357} 1137}
1358 1138
1359static noinline_for_stack void 1139static noinline_for_stack void
1360putback_inactive_pages(struct mem_cgroup_zone *mz, 1140putback_inactive_pages(struct lruvec *lruvec, struct list_head *page_list)
1361 struct list_head *page_list)
1362{ 1141{
1363 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz); 1142 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1364 struct zone *zone = mz->zone; 1143 struct zone *zone = lruvec_zone(lruvec);
1365 LIST_HEAD(pages_to_free); 1144 LIST_HEAD(pages_to_free);
1366 1145
1367 /* 1146 /*
@@ -1379,9 +1158,13 @@ putback_inactive_pages(struct mem_cgroup_zone *mz,
1379 spin_lock_irq(&zone->lru_lock); 1158 spin_lock_irq(&zone->lru_lock);
1380 continue; 1159 continue;
1381 } 1160 }
1161
1162 lruvec = mem_cgroup_page_lruvec(page, zone);
1163
1382 SetPageLRU(page); 1164 SetPageLRU(page);
1383 lru = page_lru(page); 1165 lru = page_lru(page);
1384 add_page_to_lru_list(zone, page, lru); 1166 add_page_to_lru_list(page, lruvec, lru);
1167
1385 if (is_active_lru(lru)) { 1168 if (is_active_lru(lru)) {
1386 int file = is_file_lru(lru); 1169 int file = is_file_lru(lru);
1387 int numpages = hpage_nr_pages(page); 1170 int numpages = hpage_nr_pages(page);
@@ -1390,7 +1173,7 @@ putback_inactive_pages(struct mem_cgroup_zone *mz,
1390 if (put_page_testzero(page)) { 1173 if (put_page_testzero(page)) {
1391 __ClearPageLRU(page); 1174 __ClearPageLRU(page);
1392 __ClearPageActive(page); 1175 __ClearPageActive(page);
1393 del_page_from_lru_list(zone, page, lru); 1176 del_page_from_lru_list(page, lruvec, lru);
1394 1177
1395 if (unlikely(PageCompound(page))) { 1178 if (unlikely(PageCompound(page))) {
1396 spin_unlock_irq(&zone->lru_lock); 1179 spin_unlock_irq(&zone->lru_lock);
@@ -1407,112 +1190,24 @@ putback_inactive_pages(struct mem_cgroup_zone *mz,
1407 list_splice(&pages_to_free, page_list); 1190 list_splice(&pages_to_free, page_list);
1408} 1191}
1409 1192
1410static noinline_for_stack void
1411update_isolated_counts(struct mem_cgroup_zone *mz,
1412 struct list_head *page_list,
1413 unsigned long *nr_anon,
1414 unsigned long *nr_file)
1415{
1416 struct zone *zone = mz->zone;
1417 unsigned int count[NR_LRU_LISTS] = { 0, };
1418 unsigned long nr_active = 0;
1419 struct page *page;
1420 int lru;
1421
1422 /*
1423 * Count pages and clear active flags
1424 */
1425 list_for_each_entry(page, page_list, lru) {
1426 int numpages = hpage_nr_pages(page);
1427 lru = page_lru_base_type(page);
1428 if (PageActive(page)) {
1429 lru += LRU_ACTIVE;
1430 ClearPageActive(page);
1431 nr_active += numpages;
1432 }
1433 count[lru] += numpages;
1434 }
1435
1436 preempt_disable();
1437 __count_vm_events(PGDEACTIVATE, nr_active);
1438
1439 __mod_zone_page_state(zone, NR_ACTIVE_FILE,
1440 -count[LRU_ACTIVE_FILE]);
1441 __mod_zone_page_state(zone, NR_INACTIVE_FILE,
1442 -count[LRU_INACTIVE_FILE]);
1443 __mod_zone_page_state(zone, NR_ACTIVE_ANON,
1444 -count[LRU_ACTIVE_ANON]);
1445 __mod_zone_page_state(zone, NR_INACTIVE_ANON,
1446 -count[LRU_INACTIVE_ANON]);
1447
1448 *nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
1449 *nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
1450
1451 __mod_zone_page_state(zone, NR_ISOLATED_ANON, *nr_anon);
1452 __mod_zone_page_state(zone, NR_ISOLATED_FILE, *nr_file);
1453 preempt_enable();
1454}
1455
1456/*
1457 * Returns true if a direct reclaim should wait on pages under writeback.
1458 *
1459 * If we are direct reclaiming for contiguous pages and we do not reclaim
1460 * everything in the list, try again and wait for writeback IO to complete.
1461 * This will stall high-order allocations noticeably. Only do that when really
1462 * need to free the pages under high memory pressure.
1463 */
1464static inline bool should_reclaim_stall(unsigned long nr_taken,
1465 unsigned long nr_freed,
1466 int priority,
1467 struct scan_control *sc)
1468{
1469 int lumpy_stall_priority;
1470
1471 /* kswapd should not stall on sync IO */
1472 if (current_is_kswapd())
1473 return false;
1474
1475 /* Only stall on lumpy reclaim */
1476 if (sc->reclaim_mode & RECLAIM_MODE_SINGLE)
1477 return false;
1478
1479 /* If we have reclaimed everything on the isolated list, no stall */
1480 if (nr_freed == nr_taken)
1481 return false;
1482
1483 /*
1484 * For high-order allocations, there are two stall thresholds.
1485 * High-cost allocations stall immediately where as lower
1486 * order allocations such as stacks require the scanning
1487 * priority to be much higher before stalling.
1488 */
1489 if (sc->order > PAGE_ALLOC_COSTLY_ORDER)
1490 lumpy_stall_priority = DEF_PRIORITY;
1491 else
1492 lumpy_stall_priority = DEF_PRIORITY / 3;
1493
1494 return priority <= lumpy_stall_priority;
1495}
1496
1497/* 1193/*
1498 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number 1194 * shrink_inactive_list() is a helper for shrink_zone(). It returns the number
1499 * of reclaimed pages 1195 * of reclaimed pages
1500 */ 1196 */
1501static noinline_for_stack unsigned long 1197static noinline_for_stack unsigned long
1502shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz, 1198shrink_inactive_list(unsigned long nr_to_scan, struct lruvec *lruvec,
1503 struct scan_control *sc, int priority, int file) 1199 struct scan_control *sc, enum lru_list lru)
1504{ 1200{
1505 LIST_HEAD(page_list); 1201 LIST_HEAD(page_list);
1506 unsigned long nr_scanned; 1202 unsigned long nr_scanned;
1507 unsigned long nr_reclaimed = 0; 1203 unsigned long nr_reclaimed = 0;
1508 unsigned long nr_taken; 1204 unsigned long nr_taken;
1509 unsigned long nr_anon;
1510 unsigned long nr_file;
1511 unsigned long nr_dirty = 0; 1205 unsigned long nr_dirty = 0;
1512 unsigned long nr_writeback = 0; 1206 unsigned long nr_writeback = 0;
1513 isolate_mode_t isolate_mode = ISOLATE_INACTIVE; 1207 isolate_mode_t isolate_mode = 0;
1514 struct zone *zone = mz->zone; 1208 int file = is_file_lru(lru);
1515 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz); 1209 struct zone *zone = lruvec_zone(lruvec);
1210 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1516 1211
1517 while (unlikely(too_many_isolated(zone, file, sc))) { 1212 while (unlikely(too_many_isolated(zone, file, sc))) {
1518 congestion_wait(BLK_RW_ASYNC, HZ/10); 1213 congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1522,10 +1217,6 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
1522 return SWAP_CLUSTER_MAX; 1217 return SWAP_CLUSTER_MAX;
1523 } 1218 }
1524 1219
1525 set_reclaim_mode(priority, sc, false);
1526 if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
1527 isolate_mode |= ISOLATE_ACTIVE;
1528
1529 lru_add_drain(); 1220 lru_add_drain();
1530 1221
1531 if (!sc->may_unmap) 1222 if (!sc->may_unmap)
@@ -1535,38 +1226,30 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
1535 1226
1536 spin_lock_irq(&zone->lru_lock); 1227 spin_lock_irq(&zone->lru_lock);
1537 1228
1538 nr_taken = isolate_lru_pages(nr_to_scan, mz, &page_list, &nr_scanned, 1229 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &page_list,
1539 sc, isolate_mode, 0, file); 1230 &nr_scanned, sc, isolate_mode, lru);
1231
1232 __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
1233 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1234
1540 if (global_reclaim(sc)) { 1235 if (global_reclaim(sc)) {
1541 zone->pages_scanned += nr_scanned; 1236 zone->pages_scanned += nr_scanned;
1542 if (current_is_kswapd()) 1237 if (current_is_kswapd())
1543 __count_zone_vm_events(PGSCAN_KSWAPD, zone, 1238 __count_zone_vm_events(PGSCAN_KSWAPD, zone, nr_scanned);
1544 nr_scanned);
1545 else 1239 else
1546 __count_zone_vm_events(PGSCAN_DIRECT, zone, 1240 __count_zone_vm_events(PGSCAN_DIRECT, zone, nr_scanned);
1547 nr_scanned);
1548 } 1241 }
1549 spin_unlock_irq(&zone->lru_lock); 1242 spin_unlock_irq(&zone->lru_lock);
1550 1243
1551 if (nr_taken == 0) 1244 if (nr_taken == 0)
1552 return 0; 1245 return 0;
1553 1246
1554 update_isolated_counts(mz, &page_list, &nr_anon, &nr_file); 1247 nr_reclaimed = shrink_page_list(&page_list, zone, sc,
1555
1556 nr_reclaimed = shrink_page_list(&page_list, mz, sc, priority,
1557 &nr_dirty, &nr_writeback); 1248 &nr_dirty, &nr_writeback);
1558 1249
1559 /* Check if we should syncronously wait for writeback */
1560 if (should_reclaim_stall(nr_taken, nr_reclaimed, priority, sc)) {
1561 set_reclaim_mode(priority, sc, true);
1562 nr_reclaimed += shrink_page_list(&page_list, mz, sc,
1563 priority, &nr_dirty, &nr_writeback);
1564 }
1565
1566 spin_lock_irq(&zone->lru_lock); 1250 spin_lock_irq(&zone->lru_lock);
1567 1251
1568 reclaim_stat->recent_scanned[0] += nr_anon; 1252 reclaim_stat->recent_scanned[file] += nr_taken;
1569 reclaim_stat->recent_scanned[1] += nr_file;
1570 1253
1571 if (global_reclaim(sc)) { 1254 if (global_reclaim(sc)) {
1572 if (current_is_kswapd()) 1255 if (current_is_kswapd())
@@ -1577,10 +1260,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
1577 nr_reclaimed); 1260 nr_reclaimed);
1578 } 1261 }
1579 1262
1580 putback_inactive_pages(mz, &page_list); 1263 putback_inactive_pages(lruvec, &page_list);
1581 1264
1582 __mod_zone_page_state(zone, NR_ISOLATED_ANON, -nr_anon); 1265 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1583 __mod_zone_page_state(zone, NR_ISOLATED_FILE, -nr_file);
1584 1266
1585 spin_unlock_irq(&zone->lru_lock); 1267 spin_unlock_irq(&zone->lru_lock);
1586 1268
@@ -1609,14 +1291,15 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
1609 * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any 1291 * DEF_PRIORITY-6 For SWAP_CLUSTER_MAX isolated pages, throttle if any
1610 * isolated page is PageWriteback 1292 * isolated page is PageWriteback
1611 */ 1293 */
1612 if (nr_writeback && nr_writeback >= (nr_taken >> (DEF_PRIORITY-priority))) 1294 if (nr_writeback && nr_writeback >=
1295 (nr_taken >> (DEF_PRIORITY - sc->priority)))
1613 wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10); 1296 wait_iff_congested(zone, BLK_RW_ASYNC, HZ/10);
1614 1297
1615 trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id, 1298 trace_mm_vmscan_lru_shrink_inactive(zone->zone_pgdat->node_id,
1616 zone_idx(zone), 1299 zone_idx(zone),
1617 nr_scanned, nr_reclaimed, 1300 nr_scanned, nr_reclaimed,
1618 priority, 1301 sc->priority,
1619 trace_shrink_flags(file, sc->reclaim_mode)); 1302 trace_shrink_flags(file));
1620 return nr_reclaimed; 1303 return nr_reclaimed;
1621} 1304}
1622 1305
@@ -1638,30 +1321,32 @@ shrink_inactive_list(unsigned long nr_to_scan, struct mem_cgroup_zone *mz,
1638 * But we had to alter page->flags anyway. 1321 * But we had to alter page->flags anyway.
1639 */ 1322 */
1640 1323
1641static void move_active_pages_to_lru(struct zone *zone, 1324static void move_active_pages_to_lru(struct lruvec *lruvec,
1642 struct list_head *list, 1325 struct list_head *list,
1643 struct list_head *pages_to_free, 1326 struct list_head *pages_to_free,
1644 enum lru_list lru) 1327 enum lru_list lru)
1645{ 1328{
1329 struct zone *zone = lruvec_zone(lruvec);
1646 unsigned long pgmoved = 0; 1330 unsigned long pgmoved = 0;
1647 struct page *page; 1331 struct page *page;
1332 int nr_pages;
1648 1333
1649 while (!list_empty(list)) { 1334 while (!list_empty(list)) {
1650 struct lruvec *lruvec;
1651
1652 page = lru_to_page(list); 1335 page = lru_to_page(list);
1336 lruvec = mem_cgroup_page_lruvec(page, zone);
1653 1337
1654 VM_BUG_ON(PageLRU(page)); 1338 VM_BUG_ON(PageLRU(page));
1655 SetPageLRU(page); 1339 SetPageLRU(page);
1656 1340
1657 lruvec = mem_cgroup_lru_add_list(zone, page, lru); 1341 nr_pages = hpage_nr_pages(page);
1342 mem_cgroup_update_lru_size(lruvec, lru, nr_pages);
1658 list_move(&page->lru, &lruvec->lists[lru]); 1343 list_move(&page->lru, &lruvec->lists[lru]);
1659 pgmoved += hpage_nr_pages(page); 1344 pgmoved += nr_pages;
1660 1345
1661 if (put_page_testzero(page)) { 1346 if (put_page_testzero(page)) {
1662 __ClearPageLRU(page); 1347 __ClearPageLRU(page);
1663 __ClearPageActive(page); 1348 __ClearPageActive(page);
1664 del_page_from_lru_list(zone, page, lru); 1349 del_page_from_lru_list(page, lruvec, lru);
1665 1350
1666 if (unlikely(PageCompound(page))) { 1351 if (unlikely(PageCompound(page))) {
1667 spin_unlock_irq(&zone->lru_lock); 1352 spin_unlock_irq(&zone->lru_lock);
@@ -1677,9 +1362,9 @@ static void move_active_pages_to_lru(struct zone *zone,
1677} 1362}
1678 1363
1679static void shrink_active_list(unsigned long nr_to_scan, 1364static void shrink_active_list(unsigned long nr_to_scan,
1680 struct mem_cgroup_zone *mz, 1365 struct lruvec *lruvec,
1681 struct scan_control *sc, 1366 struct scan_control *sc,
1682 int priority, int file) 1367 enum lru_list lru)
1683{ 1368{
1684 unsigned long nr_taken; 1369 unsigned long nr_taken;
1685 unsigned long nr_scanned; 1370 unsigned long nr_scanned;
@@ -1688,15 +1373,14 @@ static void shrink_active_list(unsigned long nr_to_scan,
1688 LIST_HEAD(l_active); 1373 LIST_HEAD(l_active);
1689 LIST_HEAD(l_inactive); 1374 LIST_HEAD(l_inactive);
1690 struct page *page; 1375 struct page *page;
1691 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz); 1376 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1692 unsigned long nr_rotated = 0; 1377 unsigned long nr_rotated = 0;
1693 isolate_mode_t isolate_mode = ISOLATE_ACTIVE; 1378 isolate_mode_t isolate_mode = 0;
1694 struct zone *zone = mz->zone; 1379 int file = is_file_lru(lru);
1380 struct zone *zone = lruvec_zone(lruvec);
1695 1381
1696 lru_add_drain(); 1382 lru_add_drain();
1697 1383
1698 reset_reclaim_mode(sc);
1699
1700 if (!sc->may_unmap) 1384 if (!sc->may_unmap)
1701 isolate_mode |= ISOLATE_UNMAPPED; 1385 isolate_mode |= ISOLATE_UNMAPPED;
1702 if (!sc->may_writepage) 1386 if (!sc->may_writepage)
@@ -1704,18 +1388,15 @@ static void shrink_active_list(unsigned long nr_to_scan,
1704 1388
1705 spin_lock_irq(&zone->lru_lock); 1389 spin_lock_irq(&zone->lru_lock);
1706 1390
1707 nr_taken = isolate_lru_pages(nr_to_scan, mz, &l_hold, &nr_scanned, sc, 1391 nr_taken = isolate_lru_pages(nr_to_scan, lruvec, &l_hold,
1708 isolate_mode, 1, file); 1392 &nr_scanned, sc, isolate_mode, lru);
1709 if (global_reclaim(sc)) 1393 if (global_reclaim(sc))
1710 zone->pages_scanned += nr_scanned; 1394 zone->pages_scanned += nr_scanned;
1711 1395
1712 reclaim_stat->recent_scanned[file] += nr_taken; 1396 reclaim_stat->recent_scanned[file] += nr_taken;
1713 1397
1714 __count_zone_vm_events(PGREFILL, zone, nr_scanned); 1398 __count_zone_vm_events(PGREFILL, zone, nr_scanned);
1715 if (file) 1399 __mod_zone_page_state(zone, NR_LRU_BASE + lru, -nr_taken);
1716 __mod_zone_page_state(zone, NR_ACTIVE_FILE, -nr_taken);
1717 else
1718 __mod_zone_page_state(zone, NR_ACTIVE_ANON, -nr_taken);
1719 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken); 1400 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, nr_taken);
1720 spin_unlock_irq(&zone->lru_lock); 1401 spin_unlock_irq(&zone->lru_lock);
1721 1402
@@ -1737,7 +1418,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
1737 } 1418 }
1738 } 1419 }
1739 1420
1740 if (page_referenced(page, 0, mz->mem_cgroup, &vm_flags)) { 1421 if (page_referenced(page, 0, sc->target_mem_cgroup,
1422 &vm_flags)) {
1741 nr_rotated += hpage_nr_pages(page); 1423 nr_rotated += hpage_nr_pages(page);
1742 /* 1424 /*
1743 * Identify referenced, file-backed active pages and 1425 * Identify referenced, file-backed active pages and
@@ -1770,10 +1452,8 @@ static void shrink_active_list(unsigned long nr_to_scan,
1770 */ 1452 */
1771 reclaim_stat->recent_rotated[file] += nr_rotated; 1453 reclaim_stat->recent_rotated[file] += nr_rotated;
1772 1454
1773 move_active_pages_to_lru(zone, &l_active, &l_hold, 1455 move_active_pages_to_lru(lruvec, &l_active, &l_hold, lru);
1774 LRU_ACTIVE + file * LRU_FILE); 1456 move_active_pages_to_lru(lruvec, &l_inactive, &l_hold, lru - LRU_ACTIVE);
1775 move_active_pages_to_lru(zone, &l_inactive, &l_hold,
1776 LRU_BASE + file * LRU_FILE);
1777 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken); 1457 __mod_zone_page_state(zone, NR_ISOLATED_ANON + file, -nr_taken);
1778 spin_unlock_irq(&zone->lru_lock); 1458 spin_unlock_irq(&zone->lru_lock);
1779 1459
@@ -1796,13 +1476,12 @@ static int inactive_anon_is_low_global(struct zone *zone)
1796 1476
1797/** 1477/**
1798 * inactive_anon_is_low - check if anonymous pages need to be deactivated 1478 * inactive_anon_is_low - check if anonymous pages need to be deactivated
1799 * @zone: zone to check 1479 * @lruvec: LRU vector to check
1800 * @sc: scan control of this context
1801 * 1480 *
1802 * Returns true if the zone does not have enough inactive anon pages, 1481 * Returns true if the zone does not have enough inactive anon pages,
1803 * meaning some active anon pages need to be deactivated. 1482 * meaning some active anon pages need to be deactivated.
1804 */ 1483 */
1805static int inactive_anon_is_low(struct mem_cgroup_zone *mz) 1484static int inactive_anon_is_low(struct lruvec *lruvec)
1806{ 1485{
1807 /* 1486 /*
1808 * If we don't have swap space, anonymous page deactivation 1487 * If we don't have swap space, anonymous page deactivation
@@ -1811,14 +1490,13 @@ static int inactive_anon_is_low(struct mem_cgroup_zone *mz)
1811 if (!total_swap_pages) 1490 if (!total_swap_pages)
1812 return 0; 1491 return 0;
1813 1492
1814 if (!scanning_global_lru(mz)) 1493 if (!mem_cgroup_disabled())
1815 return mem_cgroup_inactive_anon_is_low(mz->mem_cgroup, 1494 return mem_cgroup_inactive_anon_is_low(lruvec);
1816 mz->zone);
1817 1495
1818 return inactive_anon_is_low_global(mz->zone); 1496 return inactive_anon_is_low_global(lruvec_zone(lruvec));
1819} 1497}
1820#else 1498#else
1821static inline int inactive_anon_is_low(struct mem_cgroup_zone *mz) 1499static inline int inactive_anon_is_low(struct lruvec *lruvec)
1822{ 1500{
1823 return 0; 1501 return 0;
1824} 1502}
@@ -1836,7 +1514,7 @@ static int inactive_file_is_low_global(struct zone *zone)
1836 1514
1837/** 1515/**
1838 * inactive_file_is_low - check if file pages need to be deactivated 1516 * inactive_file_is_low - check if file pages need to be deactivated
1839 * @mz: memory cgroup and zone to check 1517 * @lruvec: LRU vector to check
1840 * 1518 *
1841 * When the system is doing streaming IO, memory pressure here 1519 * When the system is doing streaming IO, memory pressure here
1842 * ensures that active file pages get deactivated, until more 1520 * ensures that active file pages get deactivated, until more
@@ -1848,44 +1526,39 @@ static int inactive_file_is_low_global(struct zone *zone)
1848 * This uses a different ratio than the anonymous pages, because 1526 * This uses a different ratio than the anonymous pages, because
1849 * the page cache uses a use-once replacement algorithm. 1527 * the page cache uses a use-once replacement algorithm.
1850 */ 1528 */
1851static int inactive_file_is_low(struct mem_cgroup_zone *mz) 1529static int inactive_file_is_low(struct lruvec *lruvec)
1852{ 1530{
1853 if (!scanning_global_lru(mz)) 1531 if (!mem_cgroup_disabled())
1854 return mem_cgroup_inactive_file_is_low(mz->mem_cgroup, 1532 return mem_cgroup_inactive_file_is_low(lruvec);
1855 mz->zone);
1856 1533
1857 return inactive_file_is_low_global(mz->zone); 1534 return inactive_file_is_low_global(lruvec_zone(lruvec));
1858} 1535}
1859 1536
1860static int inactive_list_is_low(struct mem_cgroup_zone *mz, int file) 1537static int inactive_list_is_low(struct lruvec *lruvec, enum lru_list lru)
1861{ 1538{
1862 if (file) 1539 if (is_file_lru(lru))
1863 return inactive_file_is_low(mz); 1540 return inactive_file_is_low(lruvec);
1864 else 1541 else
1865 return inactive_anon_is_low(mz); 1542 return inactive_anon_is_low(lruvec);
1866} 1543}
1867 1544
1868static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan, 1545static unsigned long shrink_list(enum lru_list lru, unsigned long nr_to_scan,
1869 struct mem_cgroup_zone *mz, 1546 struct lruvec *lruvec, struct scan_control *sc)
1870 struct scan_control *sc, int priority)
1871{ 1547{
1872 int file = is_file_lru(lru);
1873
1874 if (is_active_lru(lru)) { 1548 if (is_active_lru(lru)) {
1875 if (inactive_list_is_low(mz, file)) 1549 if (inactive_list_is_low(lruvec, lru))
1876 shrink_active_list(nr_to_scan, mz, sc, priority, file); 1550 shrink_active_list(nr_to_scan, lruvec, sc, lru);
1877 return 0; 1551 return 0;
1878 } 1552 }
1879 1553
1880 return shrink_inactive_list(nr_to_scan, mz, sc, priority, file); 1554 return shrink_inactive_list(nr_to_scan, lruvec, sc, lru);
1881} 1555}
1882 1556
1883static int vmscan_swappiness(struct mem_cgroup_zone *mz, 1557static int vmscan_swappiness(struct scan_control *sc)
1884 struct scan_control *sc)
1885{ 1558{
1886 if (global_reclaim(sc)) 1559 if (global_reclaim(sc))
1887 return vm_swappiness; 1560 return vm_swappiness;
1888 return mem_cgroup_swappiness(mz->mem_cgroup); 1561 return mem_cgroup_swappiness(sc->target_mem_cgroup);
1889} 1562}
1890 1563
1891/* 1564/*
@@ -1896,17 +1569,18 @@ static int vmscan_swappiness(struct mem_cgroup_zone *mz,
1896 * 1569 *
1897 * nr[0] = anon pages to scan; nr[1] = file pages to scan 1570 * nr[0] = anon pages to scan; nr[1] = file pages to scan
1898 */ 1571 */
1899static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc, 1572static void get_scan_count(struct lruvec *lruvec, struct scan_control *sc,
1900 unsigned long *nr, int priority) 1573 unsigned long *nr)
1901{ 1574{
1902 unsigned long anon, file, free; 1575 unsigned long anon, file, free;
1903 unsigned long anon_prio, file_prio; 1576 unsigned long anon_prio, file_prio;
1904 unsigned long ap, fp; 1577 unsigned long ap, fp;
1905 struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(mz); 1578 struct zone_reclaim_stat *reclaim_stat = &lruvec->reclaim_stat;
1906 u64 fraction[2], denominator; 1579 u64 fraction[2], denominator;
1907 enum lru_list lru; 1580 enum lru_list lru;
1908 int noswap = 0; 1581 int noswap = 0;
1909 bool force_scan = false; 1582 bool force_scan = false;
1583 struct zone *zone = lruvec_zone(lruvec);
1910 1584
1911 /* 1585 /*
1912 * If the zone or memcg is small, nr[l] can be 0. This 1586 * If the zone or memcg is small, nr[l] can be 0. This
@@ -1918,7 +1592,7 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
1918 * latencies, so it's better to scan a minimum amount there as 1592 * latencies, so it's better to scan a minimum amount there as
1919 * well. 1593 * well.
1920 */ 1594 */
1921 if (current_is_kswapd() && mz->zone->all_unreclaimable) 1595 if (current_is_kswapd() && zone->all_unreclaimable)
1922 force_scan = true; 1596 force_scan = true;
1923 if (!global_reclaim(sc)) 1597 if (!global_reclaim(sc))
1924 force_scan = true; 1598 force_scan = true;
@@ -1932,16 +1606,16 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
1932 goto out; 1606 goto out;
1933 } 1607 }
1934 1608
1935 anon = zone_nr_lru_pages(mz, LRU_ACTIVE_ANON) + 1609 anon = get_lru_size(lruvec, LRU_ACTIVE_ANON) +
1936 zone_nr_lru_pages(mz, LRU_INACTIVE_ANON); 1610 get_lru_size(lruvec, LRU_INACTIVE_ANON);
1937 file = zone_nr_lru_pages(mz, LRU_ACTIVE_FILE) + 1611 file = get_lru_size(lruvec, LRU_ACTIVE_FILE) +
1938 zone_nr_lru_pages(mz, LRU_INACTIVE_FILE); 1612 get_lru_size(lruvec, LRU_INACTIVE_FILE);
1939 1613
1940 if (global_reclaim(sc)) { 1614 if (global_reclaim(sc)) {
1941 free = zone_page_state(mz->zone, NR_FREE_PAGES); 1615 free = zone_page_state(zone, NR_FREE_PAGES);
1942 /* If we have very few page cache pages, 1616 /* If we have very few page cache pages,
1943 force-scan anon pages. */ 1617 force-scan anon pages. */
1944 if (unlikely(file + free <= high_wmark_pages(mz->zone))) { 1618 if (unlikely(file + free <= high_wmark_pages(zone))) {
1945 fraction[0] = 1; 1619 fraction[0] = 1;
1946 fraction[1] = 0; 1620 fraction[1] = 0;
1947 denominator = 1; 1621 denominator = 1;
@@ -1953,8 +1627,8 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
1953 * With swappiness at 100, anonymous and file have the same priority. 1627 * With swappiness at 100, anonymous and file have the same priority.
1954 * This scanning priority is essentially the inverse of IO cost. 1628 * This scanning priority is essentially the inverse of IO cost.
1955 */ 1629 */
1956 anon_prio = vmscan_swappiness(mz, sc); 1630 anon_prio = vmscan_swappiness(sc);
1957 file_prio = 200 - vmscan_swappiness(mz, sc); 1631 file_prio = 200 - anon_prio;
1958 1632
1959 /* 1633 /*
1960 * OK, so we have swap space and a fair amount of page cache 1634 * OK, so we have swap space and a fair amount of page cache
@@ -1967,7 +1641,7 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
1967 * 1641 *
1968 * anon in [0], file in [1] 1642 * anon in [0], file in [1]
1969 */ 1643 */
1970 spin_lock_irq(&mz->zone->lru_lock); 1644 spin_lock_irq(&zone->lru_lock);
1971 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) { 1645 if (unlikely(reclaim_stat->recent_scanned[0] > anon / 4)) {
1972 reclaim_stat->recent_scanned[0] /= 2; 1646 reclaim_stat->recent_scanned[0] /= 2;
1973 reclaim_stat->recent_rotated[0] /= 2; 1647 reclaim_stat->recent_rotated[0] /= 2;
@@ -1983,12 +1657,12 @@ static void get_scan_count(struct mem_cgroup_zone *mz, struct scan_control *sc,
1983 * proportional to the fraction of recently scanned pages on 1657 * proportional to the fraction of recently scanned pages on
1984 * each list that were recently referenced and in active use. 1658 * each list that were recently referenced and in active use.
1985 */ 1659 */
1986 ap = (anon_prio + 1) * (reclaim_stat->recent_scanned[0] + 1); 1660 ap = anon_prio * (reclaim_stat->recent_scanned[0] + 1);
1987 ap /= reclaim_stat->recent_rotated[0] + 1; 1661 ap /= reclaim_stat->recent_rotated[0] + 1;
1988 1662
1989 fp = (file_prio + 1) * (reclaim_stat->recent_scanned[1] + 1); 1663 fp = file_prio * (reclaim_stat->recent_scanned[1] + 1);
1990 fp /= reclaim_stat->recent_rotated[1] + 1; 1664 fp /= reclaim_stat->recent_rotated[1] + 1;
1991 spin_unlock_irq(&mz->zone->lru_lock); 1665 spin_unlock_irq(&zone->lru_lock);
1992 1666
1993 fraction[0] = ap; 1667 fraction[0] = ap;
1994 fraction[1] = fp; 1668 fraction[1] = fp;
@@ -1998,9 +1672,9 @@ out:
1998 int file = is_file_lru(lru); 1672 int file = is_file_lru(lru);
1999 unsigned long scan; 1673 unsigned long scan;
2000 1674
2001 scan = zone_nr_lru_pages(mz, lru); 1675 scan = get_lru_size(lruvec, lru);
2002 if (priority || noswap) { 1676 if (sc->priority || noswap || !vmscan_swappiness(sc)) {
2003 scan >>= priority; 1677 scan >>= sc->priority;
2004 if (!scan && force_scan) 1678 if (!scan && force_scan)
2005 scan = SWAP_CLUSTER_MAX; 1679 scan = SWAP_CLUSTER_MAX;
2006 scan = div64_u64(scan * fraction[file], denominator); 1680 scan = div64_u64(scan * fraction[file], denominator);
@@ -2009,14 +1683,25 @@ out:
2009 } 1683 }
2010} 1684}
2011 1685
1686/* Use reclaim/compaction for costly allocs or under memory pressure */
1687static bool in_reclaim_compaction(struct scan_control *sc)
1688{
1689 if (COMPACTION_BUILD && sc->order &&
1690 (sc->order > PAGE_ALLOC_COSTLY_ORDER ||
1691 sc->priority < DEF_PRIORITY - 2))
1692 return true;
1693
1694 return false;
1695}
1696
2012/* 1697/*
2013 * Reclaim/compaction depends on a number of pages being freed. To avoid 1698 * Reclaim/compaction is used for high-order allocation requests. It reclaims
2014 * disruption to the system, a small number of order-0 pages continue to be 1699 * order-0 pages before compacting the zone. should_continue_reclaim() returns
2015 * rotated and reclaimed in the normal fashion. However, by the time we get 1700 * true if more pages should be reclaimed such that when the page allocator
2016 * back to the allocator and call try_to_compact_zone(), we ensure that 1701 * calls try_to_compact_zone() that it will have enough free pages to succeed.
2017 * there are enough free pages for it to be likely successful 1702 * It will give up earlier than that if there is difficulty reclaiming pages.
2018 */ 1703 */
2019static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz, 1704static inline bool should_continue_reclaim(struct lruvec *lruvec,
2020 unsigned long nr_reclaimed, 1705 unsigned long nr_reclaimed,
2021 unsigned long nr_scanned, 1706 unsigned long nr_scanned,
2022 struct scan_control *sc) 1707 struct scan_control *sc)
@@ -2025,7 +1710,7 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
2025 unsigned long inactive_lru_pages; 1710 unsigned long inactive_lru_pages;
2026 1711
2027 /* If not in reclaim/compaction mode, stop */ 1712 /* If not in reclaim/compaction mode, stop */
2028 if (!(sc->reclaim_mode & RECLAIM_MODE_COMPACTION)) 1713 if (!in_reclaim_compaction(sc))
2029 return false; 1714 return false;
2030 1715
2031 /* Consider stopping depending on scan and reclaim activity */ 1716 /* Consider stopping depending on scan and reclaim activity */
@@ -2056,15 +1741,15 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
2056 * inactive lists are large enough, continue reclaiming 1741 * inactive lists are large enough, continue reclaiming
2057 */ 1742 */
2058 pages_for_compaction = (2UL << sc->order); 1743 pages_for_compaction = (2UL << sc->order);
2059 inactive_lru_pages = zone_nr_lru_pages(mz, LRU_INACTIVE_FILE); 1744 inactive_lru_pages = get_lru_size(lruvec, LRU_INACTIVE_FILE);
2060 if (nr_swap_pages > 0) 1745 if (nr_swap_pages > 0)
2061 inactive_lru_pages += zone_nr_lru_pages(mz, LRU_INACTIVE_ANON); 1746 inactive_lru_pages += get_lru_size(lruvec, LRU_INACTIVE_ANON);
2062 if (sc->nr_reclaimed < pages_for_compaction && 1747 if (sc->nr_reclaimed < pages_for_compaction &&
2063 inactive_lru_pages > pages_for_compaction) 1748 inactive_lru_pages > pages_for_compaction)
2064 return true; 1749 return true;
2065 1750
2066 /* If compaction would go ahead or the allocation would succeed, stop */ 1751 /* If compaction would go ahead or the allocation would succeed, stop */
2067 switch (compaction_suitable(mz->zone, sc->order)) { 1752 switch (compaction_suitable(lruvec_zone(lruvec), sc->order)) {
2068 case COMPACT_PARTIAL: 1753 case COMPACT_PARTIAL:
2069 case COMPACT_CONTINUE: 1754 case COMPACT_CONTINUE:
2070 return false; 1755 return false;
@@ -2076,8 +1761,7 @@ static inline bool should_continue_reclaim(struct mem_cgroup_zone *mz,
2076/* 1761/*
2077 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim. 1762 * This is a basic per-zone page freer. Used by both kswapd and direct reclaim.
2078 */ 1763 */
2079static void shrink_mem_cgroup_zone(int priority, struct mem_cgroup_zone *mz, 1764static void shrink_lruvec(struct lruvec *lruvec, struct scan_control *sc)
2080 struct scan_control *sc)
2081{ 1765{
2082 unsigned long nr[NR_LRU_LISTS]; 1766 unsigned long nr[NR_LRU_LISTS];
2083 unsigned long nr_to_scan; 1767 unsigned long nr_to_scan;
@@ -2089,7 +1773,7 @@ static void shrink_mem_cgroup_zone(int priority, struct mem_cgroup_zone *mz,
2089restart: 1773restart:
2090 nr_reclaimed = 0; 1774 nr_reclaimed = 0;
2091 nr_scanned = sc->nr_scanned; 1775 nr_scanned = sc->nr_scanned;
2092 get_scan_count(mz, sc, nr, priority); 1776 get_scan_count(lruvec, sc, nr);
2093 1777
2094 blk_start_plug(&plug); 1778 blk_start_plug(&plug);
2095 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] || 1779 while (nr[LRU_INACTIVE_ANON] || nr[LRU_ACTIVE_FILE] ||
@@ -2101,7 +1785,7 @@ restart:
2101 nr[lru] -= nr_to_scan; 1785 nr[lru] -= nr_to_scan;
2102 1786
2103 nr_reclaimed += shrink_list(lru, nr_to_scan, 1787 nr_reclaimed += shrink_list(lru, nr_to_scan,
2104 mz, sc, priority); 1788 lruvec, sc);
2105 } 1789 }
2106 } 1790 }
2107 /* 1791 /*
@@ -2112,7 +1796,8 @@ restart:
2112 * with multiple processes reclaiming pages, the total 1796 * with multiple processes reclaiming pages, the total
2113 * freeing target can get unreasonably large. 1797 * freeing target can get unreasonably large.
2114 */ 1798 */
2115 if (nr_reclaimed >= nr_to_reclaim && priority < DEF_PRIORITY) 1799 if (nr_reclaimed >= nr_to_reclaim &&
1800 sc->priority < DEF_PRIORITY)
2116 break; 1801 break;
2117 } 1802 }
2118 blk_finish_plug(&plug); 1803 blk_finish_plug(&plug);
@@ -2122,35 +1807,33 @@ restart:
2122 * Even if we did not try to evict anon pages at all, we want to 1807 * Even if we did not try to evict anon pages at all, we want to
2123 * rebalance the anon lru active/inactive ratio. 1808 * rebalance the anon lru active/inactive ratio.
2124 */ 1809 */
2125 if (inactive_anon_is_low(mz)) 1810 if (inactive_anon_is_low(lruvec))
2126 shrink_active_list(SWAP_CLUSTER_MAX, mz, sc, priority, 0); 1811 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
1812 sc, LRU_ACTIVE_ANON);
2127 1813
2128 /* reclaim/compaction might need reclaim to continue */ 1814 /* reclaim/compaction might need reclaim to continue */
2129 if (should_continue_reclaim(mz, nr_reclaimed, 1815 if (should_continue_reclaim(lruvec, nr_reclaimed,
2130 sc->nr_scanned - nr_scanned, sc)) 1816 sc->nr_scanned - nr_scanned, sc))
2131 goto restart; 1817 goto restart;
2132 1818
2133 throttle_vm_writeout(sc->gfp_mask); 1819 throttle_vm_writeout(sc->gfp_mask);
2134} 1820}
2135 1821
2136static void shrink_zone(int priority, struct zone *zone, 1822static void shrink_zone(struct zone *zone, struct scan_control *sc)
2137 struct scan_control *sc)
2138{ 1823{
2139 struct mem_cgroup *root = sc->target_mem_cgroup; 1824 struct mem_cgroup *root = sc->target_mem_cgroup;
2140 struct mem_cgroup_reclaim_cookie reclaim = { 1825 struct mem_cgroup_reclaim_cookie reclaim = {
2141 .zone = zone, 1826 .zone = zone,
2142 .priority = priority, 1827 .priority = sc->priority,
2143 }; 1828 };
2144 struct mem_cgroup *memcg; 1829 struct mem_cgroup *memcg;
2145 1830
2146 memcg = mem_cgroup_iter(root, NULL, &reclaim); 1831 memcg = mem_cgroup_iter(root, NULL, &reclaim);
2147 do { 1832 do {
2148 struct mem_cgroup_zone mz = { 1833 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
2149 .mem_cgroup = memcg, 1834
2150 .zone = zone, 1835 shrink_lruvec(lruvec, sc);
2151 };
2152 1836
2153 shrink_mem_cgroup_zone(priority, &mz, sc);
2154 /* 1837 /*
2155 * Limit reclaim has historically picked one memcg and 1838 * Limit reclaim has historically picked one memcg and
2156 * scanned it with decreasing priority levels until 1839 * scanned it with decreasing priority levels until
@@ -2226,8 +1909,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
2226 * the caller that it should consider retrying the allocation instead of 1909 * the caller that it should consider retrying the allocation instead of
2227 * further reclaim. 1910 * further reclaim.
2228 */ 1911 */
2229static bool shrink_zones(int priority, struct zonelist *zonelist, 1912static bool shrink_zones(struct zonelist *zonelist, struct scan_control *sc)
2230 struct scan_control *sc)
2231{ 1913{
2232 struct zoneref *z; 1914 struct zoneref *z;
2233 struct zone *zone; 1915 struct zone *zone;
@@ -2254,7 +1936,8 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
2254 if (global_reclaim(sc)) { 1936 if (global_reclaim(sc)) {
2255 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL)) 1937 if (!cpuset_zone_allowed_hardwall(zone, GFP_KERNEL))
2256 continue; 1938 continue;
2257 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 1939 if (zone->all_unreclaimable &&
1940 sc->priority != DEF_PRIORITY)
2258 continue; /* Let kswapd poll it */ 1941 continue; /* Let kswapd poll it */
2259 if (COMPACTION_BUILD) { 1942 if (COMPACTION_BUILD) {
2260 /* 1943 /*
@@ -2286,7 +1969,7 @@ static bool shrink_zones(int priority, struct zonelist *zonelist,
2286 /* need some check for avoid more shrink_zone() */ 1969 /* need some check for avoid more shrink_zone() */
2287 } 1970 }
2288 1971
2289 shrink_zone(priority, zone, sc); 1972 shrink_zone(zone, sc);
2290 } 1973 }
2291 1974
2292 return aborted_reclaim; 1975 return aborted_reclaim;
@@ -2337,7 +2020,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2337 struct scan_control *sc, 2020 struct scan_control *sc,
2338 struct shrink_control *shrink) 2021 struct shrink_control *shrink)
2339{ 2022{
2340 int priority;
2341 unsigned long total_scanned = 0; 2023 unsigned long total_scanned = 0;
2342 struct reclaim_state *reclaim_state = current->reclaim_state; 2024 struct reclaim_state *reclaim_state = current->reclaim_state;
2343 struct zoneref *z; 2025 struct zoneref *z;
@@ -2350,11 +2032,9 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2350 if (global_reclaim(sc)) 2032 if (global_reclaim(sc))
2351 count_vm_event(ALLOCSTALL); 2033 count_vm_event(ALLOCSTALL);
2352 2034
2353 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 2035 do {
2354 sc->nr_scanned = 0; 2036 sc->nr_scanned = 0;
2355 if (!priority) 2037 aborted_reclaim = shrink_zones(zonelist, sc);
2356 disable_swap_token(sc->target_mem_cgroup);
2357 aborted_reclaim = shrink_zones(priority, zonelist, sc);
2358 2038
2359 /* 2039 /*
2360 * Don't shrink slabs when reclaiming memory from 2040 * Don't shrink slabs when reclaiming memory from
@@ -2396,7 +2076,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2396 2076
2397 /* Take a nap, wait for some writeback to complete */ 2077 /* Take a nap, wait for some writeback to complete */
2398 if (!sc->hibernation_mode && sc->nr_scanned && 2078 if (!sc->hibernation_mode && sc->nr_scanned &&
2399 priority < DEF_PRIORITY - 2) { 2079 sc->priority < DEF_PRIORITY - 2) {
2400 struct zone *preferred_zone; 2080 struct zone *preferred_zone;
2401 2081
2402 first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask), 2082 first_zones_zonelist(zonelist, gfp_zone(sc->gfp_mask),
@@ -2404,7 +2084,7 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
2404 &preferred_zone); 2084 &preferred_zone);
2405 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10); 2085 wait_iff_congested(preferred_zone, BLK_RW_ASYNC, HZ/10);
2406 } 2086 }
2407 } 2087 } while (--sc->priority >= 0);
2408 2088
2409out: 2089out:
2410 delayacct_freepages_end(); 2090 delayacct_freepages_end();
@@ -2442,6 +2122,7 @@ unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
2442 .may_unmap = 1, 2122 .may_unmap = 1,
2443 .may_swap = 1, 2123 .may_swap = 1,
2444 .order = order, 2124 .order = order,
2125 .priority = DEF_PRIORITY,
2445 .target_mem_cgroup = NULL, 2126 .target_mem_cgroup = NULL,
2446 .nodemask = nodemask, 2127 .nodemask = nodemask,
2447 }; 2128 };
@@ -2474,17 +2155,15 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
2474 .may_unmap = 1, 2155 .may_unmap = 1,
2475 .may_swap = !noswap, 2156 .may_swap = !noswap,
2476 .order = 0, 2157 .order = 0,
2158 .priority = 0,
2477 .target_mem_cgroup = memcg, 2159 .target_mem_cgroup = memcg,
2478 }; 2160 };
2479 struct mem_cgroup_zone mz = { 2161 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
2480 .mem_cgroup = memcg,
2481 .zone = zone,
2482 };
2483 2162
2484 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2163 sc.gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
2485 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK); 2164 (GFP_HIGHUSER_MOVABLE & ~GFP_RECLAIM_MASK);
2486 2165
2487 trace_mm_vmscan_memcg_softlimit_reclaim_begin(0, 2166 trace_mm_vmscan_memcg_softlimit_reclaim_begin(sc.order,
2488 sc.may_writepage, 2167 sc.may_writepage,
2489 sc.gfp_mask); 2168 sc.gfp_mask);
2490 2169
@@ -2495,7 +2174,7 @@ unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *memcg,
2495 * will pick up pages from other mem cgroup's as well. We hack 2174 * will pick up pages from other mem cgroup's as well. We hack
2496 * the priority and make it zero. 2175 * the priority and make it zero.
2497 */ 2176 */
2498 shrink_mem_cgroup_zone(0, &mz, &sc); 2177 shrink_lruvec(lruvec, &sc);
2499 2178
2500 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed); 2179 trace_mm_vmscan_memcg_softlimit_reclaim_end(sc.nr_reclaimed);
2501 2180
@@ -2516,6 +2195,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
2516 .may_swap = !noswap, 2195 .may_swap = !noswap,
2517 .nr_to_reclaim = SWAP_CLUSTER_MAX, 2196 .nr_to_reclaim = SWAP_CLUSTER_MAX,
2518 .order = 0, 2197 .order = 0,
2198 .priority = DEF_PRIORITY,
2519 .target_mem_cgroup = memcg, 2199 .target_mem_cgroup = memcg,
2520 .nodemask = NULL, /* we don't care the placement */ 2200 .nodemask = NULL, /* we don't care the placement */
2521 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) | 2201 .gfp_mask = (gfp_mask & GFP_RECLAIM_MASK) |
@@ -2546,8 +2226,7 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *memcg,
2546} 2226}
2547#endif 2227#endif
2548 2228
2549static void age_active_anon(struct zone *zone, struct scan_control *sc, 2229static void age_active_anon(struct zone *zone, struct scan_control *sc)
2550 int priority)
2551{ 2230{
2552 struct mem_cgroup *memcg; 2231 struct mem_cgroup *memcg;
2553 2232
@@ -2556,14 +2235,11 @@ static void age_active_anon(struct zone *zone, struct scan_control *sc,
2556 2235
2557 memcg = mem_cgroup_iter(NULL, NULL, NULL); 2236 memcg = mem_cgroup_iter(NULL, NULL, NULL);
2558 do { 2237 do {
2559 struct mem_cgroup_zone mz = { 2238 struct lruvec *lruvec = mem_cgroup_zone_lruvec(zone, memcg);
2560 .mem_cgroup = memcg,
2561 .zone = zone,
2562 };
2563 2239
2564 if (inactive_anon_is_low(&mz)) 2240 if (inactive_anon_is_low(lruvec))
2565 shrink_active_list(SWAP_CLUSTER_MAX, &mz, 2241 shrink_active_list(SWAP_CLUSTER_MAX, lruvec,
2566 sc, priority, 0); 2242 sc, LRU_ACTIVE_ANON);
2567 2243
2568 memcg = mem_cgroup_iter(NULL, memcg, NULL); 2244 memcg = mem_cgroup_iter(NULL, memcg, NULL);
2569 } while (memcg); 2245 } while (memcg);
@@ -2672,7 +2348,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
2672{ 2348{
2673 int all_zones_ok; 2349 int all_zones_ok;
2674 unsigned long balanced; 2350 unsigned long balanced;
2675 int priority;
2676 int i; 2351 int i;
2677 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */ 2352 int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
2678 unsigned long total_scanned; 2353 unsigned long total_scanned;
@@ -2696,18 +2371,15 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
2696 }; 2371 };
2697loop_again: 2372loop_again:
2698 total_scanned = 0; 2373 total_scanned = 0;
2374 sc.priority = DEF_PRIORITY;
2699 sc.nr_reclaimed = 0; 2375 sc.nr_reclaimed = 0;
2700 sc.may_writepage = !laptop_mode; 2376 sc.may_writepage = !laptop_mode;
2701 count_vm_event(PAGEOUTRUN); 2377 count_vm_event(PAGEOUTRUN);
2702 2378
2703 for (priority = DEF_PRIORITY; priority >= 0; priority--) { 2379 do {
2704 unsigned long lru_pages = 0; 2380 unsigned long lru_pages = 0;
2705 int has_under_min_watermark_zone = 0; 2381 int has_under_min_watermark_zone = 0;
2706 2382
2707 /* The swap token gets in the way of swapout... */
2708 if (!priority)
2709 disable_swap_token(NULL);
2710
2711 all_zones_ok = 1; 2383 all_zones_ok = 1;
2712 balanced = 0; 2384 balanced = 0;
2713 2385
@@ -2721,14 +2393,15 @@ loop_again:
2721 if (!populated_zone(zone)) 2393 if (!populated_zone(zone))
2722 continue; 2394 continue;
2723 2395
2724 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2396 if (zone->all_unreclaimable &&
2397 sc.priority != DEF_PRIORITY)
2725 continue; 2398 continue;
2726 2399
2727 /* 2400 /*
2728 * Do some background aging of the anon list, to give 2401 * Do some background aging of the anon list, to give
2729 * pages a chance to be referenced before reclaiming. 2402 * pages a chance to be referenced before reclaiming.
2730 */ 2403 */
2731 age_active_anon(zone, &sc, priority); 2404 age_active_anon(zone, &sc);
2732 2405
2733 /* 2406 /*
2734 * If the number of buffer_heads in the machine 2407 * If the number of buffer_heads in the machine
@@ -2776,7 +2449,8 @@ loop_again:
2776 if (!populated_zone(zone)) 2449 if (!populated_zone(zone))
2777 continue; 2450 continue;
2778 2451
2779 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2452 if (zone->all_unreclaimable &&
2453 sc.priority != DEF_PRIORITY)
2780 continue; 2454 continue;
2781 2455
2782 sc.nr_scanned = 0; 2456 sc.nr_scanned = 0;
@@ -2820,7 +2494,7 @@ loop_again:
2820 !zone_watermark_ok_safe(zone, testorder, 2494 !zone_watermark_ok_safe(zone, testorder,
2821 high_wmark_pages(zone) + balance_gap, 2495 high_wmark_pages(zone) + balance_gap,
2822 end_zone, 0)) { 2496 end_zone, 0)) {
2823 shrink_zone(priority, zone, &sc); 2497 shrink_zone(zone, &sc);
2824 2498
2825 reclaim_state->reclaimed_slab = 0; 2499 reclaim_state->reclaimed_slab = 0;
2826 nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages); 2500 nr_slab = shrink_slab(&shrink, sc.nr_scanned, lru_pages);
@@ -2877,7 +2551,7 @@ loop_again:
2877 * OK, kswapd is getting into trouble. Take a nap, then take 2551 * OK, kswapd is getting into trouble. Take a nap, then take
2878 * another pass across the zones. 2552 * another pass across the zones.
2879 */ 2553 */
2880 if (total_scanned && (priority < DEF_PRIORITY - 2)) { 2554 if (total_scanned && (sc.priority < DEF_PRIORITY - 2)) {
2881 if (has_under_min_watermark_zone) 2555 if (has_under_min_watermark_zone)
2882 count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT); 2556 count_vm_event(KSWAPD_SKIP_CONGESTION_WAIT);
2883 else 2557 else
@@ -2892,7 +2566,7 @@ loop_again:
2892 */ 2566 */
2893 if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX) 2567 if (sc.nr_reclaimed >= SWAP_CLUSTER_MAX)
2894 break; 2568 break;
2895 } 2569 } while (--sc.priority >= 0);
2896out: 2570out:
2897 2571
2898 /* 2572 /*
@@ -2942,7 +2616,8 @@ out:
2942 if (!populated_zone(zone)) 2616 if (!populated_zone(zone))
2943 continue; 2617 continue;
2944 2618
2945 if (zone->all_unreclaimable && priority != DEF_PRIORITY) 2619 if (zone->all_unreclaimable &&
2620 sc.priority != DEF_PRIORITY)
2946 continue; 2621 continue;
2947 2622
2948 /* Would compaction fail due to lack of free memory? */ 2623 /* Would compaction fail due to lack of free memory? */
@@ -3209,6 +2884,7 @@ unsigned long shrink_all_memory(unsigned long nr_to_reclaim)
3209 .nr_to_reclaim = nr_to_reclaim, 2884 .nr_to_reclaim = nr_to_reclaim,
3210 .hibernation_mode = 1, 2885 .hibernation_mode = 1,
3211 .order = 0, 2886 .order = 0,
2887 .priority = DEF_PRIORITY,
3212 }; 2888 };
3213 struct shrink_control shrink = { 2889 struct shrink_control shrink = {
3214 .gfp_mask = sc.gfp_mask, 2890 .gfp_mask = sc.gfp_mask,
@@ -3386,7 +3062,6 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3386 const unsigned long nr_pages = 1 << order; 3062 const unsigned long nr_pages = 1 << order;
3387 struct task_struct *p = current; 3063 struct task_struct *p = current;
3388 struct reclaim_state reclaim_state; 3064 struct reclaim_state reclaim_state;
3389 int priority;
3390 struct scan_control sc = { 3065 struct scan_control sc = {
3391 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE), 3066 .may_writepage = !!(zone_reclaim_mode & RECLAIM_WRITE),
3392 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP), 3067 .may_unmap = !!(zone_reclaim_mode & RECLAIM_SWAP),
@@ -3395,6 +3070,7 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3395 SWAP_CLUSTER_MAX), 3070 SWAP_CLUSTER_MAX),
3396 .gfp_mask = gfp_mask, 3071 .gfp_mask = gfp_mask,
3397 .order = order, 3072 .order = order,
3073 .priority = ZONE_RECLAIM_PRIORITY,
3398 }; 3074 };
3399 struct shrink_control shrink = { 3075 struct shrink_control shrink = {
3400 .gfp_mask = sc.gfp_mask, 3076 .gfp_mask = sc.gfp_mask,
@@ -3417,11 +3093,9 @@ static int __zone_reclaim(struct zone *zone, gfp_t gfp_mask, unsigned int order)
3417 * Free memory by calling shrink zone with increasing 3093 * Free memory by calling shrink zone with increasing
3418 * priorities until we have enough memory freed. 3094 * priorities until we have enough memory freed.
3419 */ 3095 */
3420 priority = ZONE_RECLAIM_PRIORITY;
3421 do { 3096 do {
3422 shrink_zone(priority, zone, &sc); 3097 shrink_zone(zone, &sc);
3423 priority--; 3098 } while (sc.nr_reclaimed < nr_pages && --sc.priority >= 0);
3424 } while (priority >= 0 && sc.nr_reclaimed < nr_pages);
3425 } 3099 }
3426 3100
3427 nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE); 3101 nr_slab_pages0 = zone_page_state(zone, NR_SLAB_RECLAIMABLE);
@@ -3536,7 +3210,7 @@ int page_evictable(struct page *page, struct vm_area_struct *vma)
3536 if (mapping_unevictable(page_mapping(page))) 3210 if (mapping_unevictable(page_mapping(page)))
3537 return 0; 3211 return 0;
3538 3212
3539 if (PageMlocked(page) || (vma && is_mlocked_vma(vma, page))) 3213 if (PageMlocked(page) || (vma && mlocked_vma_newpage(vma, page)))
3540 return 0; 3214 return 0;
3541 3215
3542 return 1; 3216 return 1;
@@ -3572,6 +3246,7 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
3572 zone = pagezone; 3246 zone = pagezone;
3573 spin_lock_irq(&zone->lru_lock); 3247 spin_lock_irq(&zone->lru_lock);
3574 } 3248 }
3249 lruvec = mem_cgroup_page_lruvec(page, zone);
3575 3250
3576 if (!PageLRU(page) || !PageUnevictable(page)) 3251 if (!PageLRU(page) || !PageUnevictable(page))
3577 continue; 3252 continue;
@@ -3581,11 +3256,8 @@ void check_move_unevictable_pages(struct page **pages, int nr_pages)
3581 3256
3582 VM_BUG_ON(PageActive(page)); 3257 VM_BUG_ON(PageActive(page));
3583 ClearPageUnevictable(page); 3258 ClearPageUnevictable(page);
3584 __dec_zone_state(zone, NR_UNEVICTABLE); 3259 del_page_from_lru_list(page, lruvec, LRU_UNEVICTABLE);
3585 lruvec = mem_cgroup_lru_move_lists(zone, page, 3260 add_page_to_lru_list(page, lruvec, lru);
3586 LRU_UNEVICTABLE, lru);
3587 list_move(&page->lru, &lruvec->lists[lru]);
3588 __inc_zone_state(zone, NR_INACTIVE_ANON + lru);
3589 pgrescued++; 3261 pgrescued++;
3590 } 3262 }
3591 } 3263 }
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 7db1b9bab492..1bbbbd9776ad 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -613,6 +613,9 @@ static char * const migratetype_names[MIGRATE_TYPES] = {
613 "Reclaimable", 613 "Reclaimable",
614 "Movable", 614 "Movable",
615 "Reserve", 615 "Reserve",
616#ifdef CONFIG_CMA
617 "CMA",
618#endif
616 "Isolate", 619 "Isolate",
617}; 620};
618 621
@@ -1220,7 +1223,6 @@ module_init(setup_vmstat)
1220#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION) 1223#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_COMPACTION)
1221#include <linux/debugfs.h> 1224#include <linux/debugfs.h>
1222 1225
1223static struct dentry *extfrag_debug_root;
1224 1226
1225/* 1227/*
1226 * Return an index indicating how much of the available free memory is 1228 * Return an index indicating how much of the available free memory is
@@ -1358,19 +1360,24 @@ static const struct file_operations extfrag_file_ops = {
1358 1360
1359static int __init extfrag_debug_init(void) 1361static int __init extfrag_debug_init(void)
1360{ 1362{
1363 struct dentry *extfrag_debug_root;
1364
1361 extfrag_debug_root = debugfs_create_dir("extfrag", NULL); 1365 extfrag_debug_root = debugfs_create_dir("extfrag", NULL);
1362 if (!extfrag_debug_root) 1366 if (!extfrag_debug_root)
1363 return -ENOMEM; 1367 return -ENOMEM;
1364 1368
1365 if (!debugfs_create_file("unusable_index", 0444, 1369 if (!debugfs_create_file("unusable_index", 0444,
1366 extfrag_debug_root, NULL, &unusable_file_ops)) 1370 extfrag_debug_root, NULL, &unusable_file_ops))
1367 return -ENOMEM; 1371 goto fail;
1368 1372
1369 if (!debugfs_create_file("extfrag_index", 0444, 1373 if (!debugfs_create_file("extfrag_index", 0444,
1370 extfrag_debug_root, NULL, &extfrag_file_ops)) 1374 extfrag_debug_root, NULL, &extfrag_file_ops))
1371 return -ENOMEM; 1375 goto fail;
1372 1376
1373 return 0; 1377 return 0;
1378fail:
1379 debugfs_remove_recursive(extfrag_debug_root);
1380 return -ENOMEM;
1374} 1381}
1375 1382
1376module_init(extfrag_debug_init); 1383module_init(extfrag_debug_init);
diff --git a/net/ipv4/tcp_memcontrol.c b/net/ipv4/tcp_memcontrol.c
index 151703791bb0..b6f3583ddfe8 100644
--- a/net/ipv4/tcp_memcontrol.c
+++ b/net/ipv4/tcp_memcontrol.c
@@ -74,9 +74,6 @@ void tcp_destroy_cgroup(struct mem_cgroup *memcg)
74 percpu_counter_destroy(&tcp->tcp_sockets_allocated); 74 percpu_counter_destroy(&tcp->tcp_sockets_allocated);
75 75
76 val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT); 76 val = res_counter_read_u64(&tcp->tcp_memory_allocated, RES_LIMIT);
77
78 if (val != RESOURCE_MAX)
79 static_key_slow_dec(&memcg_socket_limit_enabled);
80} 77}
81EXPORT_SYMBOL(tcp_destroy_cgroup); 78EXPORT_SYMBOL(tcp_destroy_cgroup);
82 79
@@ -107,10 +104,33 @@ static int tcp_update_limit(struct mem_cgroup *memcg, u64 val)
107 tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT, 104 tcp->tcp_prot_mem[i] = min_t(long, val >> PAGE_SHIFT,
108 net->ipv4.sysctl_tcp_mem[i]); 105 net->ipv4.sysctl_tcp_mem[i]);
109 106
110 if (val == RESOURCE_MAX && old_lim != RESOURCE_MAX) 107 if (val == RESOURCE_MAX)
111 static_key_slow_dec(&memcg_socket_limit_enabled); 108 clear_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
112 else if (old_lim == RESOURCE_MAX && val != RESOURCE_MAX) 109 else if (val != RESOURCE_MAX) {
113 static_key_slow_inc(&memcg_socket_limit_enabled); 110 /*
111 * The active bit needs to be written after the static_key
112 * update. This is what guarantees that the socket activation
113 * function is the last one to run. See sock_update_memcg() for
114 * details, and note that we don't mark any socket as belonging
115 * to this memcg until that flag is up.
116 *
117 * We need to do this, because static_keys will span multiple
118 * sites, but we can't control their order. If we mark a socket
119 * as accounted, but the accounting functions are not patched in
120 * yet, we'll lose accounting.
121 *
122 * We never race with the readers in sock_update_memcg(),
123 * because when this value change, the code to process it is not
124 * patched in yet.
125 *
126 * The activated bit is used to guarantee that no two writers
127 * will do the update in the same memcg. Without that, we can't
128 * properly shutdown the static key.
129 */
130 if (!test_and_set_bit(MEMCG_SOCK_ACTIVATED, &cg_proto->flags))
131 static_key_slow_inc(&memcg_socket_limit_enabled);
132 set_bit(MEMCG_SOCK_ACTIVE, &cg_proto->flags);
133 }
114 134
115 return 0; 135 return 0;
116} 136}
diff --git a/net/sunrpc/clnt.c b/net/sunrpc/clnt.c
index 7fee13b331d1..f56f045778ae 100644
--- a/net/sunrpc/clnt.c
+++ b/net/sunrpc/clnt.c
@@ -1286,6 +1286,8 @@ call_reserveresult(struct rpc_task *task)
1286 } 1286 }
1287 1287
1288 switch (status) { 1288 switch (status) {
1289 case -ENOMEM:
1290 rpc_delay(task, HZ >> 2);
1289 case -EAGAIN: /* woken up; retry */ 1291 case -EAGAIN: /* woken up; retry */
1290 task->tk_action = call_reserve; 1292 task->tk_action = call_reserve;
1291 return; 1293 return;
diff --git a/net/sunrpc/rpc_pipe.c b/net/sunrpc/rpc_pipe.c
index fd2423991c2d..04040476082e 100644
--- a/net/sunrpc/rpc_pipe.c
+++ b/net/sunrpc/rpc_pipe.c
@@ -120,7 +120,7 @@ EXPORT_SYMBOL_GPL(rpc_pipe_generic_upcall);
120 120
121/** 121/**
122 * rpc_queue_upcall - queue an upcall message to userspace 122 * rpc_queue_upcall - queue an upcall message to userspace
123 * @inode: inode of upcall pipe on which to queue given message 123 * @pipe: upcall pipe on which to queue given message
124 * @msg: message to queue 124 * @msg: message to queue
125 * 125 *
126 * Call with an @inode created by rpc_mkpipe() to queue an upcall. 126 * Call with an @inode created by rpc_mkpipe() to queue an upcall.
@@ -819,9 +819,7 @@ static int rpc_rmdir_depopulate(struct dentry *dentry,
819 * @parent: dentry of directory to create new "pipe" in 819 * @parent: dentry of directory to create new "pipe" in
820 * @name: name of pipe 820 * @name: name of pipe
821 * @private: private data to associate with the pipe, for the caller's use 821 * @private: private data to associate with the pipe, for the caller's use
822 * @ops: operations defining the behavior of the pipe: upcall, downcall, 822 * @pipe: &rpc_pipe containing input parameters
823 * release_pipe, open_pipe, and destroy_msg.
824 * @flags: rpc_pipe flags
825 * 823 *
826 * Data is made available for userspace to read by calls to 824 * Data is made available for userspace to read by calls to
827 * rpc_queue_upcall(). The actual reads will result in calls to 825 * rpc_queue_upcall(). The actual reads will result in calls to
@@ -943,7 +941,7 @@ struct dentry *rpc_create_client_dir(struct dentry *dentry,
943 941
944/** 942/**
945 * rpc_remove_client_dir - Remove a directory created with rpc_create_client_dir() 943 * rpc_remove_client_dir - Remove a directory created with rpc_create_client_dir()
946 * @clnt: rpc client 944 * @dentry: dentry for the pipe
947 */ 945 */
948int rpc_remove_client_dir(struct dentry *dentry) 946int rpc_remove_client_dir(struct dentry *dentry)
949{ 947{
@@ -1115,7 +1113,7 @@ rpc_fill_super(struct super_block *sb, void *data, int silent)
1115 sb->s_op = &s_ops; 1113 sb->s_op = &s_ops;
1116 sb->s_time_gran = 1; 1114 sb->s_time_gran = 1;
1117 1115
1118 inode = rpc_get_inode(sb, S_IFDIR | 0755); 1116 inode = rpc_get_inode(sb, S_IFDIR | S_IRUGO | S_IXUGO);
1119 sb->s_root = root = d_make_root(inode); 1117 sb->s_root = root = d_make_root(inode);
1120 if (!root) 1118 if (!root)
1121 return -ENOMEM; 1119 return -ENOMEM;
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index 78ac39fd9fe7..3c0653439f3d 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -394,6 +394,7 @@ static int rpcb_register_call(struct rpc_clnt *clnt, struct rpc_message *msg)
394 394
395/** 395/**
396 * rpcb_register - set or unset a port registration with the local rpcbind svc 396 * rpcb_register - set or unset a port registration with the local rpcbind svc
397 * @net: target network namespace
397 * @prog: RPC program number to bind 398 * @prog: RPC program number to bind
398 * @vers: RPC version number to bind 399 * @vers: RPC version number to bind
399 * @prot: transport protocol to register 400 * @prot: transport protocol to register
@@ -521,6 +522,7 @@ static int rpcb_unregister_all_protofamilies(struct sunrpc_net *sn,
521 522
522/** 523/**
523 * rpcb_v4_register - set or unset a port registration with the local rpcbind 524 * rpcb_v4_register - set or unset a port registration with the local rpcbind
525 * @net: target network namespace
524 * @program: RPC program number of service to (un)register 526 * @program: RPC program number of service to (un)register
525 * @version: RPC version number of service to (un)register 527 * @version: RPC version number of service to (un)register
526 * @address: address family, IP address, and port to (un)register 528 * @address: address family, IP address, and port to (un)register
diff --git a/net/sunrpc/xprt.c b/net/sunrpc/xprt.c
index 6fe2dcead150..3c83035cdaa9 100644
--- a/net/sunrpc/xprt.c
+++ b/net/sunrpc/xprt.c
@@ -979,20 +979,21 @@ static void xprt_alloc_slot(struct rpc_task *task)
979 list_del(&req->rq_list); 979 list_del(&req->rq_list);
980 goto out_init_req; 980 goto out_init_req;
981 } 981 }
982 req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT); 982 req = xprt_dynamic_alloc_slot(xprt, GFP_NOWAIT|__GFP_NOWARN);
983 if (!IS_ERR(req)) 983 if (!IS_ERR(req))
984 goto out_init_req; 984 goto out_init_req;
985 switch (PTR_ERR(req)) { 985 switch (PTR_ERR(req)) {
986 case -ENOMEM: 986 case -ENOMEM:
987 rpc_delay(task, HZ >> 2);
988 dprintk("RPC: dynamic allocation of request slot " 987 dprintk("RPC: dynamic allocation of request slot "
989 "failed! Retrying\n"); 988 "failed! Retrying\n");
989 task->tk_status = -ENOMEM;
990 break; 990 break;
991 case -EAGAIN: 991 case -EAGAIN:
992 rpc_sleep_on(&xprt->backlog, task, NULL); 992 rpc_sleep_on(&xprt->backlog, task, NULL);
993 dprintk("RPC: waiting for request slot\n"); 993 dprintk("RPC: waiting for request slot\n");
994 default:
995 task->tk_status = -EAGAIN;
994 } 996 }
995 task->tk_status = -EAGAIN;
996 return; 997 return;
997out_init_req: 998out_init_req:
998 task->tk_status = 0; 999 task->tk_status = 0;
diff --git a/scripts/coccinelle/misc/ifaddr.cocci b/scripts/coccinelle/misc/ifaddr.cocci
new file mode 100644
index 000000000000..3e4089a77000
--- /dev/null
+++ b/scripts/coccinelle/misc/ifaddr.cocci
@@ -0,0 +1,35 @@
1/// the address of a variable or field is non-zero is likely always to bo
2/// non-zero
3///
4// Confidence: High
5// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6. GPLv2.
6// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2.
7// URL: http://coccinelle.lip6.fr/
8// Comments:
9// Options: -no_includes -include_headers
10
11virtual org
12virtual report
13virtual context
14
15@r@
16expression x;
17statement S1,S2;
18position p;
19@@
20
21*if@p (&x)
22 S1 else S2
23
24@script:python depends on org@
25p << r.p;
26@@
27
28cocci.print_main("test of a variable/field address",p)
29
30@script:python depends on report@
31p << r.p;
32@@
33
34msg = "ERROR: test of a variable/field address"
35coccilib.report.print_report(p[0],msg)
diff --git a/scripts/coccinelle/misc/noderef.cocci b/scripts/coccinelle/misc/noderef.cocci
new file mode 100644
index 000000000000..c1707214e602
--- /dev/null
+++ b/scripts/coccinelle/misc/noderef.cocci
@@ -0,0 +1,65 @@
1/// sizeof when applied to a pointer typed expression gives the size of
2/// the pointer
3///
4// Confidence: High
5// Copyright: (C) 2012 Julia Lawall, INRIA/LIP6. GPLv2.
6// Copyright: (C) 2012 Gilles Muller, INRIA/LiP6. GPLv2.
7// URL: http://coccinelle.lip6.fr/
8// Comments:
9// Options: -no_includes -include_headers
10
11virtual org
12virtual report
13virtual context
14virtual patch
15
16@depends on patch@
17expression *x;
18expression f;
19type T;
20@@
21
22(
23x = <+... sizeof(
24- x
25+ *x
26 ) ...+>
27|
28f(...,(T)(x),...,sizeof(
29- x
30+ *x
31 ),...)
32|
33f(...,sizeof(x),...,(T)(
34- x
35+ *x
36 ),...)
37)
38
39@r depends on !patch@
40expression *x;
41expression f;
42position p;
43type T;
44@@
45
46(
47*x = <+... sizeof@p(x) ...+>
48|
49*f(...,(T)(x),...,sizeof@p(x),...)
50|
51*f(...,sizeof@p(x),...,(T)(x),...)
52)
53
54@script:python depends on org@
55p << r.p;
56@@
57
58cocci.print_main("application of sizeof to pointer",p)
59
60@script:python depends on report@
61p << r.p;
62@@
63
64msg = "ERROR: application of sizeof to pointer"
65coccilib.report.print_report(p[0],msg)
diff --git a/scripts/config b/scripts/config
index a7c7c4b8e957..ed6653ef9702 100755
--- a/scripts/config
+++ b/scripts/config
@@ -107,7 +107,8 @@ while [ "$1" != "" ] ; do
107 ;; 107 ;;
108 108
109 --set-str) 109 --set-str)
110 set_var "CONFIG_$ARG" "CONFIG_$ARG=\"$1\"" 110 # sed swallows one level of escaping, so we need double-escaping
111 set_var "CONFIG_$ARG" "CONFIG_$ARG=\"${1//\"/\\\\\"}\""
111 shift 112 shift
112 ;; 113 ;;
113 114
@@ -124,9 +125,11 @@ while [ "$1" != "" ] ; do
124 if [ $? != 0 ] ; then 125 if [ $? != 0 ] ; then
125 echo undef 126 echo undef
126 else 127 else
127 V="${V/CONFIG_$ARG=/}" 128 V="${V/#CONFIG_$ARG=/}"
128 V="${V/\"/}" 129 V="${V/#\"/}"
129 echo "$V" 130 V="${V/%\"/}"
131 V="${V/\\\"/\"}"
132 echo "${V}"
130 fi 133 fi
131 fi 134 fi
132 ;; 135 ;;
diff --git a/scripts/kconfig/conf.c b/scripts/kconfig/conf.c
index f208f900ed3a..0dc4a2c779b1 100644
--- a/scripts/kconfig/conf.c
+++ b/scripts/kconfig/conf.c
@@ -574,8 +574,15 @@ int main(int ac, char **av)
574 case alldefconfig: 574 case alldefconfig:
575 case randconfig: 575 case randconfig:
576 name = getenv("KCONFIG_ALLCONFIG"); 576 name = getenv("KCONFIG_ALLCONFIG");
577 if (name && !stat(name, &tmpstat)) { 577 if (!name)
578 conf_read_simple(name, S_DEF_USER); 578 break;
579 if ((strcmp(name, "") != 0) && (strcmp(name, "1") != 0)) {
580 if (conf_read_simple(name, S_DEF_USER)) {
581 fprintf(stderr,
582 _("*** Can't read seed configuration \"%s\"!\n"),
583 name);
584 exit(1);
585 }
579 break; 586 break;
580 } 587 }
581 switch (input_mode) { 588 switch (input_mode) {
@@ -586,10 +593,13 @@ int main(int ac, char **av)
586 case randconfig: name = "allrandom.config"; break; 593 case randconfig: name = "allrandom.config"; break;
587 default: break; 594 default: break;
588 } 595 }
589 if (!stat(name, &tmpstat)) 596 if (conf_read_simple(name, S_DEF_USER) &&
590 conf_read_simple(name, S_DEF_USER); 597 conf_read_simple("all.config", S_DEF_USER)) {
591 else if (!stat("all.config", &tmpstat)) 598 fprintf(stderr,
592 conf_read_simple("all.config", S_DEF_USER); 599 _("*** KCONFIG_ALLCONFIG set, but no \"%s\" or \"all.config\" file found\n"),
600 name);
601 exit(1);
602 }
593 break; 603 break;
594 default: 604 default:
595 break; 605 break;
diff --git a/scripts/link-vmlinux.sh b/scripts/link-vmlinux.sh
new file mode 100644
index 000000000000..cd9c6c6bb4c9
--- /dev/null
+++ b/scripts/link-vmlinux.sh
@@ -0,0 +1,221 @@
1#!/bin/sh
2#
3# link vmlinux
4#
5# vmlinux is linked from the objects selected by $(KBUILD_VMLINUX_INIT) and
6# $(KBUILD_VMLINUX_MAIN). Most are built-in.o files from top-level directories
7# in the kernel tree, others are specified in arch/$(ARCH)/Makefile.
8# Ordering when linking is important, and $(KBUILD_VMLINUX_INIT) must be first.
9#
10# vmlinux
11# ^
12# |
13# +-< $(KBUILD_VMLINUX_INIT)
14# | +--< init/version.o + more
15# |
16# +--< $(KBUILD_VMLINUX_MAIN)
17# | +--< drivers/built-in.o mm/built-in.o + more
18# |
19# +-< ${kallsymso} (see description in KALLSYMS section)
20#
21# vmlinux version (uname -v) cannot be updated during normal
22# descending-into-subdirs phase since we do not yet know if we need to
23# update vmlinux.
24# Therefore this step is delayed until just before final link of vmlinux.
25#
26# System.map is generated to document addresses of all kernel symbols
27
28# Error out on error
29set -e
30
31# Nice output in kbuild format
32# Will be supressed by "make -s"
33info()
34{
35 if [ "${quiet}" != "silent_" ]; then
36 printf " %-7s %s\n" ${1} ${2}
37 fi
38}
39
40# Link of vmlinux.o used for section mismatch analysis
41# ${1} output file
42modpost_link()
43{
44 ${LD} ${LDFLAGS} -r -o ${1} ${KBUILD_VMLINUX_INIT} \
45 --start-group ${KBUILD_VMLINUX_MAIN} --end-group
46}
47
48# Link of vmlinux
49# ${1} - optional extra .o files
50# ${2} - output file
51vmlinux_link()
52{
53 local lds="${objtree}/${KBUILD_LDS}"
54
55 if [ "${SRCARCH}" != "um" ]; then
56 ${LD} ${LDFLAGS} ${LDFLAGS_vmlinux} -o ${2} \
57 -T ${lds} ${KBUILD_VMLINUX_INIT} \
58 --start-group ${KBUILD_VMLINUX_MAIN} --end-group ${1}
59 else
60 ${CC} ${CFLAGS_vmlinux} -o ${2} \
61 -Wl,-T,${lds} ${KBUILD_VMLINUX_INIT} \
62 -Wl,--start-group \
63 ${KBUILD_VMLINUX_MAIN} \
64 -Wl,--end-group \
65 -lutil ${1}
66 rm -f linux
67 fi
68}
69
70
71# Create ${2} .o file with all symbols from the ${1} object file
72kallsyms()
73{
74 info KSYM ${2}
75 local kallsymopt;
76
77 if [ -n "${CONFIG_KALLSYMS_ALL}" ]; then
78 kallsymopt=--all-symbols
79 fi
80
81 local aflags="${KBUILD_AFLAGS} ${KBUILD_AFLAGS_KERNEL} \
82 ${NOSTDINC_FLAGS} ${LINUXINCLUDE} ${KBUILD_CPPFLAGS}"
83
84 ${NM} -n ${1} | \
85 scripts/kallsyms ${kallsymopt} | \
86 ${CC} ${aflags} -c -o ${2} -x assembler-with-cpp -
87}
88
89# Create map file with all symbols from ${1}
90# See mksymap for additional details
91mksysmap()
92{
93 ${CONFIG_SHELL} "${srctree}/scripts/mksysmap" ${1} ${2}
94}
95
96sortextable()
97{
98 ${objtree}/scripts/sortextable ${1}
99}
100
101# Delete output files in case of error
102trap cleanup SIGHUP SIGINT SIGQUIT SIGTERM ERR
103cleanup()
104{
105 rm -f .old_version
106 rm -f .tmp_System.map
107 rm -f .tmp_kallsyms*
108 rm -f .tmp_version
109 rm -f .tmp_vmlinux*
110 rm -f System.map
111 rm -f vmlinux
112 rm -f vmlinux.o
113}
114
115#
116#
117# Use "make V=1" to debug this script
118case "${KBUILD_VERBOSE}" in
119*1*)
120 set -x
121 ;;
122esac
123
124if [ "$1" = "clean" ]; then
125 cleanup
126 exit 0
127fi
128
129# We need access to CONFIG_ symbols
130. ./.config
131
132#link vmlinux.o
133info LD vmlinux.o
134modpost_link vmlinux.o
135
136# modpost vmlinux.o to check for section mismatches
137${MAKE} -f "${srctree}/scripts/Makefile.modpost" vmlinux.o
138
139# Update version
140info GEN .version
141if [ ! -r .version ]; then
142 rm -f .version;
143 echo 1 >.version;
144else
145 mv .version .old_version;
146 expr 0$(cat .old_version) + 1 >.version;
147fi;
148
149# final build of init/
150${MAKE} -f "${srctree}/scripts/Makefile.build" obj=init
151
152kallsymso=""
153kallsyms_vmlinux=""
154if [ -n "${CONFIG_KALLSYMS}" ]; then
155
156 # kallsyms support
157 # Generate section listing all symbols and add it into vmlinux
158 # It's a three step process:
159 # 1) Link .tmp_vmlinux1 so it has all symbols and sections,
160 # but __kallsyms is empty.
161 # Running kallsyms on that gives us .tmp_kallsyms1.o with
162 # the right size
163 # 2) Link .tmp_vmlinux2 so it now has a __kallsyms section of
164 # the right size, but due to the added section, some
165 # addresses have shifted.
166 # From here, we generate a correct .tmp_kallsyms2.o
167 # 2a) We may use an extra pass as this has been necessary to
168 # woraround some alignment related bugs.
169 # KALLSYMS_EXTRA_PASS=1 is used to trigger this.
170 # 3) The correct ${kallsymso} is linked into the final vmlinux.
171 #
172 # a) Verify that the System.map from vmlinux matches the map from
173 # ${kallsymso}.
174
175 kallsymso=.tmp_kallsyms2.o
176 kallsyms_vmlinux=.tmp_vmlinux2
177
178 # step 1
179 vmlinux_link "" .tmp_vmlinux1
180 kallsyms .tmp_vmlinux1 .tmp_kallsyms1.o
181
182 # step 2
183 vmlinux_link .tmp_kallsyms1.o .tmp_vmlinux2
184 kallsyms .tmp_vmlinux2 .tmp_kallsyms2.o
185
186 # step 2a
187 if [ -n "${KALLSYMS_EXTRA_PASS}" ]; then
188 kallsymso=.tmp_kallsyms3.o
189 kallsyms_vmlinux=.tmp_vmlinux3
190
191 vmlinux_link .tmp_kallsyms2.o .tmp_vmlinux3
192
193 kallsyms .tmp_vmlinux3 .tmp_kallsyms3.o
194 fi
195fi
196
197info LD vmlinux
198vmlinux_link "${kallsymso}" vmlinux
199
200if [ -n "${CONFIG_BUILDTIME_EXTABLE_SORT}" ]; then
201 info SORTEX vmlinux
202 sortextable vmlinux
203fi
204
205info SYSMAP System.map
206mksysmap vmlinux System.map
207
208# step a (see comment above)
209if [ -n "${CONFIG_KALLSYMS}" ]; then
210 mksysmap ${kallsyms_vmlinux} .tmp_System.map
211
212 if ! cmp -s System.map .tmp_System.map; then
213 echo Inconsistent kallsyms data
214 echo echo Try "make KALLSYMS_EXTRA_PASS=1" as a workaround
215 cleanup
216 exit 1
217 fi
218fi
219
220# We made a new kernel - delete old version file
221rm -f .old_version
diff --git a/scripts/package/builddeb b/scripts/package/builddeb
index eee5f8ed2493..c95fdda58414 100644
--- a/scripts/package/builddeb
+++ b/scripts/package/builddeb
@@ -245,7 +245,7 @@ fi
245# Build header package 245# Build header package
246(cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles") 246(cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles")
247(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles") 247(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles")
248(cd $objtree; find .config Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles") 248(cd $objtree; find arch/$SRCARCH/include .config Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles")
249destdir=$kernel_headers_dir/usr/src/linux-headers-$version 249destdir=$kernel_headers_dir/usr/src/linux-headers-$version
250mkdir -p "$destdir" 250mkdir -p "$destdir"
251(cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -) 251(cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -)
diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c
index faedb1481b24..8f312fa6c282 100644
--- a/sound/core/pcm_lib.c
+++ b/sound/core/pcm_lib.c
@@ -313,9 +313,22 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
313 snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base; 313 snd_pcm_uframes_t old_hw_ptr, new_hw_ptr, hw_base;
314 snd_pcm_sframes_t hdelta, delta; 314 snd_pcm_sframes_t hdelta, delta;
315 unsigned long jdelta; 315 unsigned long jdelta;
316 unsigned long curr_jiffies;
317 struct timespec curr_tstamp;
316 318
317 old_hw_ptr = runtime->status->hw_ptr; 319 old_hw_ptr = runtime->status->hw_ptr;
320
321 /*
322 * group pointer, time and jiffies reads to allow for more
323 * accurate correlations/corrections.
324 * The values are stored at the end of this routine after
325 * corrections for hw_ptr position
326 */
318 pos = substream->ops->pointer(substream); 327 pos = substream->ops->pointer(substream);
328 curr_jiffies = jiffies;
329 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
330 snd_pcm_gettime(runtime, (struct timespec *)&curr_tstamp);
331
319 if (pos == SNDRV_PCM_POS_XRUN) { 332 if (pos == SNDRV_PCM_POS_XRUN) {
320 xrun(substream); 333 xrun(substream);
321 return -EPIPE; 334 return -EPIPE;
@@ -343,7 +356,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
343 delta = runtime->hw_ptr_interrupt + runtime->period_size; 356 delta = runtime->hw_ptr_interrupt + runtime->period_size;
344 if (delta > new_hw_ptr) { 357 if (delta > new_hw_ptr) {
345 /* check for double acknowledged interrupts */ 358 /* check for double acknowledged interrupts */
346 hdelta = jiffies - runtime->hw_ptr_jiffies; 359 hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
347 if (hdelta > runtime->hw_ptr_buffer_jiffies/2) { 360 if (hdelta > runtime->hw_ptr_buffer_jiffies/2) {
348 hw_base += runtime->buffer_size; 361 hw_base += runtime->buffer_size;
349 if (hw_base >= runtime->boundary) 362 if (hw_base >= runtime->boundary)
@@ -388,7 +401,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
388 * Without regular period interrupts, we have to check 401 * Without regular period interrupts, we have to check
389 * the elapsed time to detect xruns. 402 * the elapsed time to detect xruns.
390 */ 403 */
391 jdelta = jiffies - runtime->hw_ptr_jiffies; 404 jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
392 if (jdelta < runtime->hw_ptr_buffer_jiffies / 2) 405 if (jdelta < runtime->hw_ptr_buffer_jiffies / 2)
393 goto no_delta_check; 406 goto no_delta_check;
394 hdelta = jdelta - delta * HZ / runtime->rate; 407 hdelta = jdelta - delta * HZ / runtime->rate;
@@ -430,7 +443,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
430 if (hdelta < runtime->delay) 443 if (hdelta < runtime->delay)
431 goto no_jiffies_check; 444 goto no_jiffies_check;
432 hdelta -= runtime->delay; 445 hdelta -= runtime->delay;
433 jdelta = jiffies - runtime->hw_ptr_jiffies; 446 jdelta = curr_jiffies - runtime->hw_ptr_jiffies;
434 if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) { 447 if (((hdelta * HZ) / runtime->rate) > jdelta + HZ/100) {
435 delta = jdelta / 448 delta = jdelta /
436 (((runtime->period_size * HZ) / runtime->rate) 449 (((runtime->period_size * HZ) / runtime->rate)
@@ -492,9 +505,9 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
492 } 505 }
493 runtime->hw_ptr_base = hw_base; 506 runtime->hw_ptr_base = hw_base;
494 runtime->status->hw_ptr = new_hw_ptr; 507 runtime->status->hw_ptr = new_hw_ptr;
495 runtime->hw_ptr_jiffies = jiffies; 508 runtime->hw_ptr_jiffies = curr_jiffies;
496 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE) 509 if (runtime->tstamp_mode == SNDRV_PCM_TSTAMP_ENABLE)
497 snd_pcm_gettime(runtime, (struct timespec *)&runtime->status->tstamp); 510 runtime->status->tstamp = curr_tstamp;
498 511
499 return snd_pcm_update_state(substream, runtime); 512 return snd_pcm_update_state(substream, runtime);
500} 513}
diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
index eb09a3348325..41ca803a1fff 100644
--- a/sound/pci/hda/hda_codec.c
+++ b/sound/pci/hda/hda_codec.c
@@ -2239,24 +2239,50 @@ void snd_hda_ctls_clear(struct hda_codec *codec)
2239/* pseudo device locking 2239/* pseudo device locking
2240 * toggle card->shutdown to allow/disallow the device access (as a hack) 2240 * toggle card->shutdown to allow/disallow the device access (as a hack)
2241 */ 2241 */
2242static int hda_lock_devices(struct snd_card *card) 2242int snd_hda_lock_devices(struct hda_bus *bus)
2243{ 2243{
2244 struct snd_card *card = bus->card;
2245 struct hda_codec *codec;
2246
2244 spin_lock(&card->files_lock); 2247 spin_lock(&card->files_lock);
2245 if (card->shutdown) { 2248 if (card->shutdown)
2246 spin_unlock(&card->files_lock); 2249 goto err_unlock;
2247 return -EINVAL;
2248 }
2249 card->shutdown = 1; 2250 card->shutdown = 1;
2251 if (!list_empty(&card->ctl_files))
2252 goto err_clear;
2253
2254 list_for_each_entry(codec, &bus->codec_list, list) {
2255 int pcm;
2256 for (pcm = 0; pcm < codec->num_pcms; pcm++) {
2257 struct hda_pcm *cpcm = &codec->pcm_info[pcm];
2258 if (!cpcm->pcm)
2259 continue;
2260 if (cpcm->pcm->streams[0].substream_opened ||
2261 cpcm->pcm->streams[1].substream_opened)
2262 goto err_clear;
2263 }
2264 }
2250 spin_unlock(&card->files_lock); 2265 spin_unlock(&card->files_lock);
2251 return 0; 2266 return 0;
2267
2268 err_clear:
2269 card->shutdown = 0;
2270 err_unlock:
2271 spin_unlock(&card->files_lock);
2272 return -EINVAL;
2252} 2273}
2274EXPORT_SYMBOL_HDA(snd_hda_lock_devices);
2253 2275
2254static void hda_unlock_devices(struct snd_card *card) 2276void snd_hda_unlock_devices(struct hda_bus *bus)
2255{ 2277{
2278 struct snd_card *card = bus->card;
2279
2280 card = bus->card;
2256 spin_lock(&card->files_lock); 2281 spin_lock(&card->files_lock);
2257 card->shutdown = 0; 2282 card->shutdown = 0;
2258 spin_unlock(&card->files_lock); 2283 spin_unlock(&card->files_lock);
2259} 2284}
2285EXPORT_SYMBOL_HDA(snd_hda_unlock_devices);
2260 2286
2261/** 2287/**
2262 * snd_hda_codec_reset - Clear all objects assigned to the codec 2288 * snd_hda_codec_reset - Clear all objects assigned to the codec
@@ -2270,26 +2296,12 @@ static void hda_unlock_devices(struct snd_card *card)
2270 */ 2296 */
2271int snd_hda_codec_reset(struct hda_codec *codec) 2297int snd_hda_codec_reset(struct hda_codec *codec)
2272{ 2298{
2273 struct snd_card *card = codec->bus->card; 2299 struct hda_bus *bus = codec->bus;
2274 int i, pcm; 2300 struct snd_card *card = bus->card;
2301 int i;
2275 2302
2276 if (hda_lock_devices(card) < 0) 2303 if (snd_hda_lock_devices(bus) < 0)
2277 return -EBUSY;
2278 /* check whether the codec isn't used by any mixer or PCM streams */
2279 if (!list_empty(&card->ctl_files)) {
2280 hda_unlock_devices(card);
2281 return -EBUSY; 2304 return -EBUSY;
2282 }
2283 for (pcm = 0; pcm < codec->num_pcms; pcm++) {
2284 struct hda_pcm *cpcm = &codec->pcm_info[pcm];
2285 if (!cpcm->pcm)
2286 continue;
2287 if (cpcm->pcm->streams[0].substream_opened ||
2288 cpcm->pcm->streams[1].substream_opened) {
2289 hda_unlock_devices(card);
2290 return -EBUSY;
2291 }
2292 }
2293 2305
2294 /* OK, let it free */ 2306 /* OK, let it free */
2295 2307
@@ -2298,7 +2310,7 @@ int snd_hda_codec_reset(struct hda_codec *codec)
2298 codec->power_on = 0; 2310 codec->power_on = 0;
2299 codec->power_transition = 0; 2311 codec->power_transition = 0;
2300 codec->power_jiffies = jiffies; 2312 codec->power_jiffies = jiffies;
2301 flush_workqueue(codec->bus->workq); 2313 flush_workqueue(bus->workq);
2302#endif 2314#endif
2303 snd_hda_ctls_clear(codec); 2315 snd_hda_ctls_clear(codec);
2304 /* relase PCMs */ 2316 /* relase PCMs */
@@ -2306,7 +2318,7 @@ int snd_hda_codec_reset(struct hda_codec *codec)
2306 if (codec->pcm_info[i].pcm) { 2318 if (codec->pcm_info[i].pcm) {
2307 snd_device_free(card, codec->pcm_info[i].pcm); 2319 snd_device_free(card, codec->pcm_info[i].pcm);
2308 clear_bit(codec->pcm_info[i].device, 2320 clear_bit(codec->pcm_info[i].device,
2309 codec->bus->pcm_dev_bits); 2321 bus->pcm_dev_bits);
2310 } 2322 }
2311 } 2323 }
2312 if (codec->patch_ops.free) 2324 if (codec->patch_ops.free)
@@ -2331,7 +2343,7 @@ int snd_hda_codec_reset(struct hda_codec *codec)
2331 codec->owner = NULL; 2343 codec->owner = NULL;
2332 2344
2333 /* allow device access again */ 2345 /* allow device access again */
2334 hda_unlock_devices(card); 2346 snd_hda_unlock_devices(bus);
2335 return 0; 2347 return 0;
2336} 2348}
2337 2349
diff --git a/sound/pci/hda/hda_codec.h b/sound/pci/hda/hda_codec.h
index 54b52819fb47..4fc3960c8591 100644
--- a/sound/pci/hda/hda_codec.h
+++ b/sound/pci/hda/hda_codec.h
@@ -1023,6 +1023,9 @@ void snd_hda_codec_set_power_to_all(struct hda_codec *codec, hda_nid_t fg,
1023 unsigned int power_state, 1023 unsigned int power_state,
1024 bool eapd_workaround); 1024 bool eapd_workaround);
1025 1025
1026int snd_hda_lock_devices(struct hda_bus *bus);
1027void snd_hda_unlock_devices(struct hda_bus *bus);
1028
1026/* 1029/*
1027 * power management 1030 * power management
1028 */ 1031 */
diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c
index 4ab8102f87ea..2b6392be451c 100644
--- a/sound/pci/hda/hda_intel.c
+++ b/sound/pci/hda/hda_intel.c
@@ -53,6 +53,8 @@
53#endif 53#endif
54#include <sound/core.h> 54#include <sound/core.h>
55#include <sound/initval.h> 55#include <sound/initval.h>
56#include <linux/vgaarb.h>
57#include <linux/vga_switcheroo.h>
56#include "hda_codec.h" 58#include "hda_codec.h"
57 59
58 60
@@ -175,6 +177,13 @@ MODULE_DESCRIPTION("Intel HDA driver");
175#define SFX "hda-intel: " 177#define SFX "hda-intel: "
176#endif 178#endif
177 179
180#if defined(CONFIG_PM) && defined(CONFIG_VGA_SWITCHEROO)
181#ifdef CONFIG_SND_HDA_CODEC_HDMI
182#define SUPPORT_VGA_SWITCHEROO
183#endif
184#endif
185
186
178/* 187/*
179 * registers 188 * registers
180 */ 189 */
@@ -472,6 +481,12 @@ struct azx {
472 unsigned int probing :1; /* codec probing phase */ 481 unsigned int probing :1; /* codec probing phase */
473 unsigned int snoop:1; 482 unsigned int snoop:1;
474 unsigned int align_buffer_size:1; 483 unsigned int align_buffer_size:1;
484 unsigned int region_requested:1;
485
486 /* VGA-switcheroo setup */
487 unsigned int use_vga_switcheroo:1;
488 unsigned int init_failed:1; /* delayed init failed */
489 unsigned int disabled:1; /* disabled by VGA-switcher */
475 490
476 /* for debugging */ 491 /* for debugging */
477 unsigned int last_cmd[AZX_MAX_CODECS]; 492 unsigned int last_cmd[AZX_MAX_CODECS];
@@ -538,7 +553,20 @@ enum {
538#define AZX_DCAPS_PRESET_CTHDA \ 553#define AZX_DCAPS_PRESET_CTHDA \
539 (AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY) 554 (AZX_DCAPS_NO_MSI | AZX_DCAPS_POSFIX_LPIB | AZX_DCAPS_4K_BDLE_BOUNDARY)
540 555
541static char *driver_short_names[] __devinitdata = { 556/*
557 * VGA-switcher support
558 */
559#ifdef SUPPORT_VGA_SWITCHEROO
560#define DELAYED_INIT_MARK
561#define DELAYED_INITDATA_MARK
562#define use_vga_switcheroo(chip) ((chip)->use_vga_switcheroo)
563#else
564#define DELAYED_INIT_MARK __devinit
565#define DELAYED_INITDATA_MARK __devinitdata
566#define use_vga_switcheroo(chip) 0
567#endif
568
569static char *driver_short_names[] DELAYED_INITDATA_MARK = {
542 [AZX_DRIVER_ICH] = "HDA Intel", 570 [AZX_DRIVER_ICH] = "HDA Intel",
543 [AZX_DRIVER_PCH] = "HDA Intel PCH", 571 [AZX_DRIVER_PCH] = "HDA Intel PCH",
544 [AZX_DRIVER_SCH] = "HDA Intel MID", 572 [AZX_DRIVER_SCH] = "HDA Intel MID",
@@ -959,6 +987,8 @@ static int azx_send_cmd(struct hda_bus *bus, unsigned int val)
959{ 987{
960 struct azx *chip = bus->private_data; 988 struct azx *chip = bus->private_data;
961 989
990 if (chip->disabled)
991 return 0;
962 chip->last_cmd[azx_command_addr(val)] = val; 992 chip->last_cmd[azx_command_addr(val)] = val;
963 if (chip->single_cmd) 993 if (chip->single_cmd)
964 return azx_single_send_cmd(bus, val); 994 return azx_single_send_cmd(bus, val);
@@ -971,6 +1001,8 @@ static unsigned int azx_get_response(struct hda_bus *bus,
971 unsigned int addr) 1001 unsigned int addr)
972{ 1002{
973 struct azx *chip = bus->private_data; 1003 struct azx *chip = bus->private_data;
1004 if (chip->disabled)
1005 return 0;
974 if (chip->single_cmd) 1006 if (chip->single_cmd)
975 return azx_single_get_response(bus, addr); 1007 return azx_single_get_response(bus, addr);
976 else 1008 else
@@ -1236,6 +1268,11 @@ static irqreturn_t azx_interrupt(int irq, void *dev_id)
1236 1268
1237 spin_lock(&chip->reg_lock); 1269 spin_lock(&chip->reg_lock);
1238 1270
1271 if (chip->disabled) {
1272 spin_unlock(&chip->reg_lock);
1273 return IRQ_NONE;
1274 }
1275
1239 status = azx_readl(chip, INTSTS); 1276 status = azx_readl(chip, INTSTS);
1240 if (status == 0) { 1277 if (status == 0) {
1241 spin_unlock(&chip->reg_lock); 1278 spin_unlock(&chip->reg_lock);
@@ -1521,12 +1558,12 @@ static void azx_bus_reset(struct hda_bus *bus)
1521 */ 1558 */
1522 1559
1523/* number of codec slots for each chipset: 0 = default slots (i.e. 4) */ 1560/* number of codec slots for each chipset: 0 = default slots (i.e. 4) */
1524static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] __devinitdata = { 1561static unsigned int azx_max_codecs[AZX_NUM_DRIVERS] DELAYED_INITDATA_MARK = {
1525 [AZX_DRIVER_NVIDIA] = 8, 1562 [AZX_DRIVER_NVIDIA] = 8,
1526 [AZX_DRIVER_TERA] = 1, 1563 [AZX_DRIVER_TERA] = 1,
1527}; 1564};
1528 1565
1529static int __devinit azx_codec_create(struct azx *chip, const char *model) 1566static int DELAYED_INIT_MARK azx_codec_create(struct azx *chip, const char *model)
1530{ 1567{
1531 struct hda_bus_template bus_temp; 1568 struct hda_bus_template bus_temp;
1532 int c, codecs, err; 1569 int c, codecs, err;
@@ -2444,6 +2481,105 @@ static void azx_notifier_unregister(struct azx *chip)
2444 unregister_reboot_notifier(&chip->reboot_notifier); 2481 unregister_reboot_notifier(&chip->reboot_notifier);
2445} 2482}
2446 2483
2484static int DELAYED_INIT_MARK azx_first_init(struct azx *chip);
2485static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip);
2486
2487static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci);
2488
2489#ifdef SUPPORT_VGA_SWITCHEROO
2490static void azx_vs_set_state(struct pci_dev *pci,
2491 enum vga_switcheroo_state state)
2492{
2493 struct snd_card *card = pci_get_drvdata(pci);
2494 struct azx *chip = card->private_data;
2495 bool disabled;
2496
2497 if (chip->init_failed)
2498 return;
2499
2500 disabled = (state == VGA_SWITCHEROO_OFF);
2501 if (chip->disabled == disabled)
2502 return;
2503
2504 if (!chip->bus) {
2505 chip->disabled = disabled;
2506 if (!disabled) {
2507 snd_printk(KERN_INFO SFX
2508 "%s: Start delayed initialization\n",
2509 pci_name(chip->pci));
2510 if (azx_first_init(chip) < 0 ||
2511 azx_probe_continue(chip) < 0) {
2512 snd_printk(KERN_ERR SFX
2513 "%s: initialization error\n",
2514 pci_name(chip->pci));
2515 chip->init_failed = true;
2516 }
2517 }
2518 } else {
2519 snd_printk(KERN_INFO SFX
2520 "%s %s via VGA-switcheroo\n",
2521 disabled ? "Disabling" : "Enabling",
2522 pci_name(chip->pci));
2523 if (disabled) {
2524 azx_suspend(pci, PMSG_FREEZE);
2525 chip->disabled = true;
2526 snd_hda_lock_devices(chip->bus);
2527 } else {
2528 snd_hda_unlock_devices(chip->bus);
2529 chip->disabled = false;
2530 azx_resume(pci);
2531 }
2532 }
2533}
2534
2535static bool azx_vs_can_switch(struct pci_dev *pci)
2536{
2537 struct snd_card *card = pci_get_drvdata(pci);
2538 struct azx *chip = card->private_data;
2539
2540 if (chip->init_failed)
2541 return false;
2542 if (chip->disabled || !chip->bus)
2543 return true;
2544 if (snd_hda_lock_devices(chip->bus))
2545 return false;
2546 snd_hda_unlock_devices(chip->bus);
2547 return true;
2548}
2549
2550static void __devinit init_vga_switcheroo(struct azx *chip)
2551{
2552 struct pci_dev *p = get_bound_vga(chip->pci);
2553 if (p) {
2554 snd_printk(KERN_INFO SFX
2555 "%s: Handle VGA-switcheroo audio client\n",
2556 pci_name(chip->pci));
2557 chip->use_vga_switcheroo = 1;
2558 pci_dev_put(p);
2559 }
2560}
2561
2562static const struct vga_switcheroo_client_ops azx_vs_ops = {
2563 .set_gpu_state = azx_vs_set_state,
2564 .can_switch = azx_vs_can_switch,
2565};
2566
2567static int __devinit register_vga_switcheroo(struct azx *chip)
2568{
2569 if (!chip->use_vga_switcheroo)
2570 return 0;
2571 /* FIXME: currently only handling DIS controller
2572 * is there any machine with two switchable HDMI audio controllers?
2573 */
2574 return vga_switcheroo_register_audio_client(chip->pci, &azx_vs_ops,
2575 VGA_SWITCHEROO_DIS,
2576 chip->bus != NULL);
2577}
2578#else
2579#define init_vga_switcheroo(chip) /* NOP */
2580#define register_vga_switcheroo(chip) 0
2581#endif /* SUPPORT_VGA_SWITCHER */
2582
2447/* 2583/*
2448 * destructor 2584 * destructor
2449 */ 2585 */
@@ -2453,6 +2589,12 @@ static int azx_free(struct azx *chip)
2453 2589
2454 azx_notifier_unregister(chip); 2590 azx_notifier_unregister(chip);
2455 2591
2592 if (use_vga_switcheroo(chip)) {
2593 if (chip->disabled && chip->bus)
2594 snd_hda_unlock_devices(chip->bus);
2595 vga_switcheroo_unregister_client(chip->pci);
2596 }
2597
2456 if (chip->initialized) { 2598 if (chip->initialized) {
2457 azx_clear_irq_pending(chip); 2599 azx_clear_irq_pending(chip);
2458 for (i = 0; i < chip->num_streams; i++) 2600 for (i = 0; i < chip->num_streams; i++)
@@ -2482,7 +2624,8 @@ static int azx_free(struct azx *chip)
2482 mark_pages_wc(chip, &chip->posbuf, false); 2624 mark_pages_wc(chip, &chip->posbuf, false);
2483 snd_dma_free_pages(&chip->posbuf); 2625 snd_dma_free_pages(&chip->posbuf);
2484 } 2626 }
2485 pci_release_regions(chip->pci); 2627 if (chip->region_requested)
2628 pci_release_regions(chip->pci);
2486 pci_disable_device(chip->pci); 2629 pci_disable_device(chip->pci);
2487 kfree(chip->azx_dev); 2630 kfree(chip->azx_dev);
2488 kfree(chip); 2631 kfree(chip);
@@ -2496,6 +2639,45 @@ static int azx_dev_free(struct snd_device *device)
2496} 2639}
2497 2640
2498/* 2641/*
2642 * Check of disabled HDMI controller by vga-switcheroo
2643 */
2644static struct pci_dev __devinit *get_bound_vga(struct pci_dev *pci)
2645{
2646 struct pci_dev *p;
2647
2648 /* check only discrete GPU */
2649 switch (pci->vendor) {
2650 case PCI_VENDOR_ID_ATI:
2651 case PCI_VENDOR_ID_AMD:
2652 case PCI_VENDOR_ID_NVIDIA:
2653 if (pci->devfn == 1) {
2654 p = pci_get_domain_bus_and_slot(pci_domain_nr(pci->bus),
2655 pci->bus->number, 0);
2656 if (p) {
2657 if ((p->class >> 8) == PCI_CLASS_DISPLAY_VGA)
2658 return p;
2659 pci_dev_put(p);
2660 }
2661 }
2662 break;
2663 }
2664 return NULL;
2665}
2666
2667static bool __devinit check_hdmi_disabled(struct pci_dev *pci)
2668{
2669 bool vga_inactive = false;
2670 struct pci_dev *p = get_bound_vga(pci);
2671
2672 if (p) {
2673 if (vga_default_device() && p != vga_default_device())
2674 vga_inactive = true;
2675 pci_dev_put(p);
2676 }
2677 return vga_inactive;
2678}
2679
2680/*
2499 * white/black-listing for position_fix 2681 * white/black-listing for position_fix
2500 */ 2682 */
2501static struct snd_pci_quirk position_fix_list[] __devinitdata = { 2683static struct snd_pci_quirk position_fix_list[] __devinitdata = {
@@ -2672,12 +2854,11 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
2672 int dev, unsigned int driver_caps, 2854 int dev, unsigned int driver_caps,
2673 struct azx **rchip) 2855 struct azx **rchip)
2674{ 2856{
2675 struct azx *chip;
2676 int i, err;
2677 unsigned short gcap;
2678 static struct snd_device_ops ops = { 2857 static struct snd_device_ops ops = {
2679 .dev_free = azx_dev_free, 2858 .dev_free = azx_dev_free,
2680 }; 2859 };
2860 struct azx *chip;
2861 int err;
2681 2862
2682 *rchip = NULL; 2863 *rchip = NULL;
2683 2864
@@ -2703,6 +2884,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
2703 chip->dev_index = dev; 2884 chip->dev_index = dev;
2704 INIT_WORK(&chip->irq_pending_work, azx_irq_pending_work); 2885 INIT_WORK(&chip->irq_pending_work, azx_irq_pending_work);
2705 INIT_LIST_HEAD(&chip->pcm_list); 2886 INIT_LIST_HEAD(&chip->pcm_list);
2887 init_vga_switcheroo(chip);
2706 2888
2707 chip->position_fix[0] = chip->position_fix[1] = 2889 chip->position_fix[0] = chip->position_fix[1] =
2708 check_position_fix(chip, position_fix[dev]); 2890 check_position_fix(chip, position_fix[dev]);
@@ -2730,6 +2912,53 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
2730 } 2912 }
2731 } 2913 }
2732 2914
2915 if (check_hdmi_disabled(pci)) {
2916 snd_printk(KERN_INFO SFX "VGA controller for %s is disabled\n",
2917 pci_name(pci));
2918 if (use_vga_switcheroo(chip)) {
2919 snd_printk(KERN_INFO SFX "Delaying initialization\n");
2920 chip->disabled = true;
2921 goto ok;
2922 }
2923 kfree(chip);
2924 pci_disable_device(pci);
2925 return -ENXIO;
2926 }
2927
2928 err = azx_first_init(chip);
2929 if (err < 0) {
2930 azx_free(chip);
2931 return err;
2932 }
2933
2934 ok:
2935 err = register_vga_switcheroo(chip);
2936 if (err < 0) {
2937 snd_printk(KERN_ERR SFX
2938 "Error registering VGA-switcheroo client\n");
2939 azx_free(chip);
2940 return err;
2941 }
2942
2943 err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
2944 if (err < 0) {
2945 snd_printk(KERN_ERR SFX "Error creating device [card]!\n");
2946 azx_free(chip);
2947 return err;
2948 }
2949
2950 *rchip = chip;
2951 return 0;
2952}
2953
2954static int DELAYED_INIT_MARK azx_first_init(struct azx *chip)
2955{
2956 int dev = chip->dev_index;
2957 struct pci_dev *pci = chip->pci;
2958 struct snd_card *card = chip->card;
2959 int i, err;
2960 unsigned short gcap;
2961
2733#if BITS_PER_LONG != 64 2962#if BITS_PER_LONG != 64
2734 /* Fix up base address on ULI M5461 */ 2963 /* Fix up base address on ULI M5461 */
2735 if (chip->driver_type == AZX_DRIVER_ULI) { 2964 if (chip->driver_type == AZX_DRIVER_ULI) {
@@ -2741,28 +2970,23 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
2741#endif 2970#endif
2742 2971
2743 err = pci_request_regions(pci, "ICH HD audio"); 2972 err = pci_request_regions(pci, "ICH HD audio");
2744 if (err < 0) { 2973 if (err < 0)
2745 kfree(chip);
2746 pci_disable_device(pci);
2747 return err; 2974 return err;
2748 } 2975 chip->region_requested = 1;
2749 2976
2750 chip->addr = pci_resource_start(pci, 0); 2977 chip->addr = pci_resource_start(pci, 0);
2751 chip->remap_addr = pci_ioremap_bar(pci, 0); 2978 chip->remap_addr = pci_ioremap_bar(pci, 0);
2752 if (chip->remap_addr == NULL) { 2979 if (chip->remap_addr == NULL) {
2753 snd_printk(KERN_ERR SFX "ioremap error\n"); 2980 snd_printk(KERN_ERR SFX "ioremap error\n");
2754 err = -ENXIO; 2981 return -ENXIO;
2755 goto errout;
2756 } 2982 }
2757 2983
2758 if (chip->msi) 2984 if (chip->msi)
2759 if (pci_enable_msi(pci) < 0) 2985 if (pci_enable_msi(pci) < 0)
2760 chip->msi = 0; 2986 chip->msi = 0;
2761 2987
2762 if (azx_acquire_irq(chip, 0) < 0) { 2988 if (azx_acquire_irq(chip, 0) < 0)
2763 err = -EBUSY; 2989 return -EBUSY;
2764 goto errout;
2765 }
2766 2990
2767 pci_set_master(pci); 2991 pci_set_master(pci);
2768 synchronize_irq(chip->irq); 2992 synchronize_irq(chip->irq);
@@ -2841,7 +3065,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
2841 GFP_KERNEL); 3065 GFP_KERNEL);
2842 if (!chip->azx_dev) { 3066 if (!chip->azx_dev) {
2843 snd_printk(KERN_ERR SFX "cannot malloc azx_dev\n"); 3067 snd_printk(KERN_ERR SFX "cannot malloc azx_dev\n");
2844 goto errout; 3068 return -ENOMEM;
2845 } 3069 }
2846 3070
2847 for (i = 0; i < chip->num_streams; i++) { 3071 for (i = 0; i < chip->num_streams; i++) {
@@ -2851,7 +3075,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
2851 BDL_SIZE, &chip->azx_dev[i].bdl); 3075 BDL_SIZE, &chip->azx_dev[i].bdl);
2852 if (err < 0) { 3076 if (err < 0) {
2853 snd_printk(KERN_ERR SFX "cannot allocate BDL\n"); 3077 snd_printk(KERN_ERR SFX "cannot allocate BDL\n");
2854 goto errout; 3078 return -ENOMEM;
2855 } 3079 }
2856 mark_pages_wc(chip, &chip->azx_dev[i].bdl, true); 3080 mark_pages_wc(chip, &chip->azx_dev[i].bdl, true);
2857 } 3081 }
@@ -2861,13 +3085,13 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
2861 chip->num_streams * 8, &chip->posbuf); 3085 chip->num_streams * 8, &chip->posbuf);
2862 if (err < 0) { 3086 if (err < 0) {
2863 snd_printk(KERN_ERR SFX "cannot allocate posbuf\n"); 3087 snd_printk(KERN_ERR SFX "cannot allocate posbuf\n");
2864 goto errout; 3088 return -ENOMEM;
2865 } 3089 }
2866 mark_pages_wc(chip, &chip->posbuf, true); 3090 mark_pages_wc(chip, &chip->posbuf, true);
2867 /* allocate CORB/RIRB */ 3091 /* allocate CORB/RIRB */
2868 err = azx_alloc_cmd_io(chip); 3092 err = azx_alloc_cmd_io(chip);
2869 if (err < 0) 3093 if (err < 0)
2870 goto errout; 3094 return err;
2871 3095
2872 /* initialize streams */ 3096 /* initialize streams */
2873 azx_init_stream(chip); 3097 azx_init_stream(chip);
@@ -2879,14 +3103,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
2879 /* codec detection */ 3103 /* codec detection */
2880 if (!chip->codec_mask) { 3104 if (!chip->codec_mask) {
2881 snd_printk(KERN_ERR SFX "no codecs found!\n"); 3105 snd_printk(KERN_ERR SFX "no codecs found!\n");
2882 err = -ENODEV; 3106 return -ENODEV;
2883 goto errout;
2884 }
2885
2886 err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, chip, &ops);
2887 if (err <0) {
2888 snd_printk(KERN_ERR SFX "Error creating device [card]!\n");
2889 goto errout;
2890 } 3107 }
2891 3108
2892 strcpy(card->driver, "HDA-Intel"); 3109 strcpy(card->driver, "HDA-Intel");
@@ -2896,12 +3113,7 @@ static int __devinit azx_create(struct snd_card *card, struct pci_dev *pci,
2896 "%s at 0x%lx irq %i", 3113 "%s at 0x%lx irq %i",
2897 card->shortname, chip->addr, chip->irq); 3114 card->shortname, chip->addr, chip->irq);
2898 3115
2899 *rchip = chip;
2900 return 0; 3116 return 0;
2901
2902 errout:
2903 azx_free(chip);
2904 return err;
2905} 3117}
2906 3118
2907static void power_down_all_codecs(struct azx *chip) 3119static void power_down_all_codecs(struct azx *chip)
@@ -2946,6 +3158,27 @@ static int __devinit azx_probe(struct pci_dev *pci,
2946 goto out_free; 3158 goto out_free;
2947 card->private_data = chip; 3159 card->private_data = chip;
2948 3160
3161 if (!chip->disabled) {
3162 err = azx_probe_continue(chip);
3163 if (err < 0)
3164 goto out_free;
3165 }
3166
3167 pci_set_drvdata(pci, card);
3168
3169 dev++;
3170 return 0;
3171
3172out_free:
3173 snd_card_free(card);
3174 return err;
3175}
3176
3177static int DELAYED_INIT_MARK azx_probe_continue(struct azx *chip)
3178{
3179 int dev = chip->dev_index;
3180 int err;
3181
2949#ifdef CONFIG_SND_HDA_INPUT_BEEP 3182#ifdef CONFIG_SND_HDA_INPUT_BEEP
2950 chip->beep_mode = beep_mode[dev]; 3183 chip->beep_mode = beep_mode[dev];
2951#endif 3184#endif
@@ -2979,25 +3212,26 @@ static int __devinit azx_probe(struct pci_dev *pci,
2979 if (err < 0) 3212 if (err < 0)
2980 goto out_free; 3213 goto out_free;
2981 3214
2982 err = snd_card_register(card); 3215 err = snd_card_register(chip->card);
2983 if (err < 0) 3216 if (err < 0)
2984 goto out_free; 3217 goto out_free;
2985 3218
2986 pci_set_drvdata(pci, card);
2987 chip->running = 1; 3219 chip->running = 1;
2988 power_down_all_codecs(chip); 3220 power_down_all_codecs(chip);
2989 azx_notifier_register(chip); 3221 azx_notifier_register(chip);
2990 3222
2991 dev++; 3223 return 0;
2992 return err; 3224
2993out_free: 3225out_free:
2994 snd_card_free(card); 3226 chip->init_failed = 1;
2995 return err; 3227 return err;
2996} 3228}
2997 3229
2998static void __devexit azx_remove(struct pci_dev *pci) 3230static void __devexit azx_remove(struct pci_dev *pci)
2999{ 3231{
3000 snd_card_free(pci_get_drvdata(pci)); 3232 struct snd_card *card = pci_get_drvdata(pci);
3233 if (card)
3234 snd_card_free(card);
3001 pci_set_drvdata(pci, NULL); 3235 pci_set_drvdata(pci, NULL);
3002} 3236}
3003 3237
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index ff71dcef08ef..224410e8e9e7 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -2368,6 +2368,7 @@ static struct alc_codec_rename_table rename_tbl[] = {
2368 { 0x10ec0269, 0xffff, 0xa023, "ALC259" }, 2368 { 0x10ec0269, 0xffff, 0xa023, "ALC259" },
2369 { 0x10ec0269, 0xffff, 0x6023, "ALC281X" }, 2369 { 0x10ec0269, 0xffff, 0x6023, "ALC281X" },
2370 { 0x10ec0269, 0x00f0, 0x0020, "ALC269VC" }, 2370 { 0x10ec0269, 0x00f0, 0x0020, "ALC269VC" },
2371 { 0x10ec0269, 0x00f0, 0x0030, "ALC269VD" },
2371 { 0x10ec0887, 0x00f0, 0x0030, "ALC887-VD" }, 2372 { 0x10ec0887, 0x00f0, 0x0030, "ALC887-VD" },
2372 { 0x10ec0888, 0x00f0, 0x0030, "ALC888-VD" }, 2373 { 0x10ec0888, 0x00f0, 0x0030, "ALC888-VD" },
2373 { 0x10ec0888, 0xf0f0, 0x3020, "ALC886" }, 2374 { 0x10ec0888, 0xf0f0, 0x3020, "ALC886" },
@@ -5614,6 +5615,7 @@ enum {
5614 ALC269_TYPE_ALC269VA, 5615 ALC269_TYPE_ALC269VA,
5615 ALC269_TYPE_ALC269VB, 5616 ALC269_TYPE_ALC269VB,
5616 ALC269_TYPE_ALC269VC, 5617 ALC269_TYPE_ALC269VC,
5618 ALC269_TYPE_ALC269VD,
5617}; 5619};
5618 5620
5619/* 5621/*
@@ -5625,8 +5627,21 @@ static int alc269_parse_auto_config(struct hda_codec *codec)
5625 static const hda_nid_t alc269_ssids[] = { 0, 0x1b, 0x14, 0x21 }; 5627 static const hda_nid_t alc269_ssids[] = { 0, 0x1b, 0x14, 0x21 };
5626 static const hda_nid_t alc269va_ssids[] = { 0x15, 0x1b, 0x14, 0 }; 5628 static const hda_nid_t alc269va_ssids[] = { 0x15, 0x1b, 0x14, 0 };
5627 struct alc_spec *spec = codec->spec; 5629 struct alc_spec *spec = codec->spec;
5628 const hda_nid_t *ssids = spec->codec_variant == ALC269_TYPE_ALC269VA ? 5630 const hda_nid_t *ssids;
5629 alc269va_ssids : alc269_ssids; 5631
5632 switch (spec->codec_variant) {
5633 case ALC269_TYPE_ALC269VA:
5634 case ALC269_TYPE_ALC269VC:
5635 ssids = alc269va_ssids;
5636 break;
5637 case ALC269_TYPE_ALC269VB:
5638 case ALC269_TYPE_ALC269VD:
5639 ssids = alc269_ssids;
5640 break;
5641 default:
5642 ssids = alc269_ssids;
5643 break;
5644 }
5630 5645
5631 return alc_parse_auto_config(codec, alc269_ignore, ssids); 5646 return alc_parse_auto_config(codec, alc269_ignore, ssids);
5632} 5647}
@@ -5643,6 +5658,11 @@ static void alc269_toggle_power_output(struct hda_codec *codec, int power_up)
5643 5658
5644static void alc269_shutup(struct hda_codec *codec) 5659static void alc269_shutup(struct hda_codec *codec)
5645{ 5660{
5661 struct alc_spec *spec = codec->spec;
5662
5663 if (spec->codec_variant != ALC269_TYPE_ALC269VB)
5664 return;
5665
5646 if ((alc_get_coef0(codec) & 0x00ff) == 0x017) 5666 if ((alc_get_coef0(codec) & 0x00ff) == 0x017)
5647 alc269_toggle_power_output(codec, 0); 5667 alc269_toggle_power_output(codec, 0);
5648 if ((alc_get_coef0(codec) & 0x00ff) == 0x018) { 5668 if ((alc_get_coef0(codec) & 0x00ff) == 0x018) {
@@ -5654,19 +5674,24 @@ static void alc269_shutup(struct hda_codec *codec)
5654#ifdef CONFIG_PM 5674#ifdef CONFIG_PM
5655static int alc269_resume(struct hda_codec *codec) 5675static int alc269_resume(struct hda_codec *codec)
5656{ 5676{
5657 if ((alc_get_coef0(codec) & 0x00ff) == 0x018) { 5677 struct alc_spec *spec = codec->spec;
5678
5679 if (spec->codec_variant == ALC269_TYPE_ALC269VB ||
5680 (alc_get_coef0(codec) & 0x00ff) == 0x018) {
5658 alc269_toggle_power_output(codec, 0); 5681 alc269_toggle_power_output(codec, 0);
5659 msleep(150); 5682 msleep(150);
5660 } 5683 }
5661 5684
5662 codec->patch_ops.init(codec); 5685 codec->patch_ops.init(codec);
5663 5686
5664 if ((alc_get_coef0(codec) & 0x00ff) == 0x017) { 5687 if (spec->codec_variant == ALC269_TYPE_ALC269VB ||
5688 (alc_get_coef0(codec) & 0x00ff) == 0x017) {
5665 alc269_toggle_power_output(codec, 1); 5689 alc269_toggle_power_output(codec, 1);
5666 msleep(200); 5690 msleep(200);
5667 } 5691 }
5668 5692
5669 if ((alc_get_coef0(codec) & 0x00ff) == 0x018) 5693 if (spec->codec_variant == ALC269_TYPE_ALC269VB ||
5694 (alc_get_coef0(codec) & 0x00ff) == 0x018)
5670 alc269_toggle_power_output(codec, 1); 5695 alc269_toggle_power_output(codec, 1);
5671 5696
5672 snd_hda_codec_resume_amp(codec); 5697 snd_hda_codec_resume_amp(codec);
@@ -6081,6 +6106,9 @@ static int patch_alc269(struct hda_codec *codec)
6081 err = alc_codec_rename(codec, "ALC3202"); 6106 err = alc_codec_rename(codec, "ALC3202");
6082 spec->codec_variant = ALC269_TYPE_ALC269VC; 6107 spec->codec_variant = ALC269_TYPE_ALC269VC;
6083 break; 6108 break;
6109 case 0x0030:
6110 spec->codec_variant = ALC269_TYPE_ALC269VD;
6111 break;
6084 default: 6112 default:
6085 alc_fix_pll_init(codec, 0x20, 0x04, 15); 6113 alc_fix_pll_init(codec, 0x20, 0x04, 15);
6086 } 6114 }
diff --git a/sound/soc/kirkwood/kirkwood-i2s.c b/sound/soc/kirkwood/kirkwood-i2s.c
index 3cb9aa4299d3..fa4556750451 100644
--- a/sound/soc/kirkwood/kirkwood-i2s.c
+++ b/sound/soc/kirkwood/kirkwood-i2s.c
@@ -17,6 +17,7 @@
17#include <linux/slab.h> 17#include <linux/slab.h>
18#include <linux/mbus.h> 18#include <linux/mbus.h>
19#include <linux/delay.h> 19#include <linux/delay.h>
20#include <linux/clk.h>
20#include <sound/pcm.h> 21#include <sound/pcm.h>
21#include <sound/pcm_params.h> 22#include <sound/pcm_params.h>
22#include <sound/soc.h> 23#include <sound/soc.h>
@@ -449,6 +450,14 @@ static __devinit int kirkwood_i2s_dev_probe(struct platform_device *pdev)
449 450
450 priv->burst = data->burst; 451 priv->burst = data->burst;
451 452
453 priv->clk = clk_get(&pdev->dev, NULL);
454 if (IS_ERR(priv->clk)) {
455 dev_err(&pdev->dev, "no clock\n");
456 err = PTR_ERR(priv->clk);
457 goto err_ioremap;
458 }
459 clk_prepare_enable(priv->clk);
460
452 return snd_soc_register_dai(&pdev->dev, &kirkwood_i2s_dai); 461 return snd_soc_register_dai(&pdev->dev, &kirkwood_i2s_dai);
453 462
454err_ioremap: 463err_ioremap:
@@ -466,6 +475,10 @@ static __devexit int kirkwood_i2s_dev_remove(struct platform_device *pdev)
466 struct kirkwood_dma_data *priv = dev_get_drvdata(&pdev->dev); 475 struct kirkwood_dma_data *priv = dev_get_drvdata(&pdev->dev);
467 476
468 snd_soc_unregister_dai(&pdev->dev); 477 snd_soc_unregister_dai(&pdev->dev);
478
479 clk_disable_unprepare(priv->clk);
480 clk_put(priv->clk);
481
469 iounmap(priv->io); 482 iounmap(priv->io);
470 release_mem_region(priv->mem->start, SZ_16K); 483 release_mem_region(priv->mem->start, SZ_16K);
471 kfree(priv); 484 kfree(priv);
diff --git a/sound/soc/kirkwood/kirkwood.h b/sound/soc/kirkwood/kirkwood.h
index 9047436b3937..f9084d83e6bd 100644
--- a/sound/soc/kirkwood/kirkwood.h
+++ b/sound/soc/kirkwood/kirkwood.h
@@ -123,6 +123,7 @@ struct kirkwood_dma_data {
123 void __iomem *io; 123 void __iomem *io;
124 int irq; 124 int irq;
125 int burst; 125 int burst;
126 struct clk *clk;
126}; 127};
127 128
128#endif 129#endif
diff --git a/sound/soc/omap/Kconfig b/sound/soc/omap/Kconfig
index 9ccfa5e1c11b..57a2fa751085 100644
--- a/sound/soc/omap/Kconfig
+++ b/sound/soc/omap/Kconfig
@@ -109,11 +109,12 @@ config SND_OMAP_SOC_OMAP_ABE_TWL6040
109 - PandaBoard (4430) 109 - PandaBoard (4430)
110 - PandaBoardES (4460) 110 - PandaBoardES (4460)
111 111
112config SND_OMAP_SOC_OMAP4_HDMI 112config SND_OMAP_SOC_OMAP_HDMI
113 tristate "SoC Audio support for Texas Instruments OMAP4 HDMI" 113 tristate "SoC Audio support for Texas Instruments OMAP HDMI"
114 depends on SND_OMAP_SOC && OMAP4_DSS_HDMI && OMAP2_DSS && ARCH_OMAP4 114 depends on SND_OMAP_SOC && OMAP4_DSS_HDMI && OMAP2_DSS
115 select SND_OMAP_SOC_HDMI 115 select SND_OMAP_SOC_HDMI
116 select SND_SOC_OMAP_HDMI_CODEC 116 select SND_SOC_OMAP_HDMI_CODEC
117 select OMAP4_DSS_HDMI_AUDIO
117 help 118 help
118 Say Y if you want to add support for SoC HDMI audio on Texas Instruments 119 Say Y if you want to add support for SoC HDMI audio on Texas Instruments
119 OMAP4 chips 120 OMAP4 chips
diff --git a/sound/soc/omap/Makefile b/sound/soc/omap/Makefile
index 1d656bce01d4..0e14dd322565 100644
--- a/sound/soc/omap/Makefile
+++ b/sound/soc/omap/Makefile
@@ -25,7 +25,7 @@ snd-soc-omap3pandora-objs := omap3pandora.o
25snd-soc-omap3beagle-objs := omap3beagle.o 25snd-soc-omap3beagle-objs := omap3beagle.o
26snd-soc-zoom2-objs := zoom2.o 26snd-soc-zoom2-objs := zoom2.o
27snd-soc-igep0020-objs := igep0020.o 27snd-soc-igep0020-objs := igep0020.o
28snd-soc-omap4-hdmi-objs := omap4-hdmi-card.o 28snd-soc-omap-hdmi-card-objs := omap-hdmi-card.o
29 29
30obj-$(CONFIG_SND_OMAP_SOC_N810) += snd-soc-n810.o 30obj-$(CONFIG_SND_OMAP_SOC_N810) += snd-soc-n810.o
31obj-$(CONFIG_SND_OMAP_SOC_RX51) += snd-soc-rx51.o 31obj-$(CONFIG_SND_OMAP_SOC_RX51) += snd-soc-rx51.o
@@ -41,4 +41,4 @@ obj-$(CONFIG_SND_OMAP_SOC_OMAP3_PANDORA) += snd-soc-omap3pandora.o
41obj-$(CONFIG_SND_OMAP_SOC_OMAP3_BEAGLE) += snd-soc-omap3beagle.o 41obj-$(CONFIG_SND_OMAP_SOC_OMAP3_BEAGLE) += snd-soc-omap3beagle.o
42obj-$(CONFIG_SND_OMAP_SOC_ZOOM2) += snd-soc-zoom2.o 42obj-$(CONFIG_SND_OMAP_SOC_ZOOM2) += snd-soc-zoom2.o
43obj-$(CONFIG_SND_OMAP_SOC_IGEP0020) += snd-soc-igep0020.o 43obj-$(CONFIG_SND_OMAP_SOC_IGEP0020) += snd-soc-igep0020.o
44obj-$(CONFIG_SND_OMAP_SOC_OMAP4_HDMI) += snd-soc-omap4-hdmi.o 44obj-$(CONFIG_SND_OMAP_SOC_OMAP_HDMI) += snd-soc-omap-hdmi-card.o
diff --git a/sound/soc/omap/mcbsp.c b/sound/soc/omap/mcbsp.c
index e5f44440d1b9..34835e8a9160 100644
--- a/sound/soc/omap/mcbsp.c
+++ b/sound/soc/omap/mcbsp.c
@@ -109,6 +109,47 @@ static void omap_mcbsp_dump_reg(struct omap_mcbsp *mcbsp)
109 dev_dbg(mcbsp->dev, "***********************\n"); 109 dev_dbg(mcbsp->dev, "***********************\n");
110} 110}
111 111
112static irqreturn_t omap_mcbsp_irq_handler(int irq, void *dev_id)
113{
114 struct omap_mcbsp *mcbsp = dev_id;
115 u16 irqst;
116
117 irqst = MCBSP_READ(mcbsp, IRQST);
118 dev_dbg(mcbsp->dev, "IRQ callback : 0x%x\n", irqst);
119
120 if (irqst & RSYNCERREN)
121 dev_err(mcbsp->dev, "RX Frame Sync Error!\n");
122 if (irqst & RFSREN)
123 dev_dbg(mcbsp->dev, "RX Frame Sync\n");
124 if (irqst & REOFEN)
125 dev_dbg(mcbsp->dev, "RX End Of Frame\n");
126 if (irqst & RRDYEN)
127 dev_dbg(mcbsp->dev, "RX Buffer Threshold Reached\n");
128 if (irqst & RUNDFLEN)
129 dev_err(mcbsp->dev, "RX Buffer Underflow!\n");
130 if (irqst & ROVFLEN)
131 dev_err(mcbsp->dev, "RX Buffer Overflow!\n");
132
133 if (irqst & XSYNCERREN)
134 dev_err(mcbsp->dev, "TX Frame Sync Error!\n");
135 if (irqst & XFSXEN)
136 dev_dbg(mcbsp->dev, "TX Frame Sync\n");
137 if (irqst & XEOFEN)
138 dev_dbg(mcbsp->dev, "TX End Of Frame\n");
139 if (irqst & XRDYEN)
140 dev_dbg(mcbsp->dev, "TX Buffer threshold Reached\n");
141 if (irqst & XUNDFLEN)
142 dev_err(mcbsp->dev, "TX Buffer Underflow!\n");
143 if (irqst & XOVFLEN)
144 dev_err(mcbsp->dev, "TX Buffer Overflow!\n");
145 if (irqst & XEMPTYEOFEN)
146 dev_dbg(mcbsp->dev, "TX Buffer empty at end of frame\n");
147
148 MCBSP_WRITE(mcbsp, IRQST, irqst);
149
150 return IRQ_HANDLED;
151}
152
112static irqreturn_t omap_mcbsp_tx_irq_handler(int irq, void *dev_id) 153static irqreturn_t omap_mcbsp_tx_irq_handler(int irq, void *dev_id)
113{ 154{
114 struct omap_mcbsp *mcbsp_tx = dev_id; 155 struct omap_mcbsp *mcbsp_tx = dev_id;
@@ -176,6 +217,10 @@ void omap_mcbsp_config(struct omap_mcbsp *mcbsp,
176 /* Enable wakeup behavior */ 217 /* Enable wakeup behavior */
177 if (mcbsp->pdata->has_wakeup) 218 if (mcbsp->pdata->has_wakeup)
178 MCBSP_WRITE(mcbsp, WAKEUPEN, XRDYEN | RRDYEN); 219 MCBSP_WRITE(mcbsp, WAKEUPEN, XRDYEN | RRDYEN);
220
221 /* Enable TX/RX sync error interrupts by default */
222 if (mcbsp->irq)
223 MCBSP_WRITE(mcbsp, IRQEN, RSYNCERREN | XSYNCERREN);
179} 224}
180 225
181/** 226/**
@@ -489,23 +534,25 @@ int omap_mcbsp_request(struct omap_mcbsp *mcbsp)
489 MCBSP_WRITE(mcbsp, SPCR1, 0); 534 MCBSP_WRITE(mcbsp, SPCR1, 0);
490 MCBSP_WRITE(mcbsp, SPCR2, 0); 535 MCBSP_WRITE(mcbsp, SPCR2, 0);
491 536
492 err = request_irq(mcbsp->tx_irq, omap_mcbsp_tx_irq_handler, 537 if (mcbsp->irq) {
493 0, "McBSP", (void *)mcbsp); 538 err = request_irq(mcbsp->irq, omap_mcbsp_irq_handler, 0,
494 if (err != 0) { 539 "McBSP", (void *)mcbsp);
495 dev_err(mcbsp->dev, "Unable to request TX IRQ %d " 540 if (err != 0) {
496 "for McBSP%d\n", mcbsp->tx_irq, 541 dev_err(mcbsp->dev, "Unable to request IRQ\n");
497 mcbsp->id); 542 goto err_clk_disable;
498 goto err_clk_disable; 543 }
499 } 544 } else {
545 err = request_irq(mcbsp->tx_irq, omap_mcbsp_tx_irq_handler, 0,
546 "McBSP TX", (void *)mcbsp);
547 if (err != 0) {
548 dev_err(mcbsp->dev, "Unable to request TX IRQ\n");
549 goto err_clk_disable;
550 }
500 551
501 if (mcbsp->rx_irq) { 552 err = request_irq(mcbsp->rx_irq, omap_mcbsp_rx_irq_handler, 0,
502 err = request_irq(mcbsp->rx_irq, 553 "McBSP RX", (void *)mcbsp);
503 omap_mcbsp_rx_irq_handler,
504 0, "McBSP", (void *)mcbsp);
505 if (err != 0) { 554 if (err != 0) {
506 dev_err(mcbsp->dev, "Unable to request RX IRQ %d " 555 dev_err(mcbsp->dev, "Unable to request RX IRQ\n");
507 "for McBSP%d\n", mcbsp->rx_irq,
508 mcbsp->id);
509 goto err_free_irq; 556 goto err_free_irq;
510 } 557 }
511 } 558 }
@@ -542,9 +589,16 @@ void omap_mcbsp_free(struct omap_mcbsp *mcbsp)
542 if (mcbsp->pdata->has_wakeup) 589 if (mcbsp->pdata->has_wakeup)
543 MCBSP_WRITE(mcbsp, WAKEUPEN, 0); 590 MCBSP_WRITE(mcbsp, WAKEUPEN, 0);
544 591
545 if (mcbsp->rx_irq) 592 /* Disable interrupt requests */
593 if (mcbsp->irq)
594 MCBSP_WRITE(mcbsp, IRQEN, 0);
595
596 if (mcbsp->irq) {
597 free_irq(mcbsp->irq, (void *)mcbsp);
598 } else {
546 free_irq(mcbsp->rx_irq, (void *)mcbsp); 599 free_irq(mcbsp->rx_irq, (void *)mcbsp);
547 free_irq(mcbsp->tx_irq, (void *)mcbsp); 600 free_irq(mcbsp->tx_irq, (void *)mcbsp);
601 }
548 602
549 reg_cache = mcbsp->reg_cache; 603 reg_cache = mcbsp->reg_cache;
550 604
@@ -754,7 +808,7 @@ THRESHOLD_PROP_BUILDER(max_tx_thres);
754THRESHOLD_PROP_BUILDER(max_rx_thres); 808THRESHOLD_PROP_BUILDER(max_rx_thres);
755 809
756static const char *dma_op_modes[] = { 810static const char *dma_op_modes[] = {
757 "element", "threshold", "frame", 811 "element", "threshold",
758}; 812};
759 813
760static ssize_t dma_op_mode_show(struct device *dev, 814static ssize_t dma_op_mode_show(struct device *dev,
@@ -949,13 +1003,24 @@ int __devinit omap_mcbsp_init(struct platform_device *pdev)
949 else 1003 else
950 mcbsp->phys_dma_base = res->start; 1004 mcbsp->phys_dma_base = res->start;
951 1005
952 mcbsp->tx_irq = platform_get_irq_byname(pdev, "tx"); 1006 /*
953 mcbsp->rx_irq = platform_get_irq_byname(pdev, "rx"); 1007 * OMAP1, 2 uses two interrupt lines: TX, RX
954 1008 * OMAP2430, OMAP3 SoC have combined IRQ line as well.
955 /* From OMAP4 there will be a single irq line */ 1009 * OMAP4 and newer SoC only have the combined IRQ line.
956 if (mcbsp->tx_irq == -ENXIO) { 1010 * Use the combined IRQ if available since it gives better debugging
957 mcbsp->tx_irq = platform_get_irq(pdev, 0); 1011 * possibilities.
958 mcbsp->rx_irq = 0; 1012 */
1013 mcbsp->irq = platform_get_irq_byname(pdev, "common");
1014 if (mcbsp->irq == -ENXIO) {
1015 mcbsp->tx_irq = platform_get_irq_byname(pdev, "tx");
1016
1017 if (mcbsp->tx_irq == -ENXIO) {
1018 mcbsp->irq = platform_get_irq(pdev, 0);
1019 mcbsp->tx_irq = 0;
1020 } else {
1021 mcbsp->rx_irq = platform_get_irq_byname(pdev, "rx");
1022 mcbsp->irq = 0;
1023 }
959 } 1024 }
960 1025
961 res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx"); 1026 res = platform_get_resource_byname(pdev, IORESOURCE_DMA, "rx");
diff --git a/sound/soc/omap/mcbsp.h b/sound/soc/omap/mcbsp.h
index a944fcc9073c..262a6152111f 100644
--- a/sound/soc/omap/mcbsp.h
+++ b/sound/soc/omap/mcbsp.h
@@ -217,17 +217,20 @@ enum {
217/********************** McBSP DMA operating modes **************************/ 217/********************** McBSP DMA operating modes **************************/
218#define MCBSP_DMA_MODE_ELEMENT 0 218#define MCBSP_DMA_MODE_ELEMENT 0
219#define MCBSP_DMA_MODE_THRESHOLD 1 219#define MCBSP_DMA_MODE_THRESHOLD 1
220#define MCBSP_DMA_MODE_FRAME 2
221 220
222/********************** McBSP WAKEUPEN bit definitions *********************/ 221/********************** McBSP WAKEUPEN/IRQST/IRQEN bit definitions *********/
223#define RSYNCERREN BIT(0) 222#define RSYNCERREN BIT(0)
224#define RFSREN BIT(1) 223#define RFSREN BIT(1)
225#define REOFEN BIT(2) 224#define REOFEN BIT(2)
226#define RRDYEN BIT(3) 225#define RRDYEN BIT(3)
226#define RUNDFLEN BIT(4)
227#define ROVFLEN BIT(5)
227#define XSYNCERREN BIT(7) 228#define XSYNCERREN BIT(7)
228#define XFSXEN BIT(8) 229#define XFSXEN BIT(8)
229#define XEOFEN BIT(9) 230#define XEOFEN BIT(9)
230#define XRDYEN BIT(10) 231#define XRDYEN BIT(10)
232#define XUNDFLEN BIT(11)
233#define XOVFLEN BIT(12)
231#define XEMPTYEOFEN BIT(14) 234#define XEMPTYEOFEN BIT(14)
232 235
233/* Clock signal muxing options */ 236/* Clock signal muxing options */
@@ -295,6 +298,7 @@ struct omap_mcbsp {
295 int configured; 298 int configured;
296 u8 free; 299 u8 free;
297 300
301 int irq;
298 int rx_irq; 302 int rx_irq;
299 int tx_irq; 303 int tx_irq;
300 304
diff --git a/sound/soc/omap/omap-abe-twl6040.c b/sound/soc/omap/omap-abe-twl6040.c
index 93bb8eee22b3..9d93793d3077 100644
--- a/sound/soc/omap/omap-abe-twl6040.c
+++ b/sound/soc/omap/omap-abe-twl6040.c
@@ -40,6 +40,11 @@
40#include "omap-pcm.h" 40#include "omap-pcm.h"
41#include "../codecs/twl6040.h" 41#include "../codecs/twl6040.h"
42 42
43struct abe_twl6040 {
44 int jack_detection; /* board can detect jack events */
45 int mclk_freq; /* MCLK frequency speed for twl6040 */
46};
47
43static int omap_abe_hw_params(struct snd_pcm_substream *substream, 48static int omap_abe_hw_params(struct snd_pcm_substream *substream,
44 struct snd_pcm_hw_params *params) 49 struct snd_pcm_hw_params *params)
45{ 50{
@@ -47,13 +52,13 @@ static int omap_abe_hw_params(struct snd_pcm_substream *substream,
47 struct snd_soc_dai *codec_dai = rtd->codec_dai; 52 struct snd_soc_dai *codec_dai = rtd->codec_dai;
48 struct snd_soc_codec *codec = rtd->codec; 53 struct snd_soc_codec *codec = rtd->codec;
49 struct snd_soc_card *card = codec->card; 54 struct snd_soc_card *card = codec->card;
50 struct omap_abe_twl6040_data *pdata = dev_get_platdata(card->dev); 55 struct abe_twl6040 *priv = snd_soc_card_get_drvdata(card);
51 int clk_id, freq; 56 int clk_id, freq;
52 int ret; 57 int ret;
53 58
54 clk_id = twl6040_get_clk_id(rtd->codec); 59 clk_id = twl6040_get_clk_id(rtd->codec);
55 if (clk_id == TWL6040_SYSCLK_SEL_HPPLL) 60 if (clk_id == TWL6040_SYSCLK_SEL_HPPLL)
56 freq = pdata->mclk_freq; 61 freq = priv->mclk_freq;
57 else if (clk_id == TWL6040_SYSCLK_SEL_LPPLL) 62 else if (clk_id == TWL6040_SYSCLK_SEL_LPPLL)
58 freq = 32768; 63 freq = 32768;
59 else 64 else
@@ -128,6 +133,9 @@ static const struct snd_soc_dapm_widget twl6040_dapm_widgets[] = {
128 SND_SOC_DAPM_MIC("Main Handset Mic", NULL), 133 SND_SOC_DAPM_MIC("Main Handset Mic", NULL),
129 SND_SOC_DAPM_MIC("Sub Handset Mic", NULL), 134 SND_SOC_DAPM_MIC("Sub Handset Mic", NULL),
130 SND_SOC_DAPM_LINE("Line In", NULL), 135 SND_SOC_DAPM_LINE("Line In", NULL),
136
137 /* Digital microphones */
138 SND_SOC_DAPM_MIC("Digital Mic", NULL),
131}; 139};
132 140
133static const struct snd_soc_dapm_route audio_map[] = { 141static const struct snd_soc_dapm_route audio_map[] = {
@@ -173,6 +181,7 @@ static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
173 struct snd_soc_card *card = codec->card; 181 struct snd_soc_card *card = codec->card;
174 struct snd_soc_dapm_context *dapm = &codec->dapm; 182 struct snd_soc_dapm_context *dapm = &codec->dapm;
175 struct omap_abe_twl6040_data *pdata = dev_get_platdata(card->dev); 183 struct omap_abe_twl6040_data *pdata = dev_get_platdata(card->dev);
184 struct abe_twl6040 *priv = snd_soc_card_get_drvdata(card);
176 int hs_trim; 185 int hs_trim;
177 int ret = 0; 186 int ret = 0;
178 187
@@ -196,7 +205,7 @@ static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
196 TWL6040_HSF_TRIM_RIGHT(hs_trim)); 205 TWL6040_HSF_TRIM_RIGHT(hs_trim));
197 206
198 /* Headset jack detection only if it is supported */ 207 /* Headset jack detection only if it is supported */
199 if (pdata->jack_detection) { 208 if (priv->jack_detection) {
200 ret = snd_soc_jack_new(codec, "Headset Jack", 209 ret = snd_soc_jack_new(codec, "Headset Jack",
201 SND_JACK_HEADSET, &hs_jack); 210 SND_JACK_HEADSET, &hs_jack);
202 if (ret) 211 if (ret)
@@ -210,10 +219,6 @@ static int omap_abe_twl6040_init(struct snd_soc_pcm_runtime *rtd)
210 return ret; 219 return ret;
211} 220}
212 221
213static const struct snd_soc_dapm_widget dmic_dapm_widgets[] = {
214 SND_SOC_DAPM_MIC("Digital Mic", NULL),
215};
216
217static const struct snd_soc_dapm_route dmic_audio_map[] = { 222static const struct snd_soc_dapm_route dmic_audio_map[] = {
218 {"DMic", NULL, "Digital Mic"}, 223 {"DMic", NULL, "Digital Mic"},
219 {"Digital Mic", NULL, "Digital Mic1 Bias"}, 224 {"Digital Mic", NULL, "Digital Mic1 Bias"},
@@ -223,19 +228,13 @@ static int omap_abe_dmic_init(struct snd_soc_pcm_runtime *rtd)
223{ 228{
224 struct snd_soc_codec *codec = rtd->codec; 229 struct snd_soc_codec *codec = rtd->codec;
225 struct snd_soc_dapm_context *dapm = &codec->dapm; 230 struct snd_soc_dapm_context *dapm = &codec->dapm;
226 int ret;
227
228 ret = snd_soc_dapm_new_controls(dapm, dmic_dapm_widgets,
229 ARRAY_SIZE(dmic_dapm_widgets));
230 if (ret)
231 return ret;
232 231
233 return snd_soc_dapm_add_routes(dapm, dmic_audio_map, 232 return snd_soc_dapm_add_routes(dapm, dmic_audio_map,
234 ARRAY_SIZE(dmic_audio_map)); 233 ARRAY_SIZE(dmic_audio_map));
235} 234}
236 235
237/* Digital audio interface glue - connects codec <--> CPU */ 236/* Digital audio interface glue - connects codec <--> CPU */
238static struct snd_soc_dai_link twl6040_dmic_dai[] = { 237static struct snd_soc_dai_link abe_twl6040_dai_links[] = {
239 { 238 {
240 .name = "TWL6040", 239 .name = "TWL6040",
241 .stream_name = "TWL6040", 240 .stream_name = "TWL6040",
@@ -258,19 +257,6 @@ static struct snd_soc_dai_link twl6040_dmic_dai[] = {
258 }, 257 },
259}; 258};
260 259
261static struct snd_soc_dai_link twl6040_only_dai[] = {
262 {
263 .name = "TWL6040",
264 .stream_name = "TWL6040",
265 .cpu_dai_name = "omap-mcpdm",
266 .codec_dai_name = "twl6040-legacy",
267 .platform_name = "omap-pcm-audio",
268 .codec_name = "twl6040-codec",
269 .init = omap_abe_twl6040_init,
270 .ops = &omap_abe_ops,
271 },
272};
273
274/* Audio machine driver */ 260/* Audio machine driver */
275static struct snd_soc_card omap_abe_card = { 261static struct snd_soc_card omap_abe_card = {
276 .owner = THIS_MODULE, 262 .owner = THIS_MODULE,
@@ -285,6 +271,8 @@ static __devinit int omap_abe_probe(struct platform_device *pdev)
285{ 271{
286 struct omap_abe_twl6040_data *pdata = dev_get_platdata(&pdev->dev); 272 struct omap_abe_twl6040_data *pdata = dev_get_platdata(&pdev->dev);
287 struct snd_soc_card *card = &omap_abe_card; 273 struct snd_soc_card *card = &omap_abe_card;
274 struct abe_twl6040 *priv;
275 int num_links = 0;
288 int ret; 276 int ret;
289 277
290 card->dev = &pdev->dev; 278 card->dev = &pdev->dev;
@@ -294,6 +282,10 @@ static __devinit int omap_abe_probe(struct platform_device *pdev)
294 return -ENODEV; 282 return -ENODEV;
295 } 283 }
296 284
285 priv = devm_kzalloc(&pdev->dev, sizeof(struct abe_twl6040), GFP_KERNEL);
286 if (priv == NULL)
287 return -ENOMEM;
288
297 if (pdata->card_name) { 289 if (pdata->card_name) {
298 card->name = pdata->card_name; 290 card->name = pdata->card_name;
299 } else { 291 } else {
@@ -301,18 +293,24 @@ static __devinit int omap_abe_probe(struct platform_device *pdev)
301 return -ENODEV; 293 return -ENODEV;
302 } 294 }
303 295
304 if (!pdata->mclk_freq) { 296 priv->jack_detection = pdata->jack_detection;
297 priv->mclk_freq = pdata->mclk_freq;
298
299
300 if (!priv->mclk_freq) {
305 dev_err(&pdev->dev, "MCLK frequency missing\n"); 301 dev_err(&pdev->dev, "MCLK frequency missing\n");
306 return -ENODEV; 302 return -ENODEV;
307 } 303 }
308 304
309 if (pdata->has_dmic) { 305 if (pdata->has_dmic)
310 card->dai_link = twl6040_dmic_dai; 306 num_links = 2;
311 card->num_links = ARRAY_SIZE(twl6040_dmic_dai); 307 else
312 } else { 308 num_links = 1;
313 card->dai_link = twl6040_only_dai; 309
314 card->num_links = ARRAY_SIZE(twl6040_only_dai); 310 card->dai_link = abe_twl6040_dai_links;
315 } 311 card->num_links = num_links;
312
313 snd_soc_card_set_drvdata(card, priv);
316 314
317 ret = snd_soc_register_card(card); 315 ret = snd_soc_register_card(card);
318 if (ret) 316 if (ret)
diff --git a/sound/soc/omap/omap-dmic.c b/sound/soc/omap/omap-dmic.c
index 4dcb5a7e40e8..75f5dca0e8d2 100644
--- a/sound/soc/omap/omap-dmic.c
+++ b/sound/soc/omap/omap-dmic.c
@@ -32,6 +32,7 @@
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/slab.h> 33#include <linux/slab.h>
34#include <linux/pm_runtime.h> 34#include <linux/pm_runtime.h>
35#include <linux/of_device.h>
35#include <plat/dma.h> 36#include <plat/dma.h>
36 37
37#include <sound/core.h> 38#include <sound/core.h>
@@ -528,10 +529,17 @@ static int __devexit asoc_dmic_remove(struct platform_device *pdev)
528 return 0; 529 return 0;
529} 530}
530 531
532static const struct of_device_id omap_dmic_of_match[] = {
533 { .compatible = "ti,omap4-dmic", },
534 { }
535};
536MODULE_DEVICE_TABLE(of, omap_dmic_of_match);
537
531static struct platform_driver asoc_dmic_driver = { 538static struct platform_driver asoc_dmic_driver = {
532 .driver = { 539 .driver = {
533 .name = "omap-dmic", 540 .name = "omap-dmic",
534 .owner = THIS_MODULE, 541 .owner = THIS_MODULE,
542 .of_match_table = omap_dmic_of_match,
535 }, 543 },
536 .probe = asoc_dmic_probe, 544 .probe = asoc_dmic_probe,
537 .remove = __devexit_p(asoc_dmic_remove), 545 .remove = __devexit_p(asoc_dmic_remove),
diff --git a/sound/soc/omap/omap-hdmi-card.c b/sound/soc/omap/omap-hdmi-card.c
new file mode 100644
index 000000000000..eaa2ea0e3f81
--- /dev/null
+++ b/sound/soc/omap/omap-hdmi-card.c
@@ -0,0 +1,87 @@
1/*
2 * omap-hdmi-card.c
3 *
4 * OMAP ALSA SoC machine driver for TI OMAP HDMI
5 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
6 * Author: Ricardo Neri <ricardo.neri@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include <linux/module.h>
25#include <sound/pcm.h>
26#include <sound/soc.h>
27#include <asm/mach-types.h>
28#include <video/omapdss.h>
29
30#define DRV_NAME "omap-hdmi-audio"
31
32static struct snd_soc_dai_link omap_hdmi_dai = {
33 .name = "HDMI",
34 .stream_name = "HDMI",
35 .cpu_dai_name = "omap-hdmi-audio-dai",
36 .platform_name = "omap-pcm-audio",
37 .codec_name = "hdmi-audio-codec",
38 .codec_dai_name = "omap-hdmi-hifi",
39};
40
41static struct snd_soc_card snd_soc_omap_hdmi = {
42 .name = "OMAPHDMI",
43 .owner = THIS_MODULE,
44 .dai_link = &omap_hdmi_dai,
45 .num_links = 1,
46};
47
48static __devinit int omap_hdmi_probe(struct platform_device *pdev)
49{
50 struct snd_soc_card *card = &snd_soc_omap_hdmi;
51 int ret;
52
53 card->dev = &pdev->dev;
54
55 ret = snd_soc_register_card(card);
56 if (ret) {
57 dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
58 card->dev = NULL;
59 return ret;
60 }
61 return 0;
62}
63
64static int __devexit omap_hdmi_remove(struct platform_device *pdev)
65{
66 struct snd_soc_card *card = platform_get_drvdata(pdev);
67
68 snd_soc_unregister_card(card);
69 card->dev = NULL;
70 return 0;
71}
72
73static struct platform_driver omap_hdmi_driver = {
74 .driver = {
75 .name = DRV_NAME,
76 .owner = THIS_MODULE,
77 },
78 .probe = omap_hdmi_probe,
79 .remove = __devexit_p(omap_hdmi_remove),
80};
81
82module_platform_driver(omap_hdmi_driver);
83
84MODULE_AUTHOR("Ricardo Neri <ricardo.neri@ti.com>");
85MODULE_DESCRIPTION("OMAP HDMI machine ASoC driver");
86MODULE_LICENSE("GPL");
87MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/sound/soc/omap/omap-hdmi.c b/sound/soc/omap/omap-hdmi.c
index 38e0defa7078..a08245d9203c 100644
--- a/sound/soc/omap/omap-hdmi.c
+++ b/sound/soc/omap/omap-hdmi.c
@@ -30,21 +30,28 @@
30#include <sound/pcm_params.h> 30#include <sound/pcm_params.h>
31#include <sound/initval.h> 31#include <sound/initval.h>
32#include <sound/soc.h> 32#include <sound/soc.h>
33#include <sound/asound.h>
34#include <sound/asoundef.h>
35#include <video/omapdss.h>
33 36
34#include <plat/dma.h> 37#include <plat/dma.h>
35#include "omap-pcm.h" 38#include "omap-pcm.h"
36#include "omap-hdmi.h" 39#include "omap-hdmi.h"
37 40
38#define DRV_NAME "hdmi-audio-dai" 41#define DRV_NAME "omap-hdmi-audio-dai"
39 42
40static struct omap_pcm_dma_data omap_hdmi_dai_dma_params = { 43struct hdmi_priv {
41 .name = "HDMI playback", 44 struct omap_pcm_dma_data dma_params;
42 .sync_mode = OMAP_DMA_SYNC_PACKET, 45 struct omap_dss_audio dss_audio;
46 struct snd_aes_iec958 iec;
47 struct snd_cea_861_aud_if cea;
48 struct omap_dss_device *dssdev;
43}; 49};
44 50
45static int omap_hdmi_dai_startup(struct snd_pcm_substream *substream, 51static int omap_hdmi_dai_startup(struct snd_pcm_substream *substream,
46 struct snd_soc_dai *dai) 52 struct snd_soc_dai *dai)
47{ 53{
54 struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
48 int err; 55 int err;
49 /* 56 /*
50 * Make sure that the period bytes are multiple of the DMA packet size. 57 * Make sure that the period bytes are multiple of the DMA packet size.
@@ -52,46 +59,201 @@ static int omap_hdmi_dai_startup(struct snd_pcm_substream *substream,
52 */ 59 */
53 err = snd_pcm_hw_constraint_step(substream->runtime, 0, 60 err = snd_pcm_hw_constraint_step(substream->runtime, 0,
54 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128); 61 SNDRV_PCM_HW_PARAM_PERIOD_BYTES, 128);
55 if (err < 0) 62 if (err < 0) {
63 dev_err(dai->dev, "could not apply constraint\n");
56 return err; 64 return err;
65 }
57 66
67 if (!priv->dssdev->driver->audio_supported(priv->dssdev)) {
68 dev_err(dai->dev, "audio not supported\n");
69 return -ENODEV;
70 }
58 return 0; 71 return 0;
59} 72}
60 73
74static int omap_hdmi_dai_prepare(struct snd_pcm_substream *substream,
75 struct snd_soc_dai *dai)
76{
77 struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
78
79 return priv->dssdev->driver->audio_enable(priv->dssdev);
80}
81
61static int omap_hdmi_dai_hw_params(struct snd_pcm_substream *substream, 82static int omap_hdmi_dai_hw_params(struct snd_pcm_substream *substream,
62 struct snd_pcm_hw_params *params, 83 struct snd_pcm_hw_params *params,
63 struct snd_soc_dai *dai) 84 struct snd_soc_dai *dai)
64{ 85{
86 struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
87 struct snd_aes_iec958 *iec = &priv->iec;
88 struct snd_cea_861_aud_if *cea = &priv->cea;
65 int err = 0; 89 int err = 0;
66 90
67 switch (params_format(params)) { 91 switch (params_format(params)) {
68 case SNDRV_PCM_FORMAT_S16_LE: 92 case SNDRV_PCM_FORMAT_S16_LE:
69 omap_hdmi_dai_dma_params.packet_size = 16; 93 priv->dma_params.packet_size = 16;
70 break; 94 break;
71 case SNDRV_PCM_FORMAT_S24_LE: 95 case SNDRV_PCM_FORMAT_S24_LE:
72 omap_hdmi_dai_dma_params.packet_size = 32; 96 priv->dma_params.packet_size = 32;
73 break; 97 break;
74 default: 98 default:
75 err = -EINVAL; 99 dev_err(dai->dev, "format not supported!\n");
100 return -EINVAL;
76 } 101 }
77 102
78 omap_hdmi_dai_dma_params.data_type = OMAP_DMA_DATA_TYPE_S32; 103 priv->dma_params.data_type = OMAP_DMA_DATA_TYPE_S32;
79 104
80 snd_soc_dai_set_dma_data(dai, substream, 105 snd_soc_dai_set_dma_data(dai, substream,
81 &omap_hdmi_dai_dma_params); 106 &priv->dma_params);
107
108 /*
109 * fill the IEC-60958 channel status word
110 */
111
112 /* specify IEC-60958-3 (commercial use) */
113 iec->status[0] &= ~IEC958_AES0_PROFESSIONAL;
114
115 /* specify that the audio is LPCM*/
116 iec->status[0] &= ~IEC958_AES0_NONAUDIO;
117
118 iec->status[0] |= IEC958_AES0_CON_NOT_COPYRIGHT;
119
120 iec->status[0] |= IEC958_AES0_CON_EMPHASIS_NONE;
121
122 iec->status[0] |= IEC958_AES1_PRO_MODE_NOTID;
123
124 iec->status[1] = IEC958_AES1_CON_GENERAL;
125
126 iec->status[2] |= IEC958_AES2_CON_SOURCE_UNSPEC;
127
128 iec->status[2] |= IEC958_AES2_CON_CHANNEL_UNSPEC;
129
130 switch (params_rate(params)) {
131 case 32000:
132 iec->status[3] |= IEC958_AES3_CON_FS_32000;
133 break;
134 case 44100:
135 iec->status[3] |= IEC958_AES3_CON_FS_44100;
136 break;
137 case 48000:
138 iec->status[3] |= IEC958_AES3_CON_FS_48000;
139 break;
140 case 88200:
141 iec->status[3] |= IEC958_AES3_CON_FS_88200;
142 break;
143 case 96000:
144 iec->status[3] |= IEC958_AES3_CON_FS_96000;
145 break;
146 case 176400:
147 iec->status[3] |= IEC958_AES3_CON_FS_176400;
148 break;
149 case 192000:
150 iec->status[3] |= IEC958_AES3_CON_FS_192000;
151 break;
152 default:
153 dev_err(dai->dev, "rate not supported!\n");
154 return -EINVAL;
155 }
156
157 /* specify the clock accuracy */
158 iec->status[3] |= IEC958_AES3_CON_CLOCK_1000PPM;
159
160 /*
161 * specify the word length. The same word length value can mean
162 * two different lengths. Hence, we need to specify the maximum
163 * word length as well.
164 */
165 switch (params_format(params)) {
166 case SNDRV_PCM_FORMAT_S16_LE:
167 iec->status[4] |= IEC958_AES4_CON_WORDLEN_20_16;
168 iec->status[4] &= ~IEC958_AES4_CON_MAX_WORDLEN_24;
169 break;
170 case SNDRV_PCM_FORMAT_S24_LE:
171 iec->status[4] |= IEC958_AES4_CON_WORDLEN_24_20;
172 iec->status[4] |= IEC958_AES4_CON_MAX_WORDLEN_24;
173 break;
174 default:
175 dev_err(dai->dev, "format not supported!\n");
176 return -EINVAL;
177 }
178
179 /*
180 * Fill the CEA-861 audio infoframe (see spec for details)
181 */
182
183 cea->db1_ct_cc = (params_channels(params) - 1)
184 & CEA861_AUDIO_INFOFRAME_DB1CC;
185 cea->db1_ct_cc |= CEA861_AUDIO_INFOFRAME_DB1CT_FROM_STREAM;
186
187 cea->db2_sf_ss = CEA861_AUDIO_INFOFRAME_DB2SF_FROM_STREAM;
188 cea->db2_sf_ss |= CEA861_AUDIO_INFOFRAME_DB2SS_FROM_STREAM;
189
190 cea->db3 = 0; /* not used, all zeros */
191
192 /*
193 * The OMAP HDMI IP requires to use the 8-channel channel code when
194 * transmitting more than two channels.
195 */
196 if (params_channels(params) == 2)
197 cea->db4_ca = 0x0;
198 else
199 cea->db4_ca = 0x13;
200
201 cea->db5_dminh_lsv = CEA861_AUDIO_INFOFRAME_DB5_DM_INH_PROHIBITED;
202 /* the expression is trivial but makes clear what we are doing */
203 cea->db5_dminh_lsv |= (0 & CEA861_AUDIO_INFOFRAME_DB5_LSV);
204
205 priv->dss_audio.iec = iec;
206 priv->dss_audio.cea = cea;
207
208 err = priv->dssdev->driver->audio_config(priv->dssdev,
209 &priv->dss_audio);
82 210
83 return err; 211 return err;
84} 212}
85 213
214static int omap_hdmi_dai_trigger(struct snd_pcm_substream *substream, int cmd,
215 struct snd_soc_dai *dai)
216{
217 struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
218 int err = 0;
219
220 switch (cmd) {
221 case SNDRV_PCM_TRIGGER_START:
222 case SNDRV_PCM_TRIGGER_RESUME:
223 case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
224 err = priv->dssdev->driver->audio_start(priv->dssdev);
225 break;
226 case SNDRV_PCM_TRIGGER_STOP:
227 case SNDRV_PCM_TRIGGER_SUSPEND:
228 case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
229 priv->dssdev->driver->audio_stop(priv->dssdev);
230 break;
231 default:
232 err = -EINVAL;
233 }
234 return err;
235}
236
237static void omap_hdmi_dai_shutdown(struct snd_pcm_substream *substream,
238 struct snd_soc_dai *dai)
239{
240 struct hdmi_priv *priv = snd_soc_dai_get_drvdata(dai);
241
242 priv->dssdev->driver->audio_disable(priv->dssdev);
243}
244
86static const struct snd_soc_dai_ops omap_hdmi_dai_ops = { 245static const struct snd_soc_dai_ops omap_hdmi_dai_ops = {
87 .startup = omap_hdmi_dai_startup, 246 .startup = omap_hdmi_dai_startup,
88 .hw_params = omap_hdmi_dai_hw_params, 247 .hw_params = omap_hdmi_dai_hw_params,
248 .prepare = omap_hdmi_dai_prepare,
249 .trigger = omap_hdmi_dai_trigger,
250 .shutdown = omap_hdmi_dai_shutdown,
89}; 251};
90 252
91static struct snd_soc_dai_driver omap_hdmi_dai = { 253static struct snd_soc_dai_driver omap_hdmi_dai = {
92 .playback = { 254 .playback = {
93 .channels_min = 2, 255 .channels_min = 2,
94 .channels_max = 2, 256 .channels_max = 8,
95 .rates = OMAP_HDMI_RATES, 257 .rates = OMAP_HDMI_RATES,
96 .formats = OMAP_HDMI_FORMATS, 258 .formats = OMAP_HDMI_FORMATS,
97 }, 259 },
@@ -102,31 +264,77 @@ static __devinit int omap_hdmi_probe(struct platform_device *pdev)
102{ 264{
103 int ret; 265 int ret;
104 struct resource *hdmi_rsrc; 266 struct resource *hdmi_rsrc;
267 struct hdmi_priv *hdmi_data;
268 bool hdmi_dev_found = false;
269
270 hdmi_data = devm_kzalloc(&pdev->dev, sizeof(*hdmi_data), GFP_KERNEL);
271 if (hdmi_data == NULL) {
272 dev_err(&pdev->dev, "Cannot allocate memory for HDMI data\n");
273 return -ENOMEM;
274 }
105 275
106 hdmi_rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 0); 276 hdmi_rsrc = platform_get_resource(pdev, IORESOURCE_MEM, 0);
107 if (!hdmi_rsrc) { 277 if (!hdmi_rsrc) {
108 dev_err(&pdev->dev, "Cannot obtain IORESOURCE_MEM HDMI\n"); 278 dev_err(&pdev->dev, "Cannot obtain IORESOURCE_MEM HDMI\n");
109 return -EINVAL; 279 return -ENODEV;
110 } 280 }
111 281
112 omap_hdmi_dai_dma_params.port_addr = hdmi_rsrc->start 282 hdmi_data->dma_params.port_addr = hdmi_rsrc->start
113 + OMAP_HDMI_AUDIO_DMA_PORT; 283 + OMAP_HDMI_AUDIO_DMA_PORT;
114 284
115 hdmi_rsrc = platform_get_resource(pdev, IORESOURCE_DMA, 0); 285 hdmi_rsrc = platform_get_resource(pdev, IORESOURCE_DMA, 0);
116 if (!hdmi_rsrc) { 286 if (!hdmi_rsrc) {
117 dev_err(&pdev->dev, "Cannot obtain IORESOURCE_DMA HDMI\n"); 287 dev_err(&pdev->dev, "Cannot obtain IORESOURCE_DMA HDMI\n");
118 return -EINVAL; 288 return -ENODEV;
119 } 289 }
120 290
121 omap_hdmi_dai_dma_params.dma_req = hdmi_rsrc->start; 291 hdmi_data->dma_params.dma_req = hdmi_rsrc->start;
292 hdmi_data->dma_params.name = "HDMI playback";
293 hdmi_data->dma_params.sync_mode = OMAP_DMA_SYNC_PACKET;
294
295 /*
296 * TODO: We assume that there is only one DSS HDMI device. Future
297 * OMAP implementations may support more than one HDMI devices and
298 * we should provided separate audio support for all of them.
299 */
300 /* Find an HDMI device. */
301 for_each_dss_dev(hdmi_data->dssdev) {
302 omap_dss_get_device(hdmi_data->dssdev);
122 303
304 if (!hdmi_data->dssdev->driver) {
305 omap_dss_put_device(hdmi_data->dssdev);
306 continue;
307 }
308
309 if (hdmi_data->dssdev->type == OMAP_DISPLAY_TYPE_HDMI) {
310 hdmi_dev_found = true;
311 break;
312 }
313 }
314
315 if (!hdmi_dev_found) {
316 dev_err(&pdev->dev, "no driver for HDMI display found\n");
317 return -ENODEV;
318 }
319
320 dev_set_drvdata(&pdev->dev, hdmi_data);
123 ret = snd_soc_register_dai(&pdev->dev, &omap_hdmi_dai); 321 ret = snd_soc_register_dai(&pdev->dev, &omap_hdmi_dai);
322
124 return ret; 323 return ret;
125} 324}
126 325
127static int __devexit omap_hdmi_remove(struct platform_device *pdev) 326static int __devexit omap_hdmi_remove(struct platform_device *pdev)
128{ 327{
328 struct hdmi_priv *hdmi_data = dev_get_drvdata(&pdev->dev);
329
129 snd_soc_unregister_dai(&pdev->dev); 330 snd_soc_unregister_dai(&pdev->dev);
331
332 if (hdmi_data == NULL) {
333 dev_err(&pdev->dev, "cannot obtain HDMi data\n");
334 return -ENODEV;
335 }
336
337 omap_dss_put_device(hdmi_data->dssdev);
130 return 0; 338 return 0;
131} 339}
132 340
diff --git a/sound/soc/omap/omap-hdmi.h b/sound/soc/omap/omap-hdmi.h
index 34c298d5057e..6ad2bf4f2697 100644
--- a/sound/soc/omap/omap-hdmi.h
+++ b/sound/soc/omap/omap-hdmi.h
@@ -28,7 +28,9 @@
28#define OMAP_HDMI_AUDIO_DMA_PORT 0x8c 28#define OMAP_HDMI_AUDIO_DMA_PORT 0x8c
29 29
30#define OMAP_HDMI_RATES (SNDRV_PCM_RATE_32000 | \ 30#define OMAP_HDMI_RATES (SNDRV_PCM_RATE_32000 | \
31 SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000) 31 SNDRV_PCM_RATE_44100 | SNDRV_PCM_RATE_48000 | \
32 SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000 | \
33 SNDRV_PCM_RATE_176400 | SNDRV_PCM_RATE_192000)
32 34
33#define OMAP_HDMI_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \ 35#define OMAP_HDMI_FORMATS (SNDRV_PCM_FMTBIT_S16_LE | \
34 SNDRV_PCM_FMTBIT_S24_LE) 36 SNDRV_PCM_FMTBIT_S24_LE)
diff --git a/sound/soc/omap/omap-mcbsp.c b/sound/soc/omap/omap-mcbsp.c
index 6912ac7cb625..1046083e90a0 100644
--- a/sound/soc/omap/omap-mcbsp.c
+++ b/sound/soc/omap/omap-mcbsp.c
@@ -71,18 +71,17 @@ static void omap_mcbsp_set_threshold(struct snd_pcm_substream *substream)
71 71
72 dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream); 72 dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);
73 73
74 /* TODO: Currently, MODE_ELEMENT == MODE_FRAME */ 74 /*
75 if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD) 75 * Configure McBSP threshold based on either:
76 /* 76 * packet_size, when the sDMA is in packet mode, or based on the
77 * Configure McBSP threshold based on either: 77 * period size in THRESHOLD mode, otherwise use McBSP threshold = 1
78 * packet_size, when the sDMA is in packet mode, or 78 * for mono streams.
79 * based on the period size. 79 */
80 */ 80 if (dma_data->packet_size)
81 if (dma_data->packet_size) 81 words = dma_data->packet_size;
82 words = dma_data->packet_size; 82 else if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD)
83 else 83 words = snd_pcm_lib_period_bytes(substream) /
84 words = snd_pcm_lib_period_bytes(substream) / 84 (mcbsp->wlen / 8);
85 (mcbsp->wlen / 8);
86 else 85 else
87 words = 1; 86 words = 1;
88 87
@@ -139,13 +138,15 @@ static int omap_mcbsp_dai_startup(struct snd_pcm_substream *substream,
139 if (mcbsp->pdata->buffer_size) { 138 if (mcbsp->pdata->buffer_size) {
140 /* 139 /*
141 * Rule for the buffer size. We should not allow 140 * Rule for the buffer size. We should not allow
142 * smaller buffer than the FIFO size to avoid underruns 141 * smaller buffer than the FIFO size to avoid underruns.
142 * This applies only for the playback stream.
143 */ 143 */
144 snd_pcm_hw_rule_add(substream->runtime, 0, 144 if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
145 SNDRV_PCM_HW_PARAM_BUFFER_SIZE, 145 snd_pcm_hw_rule_add(substream->runtime, 0,
146 omap_mcbsp_hwrule_min_buffersize, 146 SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
147 mcbsp, 147 omap_mcbsp_hwrule_min_buffersize,
148 SNDRV_PCM_HW_PARAM_CHANNELS, -1); 148 mcbsp,
149 SNDRV_PCM_HW_PARAM_CHANNELS, -1);
149 150
150 /* Make sure, that the period size is always even */ 151 /* Make sure, that the period size is always even */
151 snd_pcm_hw_constraint_step(substream->runtime, 0, 152 snd_pcm_hw_constraint_step(substream->runtime, 0,
@@ -230,6 +231,7 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
230 unsigned int format, div, framesize, master; 231 unsigned int format, div, framesize, master;
231 232
232 dma_data = &mcbsp->dma_data[substream->stream]; 233 dma_data = &mcbsp->dma_data[substream->stream];
234 channels = params_channels(params);
233 235
234 switch (params_format(params)) { 236 switch (params_format(params)) {
235 case SNDRV_PCM_FORMAT_S16_LE: 237 case SNDRV_PCM_FORMAT_S16_LE:
@@ -245,7 +247,6 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
245 } 247 }
246 if (mcbsp->pdata->buffer_size) { 248 if (mcbsp->pdata->buffer_size) {
247 dma_data->set_threshold = omap_mcbsp_set_threshold; 249 dma_data->set_threshold = omap_mcbsp_set_threshold;
248 /* TODO: Currently, MODE_ELEMENT == MODE_FRAME */
249 if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD) { 250 if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD) {
250 int period_words, max_thrsh; 251 int period_words, max_thrsh;
251 252
@@ -283,6 +284,10 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
283 } else { 284 } else {
284 sync_mode = OMAP_DMA_SYNC_FRAME; 285 sync_mode = OMAP_DMA_SYNC_FRAME;
285 } 286 }
287 } else if (channels > 1) {
288 /* Use packet mode for non mono streams */
289 pkt_size = channels;
290 sync_mode = OMAP_DMA_SYNC_PACKET;
286 } 291 }
287 } 292 }
288 293
@@ -301,7 +306,7 @@ static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
301 regs->rcr1 &= ~(RFRLEN1(0x7f) | RWDLEN1(7)); 306 regs->rcr1 &= ~(RFRLEN1(0x7f) | RWDLEN1(7));
302 regs->xcr1 &= ~(XFRLEN1(0x7f) | XWDLEN1(7)); 307 regs->xcr1 &= ~(XFRLEN1(0x7f) | XWDLEN1(7));
303 format = mcbsp->fmt & SND_SOC_DAIFMT_FORMAT_MASK; 308 format = mcbsp->fmt & SND_SOC_DAIFMT_FORMAT_MASK;
304 wpf = channels = params_channels(params); 309 wpf = channels;
305 if (channels == 2 && (format == SND_SOC_DAIFMT_I2S || 310 if (channels == 2 && (format == SND_SOC_DAIFMT_I2S ||
306 format == SND_SOC_DAIFMT_LEFT_J)) { 311 format == SND_SOC_DAIFMT_LEFT_J)) {
307 /* Use dual-phase frames */ 312 /* Use dual-phase frames */
diff --git a/sound/soc/omap/omap-mcpdm.c b/sound/soc/omap/omap-mcpdm.c
index 39705561131a..59d47ab5b15d 100644
--- a/sound/soc/omap/omap-mcpdm.c
+++ b/sound/soc/omap/omap-mcpdm.c
@@ -33,6 +33,7 @@
33#include <linux/irq.h> 33#include <linux/irq.h>
34#include <linux/slab.h> 34#include <linux/slab.h>
35#include <linux/pm_runtime.h> 35#include <linux/pm_runtime.h>
36#include <linux/of_device.h>
36 37
37#include <sound/core.h> 38#include <sound/core.h>
38#include <sound/pcm.h> 39#include <sound/pcm.h>
@@ -507,10 +508,17 @@ static int __devexit asoc_mcpdm_remove(struct platform_device *pdev)
507 return 0; 508 return 0;
508} 509}
509 510
511static const struct of_device_id omap_mcpdm_of_match[] = {
512 { .compatible = "ti,omap4-mcpdm", },
513 { }
514};
515MODULE_DEVICE_TABLE(of, omap_mcpdm_of_match);
516
510static struct platform_driver asoc_mcpdm_driver = { 517static struct platform_driver asoc_mcpdm_driver = {
511 .driver = { 518 .driver = {
512 .name = "omap-mcpdm", 519 .name = "omap-mcpdm",
513 .owner = THIS_MODULE, 520 .owner = THIS_MODULE,
521 .of_match_table = omap_mcpdm_of_match,
514 }, 522 },
515 523
516 .probe = asoc_mcpdm_probe, 524 .probe = asoc_mcpdm_probe,
diff --git a/sound/soc/omap/omap4-hdmi-card.c b/sound/soc/omap/omap4-hdmi-card.c
deleted file mode 100644
index 28d689b2714d..000000000000
--- a/sound/soc/omap/omap4-hdmi-card.c
+++ /dev/null
@@ -1,121 +0,0 @@
1/*
2 * omap4-hdmi-card.c
3 *
4 * OMAP ALSA SoC machine driver for TI OMAP4 HDMI
5 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
6 * Author: Ricardo Neri <ricardo.neri@ti.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#include <linux/module.h>
25#include <sound/pcm.h>
26#include <sound/soc.h>
27#include <asm/mach-types.h>
28#include <video/omapdss.h>
29
30#define DRV_NAME "omap4-hdmi-audio"
31
32static int omap4_hdmi_dai_hw_params(struct snd_pcm_substream *substream,
33 struct snd_pcm_hw_params *params)
34{
35 int i;
36 struct omap_overlay_manager *mgr = NULL;
37 struct device *dev = substream->pcm->card->dev;
38
39 /* Find DSS HDMI device */
40 for (i = 0; i < omap_dss_get_num_overlay_managers(); i++) {
41 mgr = omap_dss_get_overlay_manager(i);
42 if (mgr && mgr->device
43 && mgr->device->type == OMAP_DISPLAY_TYPE_HDMI)
44 break;
45 }
46
47 if (i == omap_dss_get_num_overlay_managers()) {
48 dev_err(dev, "HDMI display device not found!\n");
49 return -ENODEV;
50 }
51
52 /* Make sure HDMI is power-on to avoid L3 interconnect errors */
53 if (mgr->device->state != OMAP_DSS_DISPLAY_ACTIVE) {
54 dev_err(dev, "HDMI display is not active!\n");
55 return -EIO;
56 }
57
58 return 0;
59}
60
61static struct snd_soc_ops omap4_hdmi_dai_ops = {
62 .hw_params = omap4_hdmi_dai_hw_params,
63};
64
65static struct snd_soc_dai_link omap4_hdmi_dai = {
66 .name = "HDMI",
67 .stream_name = "HDMI",
68 .cpu_dai_name = "hdmi-audio-dai",
69 .platform_name = "omap-pcm-audio",
70 .codec_name = "omapdss_hdmi",
71 .codec_dai_name = "hdmi-audio-codec",
72 .ops = &omap4_hdmi_dai_ops,
73};
74
75static struct snd_soc_card snd_soc_omap4_hdmi = {
76 .name = "OMAP4HDMI",
77 .owner = THIS_MODULE,
78 .dai_link = &omap4_hdmi_dai,
79 .num_links = 1,
80};
81
82static __devinit int omap4_hdmi_probe(struct platform_device *pdev)
83{
84 struct snd_soc_card *card = &snd_soc_omap4_hdmi;
85 int ret;
86
87 card->dev = &pdev->dev;
88
89 ret = snd_soc_register_card(card);
90 if (ret) {
91 dev_err(&pdev->dev, "snd_soc_register_card failed (%d)\n", ret);
92 card->dev = NULL;
93 return ret;
94 }
95 return 0;
96}
97
98static int __devexit omap4_hdmi_remove(struct platform_device *pdev)
99{
100 struct snd_soc_card *card = platform_get_drvdata(pdev);
101
102 snd_soc_unregister_card(card);
103 card->dev = NULL;
104 return 0;
105}
106
107static struct platform_driver omap4_hdmi_driver = {
108 .driver = {
109 .name = "omap4-hdmi-audio",
110 .owner = THIS_MODULE,
111 },
112 .probe = omap4_hdmi_probe,
113 .remove = __devexit_p(omap4_hdmi_remove),
114};
115
116module_platform_driver(omap4_hdmi_driver);
117
118MODULE_AUTHOR("Ricardo Neri <ricardo.neri@ti.com>");
119MODULE_DESCRIPTION("OMAP4 HDMI machine ASoC driver");
120MODULE_LICENSE("GPL");
121MODULE_ALIAS("platform:" DRV_NAME);
diff --git a/tools/vm/page-types.c b/tools/vm/page-types.c
index 7dab7b25b5c6..f576971f6556 100644
--- a/tools/vm/page-types.c
+++ b/tools/vm/page-types.c
@@ -35,6 +35,7 @@
35#include <sys/mount.h> 35#include <sys/mount.h>
36#include <sys/statfs.h> 36#include <sys/statfs.h>
37#include "../../include/linux/magic.h" 37#include "../../include/linux/magic.h"
38#include "../../include/linux/kernel-page-flags.h"
38 39
39 40
40#ifndef MAX_PATH 41#ifndef MAX_PATH
@@ -73,33 +74,6 @@
73#define KPF_BYTES 8 74#define KPF_BYTES 8
74#define PROC_KPAGEFLAGS "/proc/kpageflags" 75#define PROC_KPAGEFLAGS "/proc/kpageflags"
75 76
76/* copied from kpageflags_read() */
77#define KPF_LOCKED 0
78#define KPF_ERROR 1
79#define KPF_REFERENCED 2
80#define KPF_UPTODATE 3
81#define KPF_DIRTY 4
82#define KPF_LRU 5
83#define KPF_ACTIVE 6
84#define KPF_SLAB 7
85#define KPF_WRITEBACK 8
86#define KPF_RECLAIM 9
87#define KPF_BUDDY 10
88
89/* [11-20] new additions in 2.6.31 */
90#define KPF_MMAP 11
91#define KPF_ANON 12
92#define KPF_SWAPCACHE 13
93#define KPF_SWAPBACKED 14
94#define KPF_COMPOUND_HEAD 15
95#define KPF_COMPOUND_TAIL 16
96#define KPF_HUGE 17
97#define KPF_UNEVICTABLE 18
98#define KPF_HWPOISON 19
99#define KPF_NOPAGE 20
100#define KPF_KSM 21
101#define KPF_THP 22
102
103/* [32-] kernel hacking assistances */ 77/* [32-] kernel hacking assistances */
104#define KPF_RESERVED 32 78#define KPF_RESERVED 32
105#define KPF_MLOCKED 33 79#define KPF_MLOCKED 33
@@ -326,7 +300,7 @@ static char *page_flag_name(uint64_t flags)
326{ 300{
327 static char buf[65]; 301 static char buf[65];
328 int present; 302 int present;
329 int i, j; 303 size_t i, j;
330 304
331 for (i = 0, j = 0; i < ARRAY_SIZE(page_flag_names); i++) { 305 for (i = 0, j = 0; i < ARRAY_SIZE(page_flag_names); i++) {
332 present = (flags >> i) & 1; 306 present = (flags >> i) & 1;
@@ -344,7 +318,7 @@ static char *page_flag_name(uint64_t flags)
344static char *page_flag_longname(uint64_t flags) 318static char *page_flag_longname(uint64_t flags)
345{ 319{
346 static char buf[1024]; 320 static char buf[1024];
347 int i, n; 321 size_t i, n;
348 322
349 for (i = 0, n = 0; i < ARRAY_SIZE(page_flag_names); i++) { 323 for (i = 0, n = 0; i < ARRAY_SIZE(page_flag_names); i++) {
350 if (!page_flag_names[i]) 324 if (!page_flag_names[i])
@@ -402,7 +376,7 @@ static void show_page(unsigned long voffset,
402 376
403static void show_summary(void) 377static void show_summary(void)
404{ 378{
405 int i; 379 size_t i;
406 380
407 printf(" flags\tpage-count MB" 381 printf(" flags\tpage-count MB"
408 " symbolic-flags\t\t\tlong-symbolic-flags\n"); 382 " symbolic-flags\t\t\tlong-symbolic-flags\n");
@@ -500,7 +474,7 @@ static int debugfs_valid_mountpoint(const char *debugfs)
500/* find the path to the mounted debugfs */ 474/* find the path to the mounted debugfs */
501static const char *debugfs_find_mountpoint(void) 475static const char *debugfs_find_mountpoint(void)
502{ 476{
503 const char **ptr; 477 const char *const *ptr;
504 char type[100]; 478 char type[100];
505 FILE *fp; 479 FILE *fp;
506 480
@@ -537,7 +511,7 @@ static const char *debugfs_find_mountpoint(void)
537 511
538static void debugfs_mount(void) 512static void debugfs_mount(void)
539{ 513{
540 const char **ptr; 514 const char *const *ptr;
541 515
542 /* see if it's already mounted */ 516 /* see if it's already mounted */
543 if (debugfs_find_mountpoint()) 517 if (debugfs_find_mountpoint())
@@ -614,10 +588,10 @@ static int unpoison_page(unsigned long offset)
614 * page frame walker 588 * page frame walker
615 */ 589 */
616 590
617static int hash_slot(uint64_t flags) 591static size_t hash_slot(uint64_t flags)
618{ 592{
619 int k = HASH_KEY(flags); 593 size_t k = HASH_KEY(flags);
620 int i; 594 size_t i;
621 595
622 /* Explicitly reserve slot 0 for flags 0: the following logic 596 /* Explicitly reserve slot 0 for flags 0: the following logic
623 * cannot distinguish an unoccupied slot from slot (flags==0). 597 * cannot distinguish an unoccupied slot from slot (flags==0).
@@ -670,7 +644,7 @@ static void walk_pfn(unsigned long voffset,
670{ 644{
671 uint64_t buf[KPAGEFLAGS_BATCH]; 645 uint64_t buf[KPAGEFLAGS_BATCH];
672 unsigned long batch; 646 unsigned long batch;
673 long pages; 647 unsigned long pages;
674 unsigned long i; 648 unsigned long i;
675 649
676 while (count) { 650 while (count) {
@@ -779,7 +753,7 @@ static const char *page_flag_type(uint64_t flag)
779 753
780static void usage(void) 754static void usage(void)
781{ 755{
782 int i, j; 756 size_t i, j;
783 757
784 printf( 758 printf(
785"page-types [options]\n" 759"page-types [options]\n"
@@ -938,7 +912,7 @@ static void add_bits_filter(uint64_t mask, uint64_t bits)
938 912
939static uint64_t parse_flag_name(const char *str, int len) 913static uint64_t parse_flag_name(const char *str, int len)
940{ 914{
941 int i; 915 size_t i;
942 916
943 if (!*str || !len) 917 if (!*str || !len)
944 return 0; 918 return 0;
diff --git a/virt/kvm/Kconfig b/virt/kvm/Kconfig
index f63ccb0a5982..28694f4a9139 100644
--- a/virt/kvm/Kconfig
+++ b/virt/kvm/Kconfig
@@ -18,3 +18,6 @@ config KVM_MMIO
18 18
19config KVM_ASYNC_PF 19config KVM_ASYNC_PF
20 bool 20 bool
21
22config HAVE_KVM_MSI
23 bool
diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c
index dcaf272c26c0..26fd54dc459e 100644
--- a/virt/kvm/ioapic.c
+++ b/virt/kvm/ioapic.c
@@ -254,13 +254,17 @@ static void __kvm_ioapic_update_eoi(struct kvm_ioapic *ioapic, int vector,
254 } 254 }
255} 255}
256 256
257bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector)
258{
259 struct kvm_ioapic *ioapic = kvm->arch.vioapic;
260 smp_rmb();
261 return test_bit(vector, ioapic->handled_vectors);
262}
263
257void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode) 264void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode)
258{ 265{
259 struct kvm_ioapic *ioapic = kvm->arch.vioapic; 266 struct kvm_ioapic *ioapic = kvm->arch.vioapic;
260 267
261 smp_rmb();
262 if (!test_bit(vector, ioapic->handled_vectors))
263 return;
264 spin_lock(&ioapic->lock); 268 spin_lock(&ioapic->lock);
265 __kvm_ioapic_update_eoi(ioapic, vector, trigger_mode); 269 __kvm_ioapic_update_eoi(ioapic, vector, trigger_mode);
266 spin_unlock(&ioapic->lock); 270 spin_unlock(&ioapic->lock);
diff --git a/virt/kvm/ioapic.h b/virt/kvm/ioapic.h
index 0b190c34ccc3..32872a09b63f 100644
--- a/virt/kvm/ioapic.h
+++ b/virt/kvm/ioapic.h
@@ -71,6 +71,7 @@ int kvm_apic_match_dest(struct kvm_vcpu *vcpu, struct kvm_lapic *source,
71 int short_hand, int dest, int dest_mode); 71 int short_hand, int dest, int dest_mode);
72int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2); 72int kvm_apic_compare_prio(struct kvm_vcpu *vcpu1, struct kvm_vcpu *vcpu2);
73void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode); 73void kvm_ioapic_update_eoi(struct kvm *kvm, int vector, int trigger_mode);
74bool kvm_ioapic_handles_vector(struct kvm *kvm, int vector);
74int kvm_ioapic_init(struct kvm *kvm); 75int kvm_ioapic_init(struct kvm *kvm);
75void kvm_ioapic_destroy(struct kvm *kvm); 76void kvm_ioapic_destroy(struct kvm *kvm);
76int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level); 77int kvm_ioapic_set_irq(struct kvm_ioapic *ioapic, int irq, int level);
diff --git a/virt/kvm/irq_comm.c b/virt/kvm/irq_comm.c
index 9f614b4e365f..a6a0365475ed 100644
--- a/virt/kvm/irq_comm.c
+++ b/virt/kvm/irq_comm.c
@@ -138,6 +138,20 @@ int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e,
138 return kvm_irq_delivery_to_apic(kvm, NULL, &irq); 138 return kvm_irq_delivery_to_apic(kvm, NULL, &irq);
139} 139}
140 140
141int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi)
142{
143 struct kvm_kernel_irq_routing_entry route;
144
145 if (!irqchip_in_kernel(kvm) || msi->flags != 0)
146 return -EINVAL;
147
148 route.msi.address_lo = msi->address_lo;
149 route.msi.address_hi = msi->address_hi;
150 route.msi.data = msi->data;
151
152 return kvm_set_msi(&route, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
153}
154
141/* 155/*
142 * Return value: 156 * Return value:
143 * < 0 Interrupt was ignored (masked or not delivered for other reasons) 157 * < 0 Interrupt was ignored (masked or not delivered for other reasons)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9739b533ca2e..7e140683ff14 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -522,12 +522,11 @@ static void kvm_destroy_dirty_bitmap(struct kvm_memory_slot *memslot)
522 return; 522 return;
523 523
524 if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE) 524 if (2 * kvm_dirty_bitmap_bytes(memslot) > PAGE_SIZE)
525 vfree(memslot->dirty_bitmap_head); 525 vfree(memslot->dirty_bitmap);
526 else 526 else
527 kfree(memslot->dirty_bitmap_head); 527 kfree(memslot->dirty_bitmap);
528 528
529 memslot->dirty_bitmap = NULL; 529 memslot->dirty_bitmap = NULL;
530 memslot->dirty_bitmap_head = NULL;
531} 530}
532 531
533/* 532/*
@@ -611,8 +610,7 @@ static int kvm_vm_release(struct inode *inode, struct file *filp)
611 610
612/* 611/*
613 * Allocation size is twice as large as the actual dirty bitmap size. 612 * Allocation size is twice as large as the actual dirty bitmap size.
614 * This makes it possible to do double buffering: see x86's 613 * See x86's kvm_vm_ioctl_get_dirty_log() why this is needed.
615 * kvm_vm_ioctl_get_dirty_log().
616 */ 614 */
617static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot) 615static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
618{ 616{
@@ -627,8 +625,6 @@ static int kvm_create_dirty_bitmap(struct kvm_memory_slot *memslot)
627 if (!memslot->dirty_bitmap) 625 if (!memslot->dirty_bitmap)
628 return -ENOMEM; 626 return -ENOMEM;
629 627
630 memslot->dirty_bitmap_head = memslot->dirty_bitmap;
631 memslot->nr_dirty_pages = 0;
632#endif /* !CONFIG_S390 */ 628#endif /* !CONFIG_S390 */
633 return 0; 629 return 0;
634} 630}
@@ -1477,8 +1473,8 @@ void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot,
1477 if (memslot && memslot->dirty_bitmap) { 1473 if (memslot && memslot->dirty_bitmap) {
1478 unsigned long rel_gfn = gfn - memslot->base_gfn; 1474 unsigned long rel_gfn = gfn - memslot->base_gfn;
1479 1475
1480 if (!test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap)) 1476 /* TODO: introduce set_bit_le() and use it */
1481 memslot->nr_dirty_pages++; 1477 test_and_set_bit_le(rel_gfn, memslot->dirty_bitmap);
1482 } 1478 }
1483} 1479}
1484 1480
@@ -1515,6 +1511,30 @@ void kvm_vcpu_block(struct kvm_vcpu *vcpu)
1515 finish_wait(&vcpu->wq, &wait); 1511 finish_wait(&vcpu->wq, &wait);
1516} 1512}
1517 1513
1514#ifndef CONFIG_S390
1515/*
1516 * Kick a sleeping VCPU, or a guest VCPU in guest mode, into host kernel mode.
1517 */
1518void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
1519{
1520 int me;
1521 int cpu = vcpu->cpu;
1522 wait_queue_head_t *wqp;
1523
1524 wqp = kvm_arch_vcpu_wq(vcpu);
1525 if (waitqueue_active(wqp)) {
1526 wake_up_interruptible(wqp);
1527 ++vcpu->stat.halt_wakeup;
1528 }
1529
1530 me = get_cpu();
1531 if (cpu != me && (unsigned)cpu < nr_cpu_ids && cpu_online(cpu))
1532 if (kvm_arch_vcpu_should_kick(vcpu))
1533 smp_send_reschedule(cpu);
1534 put_cpu();
1535}
1536#endif /* !CONFIG_S390 */
1537
1518void kvm_resched(struct kvm_vcpu *vcpu) 1538void kvm_resched(struct kvm_vcpu *vcpu)
1519{ 1539{
1520 if (!need_resched()) 1540 if (!need_resched())
@@ -1523,6 +1543,31 @@ void kvm_resched(struct kvm_vcpu *vcpu)
1523} 1543}
1524EXPORT_SYMBOL_GPL(kvm_resched); 1544EXPORT_SYMBOL_GPL(kvm_resched);
1525 1545
1546bool kvm_vcpu_yield_to(struct kvm_vcpu *target)
1547{
1548 struct pid *pid;
1549 struct task_struct *task = NULL;
1550
1551 rcu_read_lock();
1552 pid = rcu_dereference(target->pid);
1553 if (pid)
1554 task = get_pid_task(target->pid, PIDTYPE_PID);
1555 rcu_read_unlock();
1556 if (!task)
1557 return false;
1558 if (task->flags & PF_VCPU) {
1559 put_task_struct(task);
1560 return false;
1561 }
1562 if (yield_to(task, 1)) {
1563 put_task_struct(task);
1564 return true;
1565 }
1566 put_task_struct(task);
1567 return false;
1568}
1569EXPORT_SYMBOL_GPL(kvm_vcpu_yield_to);
1570
1526void kvm_vcpu_on_spin(struct kvm_vcpu *me) 1571void kvm_vcpu_on_spin(struct kvm_vcpu *me)
1527{ 1572{
1528 struct kvm *kvm = me->kvm; 1573 struct kvm *kvm = me->kvm;
@@ -1541,8 +1586,6 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
1541 */ 1586 */
1542 for (pass = 0; pass < 2 && !yielded; pass++) { 1587 for (pass = 0; pass < 2 && !yielded; pass++) {
1543 kvm_for_each_vcpu(i, vcpu, kvm) { 1588 kvm_for_each_vcpu(i, vcpu, kvm) {
1544 struct task_struct *task = NULL;
1545 struct pid *pid;
1546 if (!pass && i < last_boosted_vcpu) { 1589 if (!pass && i < last_boosted_vcpu) {
1547 i = last_boosted_vcpu; 1590 i = last_boosted_vcpu;
1548 continue; 1591 continue;
@@ -1552,24 +1595,11 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *me)
1552 continue; 1595 continue;
1553 if (waitqueue_active(&vcpu->wq)) 1596 if (waitqueue_active(&vcpu->wq))
1554 continue; 1597 continue;
1555 rcu_read_lock(); 1598 if (kvm_vcpu_yield_to(vcpu)) {
1556 pid = rcu_dereference(vcpu->pid);
1557 if (pid)
1558 task = get_pid_task(vcpu->pid, PIDTYPE_PID);
1559 rcu_read_unlock();
1560 if (!task)
1561 continue;
1562 if (task->flags & PF_VCPU) {
1563 put_task_struct(task);
1564 continue;
1565 }
1566 if (yield_to(task, 1)) {
1567 put_task_struct(task);
1568 kvm->last_boosted_vcpu = i; 1599 kvm->last_boosted_vcpu = i;
1569 yielded = 1; 1600 yielded = 1;
1570 break; 1601 break;
1571 } 1602 }
1572 put_task_struct(task);
1573 } 1603 }
1574 } 1604 }
1575} 1605}
@@ -2040,6 +2070,17 @@ static long kvm_vm_ioctl(struct file *filp,
2040 mutex_unlock(&kvm->lock); 2070 mutex_unlock(&kvm->lock);
2041 break; 2071 break;
2042#endif 2072#endif
2073#ifdef CONFIG_HAVE_KVM_MSI
2074 case KVM_SIGNAL_MSI: {
2075 struct kvm_msi msi;
2076
2077 r = -EFAULT;
2078 if (copy_from_user(&msi, argp, sizeof msi))
2079 goto out;
2080 r = kvm_send_userspace_msi(kvm, &msi);
2081 break;
2082 }
2083#endif
2043 default: 2084 default:
2044 r = kvm_arch_vm_ioctl(filp, ioctl, arg); 2085 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
2045 if (r == -ENOTTY) 2086 if (r == -ENOTTY)
@@ -2168,6 +2209,9 @@ static long kvm_dev_ioctl_check_extension_generic(long arg)
2168 case KVM_CAP_SET_BOOT_CPU_ID: 2209 case KVM_CAP_SET_BOOT_CPU_ID:
2169#endif 2210#endif
2170 case KVM_CAP_INTERNAL_ERROR_DATA: 2211 case KVM_CAP_INTERNAL_ERROR_DATA:
2212#ifdef CONFIG_HAVE_KVM_MSI
2213 case KVM_CAP_SIGNAL_MSI:
2214#endif
2171 return 1; 2215 return 1;
2172#ifdef CONFIG_HAVE_KVM_IRQCHIP 2216#ifdef CONFIG_HAVE_KVM_IRQCHIP
2173 case KVM_CAP_IRQ_ROUTING: 2217 case KVM_CAP_IRQ_ROUTING:
@@ -2394,9 +2438,6 @@ int kvm_io_bus_sort_cmp(const void *p1, const void *p2)
2394int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev, 2438int kvm_io_bus_insert_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev,
2395 gpa_t addr, int len) 2439 gpa_t addr, int len)
2396{ 2440{
2397 if (bus->dev_count == NR_IOBUS_DEVS)
2398 return -ENOSPC;
2399
2400 bus->range[bus->dev_count++] = (struct kvm_io_range) { 2441 bus->range[bus->dev_count++] = (struct kvm_io_range) {
2401 .addr = addr, 2442 .addr = addr,
2402 .len = len, 2443 .len = len,
@@ -2496,12 +2537,15 @@ int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
2496 struct kvm_io_bus *new_bus, *bus; 2537 struct kvm_io_bus *new_bus, *bus;
2497 2538
2498 bus = kvm->buses[bus_idx]; 2539 bus = kvm->buses[bus_idx];
2499 if (bus->dev_count > NR_IOBUS_DEVS-1) 2540 if (bus->dev_count > NR_IOBUS_DEVS - 1)
2500 return -ENOSPC; 2541 return -ENOSPC;
2501 2542
2502 new_bus = kmemdup(bus, sizeof(struct kvm_io_bus), GFP_KERNEL); 2543 new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count + 1) *
2544 sizeof(struct kvm_io_range)), GFP_KERNEL);
2503 if (!new_bus) 2545 if (!new_bus)
2504 return -ENOMEM; 2546 return -ENOMEM;
2547 memcpy(new_bus, bus, sizeof(*bus) + (bus->dev_count *
2548 sizeof(struct kvm_io_range)));
2505 kvm_io_bus_insert_dev(new_bus, dev, addr, len); 2549 kvm_io_bus_insert_dev(new_bus, dev, addr, len);
2506 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 2550 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
2507 synchronize_srcu_expedited(&kvm->srcu); 2551 synchronize_srcu_expedited(&kvm->srcu);
@@ -2518,27 +2562,25 @@ int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
2518 struct kvm_io_bus *new_bus, *bus; 2562 struct kvm_io_bus *new_bus, *bus;
2519 2563
2520 bus = kvm->buses[bus_idx]; 2564 bus = kvm->buses[bus_idx];
2521
2522 new_bus = kmemdup(bus, sizeof(*bus), GFP_KERNEL);
2523 if (!new_bus)
2524 return -ENOMEM;
2525
2526 r = -ENOENT; 2565 r = -ENOENT;
2527 for (i = 0; i < new_bus->dev_count; i++) 2566 for (i = 0; i < bus->dev_count; i++)
2528 if (new_bus->range[i].dev == dev) { 2567 if (bus->range[i].dev == dev) {
2529 r = 0; 2568 r = 0;
2530 new_bus->dev_count--;
2531 new_bus->range[i] = new_bus->range[new_bus->dev_count];
2532 sort(new_bus->range, new_bus->dev_count,
2533 sizeof(struct kvm_io_range),
2534 kvm_io_bus_sort_cmp, NULL);
2535 break; 2569 break;
2536 } 2570 }
2537 2571
2538 if (r) { 2572 if (r)
2539 kfree(new_bus);
2540 return r; 2573 return r;
2541 } 2574
2575 new_bus = kzalloc(sizeof(*bus) + ((bus->dev_count - 1) *
2576 sizeof(struct kvm_io_range)), GFP_KERNEL);
2577 if (!new_bus)
2578 return -ENOMEM;
2579
2580 memcpy(new_bus, bus, sizeof(*bus) + i * sizeof(struct kvm_io_range));
2581 new_bus->dev_count--;
2582 memcpy(new_bus->range + i, bus->range + i + 1,
2583 (new_bus->dev_count - i) * sizeof(struct kvm_io_range));
2542 2584
2543 rcu_assign_pointer(kvm->buses[bus_idx], new_bus); 2585 rcu_assign_pointer(kvm->buses[bus_idx], new_bus);
2544 synchronize_srcu_expedited(&kvm->srcu); 2586 synchronize_srcu_expedited(&kvm->srcu);